duke@435: /* jwilhelm@4576: * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. duke@435: * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. duke@435: * duke@435: * This code is free software; you can redistribute it and/or modify it duke@435: * under the terms of the GNU General Public License version 2 only, as duke@435: * published by the Free Software Foundation. duke@435: * duke@435: * This code is distributed in the hope that it will be useful, but WITHOUT duke@435: * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or duke@435: * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License duke@435: * version 2 for more details (a copy is included in the LICENSE file that duke@435: * accompanied this code). duke@435: * duke@435: * You should have received a copy of the GNU General Public License version duke@435: * 2 along with this work; if not, write to the Free Software Foundation, duke@435: * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. duke@435: * trims@1907: * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA trims@1907: * or visit www.oracle.com if you need additional information or have any trims@1907: * questions. duke@435: * duke@435: */ duke@435: stefank@2314: #ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP stefank@2314: #define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP stefank@2314: sla@5237: #include "gc_implementation/shared/gcHeapSummary.hpp" stefank@2314: #include "gc_implementation/shared/gSpaceCounters.hpp" stefank@2314: #include "gc_implementation/shared/gcStats.hpp" sla@5237: #include "gc_implementation/shared/gcWhen.hpp" stefank@2314: #include "gc_implementation/shared/generationCounters.hpp" jmasa@3730: #include "memory/freeBlockDictionary.hpp" stefank@2314: #include "memory/generation.hpp" stefank@6992: #include "memory/iterator.hpp" stefank@2314: #include "runtime/mutexLocker.hpp" stefank@2314: #include "runtime/virtualspace.hpp" stefank@2314: #include "services/memoryService.hpp" stefank@2314: #include "utilities/bitMap.inline.hpp" stefank@2314: #include "utilities/stack.inline.hpp" stefank@2314: #include "utilities/taskqueue.hpp" stefank@2314: #include "utilities/yieldingWorkgroup.hpp" stefank@2314: duke@435: // ConcurrentMarkSweepGeneration is in support of a concurrent duke@435: // mark-sweep old generation in the Detlefs-Printezis--Boehm-Demers-Schenker duke@435: // style. We assume, for now, that this generation is always the coleenp@4037: // seniormost generation and for simplicity duke@435: // in the first implementation, that this generation is a single compactible duke@435: // space. Neither of these restrictions appears essential, and will be duke@435: // relaxed in the future when more time is available to implement the duke@435: // greater generality (and there's a need for it). duke@435: // duke@435: // Concurrent mode failures are currently handled by duke@435: // means of a sliding mark-compact. 
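// Illustrative note (not part of the original header): the collector declared
// here is selected and tuned with HotSpot command-line flags, several of which
// are referenced further down in this file (e.g. ExplicitGCInvokesConcurrent,
// CMSIncrementalMode, ConcGCThreads). A typical invocation might look like
//
//   java -XX:+UseConcMarkSweepGC \
//        -XX:CMSInitiatingOccupancyFraction=75 -XX:+UseCMSInitiatingOccupancyOnly \
//        -XX:ConcGCThreads=2 ...
//
// The exact flag set above is illustrative only; see the flag definitions
// elsewhere in HotSpot for the authoritative names and defaults.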
duke@435: duke@435: class CMSAdaptiveSizePolicy; duke@435: class CMSConcMarkingTask; duke@435: class CMSGCAdaptivePolicyCounters; sla@5237: class CMSTracer; sla@5237: class ConcurrentGCTimer; duke@435: class ConcurrentMarkSweepGeneration; duke@435: class ConcurrentMarkSweepPolicy; duke@435: class ConcurrentMarkSweepThread; duke@435: class CompactibleFreeListSpace; duke@435: class FreeChunk; duke@435: class PromotionInfo; duke@435: class ScanMarkedObjectsAgainCarefullyClosure; jmasa@4900: class TenuredGeneration; sla@5237: class SerialOldTracer; duke@435: duke@435: // A generic CMS bit map. It's the basis for both the CMS marking bit map duke@435: // as well as for the mod union table (in each case only a subset of the duke@435: // methods are used). This is essentially a wrapper around the BitMap class, duke@435: // with one bit per (1<<_shifter) HeapWords. (i.e. for the marking bit map, duke@435: // we have _shifter == 0. and for the mod union table we have duke@435: // shifter == CardTableModRefBS::card_shift - LogHeapWordSize.) duke@435: // XXX 64-bit issues in BitMap? duke@435: class CMSBitMap VALUE_OBJ_CLASS_SPEC { duke@435: friend class VMStructs; duke@435: duke@435: HeapWord* _bmStartWord; // base address of range covered by map duke@435: size_t _bmWordSize; // map size (in #HeapWords covered) duke@435: const int _shifter; // shifts to convert HeapWord to bit position duke@435: VirtualSpace _virtual_space; // underlying the bit map duke@435: BitMap _bm; // the bit map itself duke@435: public: duke@435: Mutex* const _lock; // mutex protecting _bm; duke@435: duke@435: public: duke@435: // constructor duke@435: CMSBitMap(int shifter, int mutex_rank, const char* mutex_name); duke@435: duke@435: // allocates the actual storage for the map duke@435: bool allocate(MemRegion mr); duke@435: // field getter duke@435: Mutex* lock() const { return _lock; } duke@435: // locking verifier convenience function duke@435: void assert_locked() const PRODUCT_RETURN; duke@435: duke@435: // inquiries duke@435: HeapWord* startWord() const { return _bmStartWord; } duke@435: size_t sizeInWords() const { return _bmWordSize; } duke@435: size_t sizeInBits() const { return _bm.size(); } duke@435: // the following is one past the last word in space duke@435: HeapWord* endWord() const { return _bmStartWord + _bmWordSize; } duke@435: duke@435: // reading marks duke@435: bool isMarked(HeapWord* addr) const; duke@435: bool par_isMarked(HeapWord* addr) const; // do not lock checks duke@435: bool isUnmarked(HeapWord* addr) const; duke@435: bool isAllClear() const; duke@435: duke@435: // writing marks duke@435: void mark(HeapWord* addr); duke@435: // For marking by parallel GC threads; duke@435: // returns true if we did, false if another thread did duke@435: bool par_mark(HeapWord* addr); duke@435: duke@435: void mark_range(MemRegion mr); duke@435: void par_mark_range(MemRegion mr); duke@435: void mark_large_range(MemRegion mr); duke@435: void par_mark_large_range(MemRegion mr); duke@435: void par_clear(HeapWord* addr); // For unmarking by parallel GC threads. duke@435: void clear_range(MemRegion mr); duke@435: void par_clear_range(MemRegion mr); duke@435: void clear_large_range(MemRegion mr); duke@435: void par_clear_large_range(MemRegion mr); duke@435: void clear_all(); duke@435: void clear_all_incrementally(); // Not yet implemented!! 
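  // Illustrative sketch (not part of the original header): with one bit per
  // (1 << _shifter) HeapWords, the address <-> bit conversions declared below
  // presumably reduce to simple shifts relative to _bmStartWord, e.g.
  //
  //   size_t heapWordToOffset(HeapWord* addr) const {
  //     return pointer_delta(addr, _bmStartWord) >> _shifter;
  //   }
  //   HeapWord* offsetToHeapWord(size_t offset) const {
  //     return _bmStartWord + (offset << _shifter);
  //   }
  //
  // For the marking bit map (_shifter == 0) this is one bit per HeapWord; for
  // the mod union table on an LP64 VM (card_shift == 9, LogHeapWordSize == 3)
  // it is one bit per 64 HeapWords, i.e. one bit per 512-byte card.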
duke@435: duke@435: NOT_PRODUCT( duke@435: // checks the memory region for validity duke@435: void region_invariant(MemRegion mr); duke@435: ) duke@435: duke@435: // iteration duke@435: void iterate(BitMapClosure* cl) { duke@435: _bm.iterate(cl); duke@435: } duke@435: void iterate(BitMapClosure* cl, HeapWord* left, HeapWord* right); duke@435: void dirty_range_iterate_clear(MemRegionClosure* cl); duke@435: void dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl); duke@435: duke@435: // auxiliary support for iteration duke@435: HeapWord* getNextMarkedWordAddress(HeapWord* addr) const; duke@435: HeapWord* getNextMarkedWordAddress(HeapWord* start_addr, duke@435: HeapWord* end_addr) const; duke@435: HeapWord* getNextUnmarkedWordAddress(HeapWord* addr) const; duke@435: HeapWord* getNextUnmarkedWordAddress(HeapWord* start_addr, duke@435: HeapWord* end_addr) const; duke@435: MemRegion getAndClearMarkedRegion(HeapWord* addr); duke@435: MemRegion getAndClearMarkedRegion(HeapWord* start_addr, duke@435: HeapWord* end_addr); duke@435: duke@435: // conversion utilities duke@435: HeapWord* offsetToHeapWord(size_t offset) const; duke@435: size_t heapWordToOffset(HeapWord* addr) const; duke@435: size_t heapWordDiffToOffsetDiff(size_t diff) const; duke@435: stefank@4904: void print_on_error(outputStream* st, const char* prefix) const; stefank@4904: duke@435: // debugging duke@435: // is this address range covered by the bit-map? duke@435: NOT_PRODUCT( duke@435: bool covers(MemRegion mr) const; duke@435: bool covers(HeapWord* start, size_t size = 0) const; duke@435: ) duke@435: void verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) PRODUCT_RETURN; duke@435: }; duke@435: duke@435: // Represents a marking stack used by the CMS collector. duke@435: // Ideally this should be GrowableArray<> just like MSC's marking stack(s). 
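// Illustrative usage sketch (not part of the original header): clients that
// push grey objects must cope with a full stack, typically by expanding and
// retrying, or by falling back to some other overflow mechanism:
//
//   if (!_markStack.push(obj)) {
//     _markStack.expand();              // may or may not succeed in growing
//     if (!_markStack.push(obj)) {
//       // stack still full -- caller must handle the overflow another way
//     }
//   }
//
// Parallel marking code uses par_push()/par_pop(), which simply take _par_lock
// around the sequential operations.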
zgu@3900: class CMSMarkStack: public CHeapObj { duke@435: // duke@435: friend class CMSCollector; // to get at expasion stats further below duke@435: // duke@435: duke@435: VirtualSpace _virtual_space; // space for the stack duke@435: oop* _base; // bottom of stack duke@435: size_t _index; // one more than last occupied index duke@435: size_t _capacity; // max #elements duke@435: Mutex _par_lock; // an advisory lock used in case of parallel access duke@435: NOT_PRODUCT(size_t _max_depth;) // max depth plumbed during run duke@435: duke@435: protected: duke@435: size_t _hit_limit; // we hit max stack size limit duke@435: size_t _failed_double; // we failed expansion before hitting limit duke@435: duke@435: public: duke@435: CMSMarkStack(): duke@435: _par_lock(Mutex::event, "CMSMarkStack._par_lock", true), duke@435: _hit_limit(0), duke@435: _failed_double(0) {} duke@435: duke@435: bool allocate(size_t size); duke@435: duke@435: size_t capacity() const { return _capacity; } duke@435: duke@435: oop pop() { duke@435: if (!isEmpty()) { duke@435: return _base[--_index] ; duke@435: } duke@435: return NULL; duke@435: } duke@435: duke@435: bool push(oop ptr) { duke@435: if (isFull()) { duke@435: return false; duke@435: } else { duke@435: _base[_index++] = ptr; duke@435: NOT_PRODUCT(_max_depth = MAX2(_max_depth, _index)); duke@435: return true; duke@435: } duke@435: } duke@435: duke@435: bool isEmpty() const { return _index == 0; } duke@435: bool isFull() const { duke@435: assert(_index <= _capacity, "buffer overflow"); duke@435: return _index == _capacity; duke@435: } duke@435: duke@435: size_t length() { return _index; } duke@435: duke@435: // "Parallel versions" of some of the above duke@435: oop par_pop() { duke@435: // lock and pop duke@435: MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag); duke@435: return pop(); duke@435: } duke@435: duke@435: bool par_push(oop ptr) { duke@435: // lock and push duke@435: MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag); duke@435: return push(ptr); duke@435: } duke@435: duke@435: // Forcibly reset the stack, losing all of its contents. duke@435: void reset() { duke@435: _index = 0; duke@435: } duke@435: duke@435: // Expand the stack, typically in response to an overflow condition duke@435: void expand(); duke@435: duke@435: // Compute the least valued stack element. duke@435: oop least_value(HeapWord* low) { duke@435: oop least = (oop)low; duke@435: for (size_t i = 0; i < _index; i++) { duke@435: least = MIN2(least, _base[i]); duke@435: } duke@435: return least; duke@435: } duke@435: duke@435: // Exposed here to allow stack expansion in || case duke@435: Mutex* par_lock() { return &_par_lock; } duke@435: }; duke@435: duke@435: class CardTableRS; duke@435: class CMSParGCThreadState; duke@435: duke@435: class ModUnionClosure: public MemRegionClosure { duke@435: protected: duke@435: CMSBitMap* _t; duke@435: public: duke@435: ModUnionClosure(CMSBitMap* t): _t(t) { } duke@435: void do_MemRegion(MemRegion mr); duke@435: }; duke@435: duke@435: class ModUnionClosurePar: public ModUnionClosure { duke@435: public: duke@435: ModUnionClosurePar(CMSBitMap* t): ModUnionClosure(t) { } duke@435: void do_MemRegion(MemRegion mr); duke@435: }; duke@435: duke@435: // Survivor Chunk Array in support of parallelization of duke@435: // Survivor Space rescan. 
zgu@3900: class ChunkArray: public CHeapObj { duke@435: size_t _index; duke@435: size_t _capacity; ysr@2108: size_t _overflows; duke@435: HeapWord** _array; // storage for array duke@435: duke@435: public: ysr@2108: ChunkArray() : _index(0), _capacity(0), _overflows(0), _array(NULL) {} duke@435: ChunkArray(HeapWord** a, size_t c): ysr@2108: _index(0), _capacity(c), _overflows(0), _array(a) {} duke@435: duke@435: HeapWord** array() { return _array; } duke@435: void set_array(HeapWord** a) { _array = a; } duke@435: duke@435: size_t capacity() { return _capacity; } duke@435: void set_capacity(size_t c) { _capacity = c; } duke@435: duke@435: size_t end() { ysr@2108: assert(_index <= capacity(), ysr@2108: err_msg("_index (" SIZE_FORMAT ") > _capacity (" SIZE_FORMAT "): out of bounds", ysr@2108: _index, _capacity)); duke@435: return _index; duke@435: } // exclusive duke@435: duke@435: HeapWord* nth(size_t n) { duke@435: assert(n < end(), "Out of bounds access"); duke@435: return _array[n]; duke@435: } duke@435: duke@435: void reset() { duke@435: _index = 0; ysr@2108: if (_overflows > 0 && PrintCMSStatistics > 1) { ysr@2108: warning("CMS: ChunkArray[" SIZE_FORMAT "] overflowed " SIZE_FORMAT " times", ysr@2108: _capacity, _overflows); ysr@2108: } ysr@2108: _overflows = 0; duke@435: } duke@435: duke@435: void record_sample(HeapWord* p, size_t sz) { duke@435: // For now we do not do anything with the size duke@435: if (_index < _capacity) { duke@435: _array[_index++] = p; ysr@2108: } else { ysr@2108: ++_overflows; ysr@2108: assert(_index == _capacity, ysr@2108: err_msg("_index (" SIZE_FORMAT ") > _capacity (" SIZE_FORMAT ysr@2108: "): out of bounds at overflow#" SIZE_FORMAT, ysr@2108: _index, _capacity, _overflows)); duke@435: } duke@435: } duke@435: }; duke@435: duke@435: // duke@435: // Timing, allocation and promotion statistics for gc scheduling and incremental duke@435: // mode pacing. Most statistics are exponential averages. duke@435: // duke@435: class CMSStats VALUE_OBJ_CLASS_SPEC { duke@435: private: duke@435: ConcurrentMarkSweepGeneration* const _cms_gen; // The cms (old) gen. duke@435: duke@435: // The following are exponential averages with factor alpha: duke@435: // avg = (100 - alpha) * avg + alpha * cur_sample duke@435: // duke@435: // The durations measure: end_time[n] - start_time[n] duke@435: // The periods measure: start_time[n] - start_time[n-1] duke@435: // duke@435: // The cms period and duration include only concurrent collections; time spent duke@435: // in foreground cms collections due to System.gc() or because of a failure to duke@435: // keep up are not included. duke@435: // duke@435: // There are 3 alphas to "bootstrap" the statistics. The _saved_alpha is the duke@435: // real value, but is used only after the first period. A value of 100 is duke@435: // used for the first sample so it gets the entire weight. duke@435: unsigned int _saved_alpha; // 0-100 duke@435: unsigned int _gc0_alpha; duke@435: unsigned int _cms_alpha; duke@435: duke@435: double _gc0_duration; duke@435: double _gc0_period; duke@435: size_t _gc0_promoted; // bytes promoted per gc0 duke@435: double _cms_duration; duke@435: double _cms_duration_pre_sweep; // time from initiation to start of sweep duke@435: double _cms_duration_per_mb; duke@435: double _cms_period; duke@435: size_t _cms_allocated; // bytes of direct allocation per gc0 period duke@435: duke@435: // Timers. 
duke@435: elapsedTimer _cms_timer; duke@435: TimeStamp _gc0_begin_time; duke@435: TimeStamp _cms_begin_time; duke@435: TimeStamp _cms_end_time; duke@435: duke@435: // Snapshots of the amount used in the CMS generation. duke@435: size_t _cms_used_at_gc0_begin; duke@435: size_t _cms_used_at_gc0_end; duke@435: size_t _cms_used_at_cms_begin; duke@435: duke@435: // Used to prevent the duty cycle from being reduced in the middle of a cms duke@435: // cycle. duke@435: bool _allow_duty_cycle_reduction; duke@435: duke@435: enum { duke@435: _GC0_VALID = 0x1, duke@435: _CMS_VALID = 0x2, duke@435: _ALL_VALID = _GC0_VALID | _CMS_VALID duke@435: }; duke@435: duke@435: unsigned int _valid_bits; duke@435: duke@435: unsigned int _icms_duty_cycle; // icms duty cycle (0-100). duke@435: duke@435: protected: duke@435: duke@435: // Return a duty cycle that avoids wild oscillations, by limiting the amount duke@435: // of change between old_duty_cycle and new_duty_cycle (the latter is treated duke@435: // as a recommended value). duke@435: static unsigned int icms_damped_duty_cycle(unsigned int old_duty_cycle, duke@435: unsigned int new_duty_cycle); duke@435: unsigned int icms_update_duty_cycle_impl(); duke@435: ysr@1580: // In support of adjusting of cms trigger ratios based on history ysr@1580: // of concurrent mode failure. ysr@1580: double cms_free_adjustment_factor(size_t free) const; ysr@1580: void adjust_cms_free_adjustment_factor(bool fail, size_t free); ysr@1580: duke@435: public: duke@435: CMSStats(ConcurrentMarkSweepGeneration* cms_gen, duke@435: unsigned int alpha = CMSExpAvgFactor); duke@435: duke@435: // Whether or not the statistics contain valid data; higher level statistics duke@435: // cannot be called until this returns true (they require at least one young duke@435: // gen and one cms cycle to have completed). duke@435: bool valid() const; duke@435: duke@435: // Record statistics. duke@435: void record_gc0_begin(); duke@435: void record_gc0_end(size_t cms_gen_bytes_used); duke@435: void record_cms_begin(); duke@435: void record_cms_end(); duke@435: duke@435: // Allow management of the cms timer, which must be stopped/started around duke@435: // yield points. duke@435: elapsedTimer& cms_timer() { return _cms_timer; } duke@435: void start_cms_timer() { _cms_timer.start(); } duke@435: void stop_cms_timer() { _cms_timer.stop(); } duke@435: duke@435: // Basic statistics; units are seconds or bytes. duke@435: double gc0_period() const { return _gc0_period; } duke@435: double gc0_duration() const { return _gc0_duration; } duke@435: size_t gc0_promoted() const { return _gc0_promoted; } duke@435: double cms_period() const { return _cms_period; } duke@435: double cms_duration() const { return _cms_duration; } duke@435: double cms_duration_per_mb() const { return _cms_duration_per_mb; } duke@435: size_t cms_allocated() const { return _cms_allocated; } duke@435: duke@435: size_t cms_used_at_gc0_end() const { return _cms_used_at_gc0_end;} duke@435: duke@435: // Seconds since the last background cms cycle began or ended. duke@435: double cms_time_since_begin() const; duke@435: double cms_time_since_end() const; duke@435: duke@435: // Higher level statistics--caller must check that valid() returns true before duke@435: // calling. duke@435: duke@435: // Returns bytes promoted per second of wall clock time. duke@435: double promotion_rate() const; duke@435: duke@435: // Returns bytes directly allocated per second of wall clock time. 
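  // Illustrative arithmetic (a sketch, not taken from the implementation): the
  // scheduling estimates declared below combine roughly as
  //
  //   cms_consumption_rate()    ~= promotion_rate() + cms_allocation_rate()
  //   time_until_cms_gen_full() ~= (capacity - used) / cms_consumption_rate()
  //   time_until_cms_start()    ~= time_until_cms_gen_full()
  //                                - expected time to do the collection work
  //
  // i.e. a cycle should start early enough that, at the current consumption
  // rates, it finishes before the generation fills up. The exact expressions
  // live in the .cpp file.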
duke@435: double cms_allocation_rate() const; duke@435: duke@435: // Rate at which space in the cms generation is being consumed (sum of the duke@435: // above two). duke@435: double cms_consumption_rate() const; duke@435: duke@435: // Returns an estimate of the number of seconds until the cms generation will duke@435: // fill up, assuming no collection work is done. duke@435: double time_until_cms_gen_full() const; duke@435: duke@435: // Returns an estimate of the number of seconds remaining until duke@435: // the cms generation collection should start. duke@435: double time_until_cms_start() const; duke@435: duke@435: // End of higher level statistics. duke@435: duke@435: // Returns the cms incremental mode duty cycle, as a percentage (0-100). duke@435: unsigned int icms_duty_cycle() const { return _icms_duty_cycle; } duke@435: duke@435: // Update the duty cycle and return the new value. duke@435: unsigned int icms_update_duty_cycle(); duke@435: duke@435: // Debugging. duke@435: void print_on(outputStream* st) const PRODUCT_RETURN; duke@435: void print() const { print_on(gclog_or_tty); } duke@435: }; duke@435: duke@435: // A closure related to weak references processing which duke@435: // we embed in the CMSCollector, since we need to pass duke@435: // it to the reference processor for secondary filtering duke@435: // of references based on reachability of referent; duke@435: // see role of _is_alive_non_header closure in the duke@435: // ReferenceProcessor class. duke@435: // For objects in the CMS generation, this closure checks duke@435: // if the object is "live" (reachable). Used in weak duke@435: // reference processing. duke@435: class CMSIsAliveClosure: public BoolObjectClosure { ysr@578: const MemRegion _span; duke@435: const CMSBitMap* _bit_map; duke@435: duke@435: friend class CMSCollector; duke@435: public: duke@435: CMSIsAliveClosure(MemRegion span, duke@435: CMSBitMap* bit_map): duke@435: _span(span), ysr@578: _bit_map(bit_map) { ysr@578: assert(!span.is_empty(), "Empty span could spell trouble"); ysr@578: } ysr@578: duke@435: bool do_object_b(oop obj); duke@435: }; duke@435: duke@435: duke@435: // Implements AbstractRefProcTaskExecutor for CMS. duke@435: class CMSRefProcTaskExecutor: public AbstractRefProcTaskExecutor { duke@435: public: duke@435: duke@435: CMSRefProcTaskExecutor(CMSCollector& collector) duke@435: : _collector(collector) duke@435: { } duke@435: duke@435: // Executes a task using worker threads. duke@435: virtual void execute(ProcessTask& task); duke@435: virtual void execute(EnqueueTask& task); duke@435: private: duke@435: CMSCollector& _collector; duke@435: }; duke@435: duke@435: zgu@3900: class CMSCollector: public CHeapObj { duke@435: friend class VMStructs; duke@435: friend class ConcurrentMarkSweepThread; duke@435: friend class ConcurrentMarkSweepGeneration; duke@435: friend class CompactibleFreeListSpace; jmasa@5461: friend class CMSParMarkTask; jmasa@5461: friend class CMSParInitialMarkTask; duke@435: friend class CMSParRemarkTask; duke@435: friend class CMSConcMarkingTask; duke@435: friend class CMSRefProcTaskProxy; duke@435: friend class CMSRefProcTaskExecutor; duke@435: friend class ScanMarkedObjectsAgainCarefullyClosure; // for sampling eden duke@435: friend class SurvivorSpacePrecleanClosure; // --- ditto ------- duke@435: friend class PushOrMarkClosure; // to access _restart_addr duke@435: friend class Par_PushOrMarkClosure; // to access _restart_addr duke@435: friend class MarkFromRootsClosure; // -- ditto -- duke@435: // ... 
and for clearing cards duke@435: friend class Par_MarkFromRootsClosure; // to access _restart_addr duke@435: // ... and for clearing cards duke@435: friend class Par_ConcMarkingClosure; // to access _restart_addr etc. duke@435: friend class MarkFromRootsVerifyClosure; // to access _restart_addr duke@435: friend class PushAndMarkVerifyClosure; // -- ditto -- duke@435: friend class MarkRefsIntoAndScanClosure; // to access _overflow_list duke@435: friend class PushAndMarkClosure; // -- ditto -- duke@435: friend class Par_PushAndMarkClosure; // -- ditto -- duke@435: friend class CMSKeepAliveClosure; // -- ditto -- duke@435: friend class CMSDrainMarkingStackClosure; // -- ditto -- duke@435: friend class CMSInnerParMarkAndPushClosure; // -- ditto -- duke@435: NOT_PRODUCT(friend class ScanMarkedObjectsAgainClosure;) // assertion on _overflow_list duke@435: friend class ReleaseForegroundGC; // to access _foregroundGCShouldWait duke@435: friend class VM_CMS_Operation; duke@435: friend class VM_CMS_Initial_Mark; duke@435: friend class VM_CMS_Final_Remark; kevinw@2058: friend class TraceCMSMemoryManagerStats; duke@435: duke@435: private: duke@435: jlong _time_of_last_gc; duke@435: void update_time_of_last_gc(jlong now) { duke@435: _time_of_last_gc = now; duke@435: } duke@435: duke@435: OopTaskQueueSet* _task_queues; duke@435: duke@435: // Overflow list of grey objects, threaded through mark-word duke@435: // Manipulated with CAS in the parallel/multi-threaded case. duke@435: oop _overflow_list; duke@435: // The following array-pair keeps track of mark words duke@435: // displaced for accomodating overflow list above. duke@435: // This code will likely be revisited under RFE#4922830. zgu@3900: Stack _preserved_oop_stack; zgu@3900: Stack _preserved_mark_stack; duke@435: duke@435: int* _hash_seed; duke@435: duke@435: // In support of multi-threaded concurrent phases duke@435: YieldingFlexibleWorkGang* _conc_workers; duke@435: duke@435: // Performance Counters duke@435: CollectorCounters* _gc_counters; duke@435: duke@435: // Initialization Errors duke@435: bool _completed_initialization; duke@435: duke@435: // In support of ExplicitGCInvokesConcurrent sla@5237: static bool _full_gc_requested; sla@5237: static GCCause::Cause _full_gc_cause; sla@5237: unsigned int _collection_count_start; ysr@529: duke@435: // Should we unload classes this concurrent cycle? ysr@529: bool _should_unload_classes; ysr@529: unsigned int _concurrent_cycles_since_last_unload; ysr@529: unsigned int concurrent_cycles_since_last_unload() const { ysr@529: return _concurrent_cycles_since_last_unload; ysr@529: } duke@435: // Did we (allow) unload classes in the previous concurrent cycle? ysr@529: bool unloaded_classes_last_cycle() const { ysr@529: return concurrent_cycles_since_last_unload() == 0; duke@435: } ysr@1233: // Root scanning options for perm gen ysr@1233: int _roots_scanning_options; ysr@1233: int roots_scanning_options() const { return _roots_scanning_options; } ysr@1233: void add_root_scanning_option(int o) { _roots_scanning_options |= o; } ysr@1233: void remove_root_scanning_option(int o) { _roots_scanning_options &= ~o; } duke@435: duke@435: // Verification support duke@435: CMSBitMap _verification_mark_bm; duke@435: void verify_after_remark_work_1(); duke@435: void verify_after_remark_work_2(); duke@435: duke@435: // true if any verification flag is on. 
duke@435: bool _verifying; duke@435: bool verifying() const { return _verifying; } duke@435: void set_verifying(bool v) { _verifying = v; } duke@435: duke@435: // Collector policy duke@435: ConcurrentMarkSweepPolicy* _collector_policy; duke@435: ConcurrentMarkSweepPolicy* collector_policy() { return _collector_policy; } duke@435: jmasa@5076: void set_did_compact(bool v); jmasa@5076: duke@435: // XXX Move these to CMSStats ??? FIX ME !!! ysr@1580: elapsedTimer _inter_sweep_timer; // time between sweeps ysr@1580: elapsedTimer _intra_sweep_timer; // time _in_ sweeps ysr@1580: // padded decaying average estimates of the above ysr@1580: AdaptivePaddedAverage _inter_sweep_estimate; ysr@1580: AdaptivePaddedAverage _intra_sweep_estimate; duke@435: sla@5237: CMSTracer* _gc_tracer_cm; sla@5237: ConcurrentGCTimer* _gc_timer_cm; sla@5237: sla@5237: bool _cms_start_registered; sla@5237: sla@5237: GCHeapSummary _last_heap_summary; sla@5237: MetaspaceSummary _last_metaspace_summary; sla@5237: sla@5237: void register_foreground_gc_start(GCCause::Cause cause); sla@5237: void register_gc_start(GCCause::Cause cause); sla@5237: void register_gc_end(); sla@5237: void save_heap_summary(); sla@5237: void report_heap_summary(GCWhen::Type when); sla@5237: duke@435: protected: duke@435: ConcurrentMarkSweepGeneration* _cmsGen; // old gen (CMS) duke@435: MemRegion _span; // span covering above two duke@435: CardTableRS* _ct; // card table duke@435: duke@435: // CMS marking support structures duke@435: CMSBitMap _markBitMap; duke@435: CMSBitMap _modUnionTable; duke@435: CMSMarkStack _markStack; duke@435: duke@435: HeapWord* _restart_addr; // in support of marking stack overflow duke@435: void lower_restart_addr(HeapWord* low); duke@435: duke@435: // Counters in support of marking stack / work queue overflow handling: duke@435: // a non-zero value indicates certain types of overflow events during duke@435: // the current CMS cycle and could lead to stack resizing efforts at duke@435: // an opportune future time. duke@435: size_t _ser_pmc_preclean_ovflw; duke@435: size_t _ser_pmc_remark_ovflw; duke@435: size_t _par_pmc_remark_ovflw; ysr@887: size_t _ser_kac_preclean_ovflw; duke@435: size_t _ser_kac_ovflw; duke@435: size_t _par_kac_ovflw; ysr@969: NOT_PRODUCT(ssize_t _num_par_pushes;) duke@435: duke@435: // ("Weak") Reference processing support duke@435: ReferenceProcessor* _ref_processor; duke@435: CMSIsAliveClosure _is_alive_closure; ysr@578: // keep this textually after _markBitMap and _span; c'tor dependency duke@435: duke@435: ConcurrentMarkSweepThread* _cmsThread; // the thread doing the work duke@435: ModUnionClosure _modUnionClosure; duke@435: ModUnionClosurePar _modUnionClosurePar; duke@435: duke@435: // CMS abstract state machine duke@435: // initial_state: Idling duke@435: // next_state(Idling) = {Marking} duke@435: // next_state(Marking) = {Precleaning, Sweeping} duke@435: // next_state(Precleaning) = {AbortablePreclean, FinalMarking} duke@435: // next_state(AbortablePreclean) = {FinalMarking} duke@435: // next_state(FinalMarking) = {Sweeping} duke@435: // next_state(Sweeping) = {Resizing} duke@435: // next_state(Resizing) = {Resetting} duke@435: // next_state(Resetting) = {Idling} duke@435: // The numeric values below are chosen so that: duke@435: // . _collectorState <= Idling == post-sweep && pre-mark duke@435: // . 
_collectorState in (Idling, Sweeping) == {initial,final}marking || duke@435: // precleaning || abortablePrecleanb ysr@1580: public: duke@435: enum CollectorState { duke@435: Resizing = 0, duke@435: Resetting = 1, duke@435: Idling = 2, duke@435: InitialMarking = 3, duke@435: Marking = 4, duke@435: Precleaning = 5, duke@435: AbortablePreclean = 6, duke@435: FinalMarking = 7, duke@435: Sweeping = 8 duke@435: }; ysr@1580: protected: duke@435: static CollectorState _collectorState; duke@435: duke@435: // State related to prologue/epilogue invocation for my generations duke@435: bool _between_prologue_and_epilogue; duke@435: duke@435: // Signalling/State related to coordination between fore- and backgroud GC duke@435: // Note: When the baton has been passed from background GC to foreground GC, duke@435: // _foregroundGCIsActive is true and _foregroundGCShouldWait is false. duke@435: static bool _foregroundGCIsActive; // true iff foreground collector is active or duke@435: // wants to go active duke@435: static bool _foregroundGCShouldWait; // true iff background GC is active and has not duke@435: // yet passed the baton to the foreground GC duke@435: duke@435: // Support for CMSScheduleRemark (abortable preclean) duke@435: bool _abort_preclean; duke@435: bool _start_sampling; duke@435: duke@435: int _numYields; duke@435: size_t _numDirtyCards; ysr@1580: size_t _sweep_count; duke@435: // number of full gc's since the last concurrent gc. duke@435: uint _full_gcs_since_conc_gc; duke@435: duke@435: // occupancy used for bootstrapping stats duke@435: double _bootstrap_occupancy; duke@435: duke@435: // timer duke@435: elapsedTimer _timer; duke@435: duke@435: // Timing, allocation and promotion statistics, used for scheduling. duke@435: CMSStats _stats; duke@435: duke@435: // Allocation limits installed in the young gen, used only in duke@435: // CMSIncrementalMode. When an allocation in the young gen would cross one of duke@435: // these limits, the cms generation is notified and the cms thread is started duke@435: // or stopped, respectively. duke@435: HeapWord* _icms_start_limit; duke@435: HeapWord* _icms_stop_limit; duke@435: duke@435: enum CMS_op_type { duke@435: CMS_op_checkpointRootsInitial, duke@435: CMS_op_checkpointRootsFinal duke@435: }; duke@435: brutisso@3767: void do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause); duke@435: bool stop_world_and_do(CMS_op_type op); duke@435: duke@435: OopTaskQueueSet* task_queues() { return _task_queues; } duke@435: int* hash_seed(int i) { return &_hash_seed[i]; } duke@435: YieldingFlexibleWorkGang* conc_workers() { return _conc_workers; } duke@435: duke@435: // Support for parallelizing Eden rescan in CMS remark phase duke@435: void sample_eden(); // ... sample Eden space top duke@435: duke@435: private: duke@435: // Support for parallelizing young gen rescan in CMS remark phase duke@435: Generation* _young_gen; // the younger gen duke@435: HeapWord** _top_addr; // ... Top of Eden duke@435: HeapWord** _end_addr; // ... End of Eden jmasa@5459: Mutex* _eden_chunk_lock; duke@435: HeapWord** _eden_chunk_array; // ... Eden partitioning array duke@435: size_t _eden_chunk_index; // ... top (exclusive) of array duke@435: size_t _eden_chunk_capacity; // ... 
max entries in array duke@435: duke@435: // Support for parallelizing survivor space rescan duke@435: HeapWord** _survivor_chunk_array; duke@435: size_t _survivor_chunk_index; duke@435: size_t _survivor_chunk_capacity; duke@435: size_t* _cursor; duke@435: ChunkArray* _survivor_plab_array; duke@435: mgerdin@7470: // A bounded minimum size of PLABs, should not return too small values since mgerdin@7470: // this will affect the size of the data structures used for parallel young gen rescan mgerdin@7470: size_t plab_sample_minimum_size(); mgerdin@7470: duke@435: // Support for marking stack overflow handling duke@435: bool take_from_overflow_list(size_t num, CMSMarkStack* to_stack); jmasa@2188: bool par_take_from_overflow_list(size_t num, jmasa@2188: OopTaskQueue* to_work_q, jmasa@2188: int no_of_gc_threads); duke@435: void push_on_overflow_list(oop p); duke@435: void par_push_on_overflow_list(oop p); duke@435: // the following is, obviously, not, in general, "MT-stable" duke@435: bool overflow_list_is_empty() const; duke@435: duke@435: void preserve_mark_if_necessary(oop p); duke@435: void par_preserve_mark_if_necessary(oop p); duke@435: void preserve_mark_work(oop p, markOop m); duke@435: void restore_preserved_marks_if_any(); duke@435: NOT_PRODUCT(bool no_preserved_marks() const;) duke@435: // in support of testing overflow code duke@435: NOT_PRODUCT(int _overflow_counter;) duke@435: NOT_PRODUCT(bool simulate_overflow();) // sequential duke@435: NOT_PRODUCT(bool par_simulate_overflow();) // MT version duke@435: duke@435: // CMS work methods duke@435: void checkpointRootsInitialWork(bool asynch); // initial checkpoint work duke@435: duke@435: // a return value of false indicates failure due to stack overflow duke@435: bool markFromRootsWork(bool asynch); // concurrent marking work duke@435: duke@435: public: // FIX ME!!! only for testing duke@435: bool do_marking_st(bool asynch); // single-threaded marking duke@435: bool do_marking_mt(bool asynch); // multi-threaded marking duke@435: duke@435: private: duke@435: duke@435: // concurrent precleaning work duke@435: size_t preclean_mod_union_table(ConcurrentMarkSweepGeneration* gen, duke@435: ScanMarkedObjectsAgainCarefullyClosure* cl); duke@435: size_t preclean_card_table(ConcurrentMarkSweepGeneration* gen, duke@435: ScanMarkedObjectsAgainCarefullyClosure* cl); duke@435: // Does precleaning work, returning a quantity indicative of duke@435: // the amount of "useful work" done. duke@435: size_t preclean_work(bool clean_refs, bool clean_survivors); coleenp@4037: void preclean_klasses(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock); duke@435: void abortable_preclean(); // Preclean while looking for possible abort duke@435: void initialize_sequential_subtasks_for_young_gen_rescan(int i); duke@435: // Helper function for above; merge-sorts the per-thread plab samples jmasa@2188: void merge_survivor_plab_arrays(ContiguousSpace* surv, int no_of_gc_threads); duke@435: // Resets (i.e. 
clears) the per-thread plab sample vectors
  void reset_survivor_plab_arrays();

  // final (second) checkpoint work
  void checkpointRootsFinalWork(bool asynch, bool clear_all_soft_refs,
                                bool init_mark_was_synchronous);
  // work routine for parallel version of remark
  void do_remark_parallel();
  // work routine for non-parallel version of remark
  void do_remark_non_parallel();
  // reference processing work routine (during second checkpoint)
  void refProcessingWork(bool asynch, bool clear_all_soft_refs);

  // concurrent sweeping work
  void sweepWork(ConcurrentMarkSweepGeneration* gen, bool asynch);

  // (concurrent) resetting of support data structures
  void reset(bool asynch);

  // Clear _expansion_cause fields of constituent generations
  void clear_expansion_cause();

  // An auxiliary method used to record the ends of
  // used regions of each generation to limit the extent of sweep
  void save_sweep_limits();

  // A work method used by foreground collection to determine
  // what type of collection (compacting or not, continuing or fresh)
  // it should do.
  void decide_foreground_collection_type(bool clear_all_soft_refs,
                                         bool* should_compact, bool* should_start_over);

  // A work method used by the foreground collector to do
  // a mark-sweep-compact.
  void do_compaction_work(bool clear_all_soft_refs);

  // A work method used by the foreground collector to do
  // a mark-sweep, after taking over from a possibly on-going
  // concurrent mark-sweep collection.
  void do_mark_sweep_work(bool clear_all_soft_refs,
                          CollectorState first_state, bool should_start_over);

  // Work methods for reporting concurrent mode interruption or failure
  bool is_external_interruption();
  void report_concurrent_mode_interruption();

  // If the background GC is active, acquire control from the background
  // GC and do the collection.
  void acquire_control_and_collect(bool full, bool clear_all_soft_refs);

  // For synchronizing passing of control from background to foreground
  // GC. waitForForegroundGC() is called by the background
  // collector. If it had to wait for a foreground collection,
  // it returns true and the background collection should assume
  // that the collection was finished by the foreground
  // collector.
  bool waitForForegroundGC();

  // Incremental mode triggering: recompute the icms duty cycle and set the
  // allocation limits in the young gen.
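  // Illustrative sketch (not the actual implementation): the new duty cycle is
  // derived from the scheduling statistics above and then damped so that it
  // cannot swing arbitrarily far from the previous value, conceptually
  //
  //   unsigned int damped = icms_damped_duty_cycle(_icms_duty_cycle, wanted);
  //   // "wanted" is a hypothetical local; icms_damped_duty_cycle() limits
  //   // how far the value may move per update (see CMSStats above)
  //
  // The resulting percentage is then translated into the _icms_start_limit /
  // _icms_stop_limit addresses in eden; an allocation crossing the start limit
  // wakes the CMS thread and one crossing the stop limit stops it (see the
  // comments on those fields above).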
duke@435: void icms_update_allocation_limits(); duke@435: duke@435: size_t block_size_using_printezis_bits(HeapWord* addr) const; duke@435: size_t block_size_if_printezis_bits(HeapWord* addr) const; duke@435: HeapWord* next_card_start_after_block(HeapWord* addr) const; duke@435: duke@435: void setup_cms_unloading_and_verification_state(); duke@435: public: duke@435: CMSCollector(ConcurrentMarkSweepGeneration* cmsGen, duke@435: CardTableRS* ct, duke@435: ConcurrentMarkSweepPolicy* cp); duke@435: ConcurrentMarkSweepThread* cmsThread() { return _cmsThread; } duke@435: duke@435: ReferenceProcessor* ref_processor() { return _ref_processor; } duke@435: void ref_processor_init(); duke@435: duke@435: Mutex* bitMapLock() const { return _markBitMap.lock(); } duke@435: static CollectorState abstract_state() { return _collectorState; } duke@435: duke@435: bool should_abort_preclean() const; // Whether preclean should be aborted. duke@435: size_t get_eden_used() const; duke@435: size_t get_eden_capacity() const; duke@435: duke@435: ConcurrentMarkSweepGeneration* cmsGen() { return _cmsGen; } duke@435: duke@435: // locking checks duke@435: NOT_PRODUCT(static bool have_cms_token();) duke@435: duke@435: // XXXPERM bool should_collect(bool full, size_t size, bool tlab); duke@435: bool shouldConcurrentCollect(); duke@435: duke@435: void collect(bool full, duke@435: bool clear_all_soft_refs, duke@435: size_t size, duke@435: bool tlab); sla@5237: void collect_in_background(bool clear_all_soft_refs, GCCause::Cause cause); sla@5237: void collect_in_foreground(bool clear_all_soft_refs, GCCause::Cause cause); duke@435: duke@435: // In support of ExplicitGCInvokesConcurrent sla@5237: static void request_full_gc(unsigned int full_gc_count, GCCause::Cause cause); duke@435: // Should we unload classes in a particular concurrent cycle? ysr@529: bool should_unload_classes() const { ysr@529: return _should_unload_classes; duke@435: } coleenp@4037: void update_should_unload_classes(); duke@435: duke@435: void direct_allocated(HeapWord* start, size_t size); duke@435: duke@435: // Object is dead if not marked and current phase is sweeping. duke@435: bool is_dead_obj(oop obj) const; duke@435: duke@435: // After a promotion (of "start"), do any necessary marking. duke@435: // If "par", then it's being done by a parallel GC thread. duke@435: // The last two args indicate if we need precise marking duke@435: // and if so the size of the object so it can be dirtied duke@435: // in its entirety. 
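  // For example (an illustrative call, not taken from the promotion code):
  // after copying an object array into this generation a parallel worker
  // might do
  //
  //   _collector->promoted(true  /* par */, new_obj_addr,
  //                        true  /* is_obj_array, needs precise marking */,
  //                        obj_size_in_words);
  //
  // presumably so that the whole array, not just the card containing its
  // header, is revisited later. new_obj_addr and obj_size_in_words are
  // hypothetical locals of the caller.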
duke@435: void promoted(bool par, HeapWord* start, duke@435: bool is_obj_array, size_t obj_size); duke@435: duke@435: HeapWord* allocation_limit_reached(Space* space, HeapWord* top, duke@435: size_t word_size); duke@435: duke@435: void getFreelistLocks() const; duke@435: void releaseFreelistLocks() const; duke@435: bool haveFreelistLocks() const; duke@435: jmasa@4900: // Adjust size of underlying generation jmasa@4900: void compute_new_size(); jmasa@4900: duke@435: // GC prologue and epilogue duke@435: void gc_prologue(bool full); duke@435: void gc_epilogue(bool full); duke@435: duke@435: jlong time_of_last_gc(jlong now) { duke@435: if (_collectorState <= Idling) { duke@435: // gc not in progress duke@435: return _time_of_last_gc; duke@435: } else { duke@435: // collection in progress duke@435: return now; duke@435: } duke@435: } duke@435: duke@435: // Support for parallel remark of survivor space duke@435: void* get_data_recorder(int thr_num); jmasa@5459: void sample_eden_chunk(); duke@435: duke@435: CMSBitMap* markBitMap() { return &_markBitMap; } duke@435: void directAllocated(HeapWord* start, size_t size); duke@435: duke@435: // main CMS steps and related support duke@435: void checkpointRootsInitial(bool asynch); duke@435: bool markFromRoots(bool asynch); // a return value of false indicates failure duke@435: // due to stack overflow duke@435: void preclean(); duke@435: void checkpointRootsFinal(bool asynch, bool clear_all_soft_refs, duke@435: bool init_mark_was_synchronous); duke@435: void sweep(bool asynch); duke@435: duke@435: // Check that the currently executing thread is the expected duke@435: // one (foreground collector or background collector). ysr@1580: static void check_correct_thread_executing() PRODUCT_RETURN; duke@435: // XXXPERM void print_statistics() PRODUCT_RETURN; duke@435: duke@435: bool is_cms_reachable(HeapWord* addr); duke@435: duke@435: // Performance Counter Support duke@435: CollectorCounters* counters() { return _gc_counters; } duke@435: duke@435: // timer stuff duke@435: void startTimer() { assert(!_timer.is_active(), "Error"); _timer.start(); } duke@435: void stopTimer() { assert( _timer.is_active(), "Error"); _timer.stop(); } duke@435: void resetTimer() { assert(!_timer.is_active(), "Error"); _timer.reset(); } duke@435: double timerValue() { assert(!_timer.is_active(), "Error"); return _timer.seconds(); } duke@435: duke@435: int yields() { return _numYields; } duke@435: void resetYields() { _numYields = 0; } duke@435: void incrementYields() { _numYields++; } duke@435: void resetNumDirtyCards() { _numDirtyCards = 0; } duke@435: void incrementNumDirtyCards(size_t num) { _numDirtyCards += num; } duke@435: size_t numDirtyCards() { return _numDirtyCards; } duke@435: duke@435: static bool foregroundGCShouldWait() { return _foregroundGCShouldWait; } duke@435: static void set_foregroundGCShouldWait(bool v) { _foregroundGCShouldWait = v; } duke@435: static bool foregroundGCIsActive() { return _foregroundGCIsActive; } duke@435: static void set_foregroundGCIsActive(bool v) { _foregroundGCIsActive = v; } ysr@1580: size_t sweep_count() const { return _sweep_count; } ysr@1580: void increment_sweep_count() { _sweep_count++; } duke@435: duke@435: // Timers/stats for gc scheduling and incremental mode pacing. duke@435: CMSStats& stats() { return _stats; } duke@435: duke@435: // Convenience methods that check whether CMSIncrementalMode is enabled and duke@435: // forward to the corresponding methods in ConcurrentMarkSweepThread. 
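  // Presumably each of these is a thin wrapper of the form (illustrative
  // only; the real forwarding target lives in ConcurrentMarkSweepThread):
  //
  //   static void start_icms() {
  //     if (CMSIncrementalMode) {
  //       ConcurrentMarkSweepThread::start_icms();
  //     }
  //   }
  //
  // so that callers need not test CMSIncrementalMode themselves.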
duke@435: static void start_icms(); duke@435: static void stop_icms(); // Called at the end of the cms cycle. duke@435: static void disable_icms(); // Called before a foreground collection. duke@435: static void enable_icms(); // Called after a foreground collection. duke@435: void icms_wait(); // Called at yield points. duke@435: duke@435: // Adaptive size policy duke@435: CMSAdaptiveSizePolicy* size_policy(); duke@435: CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters(); duke@435: stefank@4904: static void print_on_error(outputStream* st); stefank@4904: duke@435: // debugging brutisso@3711: void verify(); stefank@5018: bool verify_after_remark(bool silent = VerifySilently); duke@435: void verify_ok_to_terminate() const PRODUCT_RETURN; duke@435: void verify_work_stacks_empty() const PRODUCT_RETURN; duke@435: void verify_overflow_empty() const PRODUCT_RETURN; duke@435: duke@435: // convenience methods in support of debugging duke@435: static const size_t skip_header_HeapWords() PRODUCT_RETURN0; duke@435: HeapWord* block_start(const void* p) const PRODUCT_RETURN0; duke@435: duke@435: // accessors duke@435: CMSMarkStack* verification_mark_stack() { return &_markStack; } duke@435: CMSBitMap* verification_mark_bm() { return &_verification_mark_bm; } duke@435: duke@435: // Initialization errors duke@435: bool completed_initialization() { return _completed_initialization; } jmasa@5459: jmasa@5459: void print_eden_and_survivor_chunk_arrays(); duke@435: }; duke@435: duke@435: class CMSExpansionCause : public AllStatic { duke@435: public: duke@435: enum Cause { duke@435: _no_expansion, duke@435: _satisfy_free_ratio, duke@435: _satisfy_promotion, duke@435: _satisfy_allocation, duke@435: _allocate_par_lab, duke@435: _allocate_par_spooling_space, duke@435: _adaptive_size_policy duke@435: }; duke@435: // Return a string describing the cause of the expansion. duke@435: static const char* to_string(CMSExpansionCause::Cause cause); duke@435: }; duke@435: duke@435: class ConcurrentMarkSweepGeneration: public CardGeneration { duke@435: friend class VMStructs; duke@435: friend class ConcurrentMarkSweepThread; duke@435: friend class ConcurrentMarkSweep; duke@435: friend class CMSCollector; duke@435: protected: duke@435: static CMSCollector* _collector; // the collector that collects us duke@435: CompactibleFreeListSpace* _cmsSpace; // underlying space (only one for now) duke@435: duke@435: // Performance Counters duke@435: GenerationCounters* _gen_counters; duke@435: GSpaceCounters* _space_counters; duke@435: duke@435: // Words directly allocated, used by CMSStats. 
duke@435: size_t _direct_allocated_words; duke@435: duke@435: // Non-product stat counters duke@435: NOT_PRODUCT( ysr@2071: size_t _numObjectsPromoted; ysr@2071: size_t _numWordsPromoted; ysr@2071: size_t _numObjectsAllocated; ysr@2071: size_t _numWordsAllocated; duke@435: ) duke@435: duke@435: // Used for sizing decisions duke@435: bool _incremental_collection_failed; duke@435: bool incremental_collection_failed() { duke@435: return _incremental_collection_failed; duke@435: } duke@435: void set_incremental_collection_failed() { duke@435: _incremental_collection_failed = true; duke@435: } duke@435: void clear_incremental_collection_failed() { duke@435: _incremental_collection_failed = false; duke@435: } duke@435: ysr@529: // accessors ysr@529: void set_expansion_cause(CMSExpansionCause::Cause v) { _expansion_cause = v;} ysr@529: CMSExpansionCause::Cause expansion_cause() const { return _expansion_cause; } ysr@529: duke@435: private: duke@435: // For parallel young-gen GC support. duke@435: CMSParGCThreadState** _par_gc_thread_states; duke@435: duke@435: // Reason generation was expanded duke@435: CMSExpansionCause::Cause _expansion_cause; duke@435: duke@435: // In support of MinChunkSize being larger than min object size duke@435: const double _dilatation_factor; duke@435: duke@435: enum CollectionTypes { duke@435: Concurrent_collection_type = 0, duke@435: MS_foreground_collection_type = 1, duke@435: MSC_foreground_collection_type = 2, duke@435: Unknown_collection_type = 3 duke@435: }; duke@435: duke@435: CollectionTypes _debug_collection_type; duke@435: jmasa@5076: // True if a compactiing collection was done. jmasa@5076: bool _did_compact; jmasa@5076: bool did_compact() { return _did_compact; } jmasa@5076: ysr@529: // Fraction of current occupancy at which to start a CMS collection which ysr@529: // will collect this generation (at least). ysr@529: double _initiating_occupancy; ysr@529: duke@435: protected: duke@435: // Shrink generation by specified size (returns false if unable to shrink) jmasa@4900: void shrink_free_list_by(size_t bytes); duke@435: duke@435: // Update statistics for GC duke@435: virtual void update_gc_stats(int level, bool full); duke@435: duke@435: // Maximum available space in the generation (including uncommitted) duke@435: // space. duke@435: size_t max_available() const; duke@435: ysr@529: // getter and initializer for _initiating_occupancy field. 
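  // Illustrative note (not part of the original header): _initiating_occupancy
  // is a fraction in [0,1]; init_initiating_occupancy() is given flag values
  // such as CMSInitiatingOccupancyFraction and CMSTriggerRatio (the io/tr
  // parameters), and a concurrent collection of this generation is considered
  // once roughly
  //
  //   occupancy() >= initiating_occupancy()
  //
  // e.g. with CMSInitiatingOccupancyFraction=75 a cycle may be started when
  // the generation is about three quarters full (see
  // should_concurrent_collect() below).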
ysr@529: double initiating_occupancy() const { return _initiating_occupancy; } jwilhelm@4576: void init_initiating_occupancy(intx io, uintx tr); ysr@529: duke@435: public: duke@435: ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size, duke@435: int level, CardTableRS* ct, duke@435: bool use_adaptive_freelists, jmasa@3730: FreeBlockDictionary::DictionaryChoice); duke@435: duke@435: // Accessors duke@435: CMSCollector* collector() const { return _collector; } duke@435: static void set_collector(CMSCollector* collector) { duke@435: assert(_collector == NULL, "already set"); duke@435: _collector = collector; duke@435: } duke@435: CompactibleFreeListSpace* cmsSpace() const { return _cmsSpace; } duke@435: duke@435: Mutex* freelistLock() const; duke@435: duke@435: virtual Generation::Name kind() { return Generation::ConcurrentMarkSweep; } duke@435: duke@435: // Adaptive size policy duke@435: CMSAdaptiveSizePolicy* size_policy(); duke@435: jmasa@5076: void set_did_compact(bool v) { _did_compact = v; } jmasa@5076: duke@435: bool refs_discovery_is_atomic() const { return false; } duke@435: bool refs_discovery_is_mt() const { duke@435: // Note: CMS does MT-discovery during the parallel-remark duke@435: // phases. Use ReferenceProcessorMTMutator to make refs duke@435: // discovery MT-safe during such phases or other parallel duke@435: // discovery phases in the future. This may all go away duke@435: // if/when we decide that refs discovery is sufficiently duke@435: // rare that the cost of the CAS's involved is in the duke@435: // noise. That's a measurement that should be done, and duke@435: // the code simplified if that turns out to be the case. ysr@2651: return ConcGCThreads > 1; duke@435: } duke@435: duke@435: // Override duke@435: virtual void ref_processor_init(); duke@435: jmasa@706: // Grow generation by specified size (returns false if unable to grow) jmasa@706: bool grow_by(size_t bytes); jmasa@706: // Grow generation to reserved size. jmasa@706: bool grow_to_reserved(); jmasa@706: duke@435: void clear_expansion_cause() { _expansion_cause = CMSExpansionCause::_no_expansion; } duke@435: duke@435: // Space enquiries duke@435: size_t capacity() const; duke@435: size_t used() const; duke@435: size_t free() const; ysr@529: double occupancy() const { return ((double)used())/((double)capacity()); } duke@435: size_t contiguous_available() const; duke@435: size_t unsafe_max_alloc_nogc() const; zgu@9793: size_t used_stable() const; duke@435: duke@435: // over-rides duke@435: MemRegion used_region() const; duke@435: MemRegion used_region_at_save_marks() const; duke@435: duke@435: // Does a "full" (forced) collection invoked on this generation collect duke@435: // all younger generations as well? Note that the second conjunct is a duke@435: // hack to allow the collection of the younger gen first if the flag is duke@435: // set. This is better than using th policy's should_collect_gen0_first() duke@435: // since that causes us to do an extra unnecessary pair of restart-&-stop-world. duke@435: virtual bool full_collects_younger_generations() const { duke@435: return UseCMSCompactAtFullCollection && !CollectGen0First; duke@435: } duke@435: duke@435: void space_iterate(SpaceClosure* blk, bool usedOnly = false); duke@435: duke@435: // Support for compaction duke@435: CompactibleSpace* first_compaction_space() const; duke@435: // Adjust quantites in the generation affected by duke@435: // the compaction. 
duke@435: void reset_after_compaction(); duke@435: duke@435: // Allocation support duke@435: HeapWord* allocate(size_t size, bool tlab); duke@435: HeapWord* have_lock_and_allocate(size_t size, bool tlab); coleenp@548: oop promote(oop obj, size_t obj_size); duke@435: HeapWord* par_allocate(size_t size, bool tlab) { duke@435: return allocate(size, tlab); duke@435: } duke@435: duke@435: // Incremental mode triggering. duke@435: HeapWord* allocation_limit_reached(Space* space, HeapWord* top, duke@435: size_t word_size); duke@435: duke@435: // Used by CMSStats to track direct allocation. The value is sampled and duke@435: // reset after each young gen collection. duke@435: size_t direct_allocated_words() const { return _direct_allocated_words; } duke@435: void reset_direct_allocated_words() { _direct_allocated_words = 0; } duke@435: duke@435: // Overrides for parallel promotion. duke@435: virtual oop par_promote(int thread_num, duke@435: oop obj, markOop m, size_t word_sz); duke@435: // This one should not be called for CMS. duke@435: virtual void par_promote_alloc_undo(int thread_num, duke@435: HeapWord* obj, size_t word_sz); duke@435: virtual void par_promote_alloc_done(int thread_num); duke@435: virtual void par_oop_since_save_marks_iterate_done(int thread_num); duke@435: ysr@2243: virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes) const; duke@435: ysr@1580: // Inform this (non-young) generation that a promotion failure was ysr@1580: // encountered during a collection of a younger generation that ysr@1580: // promotes into this generation. ysr@1580: virtual void promotion_failure_occurred(); ysr@1580: duke@435: bool should_collect(bool full, size_t size, bool tlab); ysr@529: virtual bool should_concurrent_collect() const; ysr@529: virtual bool is_too_full() const; duke@435: void collect(bool full, duke@435: bool clear_all_soft_refs, duke@435: size_t size, duke@435: bool tlab); duke@435: duke@435: HeapWord* expand_and_allocate(size_t word_size, duke@435: bool tlab, duke@435: bool parallel = false); duke@435: duke@435: // GC prologue and epilogue duke@435: void gc_prologue(bool full); duke@435: void gc_prologue_work(bool full, bool registerClosure, duke@435: ModUnionClosure* modUnionClosure); duke@435: void gc_epilogue(bool full); duke@435: void gc_epilogue_work(bool full); duke@435: duke@435: // Time since last GC of this generation duke@435: jlong time_of_last_gc(jlong now) { duke@435: return collector()->time_of_last_gc(now); duke@435: } duke@435: void update_time_of_last_gc(jlong now) { duke@435: collector()-> update_time_of_last_gc(now); duke@435: } duke@435: duke@435: // Allocation failure duke@435: void expand(size_t bytes, size_t expand_bytes, duke@435: CMSExpansionCause::Cause cause); jmasa@706: virtual bool expand(size_t bytes, size_t expand_bytes); duke@435: void shrink(size_t bytes); jmasa@4900: void shrink_by(size_t bytes); duke@435: HeapWord* expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz); duke@435: bool expand_and_ensure_spooling_space(PromotionInfo* promo); duke@435: duke@435: // Iteration support and related enquiries duke@435: void save_marks(); duke@435: bool no_allocs_since_save_marks(); duke@435: void younger_refs_iterate(OopsInGenClosure* cl); duke@435: duke@435: // Iteration support specific to CMS generations duke@435: void save_sweep_limit(); duke@435: duke@435: // More iteration support coleenp@4037: virtual void oop_iterate(ExtendedOopClosure* cl); jmasa@952: virtual void safe_object_iterate(ObjectClosure* cl); duke@435: virtual 
void object_iterate(ObjectClosure* cl);

  // Need to declare the full complement of closures, whether we'll
  // override them or not, or get a message from the compiler:
  //   oop_since_save_marks_iterate_nv hides virtual function...
#define CMS_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
  void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
  ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DECL)

  // Smart allocation  XXX -- move to CFLSpace?
  void setNearLargestChunk();
  bool isNearLargestChunk(HeapWord* addr);

  // Get the chunk at the end of the space.  Delegates to
  // the space.
  FreeChunk* find_chunk_at_end();

  void post_compact();

  // Debugging
  void prepare_for_verify();
  void verify();
  void print_statistics()               PRODUCT_RETURN;

  // Performance Counters support
  virtual void update_counters();
  virtual void update_counters(size_t used);
  void initialize_performance_counters();
  CollectorCounters* counters()  { return collector()->counters(); }

  // Support for parallel remark of survivor space
  void* get_data_recorder(int thr_num) {
    // Delegate to collector
    return collector()->get_data_recorder(thr_num);
  }
  void sample_eden_chunk() {
    // Delegate to collector
    return collector()->sample_eden_chunk();
  }

  // Printing
  const char* name() const;
  virtual const char* short_name() const { return "CMS"; }
  void        print() const;
  void printOccupancy(const char* s);
  bool must_be_youngest() const { return false; }
  bool must_be_oldest()   const { return true; }

  // Resize the generation after a compacting GC.  The
  // generation can be treated as a contiguous space
  // after the compaction.
  virtual void compute_new_size();
  // Resize the generation after a non-compacting
  // collection.
  void compute_new_size_free_list();

  CollectionTypes debug_collection_type() { return _debug_collection_type; }
  void rotate_debug_collection_type();
};

class ASConcurrentMarkSweepGeneration : public ConcurrentMarkSweepGeneration {

  // Return the size policy from the heap's collector
  // policy cast to CMSAdaptiveSizePolicy*.
  CMSAdaptiveSizePolicy* cms_size_policy() const;

  // Resize the generation based on the adaptive size
  // policy.
duke@435: void resize(size_t cur_promo, size_t desired_promo); duke@435: duke@435: // Return the GC counters from the collector policy duke@435: CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters(); duke@435: duke@435: virtual void shrink_by(size_t bytes); duke@435: duke@435: public: duke@435: ASConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size, duke@435: int level, CardTableRS* ct, duke@435: bool use_adaptive_freelists, jmasa@3730: FreeBlockDictionary<FreeChunk>::DictionaryChoice duke@435: dictionaryChoice) : duke@435: ConcurrentMarkSweepGeneration(rs, initial_byte_size, level, ct, duke@435: use_adaptive_freelists, dictionaryChoice) {} duke@435: duke@435: virtual const char* short_name() const { return "ASCMS"; } duke@435: virtual Generation::Name kind() { return Generation::ASConcurrentMarkSweep; } duke@435: duke@435: virtual void update_counters(); duke@435: virtual void update_counters(size_t used); duke@435: }; duke@435: duke@435: // duke@435: // Closures of various sorts used by CMS to accomplish its work duke@435: // duke@435: duke@435: // This closure is used to do concurrent marking from the roots duke@435: // following the first checkpoint. duke@435: class MarkFromRootsClosure: public BitMapClosure { duke@435: CMSCollector* _collector; duke@435: MemRegion _span; duke@435: CMSBitMap* _bitMap; duke@435: CMSBitMap* _mut; duke@435: CMSMarkStack* _markStack; duke@435: bool _yield; duke@435: int _skipBits; duke@435: HeapWord* _finger; duke@435: HeapWord* _threshold; duke@435: DEBUG_ONLY(bool _verifying;) duke@435: duke@435: public: duke@435: MarkFromRootsClosure(CMSCollector* collector, MemRegion span, duke@435: CMSBitMap* bitMap, duke@435: CMSMarkStack* markStack, duke@435: bool should_yield, bool verifying = false); ysr@777: bool do_bit(size_t offset); duke@435: void reset(HeapWord* addr); duke@435: inline void do_yield_check(); duke@435: duke@435: private: duke@435: void scanOopsInOop(HeapWord* ptr); duke@435: void do_yield_work(); duke@435: }; duke@435: duke@435: // This closure is used to do concurrent multi-threaded duke@435: // marking from the roots following the first checkpoint. duke@435: // XXX This should really be a subclass of the serial version duke@435: // above, but I have not had the time to refactor things cleanly. duke@435: // That will be done for Dolphin. duke@435: class Par_MarkFromRootsClosure: public BitMapClosure { duke@435: CMSCollector* _collector; duke@435: MemRegion _whole_span; duke@435: MemRegion _span; duke@435: CMSBitMap* _bit_map; duke@435: CMSBitMap* _mut; duke@435: OopTaskQueue* _work_queue; duke@435: CMSMarkStack* _overflow_stack; duke@435: bool _yield; duke@435: int _skip_bits; duke@435: HeapWord* _finger; duke@435: HeapWord* _threshold; duke@435: CMSConcMarkingTask* _task; duke@435: public: duke@435: Par_MarkFromRootsClosure(CMSConcMarkingTask* task, CMSCollector* collector, duke@435: MemRegion span, duke@435: CMSBitMap* bit_map, duke@435: OopTaskQueue* work_queue, duke@435: CMSMarkStack* overflow_stack, duke@435: bool should_yield); ysr@777: bool do_bit(size_t offset); duke@435: inline void do_yield_check(); duke@435: duke@435: private: duke@435: void scan_oops_in_oop(HeapWord* ptr); duke@435: void do_yield_work(); duke@435: bool get_work_from_overflow_stack(); duke@435: }; duke@435: duke@435: // The following closures are used to do certain kinds of verification of duke@435: // CMS marking.
stefank@6982: class PushAndMarkVerifyClosure: public MetadataAwareOopClosure { duke@435: CMSCollector* _collector; duke@435: MemRegion _span; duke@435: CMSBitMap* _verification_bm; duke@435: CMSBitMap* _cms_bm; duke@435: CMSMarkStack* _mark_stack; coleenp@548: protected: coleenp@548: void do_oop(oop p); coleenp@548: template <class T> inline void do_oop_work(T *p) { coleenp@4037: oop obj = oopDesc::load_decode_heap_oop(p); coleenp@548: do_oop(obj); coleenp@548: } duke@435: public: duke@435: PushAndMarkVerifyClosure(CMSCollector* cms_collector, duke@435: MemRegion span, duke@435: CMSBitMap* verification_bm, duke@435: CMSBitMap* cms_bm, duke@435: CMSMarkStack* mark_stack); duke@435: void do_oop(oop* p); coleenp@548: void do_oop(narrowOop* p); coleenp@4037: duke@435: // Deal with a stack overflow condition duke@435: void handle_stack_overflow(HeapWord* lost); duke@435: }; duke@435: duke@435: class MarkFromRootsVerifyClosure: public BitMapClosure { duke@435: CMSCollector* _collector; duke@435: MemRegion _span; duke@435: CMSBitMap* _verification_bm; duke@435: CMSBitMap* _cms_bm; duke@435: CMSMarkStack* _mark_stack; duke@435: HeapWord* _finger; duke@435: PushAndMarkVerifyClosure _pam_verify_closure; duke@435: public: duke@435: MarkFromRootsVerifyClosure(CMSCollector* collector, MemRegion span, duke@435: CMSBitMap* verification_bm, duke@435: CMSBitMap* cms_bm, duke@435: CMSMarkStack* mark_stack); ysr@777: bool do_bit(size_t offset); duke@435: void reset(HeapWord* addr); duke@435: }; duke@435: duke@435: duke@435: // This closure is used to check that a certain set of bits is duke@435: // "empty" (i.e. the bit vector doesn't have any 1-bits). duke@435: class FalseBitMapClosure: public BitMapClosure { duke@435: public: ysr@777: bool do_bit(size_t offset) { duke@435: guarantee(false, "Should not have a 1 bit"); ysr@777: return true; duke@435: } duke@435: }; duke@435: mgerdin@6979: // A version of ObjectClosure with "memory" (see _previous_address below) mgerdin@6979: class UpwardsObjectClosure: public BoolObjectClosure { mgerdin@6979: HeapWord* _previous_address; mgerdin@6979: public: mgerdin@6979: UpwardsObjectClosure() : _previous_address(NULL) { } mgerdin@6979: void set_previous(HeapWord* addr) { _previous_address = addr; } mgerdin@6979: HeapWord* previous() { return _previous_address; } mgerdin@6979: // A return value of "true" can be used by the caller to decide mgerdin@6979: // if this object's end should *NOT* be recorded in mgerdin@6979: // _previous_address above. mgerdin@6979: virtual bool do_object_bm(oop obj, MemRegion mr) = 0; mgerdin@6979: }; mgerdin@6979: duke@435: // This closure is used during the second checkpointing phase duke@435: // to rescan the marked objects on the dirty cards in the mod duke@435: // union table and the card table proper. It's invoked via duke@435: // MarkFromDirtyCardsClosure below. It uses either duke@435: // [Par_]MarkRefsIntoAndScanClosure (Par_ in the parallel case) duke@435: // declared in genOopClosures.hpp to accomplish some of its work. duke@435: // In the parallel case the bitMap is shared, so access to duke@435: // it needs to be suitably synchronized for updates by embedded duke@435: // closures that update it; however, this closure itself only duke@435: // reads the bit_map and, because it is idempotent, is immune to duke@435: // reading stale values.
duke@435: class ScanMarkedObjectsAgainClosure: public UpwardsObjectClosure { duke@435: #ifdef ASSERT duke@435: CMSCollector* _collector; duke@435: MemRegion _span; duke@435: union { duke@435: CMSMarkStack* _mark_stack; duke@435: OopTaskQueue* _work_queue; duke@435: }; duke@435: #endif // ASSERT duke@435: bool _parallel; duke@435: CMSBitMap* _bit_map; duke@435: union { duke@435: MarkRefsIntoAndScanClosure* _scan_closure; duke@435: Par_MarkRefsIntoAndScanClosure* _par_scan_closure; duke@435: }; duke@435: duke@435: public: duke@435: ScanMarkedObjectsAgainClosure(CMSCollector* collector, duke@435: MemRegion span, duke@435: ReferenceProcessor* rp, duke@435: CMSBitMap* bit_map, duke@435: CMSMarkStack* mark_stack, duke@435: MarkRefsIntoAndScanClosure* cl): duke@435: #ifdef ASSERT duke@435: _collector(collector), duke@435: _span(span), duke@435: _mark_stack(mark_stack), duke@435: #endif // ASSERT duke@435: _parallel(false), duke@435: _bit_map(bit_map), duke@435: _scan_closure(cl) { } duke@435: duke@435: ScanMarkedObjectsAgainClosure(CMSCollector* collector, duke@435: MemRegion span, duke@435: ReferenceProcessor* rp, duke@435: CMSBitMap* bit_map, duke@435: OopTaskQueue* work_queue, duke@435: Par_MarkRefsIntoAndScanClosure* cl): duke@435: #ifdef ASSERT duke@435: _collector(collector), duke@435: _span(span), duke@435: _work_queue(work_queue), duke@435: #endif // ASSERT duke@435: _parallel(true), duke@435: _bit_map(bit_map), duke@435: _par_scan_closure(cl) { } duke@435: duke@435: bool do_object_b(oop obj) { duke@435: guarantee(false, "Call the do_object_bm(oop, MemRegion) form instead"); duke@435: return false; duke@435: } duke@435: bool do_object_bm(oop p, MemRegion mr); duke@435: }; duke@435: duke@435: // This closure is used during the second checkpointing phase duke@435: // to rescan the marked objects on the dirty cards in the mod duke@435: // union table and the card table proper. It invokes duke@435: // ScanMarkedObjectsAgainClosure above to accomplish much of its work. duke@435: // In the parallel case, the bit map is shared and requires duke@435: // synchronized access. duke@435: class MarkFromDirtyCardsClosure: public MemRegionClosure { duke@435: CompactibleFreeListSpace* _space; duke@435: ScanMarkedObjectsAgainClosure _scan_cl; duke@435: size_t _num_dirty_cards; duke@435: duke@435: public: duke@435: MarkFromDirtyCardsClosure(CMSCollector* collector, duke@435: MemRegion span, duke@435: CompactibleFreeListSpace* space, duke@435: CMSBitMap* bit_map, duke@435: CMSMarkStack* mark_stack, duke@435: MarkRefsIntoAndScanClosure* cl): duke@435: _space(space), duke@435: _num_dirty_cards(0), duke@435: _scan_cl(collector, span, collector->ref_processor(), bit_map, coleenp@4037: mark_stack, cl) { } duke@435: duke@435: MarkFromDirtyCardsClosure(CMSCollector* collector, duke@435: MemRegion span, duke@435: CompactibleFreeListSpace* space, duke@435: CMSBitMap* bit_map, duke@435: OopTaskQueue* work_queue, duke@435: Par_MarkRefsIntoAndScanClosure* cl): duke@435: _space(space), duke@435: _num_dirty_cards(0), duke@435: _scan_cl(collector, span, collector->ref_processor(), bit_map, coleenp@4037: work_queue, cl) { } duke@435: duke@435: void do_MemRegion(MemRegion mr); duke@435: void set_space(CompactibleFreeListSpace* space) { _space = space; } duke@435: size_t num_dirty_cards() { return _num_dirty_cards; } duke@435: }; duke@435: duke@435: // This closure is used in the non-product build to check duke@435: // that there are no MemRegions with a certain property.
duke@435: class FalseMemRegionClosure: public MemRegionClosure { duke@435: void do_MemRegion(MemRegion mr) { duke@435: guarantee(!mr.is_empty(), "Shouldn't be empty"); duke@435: guarantee(false, "Should never be here"); duke@435: } duke@435: }; duke@435: duke@435: // This closure is used during the precleaning phase duke@435: // to "carefully" rescan marked objects on dirty cards. duke@435: // It uses MarkRefsIntoAndScanClosure declared in genOopClosures.hpp duke@435: // to accomplish some of its work. duke@435: class ScanMarkedObjectsAgainCarefullyClosure: public ObjectClosureCareful { duke@435: CMSCollector* _collector; duke@435: MemRegion _span; duke@435: bool _yield; duke@435: Mutex* _freelistLock; duke@435: CMSBitMap* _bitMap; duke@435: CMSMarkStack* _markStack; duke@435: MarkRefsIntoAndScanClosure* _scanningClosure; duke@435: duke@435: public: duke@435: ScanMarkedObjectsAgainCarefullyClosure(CMSCollector* collector, duke@435: MemRegion span, duke@435: CMSBitMap* bitMap, duke@435: CMSMarkStack* markStack, duke@435: MarkRefsIntoAndScanClosure* cl, duke@435: bool should_yield): duke@435: _collector(collector), duke@435: _span(span), duke@435: _yield(should_yield), duke@435: _bitMap(bitMap), duke@435: _markStack(markStack), duke@435: _scanningClosure(cl) { duke@435: } duke@435: duke@435: void do_object(oop p) { duke@435: guarantee(false, "call do_object_careful instead"); duke@435: } duke@435: duke@435: size_t do_object_careful(oop p) { duke@435: guarantee(false, "Unexpected caller"); duke@435: return 0; duke@435: } duke@435: duke@435: size_t do_object_careful_m(oop p, MemRegion mr); duke@435: duke@435: void setFreelistLock(Mutex* m) { duke@435: _freelistLock = m; duke@435: _scanningClosure->set_freelistLock(m); duke@435: } duke@435: duke@435: private: duke@435: inline bool do_yield_check(); duke@435: duke@435: void do_yield_work(); duke@435: }; duke@435: duke@435: class SurvivorSpacePrecleanClosure: public ObjectClosureCareful { duke@435: CMSCollector* _collector; duke@435: MemRegion _span; duke@435: bool _yield; duke@435: CMSBitMap* _bit_map; duke@435: CMSMarkStack* _mark_stack; duke@435: PushAndMarkClosure* _scanning_closure; duke@435: unsigned int _before_count; duke@435: duke@435: public: duke@435: SurvivorSpacePrecleanClosure(CMSCollector* collector, duke@435: MemRegion span, duke@435: CMSBitMap* bit_map, duke@435: CMSMarkStack* mark_stack, duke@435: PushAndMarkClosure* cl, duke@435: unsigned int before_count, duke@435: bool should_yield): duke@435: _collector(collector), duke@435: _span(span), duke@435: _yield(should_yield), duke@435: _bit_map(bit_map), duke@435: _mark_stack(mark_stack), duke@435: _scanning_closure(cl), duke@435: _before_count(before_count) duke@435: { } duke@435: duke@435: void do_object(oop p) { duke@435: guarantee(false, "call do_object_careful instead"); duke@435: } duke@435: duke@435: size_t do_object_careful(oop p); duke@435: duke@435: size_t do_object_careful_m(oop p, MemRegion mr) { duke@435: guarantee(false, "Unexpected caller"); duke@435: return 0; duke@435: } duke@435: duke@435: private: duke@435: inline void do_yield_check(); duke@435: void do_yield_work(); duke@435: }; duke@435: duke@435: // This closure is used to accomplish the sweeping work duke@435: // after the second checkpoint but before the concurrent reset duke@435: // phase. duke@435: // duke@435: // Terminology duke@435: // left hand chunk (LHC) - block of one or more chunks currently being duke@435: // coalesced. The LHC is available for coalescing with a new chunk. 
duke@435: // right hand chunk (RHC) - block that is currently being swept and is duke@435: // free or garbage that can be coalesced with the LHC. duke@435: // _inFreeRange is true if there is currently an LHC duke@435: // _lastFreeRangeCoalesced is true if the LHC consists of more than one chunk. duke@435: // _freeRangeInFreeLists is true if the LHC is in the free lists. duke@435: // _freeFinger is the address of the current LHC duke@435: class SweepClosure: public BlkClosureCareful { duke@435: CMSCollector* _collector; // collector doing the work duke@435: ConcurrentMarkSweepGeneration* _g; // Generation being swept duke@435: CompactibleFreeListSpace* _sp; // Space being swept ysr@2943: HeapWord* _limit; // the address at or above which the sweep should stop ysr@2943: // because we do not expect newly-garbage blocks ysr@2943: // eligible for sweeping past that address. duke@435: Mutex* _freelistLock; // Free list lock (in space) duke@435: CMSBitMap* _bitMap; // Marking bit map (in duke@435: // generation) duke@435: bool _inFreeRange; // Indicates if we are in the duke@435: // midst of a free run duke@435: bool _freeRangeInFreeLists; duke@435: // Often, we have just found duke@435: // a free chunk and started duke@435: // a new free range; we do not duke@435: // eagerly remove this chunk from duke@435: // the free lists unless there is duke@435: // a possibility of coalescing. duke@435: // When true, this flag indicates duke@435: // that the _freeFinger below duke@435: // points to a potentially free chunk duke@435: // that may still be in the free lists duke@435: bool _lastFreeRangeCoalesced; duke@435: // free range contains chunks duke@435: // coalesced duke@435: bool _yield; duke@435: // Whether sweeping should be duke@435: // done with yields. For instance duke@435: // when done by the foreground duke@435: // collector we shouldn't yield. duke@435: HeapWord* _freeFinger; // When _inFreeRange is set, the duke@435: // pointer to the "left hand duke@435: // chunk" duke@435: size_t _freeRangeSize; duke@435: // When _inFreeRange is set, this duke@435: // indicates the accumulated size duke@435: // of the "left hand chunk" duke@435: NOT_PRODUCT( duke@435: size_t _numObjectsFreed; duke@435: size_t _numWordsFreed; duke@435: size_t _numObjectsLive; duke@435: size_t _numWordsLive; duke@435: size_t _numObjectsAlreadyFree; duke@435: size_t _numWordsAlreadyFree; duke@435: FreeChunk* _last_fc; duke@435: ) duke@435: private: duke@435: // Code that is common to a free chunk or a garbage chunk duke@435: // encountered during sweeping. ysr@2452: void do_post_free_or_garbage_chunk(FreeChunk *fc, size_t chunkSize); duke@435: // Process a free chunk during sweeping. ysr@2452: void do_already_free_chunk(FreeChunk *fc); ysr@2943: // Work method called when processing an already free or a ysr@2943: // freshly-garbage chunk to do a lookahead and possibly a ysr@2943: // preemptive flush if crossing over _limit. ysr@2943: void lookahead_and_flush(FreeChunk* fc, size_t chunkSize); duke@435: // Process a garbage chunk during sweeping. ysr@2452: size_t do_garbage_chunk(FreeChunk *fc); duke@435: // Process a live chunk during sweeping. ysr@2452: size_t do_live_chunk(FreeChunk* fc); duke@435: duke@435: // Accessors.
duke@435: HeapWord* freeFinger() const { return _freeFinger; } duke@435: void set_freeFinger(HeapWord* v) { _freeFinger = v; } duke@435: bool inFreeRange() const { return _inFreeRange; } duke@435: void set_inFreeRange(bool v) { _inFreeRange = v; } duke@435: bool lastFreeRangeCoalesced() const { return _lastFreeRangeCoalesced; } duke@435: void set_lastFreeRangeCoalesced(bool v) { _lastFreeRangeCoalesced = v; } duke@435: bool freeRangeInFreeLists() const { return _freeRangeInFreeLists; } duke@435: void set_freeRangeInFreeLists(bool v) { _freeRangeInFreeLists = v; } duke@435: duke@435: // Initialize a free range. duke@435: void initialize_free_range(HeapWord* freeFinger, bool freeRangeInFreeLists); duke@435: // Return this chunk to the free lists. ysr@2452: void flush_cur_free_chunk(HeapWord* chunk, size_t size); duke@435: duke@435: // Check if we should yield and do so when necessary. duke@435: inline void do_yield_check(HeapWord* addr); duke@435: duke@435: // Yield duke@435: void do_yield_work(HeapWord* addr); duke@435: duke@435: // Debugging/Printing ysr@2943: void print_free_block_coalesced(FreeChunk* fc) const; duke@435: duke@435: public: duke@435: SweepClosure(CMSCollector* collector, ConcurrentMarkSweepGeneration* g, duke@435: CMSBitMap* bitMap, bool should_yield); ysr@2943: ~SweepClosure() PRODUCT_RETURN; duke@435: duke@435: size_t do_blk_careful(HeapWord* addr); ysr@2943: void print() const { print_on(tty); } ysr@2943: void print_on(outputStream *st) const; duke@435: }; duke@435: duke@435: // Closures related to weak reference processing duke@435: duke@435: // During CMS' weak reference processing, this is a duke@435: // work-routine/closure used to complete transitive duke@435: // marking of objects as live after a certain point duke@435: // at which an initial set has been completely accumulated. ysr@887: // This closure is currently used both during the final ysr@887: // remark stop-the-world phase and during the concurrent ysr@887: // precleaning of the discovered reference lists. duke@435: class CMSDrainMarkingStackClosure: public VoidClosure { duke@435: CMSCollector* _collector; duke@435: MemRegion _span; duke@435: CMSMarkStack* _mark_stack; duke@435: CMSBitMap* _bit_map; duke@435: CMSKeepAliveClosure* _keep_alive; ysr@887: bool _concurrent_precleaning; duke@435: public: duke@435: CMSDrainMarkingStackClosure(CMSCollector* collector, MemRegion span, duke@435: CMSBitMap* bit_map, CMSMarkStack* mark_stack, ysr@887: CMSKeepAliveClosure* keep_alive, ysr@887: bool cpc): duke@435: _collector(collector), duke@435: _span(span), duke@435: _bit_map(bit_map), duke@435: _mark_stack(mark_stack), ysr@887: _keep_alive(keep_alive), ysr@887: _concurrent_precleaning(cpc) { ysr@887: assert(_concurrent_precleaning == _keep_alive->concurrent_precleaning(), ysr@887: "Mismatch"); ysr@887: } duke@435: duke@435: void do_void(); duke@435: }; duke@435: duke@435: // A parallel version of CMSDrainMarkingStackClosure above.
duke@435: class CMSParDrainMarkingStackClosure: public VoidClosure { duke@435: CMSCollector* _collector; duke@435: MemRegion _span; duke@435: OopTaskQueue* _work_queue; duke@435: CMSBitMap* _bit_map; duke@435: CMSInnerParMarkAndPushClosure _mark_and_push; duke@435: duke@435: public: duke@435: CMSParDrainMarkingStackClosure(CMSCollector* collector, duke@435: MemRegion span, CMSBitMap* bit_map, duke@435: OopTaskQueue* work_queue): duke@435: _collector(collector), duke@435: _span(span), duke@435: _bit_map(bit_map), duke@435: _work_queue(work_queue), coleenp@4037: _mark_and_push(collector, span, bit_map, work_queue) { } duke@435: duke@435: public: duke@435: void trim_queue(uint max); duke@435: void do_void(); duke@435: }; duke@435: duke@435: // Allow yielding or short-circuiting of reference list duke@435: // precleaning work. duke@435: class CMSPrecleanRefsYieldClosure: public YieldClosure { duke@435: CMSCollector* _collector; duke@435: void do_yield_work(); duke@435: public: duke@435: CMSPrecleanRefsYieldClosure(CMSCollector* collector): duke@435: _collector(collector) {} duke@435: virtual bool should_return(); duke@435: }; duke@435: duke@435: duke@435: // Convenience class that locks the free list locks for a given CMS collector; an illustrative usage sketch appears at the end of this file. duke@435: class FreelistLocker: public StackObj { duke@435: private: duke@435: CMSCollector* _collector; duke@435: public: duke@435: FreelistLocker(CMSCollector* collector): duke@435: _collector(collector) { duke@435: _collector->getFreelistLocks(); duke@435: } duke@435: duke@435: ~FreelistLocker() { duke@435: _collector->releaseFreelistLocks(); duke@435: } duke@435: }; duke@435: duke@435: // Mark all dead objects in a given space. duke@435: class MarkDeadObjectsClosure: public BlkClosure { duke@435: const CMSCollector* _collector; duke@435: const CompactibleFreeListSpace* _sp; duke@435: CMSBitMap* _live_bit_map; duke@435: CMSBitMap* _dead_bit_map; duke@435: public: duke@435: MarkDeadObjectsClosure(const CMSCollector* collector, duke@435: const CompactibleFreeListSpace* sp, duke@435: CMSBitMap *live_bit_map, duke@435: CMSBitMap *dead_bit_map) : duke@435: _collector(collector), duke@435: _sp(sp), duke@435: _live_bit_map(live_bit_map), duke@435: _dead_bit_map(dead_bit_map) {} duke@435: size_t do_blk(HeapWord* addr); duke@435: }; kevinw@2058: kevinw@2058: class TraceCMSMemoryManagerStats : public TraceMemoryManagerStats { kevinw@2058: kevinw@2058: public: fparain@2888: TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase, GCCause::Cause cause); kevinw@2058: }; kevinw@2058: stefank@2314: stefank@2314: #endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP
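//
// Illustrative usage sketch: a minimal, standalone analogue of the
// FreelistLocker RAII idiom declared above. The Collector type, the
// sweep_free_lists() function and main() below are hypothetical stand-ins
// introduced only for this sketch (the real class operates on CMSCollector
// via getFreelistLocks() / releaseFreelistLocks()); the code only
// demonstrates the acquire-in-constructor / release-in-destructor pattern
// and is kept inside this comment block so the header itself is unchanged.
//
//   #include <cstdio>
//
//   struct Collector {                        // hypothetical stand-in for CMSCollector
//     void getFreelistLocks()     { std::printf("free list locks acquired\n"); }
//     void releaseFreelistLocks() { std::printf("free list locks released\n"); }
//   };
//
//   class FreelistLocker {                    // mirrors the class declared above
//     Collector* _collector;
//    public:
//     explicit FreelistLocker(Collector* c) : _collector(c) {
//       _collector->getFreelistLocks();       // acquire in the constructor
//     }
//     ~FreelistLocker() {
//       _collector->releaseFreelistLocks();   // release when the scope is left
//     }
//   };
//
//   void sweep_free_lists(Collector* c) {
//     FreelistLocker fll(c);                  // locks held for this scope only
//     // ... work that needs a consistent view of the free lists ...
//   }                                         // released here, even on an early return
//
//   int main() {
//     Collector c;
//     sweep_free_lists(&c);                   // prints the acquire/release pair in order
//     return 0;
//   }
//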