Wed, 23 Sep 2009 23:56:15 -0700
Merge
/*
 * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// ConcurrentMarkSweepGeneration is in support of a concurrent
// mark-sweep old generation in the Detlefs-Printezis--Boehm-Demers-Schenker
// style. We assume, for now, that this generation is always the
// seniormost generation (modulo the PermGeneration), and for simplicity
// in the first implementation, that this generation is a single compactible
// space. Neither of these restrictions appears essential; both will be
// relaxed in the future when more time is available to implement the
// greater generality (and there's a need for it).
//
// Concurrent mode failures are currently handled by
// means of a sliding mark-compact.

class CMSAdaptiveSizePolicy;
class CMSConcMarkingTask;
class CMSGCAdaptivePolicyCounters;
class ConcurrentMarkSweepGeneration;
class ConcurrentMarkSweepPolicy;
class ConcurrentMarkSweepThread;
class CompactibleFreeListSpace;
class FreeChunk;
class PromotionInfo;
class ScanMarkedObjectsAgainCarefullyClosure;

// A generic CMS bit map. It's the basis for both the CMS marking bit map
// as well as for the mod union table (in each case only a subset of the
// methods are used). This is essentially a wrapper around the BitMap class,
// with one bit per (1 << _shifter) HeapWords (i.e. for the marking bit map,
// we have _shifter == 0, and for the mod union table we have
// _shifter == CardTableModRefBS::card_shift - LogHeapWordSize).
// XXX 64-bit issues in BitMap?
class CMSBitMap VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;

  HeapWord*    _bmStartWord;   // base address of range covered by map
  size_t       _bmWordSize;    // map size (in #HeapWords covered)
  const int    _shifter;       // shifts to convert HeapWord to bit position
  VirtualSpace _virtual_space; // underlying the bit map
  BitMap       _bm;            // the bit map itself
 public:
  Mutex* const _lock;          // mutex protecting _bm;

 public:
  // constructor
  CMSBitMap(int shifter, int mutex_rank, const char* mutex_name);

  // allocates the actual storage for the map
  bool allocate(MemRegion mr);
  // field getter
  Mutex* lock() const { return _lock; }
  // locking verifier convenience function
  void assert_locked() const PRODUCT_RETURN;

  // inquiries
  HeapWord* startWord()   const { return _bmStartWord; }
  size_t    sizeInWords() const { return _bmWordSize; }
  size_t    sizeInBits()  const { return _bm.size(); }
  // the following is one past the last word in space
  HeapWord* endWord()     const { return _bmStartWord + _bmWordSize; }

  // reading marks
  bool isMarked(HeapWord* addr) const;
  bool par_isMarked(HeapWord* addr) const; // no locking checks
  bool isUnmarked(HeapWord* addr) const;
  bool isAllClear() const;

  // writing marks
  void mark(HeapWord* addr);
  // For marking by parallel GC threads;
  // returns true if we did, false if another thread did
  bool par_mark(HeapWord* addr);

  void mark_range(MemRegion mr);
  void par_mark_range(MemRegion mr);
  void mark_large_range(MemRegion mr);
  void par_mark_large_range(MemRegion mr);
  void par_clear(HeapWord* addr); // For unmarking by parallel GC threads.
  void clear_range(MemRegion mr);
  void par_clear_range(MemRegion mr);
  void clear_large_range(MemRegion mr);
  void par_clear_large_range(MemRegion mr);
  void clear_all();
  void clear_all_incrementally();  // Not yet implemented!!

  NOT_PRODUCT(
    // checks the memory region for validity
    void region_invariant(MemRegion mr);
  )

  // iteration
  void iterate(BitMapClosure* cl) {
    _bm.iterate(cl);
  }
  void iterate(BitMapClosure* cl, HeapWord* left, HeapWord* right);
  void dirty_range_iterate_clear(MemRegionClosure* cl);
  void dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl);

  // auxiliary support for iteration
  HeapWord* getNextMarkedWordAddress(HeapWord* addr) const;
  HeapWord* getNextMarkedWordAddress(HeapWord* start_addr,
                                     HeapWord* end_addr) const;
  HeapWord* getNextUnmarkedWordAddress(HeapWord* addr) const;
  HeapWord* getNextUnmarkedWordAddress(HeapWord* start_addr,
                                       HeapWord* end_addr) const;
  MemRegion getAndClearMarkedRegion(HeapWord* addr);
  MemRegion getAndClearMarkedRegion(HeapWord* start_addr,
                                    HeapWord* end_addr);

  // conversion utilities
  HeapWord* offsetToHeapWord(size_t offset) const;
  size_t    heapWordToOffset(HeapWord* addr) const;
  size_t    heapWordDiffToOffsetDiff(size_t diff) const;

  // debugging
  // is this address range covered by the bit-map?
  NOT_PRODUCT(
    bool covers(MemRegion mr) const;
    bool covers(HeapWord* start, size_t size = 0) const;
  )
  void verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) PRODUCT_RETURN;
};
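
// A minimal sketch (not part of the original header) of the address-to-bit
// arithmetic described in the class comment above: one bit covers
// (1 << _shifter) HeapWords. The free-standing helper names are hypothetical;
// the class's own offsetToHeapWord()/heapWordToOffset() are the real
// conversion utilities.
static inline size_t heap_word_to_offset_sketch(HeapWord* addr,
                                                HeapWord* bm_start_word,
                                                int shifter) {
  // Word offset from the base of the covered range, scaled down so that
  // each bit stands for (1 << shifter) words.
  return (size_t)(addr - bm_start_word) >> shifter;
}
static inline HeapWord* offset_to_heap_word_sketch(size_t offset,
                                                   HeapWord* bm_start_word,
                                                   int shifter) {
  // Inverse mapping: scale the bit offset back up to a word address.
  return bm_start_word + (offset << shifter);
}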

// Represents a marking stack used by the CMS collector.
// Ideally this should be GrowableArray<> just like MSC's marking stack(s).
class CMSMarkStack: public CHeapObj {
  //
  friend class CMSCollector;   // to get at expansion stats further below
  //

  VirtualSpace _virtual_space;  // space for the stack
  oop*   _base;      // bottom of stack
  size_t _index;     // one more than last occupied index
  size_t _capacity;  // max #elements
  Mutex  _par_lock;  // an advisory lock used in case of parallel access
  NOT_PRODUCT(size_t _max_depth;)  // max depth plumbed during run

 protected:
  size_t _hit_limit;      // we hit max stack size limit
  size_t _failed_double;  // we failed expansion before hitting limit

 public:
  CMSMarkStack():
    _par_lock(Mutex::event, "CMSMarkStack._par_lock", true),
    _hit_limit(0),
    _failed_double(0) {}

  bool allocate(size_t size);

  size_t capacity() const { return _capacity; }

  oop pop() {
    if (!isEmpty()) {
      return _base[--_index];
    }
    return NULL;
  }

  bool push(oop ptr) {
    if (isFull()) {
      return false;
    } else {
      _base[_index++] = ptr;
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, _index));
      return true;
    }
  }

  bool isEmpty() const { return _index == 0; }
  bool isFull()  const {
    assert(_index <= _capacity, "buffer overflow");
    return _index == _capacity;
  }

  size_t length() { return _index; }

  // "Parallel versions" of some of the above
  oop par_pop() {
    // lock and pop
    MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag);
    return pop();
  }

  bool par_push(oop ptr) {
    // lock and push
    MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag);
    return push(ptr);
  }

  // Forcibly reset the stack, losing all of its contents.
  void reset() {
    _index = 0;
  }

  // Expand the stack, typically in response to an overflow condition.
  void expand();

  // Compute the least valued stack element.
  oop least_value(HeapWord* low) {
    oop least = (oop)low;
    for (size_t i = 0; i < _index; i++) {
      least = MIN2(least, _base[i]);
    }
    return least;
  }

  // Exposed here to allow stack expansion in || case
  Mutex* par_lock() { return &_par_lock; }
};
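
// A hedged usage sketch (assumption, not original code): a caller can treat a
// failed push as an overflow signal and retry after expand(), which the
// comment above says is the typical response to overflow. The helper name is
// hypothetical, and expansion may be bounded by a maximum capacity, so the
// retry can still fail.
static inline bool push_with_expand_sketch(CMSMarkStack* stack, oop obj) {
  if (stack->push(obj)) {
    return true;            // fast path: there was room
  }
  stack->expand();          // attempt to grow the stack
  return stack->push(obj);  // retry once; false indicates a real overflow
}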

class CardTableRS;
class CMSParGCThreadState;

class ModUnionClosure: public MemRegionClosure {
 protected:
  CMSBitMap* _t;
 public:
  ModUnionClosure(CMSBitMap* t): _t(t) { }
  void do_MemRegion(MemRegion mr);
};

class ModUnionClosurePar: public ModUnionClosure {
 public:
  ModUnionClosurePar(CMSBitMap* t): ModUnionClosure(t) { }
  void do_MemRegion(MemRegion mr);
};
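
// A hedged wiring sketch (assumption, not original code): a ModUnionClosure
// wraps a CMSBitMap, and presumably its do_MemRegion records the handed
// region in that map, so one bit map's dirty ranges can be replayed into
// another (e.g. the mod union table) using only APIs declared above. The
// helper name is hypothetical.
static inline void replay_dirty_ranges_sketch(CMSBitMap* source,
                                              CMSBitMap* mod_union_table) {
  ModUnionClosure cl(mod_union_table);
  // Iterates the dirty ranges of 'source', handing each MemRegion to the
  // closure and clearing it in 'source' as it goes.
  source->dirty_range_iterate_clear(&cl);
}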

// Survivor Chunk Array in support of parallelization of
// Survivor Space rescan.
class ChunkArray: public CHeapObj {
  size_t _index;
  size_t _capacity;
  HeapWord** _array;   // storage for array

 public:
  ChunkArray() : _index(0), _capacity(0), _array(NULL) {}
  ChunkArray(HeapWord** a, size_t c):
    _index(0), _capacity(c), _array(a) {}

  HeapWord** array() { return _array; }
  void set_array(HeapWord** a) { _array = a; }

  size_t capacity() { return _capacity; }
  void set_capacity(size_t c) { _capacity = c; }

  size_t end() {
    assert(_index < capacity(), "_index out of bounds");
    return _index;
  }  // exclusive

  HeapWord* nth(size_t n) {
    assert(n < end(), "Out of bounds access");
    return _array[n];
  }

  void reset() {
    _index = 0;
  }

  void record_sample(HeapWord* p, size_t sz) {
    // For now we do not do anything with the size
    if (_index < _capacity) {
      _array[_index++] = p;
    }
  }
};
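
// A hedged usage sketch (assumption, not original code): record_sample()
// silently drops samples once the array is full, and end() is the exclusive
// bound for iteration, so recorded chunk boundaries are walked as below.
// The helper name and the 'process' callback are hypothetical.
static inline void for_each_chunk_sample_sketch(ChunkArray* ca,
                                                void (*process)(HeapWord*)) {
  for (size_t i = 0; i < ca->end(); i++) {  // end() is exclusive
    process(ca->nth(i));
  }
}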

//
// Timing, allocation and promotion statistics for gc scheduling and incremental
// mode pacing. Most statistics are exponential averages.
//
class CMSStats VALUE_OBJ_CLASS_SPEC {
 private:
  ConcurrentMarkSweepGeneration* const _cms_gen;   // The cms (old) gen.

  // The following are exponential averages with factor alpha:
  //   avg = (100 - alpha) * avg + alpha * cur_sample
  //
  //   The durations measure:  end_time[n] - start_time[n]
  //   The periods measure:    start_time[n] - start_time[n-1]
  //
  // The cms period and duration include only concurrent collections; time spent
  // in foreground cms collections due to System.gc() or because of a failure to
  // keep up is not included.
  //
  // There are 3 alphas to "bootstrap" the statistics. The _saved_alpha is the
  // real value, but is used only after the first period. A value of 100 is
  // used for the first sample so it gets the entire weight.
  unsigned int _saved_alpha; // 0-100
  unsigned int _gc0_alpha;
  unsigned int _cms_alpha;

  double _gc0_duration;
  double _gc0_period;
  size_t _gc0_promoted;           // bytes promoted per gc0
  double _cms_duration;
  double _cms_duration_pre_sweep; // time from initiation to start of sweep
  double _cms_duration_per_mb;
  double _cms_period;
  size_t _cms_allocated;          // bytes of direct allocation per gc0 period

  // Timers.
  elapsedTimer _cms_timer;
  TimeStamp    _gc0_begin_time;
  TimeStamp    _cms_begin_time;
  TimeStamp    _cms_end_time;

  // Snapshots of the amount used in the CMS generation.
  size_t _cms_used_at_gc0_begin;
  size_t _cms_used_at_gc0_end;
  size_t _cms_used_at_cms_begin;

  // Used to prevent the duty cycle from being reduced in the middle of a cms
  // cycle.
  bool _allow_duty_cycle_reduction;

  enum {
    _GC0_VALID = 0x1,
    _CMS_VALID = 0x2,
    _ALL_VALID = _GC0_VALID | _CMS_VALID
  };

  unsigned int _valid_bits;

  unsigned int _icms_duty_cycle;  // icms duty cycle (0-100).

 protected:

  // Return a duty cycle that avoids wild oscillations, by limiting the amount
  // of change between old_duty_cycle and new_duty_cycle (the latter is treated
  // as a recommended value).
  static unsigned int icms_damped_duty_cycle(unsigned int old_duty_cycle,
                                             unsigned int new_duty_cycle);
  unsigned int icms_update_duty_cycle_impl();

 public:
  CMSStats(ConcurrentMarkSweepGeneration* cms_gen,
           unsigned int alpha = CMSExpAvgFactor);

  // Whether or not the statistics contain valid data; higher level statistics
  // cannot be called until this returns true (they require at least one young
  // gen and one cms cycle to have completed).
  bool valid() const;

  // Record statistics.
  void record_gc0_begin();
  void record_gc0_end(size_t cms_gen_bytes_used);
  void record_cms_begin();
  void record_cms_end();

  // Allow management of the cms timer, which must be stopped/started around
  // yield points.
  elapsedTimer& cms_timer()     { return _cms_timer; }
  void start_cms_timer()        { _cms_timer.start(); }
  void stop_cms_timer()         { _cms_timer.stop(); }

  // Basic statistics; units are seconds or bytes.
  double gc0_period() const     { return _gc0_period; }
  double gc0_duration() const   { return _gc0_duration; }
  size_t gc0_promoted() const   { return _gc0_promoted; }
  double cms_period() const     { return _cms_period; }
  double cms_duration() const   { return _cms_duration; }
  double cms_duration_per_mb() const { return _cms_duration_per_mb; }
  size_t cms_allocated() const  { return _cms_allocated; }

  size_t cms_used_at_gc0_end() const { return _cms_used_at_gc0_end; }

  // Seconds since the last background cms cycle began or ended.
  double cms_time_since_begin() const;
  double cms_time_since_end() const;

  // Higher level statistics--caller must check that valid() returns true before
  // calling.

  // Returns bytes promoted per second of wall clock time.
  double promotion_rate() const;

  // Returns bytes directly allocated per second of wall clock time.
  double cms_allocation_rate() const;

  // Rate at which space in the cms generation is being consumed (sum of the
  // above two).
  double cms_consumption_rate() const;

  // Returns an estimate of the number of seconds until the cms generation will
  // fill up, assuming no collection work is done.
  double time_until_cms_gen_full() const;

  // Returns an estimate of the number of seconds remaining until
  // the cms generation collection should start.
  double time_until_cms_start() const;

  // End of higher level statistics.

  // Returns the cms incremental mode duty cycle, as a percentage (0-100).
  unsigned int icms_duty_cycle() const { return _icms_duty_cycle; }

  // Update the duty cycle and return the new value.
  unsigned int icms_update_duty_cycle();

  // Debugging.
  void print_on(outputStream* st) const PRODUCT_RETURN;
  void print() const { print_on(gclog_or_tty); }
};
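
// A minimal sketch (assumption; the helper name is hypothetical) of the
// exponential average described in the CMSStats comment. Since alpha is a
// percentage weight in [0, 100], the weighted sum is normalized by 100;
// passing alpha == 100 gives the current sample the entire weight, which is
// how the first sample bootstraps the average.
static inline double exp_avg_sketch(double avg, double cur_sample,
                                    unsigned int alpha) {
  assert(alpha <= 100, "alpha is a percentage weight");
  return ((100 - alpha) * avg + alpha * cur_sample) / 100.0;
}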

// A closure related to weak references processing which
// we embed in the CMSCollector, since we need to pass
// it to the reference processor for secondary filtering
// of references based on reachability of referent;
// see role of _is_alive_non_header closure in the
// ReferenceProcessor class.
// For objects in the CMS generation, this closure checks
// if the object is "live" (reachable). Used in weak
// reference processing.
class CMSIsAliveClosure: public BoolObjectClosure {
  const MemRegion  _span;
  const CMSBitMap* _bit_map;

  friend class CMSCollector;
 public:
  CMSIsAliveClosure(MemRegion span,
                    CMSBitMap* bit_map):
    _span(span),
    _bit_map(bit_map) {
    assert(!span.is_empty(), "Empty span could spell trouble");
  }

  void do_object(oop obj) {
    assert(false, "not to be invoked");
  }

  bool do_object_b(oop obj);
};


// Implements AbstractRefProcTaskExecutor for CMS.
class CMSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
 public:

  CMSRefProcTaskExecutor(CMSCollector& collector)
    : _collector(collector)
  { }

  // Executes a task using worker threads.
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
 private:
  CMSCollector& _collector;
};


class CMSCollector: public CHeapObj {
  friend class VMStructs;
  friend class ConcurrentMarkSweepThread;
  friend class ConcurrentMarkSweepGeneration;
  friend class CompactibleFreeListSpace;
  friend class CMSParRemarkTask;
  friend class CMSConcMarkingTask;
  friend class CMSRefProcTaskProxy;
  friend class CMSRefProcTaskExecutor;
  friend class ScanMarkedObjectsAgainCarefullyClosure;  // for sampling eden
  friend class SurvivorSpacePrecleanClosure;            // --- ditto -------
  friend class PushOrMarkClosure;             // to access _restart_addr
  friend class Par_PushOrMarkClosure;         // to access _restart_addr
  friend class MarkFromRootsClosure;          //  -- ditto --
                                              // ... and for clearing cards
  friend class Par_MarkFromRootsClosure;      // to access _restart_addr
                                              // ... and for clearing cards
  friend class Par_ConcMarkingClosure;        // to access _restart_addr etc.
  friend class MarkFromRootsVerifyClosure;    // to access _restart_addr
  friend class PushAndMarkVerifyClosure;      //  -- ditto --
  friend class MarkRefsIntoAndScanClosure;    // to access _overflow_list
  friend class PushAndMarkClosure;            //  -- ditto --
  friend class Par_PushAndMarkClosure;        //  -- ditto --
  friend class CMSKeepAliveClosure;           //  -- ditto --
  friend class CMSDrainMarkingStackClosure;   //  -- ditto --
  friend class CMSInnerParMarkAndPushClosure; //  -- ditto --
  NOT_PRODUCT(friend class ScanMarkedObjectsAgainClosure;) // assertion on _overflow_list
  friend class ReleaseForegroundGC;  // to access _foregroundGCShouldWait
  friend class VM_CMS_Operation;
  friend class VM_CMS_Initial_Mark;
  friend class VM_CMS_Final_Remark;

 private:
  jlong _time_of_last_gc;
  void update_time_of_last_gc(jlong now) {
    _time_of_last_gc = now;
  }

  OopTaskQueueSet* _task_queues;

  // Overflow list of grey objects, threaded through mark-word
  // Manipulated with CAS in the parallel/multi-threaded case.
  oop _overflow_list;
  // The following array-pair keeps track of mark words
  // displaced for accommodating the overflow list above.
  // This code will likely be revisited under RFE#4922830.
  GrowableArray<oop>*     _preserved_oop_stack;
  GrowableArray<markOop>* _preserved_mark_stack;

  int* _hash_seed;

  // In support of multi-threaded concurrent phases
  YieldingFlexibleWorkGang* _conc_workers;

  // Performance Counters
  CollectorCounters* _gc_counters;

  // Initialization Errors
  bool _completed_initialization;

  // In support of ExplicitGCInvokesConcurrent
  static bool _full_gc_requested;
  unsigned int _collection_count_start;

  // Should we unload classes this concurrent cycle?
  bool _should_unload_classes;
  unsigned int _concurrent_cycles_since_last_unload;
  unsigned int concurrent_cycles_since_last_unload() const {
    return _concurrent_cycles_since_last_unload;
  }
  // Did we (allow) unload classes in the previous concurrent cycle?
  bool unloaded_classes_last_cycle() const {
    return concurrent_cycles_since_last_unload() == 0;
  }
  // Root scanning options for perm gen
  int _roots_scanning_options;
  int roots_scanning_options() const      { return _roots_scanning_options; }
  void add_root_scanning_option(int o)    { _roots_scanning_options |= o;  }
  void remove_root_scanning_option(int o) { _roots_scanning_options &= ~o; }

  // Verification support
  CMSBitMap _verification_mark_bm;
  void verify_after_remark_work_1();
  void verify_after_remark_work_2();

  // true if any verification flag is on.
  bool _verifying;
  bool verifying() const { return _verifying; }
  void set_verifying(bool v) { _verifying = v; }

  // Collector policy
  ConcurrentMarkSweepPolicy* _collector_policy;
  ConcurrentMarkSweepPolicy* collector_policy() { return _collector_policy; }

  // Check whether the gc time limit has been
  // exceeded and set the size policy flag
  // appropriately.
  void check_gc_time_limit();
  // XXX Move these to CMSStats ??? FIX ME !!!
  elapsedTimer _sweep_timer;
  AdaptivePaddedAverage _sweep_estimate;

 protected:
  ConcurrentMarkSweepGeneration* _cmsGen;  // old gen (CMS)
  ConcurrentMarkSweepGeneration* _permGen; // perm gen
  MemRegion                      _span;    // span covering above two
  CardTableRS*                   _ct;      // card table

  // CMS marking support structures
  CMSBitMap     _markBitMap;
  CMSBitMap     _modUnionTable;
  CMSMarkStack  _markStack;
  CMSMarkStack  _revisitStack;  // used to keep track of klassKlass objects
                                // to revisit
  CMSBitMap     _perm_gen_verify_bit_map; // Mark bit map for perm gen verification support.

  HeapWord*     _restart_addr;  // in support of marking stack overflow
  void lower_restart_addr(HeapWord* low);

  // Counters in support of marking stack / work queue overflow handling:
  // a non-zero value indicates certain types of overflow events during
  // the current CMS cycle and could lead to stack resizing efforts at
  // an opportune future time.
  size_t _ser_pmc_preclean_ovflw;
  size_t _ser_pmc_remark_ovflw;
  size_t _par_pmc_remark_ovflw;
  size_t _ser_kac_preclean_ovflw;
  size_t _ser_kac_ovflw;
  size_t _par_kac_ovflw;
  NOT_PRODUCT(ssize_t _num_par_pushes;)

  // ("Weak") Reference processing support
  ReferenceProcessor* _ref_processor;
  CMSIsAliveClosure   _is_alive_closure;
  // keep this textually after _markBitMap and _span; c'tor dependency

  ConcurrentMarkSweepThread* _cmsThread;  // the thread doing the work
  ModUnionClosure    _modUnionClosure;
  ModUnionClosurePar _modUnionClosurePar;

  // CMS abstract state machine
  // initial_state: Idling
  // next_state(Idling) = {Marking}
  // next_state(Marking) = {Precleaning, Sweeping}
  // next_state(Precleaning) = {AbortablePreclean, FinalMarking}
  // next_state(AbortablePreclean) = {FinalMarking}
  // next_state(FinalMarking) = {Sweeping}
  // next_state(Sweeping) = {Resizing}
  // next_state(Resizing) = {Resetting}
  // next_state(Resetting) = {Idling}
  // The numeric values below are chosen so that:
  // . _collectorState <= Idling == post-sweep && pre-mark
  // . _collectorState in (Idling, Sweeping) == {initial,final}marking ||
  //   precleaning || abortablePreclean
  // (A hedged transition-checker sketch based on these edges appears
  // after this class.)
  enum CollectorState {
    Resizing            = 0,
    Resetting           = 1,
    Idling              = 2,
    InitialMarking      = 3,
    Marking             = 4,
    Precleaning         = 5,
    AbortablePreclean   = 6,
    FinalMarking        = 7,
    Sweeping            = 8
  };
  static CollectorState _collectorState;

  // State related to prologue/epilogue invocation for my generations
  bool _between_prologue_and_epilogue;

  // Signalling/State related to coordination between fore- and background GC
  // Note: When the baton has been passed from background GC to foreground GC,
  // _foregroundGCIsActive is true and _foregroundGCShouldWait is false.
  static bool _foregroundGCIsActive;    // true iff foreground collector is active or
                                        // wants to go active
  static bool _foregroundGCShouldWait;  // true iff background GC is active and has not
                                        // yet passed the baton to the foreground GC

  // Support for CMSScheduleRemark (abortable preclean)
  bool _abort_preclean;
  bool _start_sampling;

  int    _numYields;
  size_t _numDirtyCards;
  uint   _sweepCount;
  // number of full gc's since the last concurrent gc.
  uint   _full_gcs_since_conc_gc;

  // occupancy used for bootstrapping stats
  double _bootstrap_occupancy;

  // timer
  elapsedTimer _timer;

  // Timing, allocation and promotion statistics, used for scheduling.
  CMSStats _stats;

  // Allocation limits installed in the young gen, used only in
  // CMSIncrementalMode. When an allocation in the young gen would cross one of
  // these limits, the cms generation is notified and the cms thread is started
  // or stopped, respectively.
  HeapWord* _icms_start_limit;
  HeapWord* _icms_stop_limit;

  enum CMS_op_type {
    CMS_op_checkpointRootsInitial,
    CMS_op_checkpointRootsFinal
  };

  void do_CMS_operation(CMS_op_type op);
  bool stop_world_and_do(CMS_op_type op);

  OopTaskQueueSet* task_queues()  { return _task_queues; }
  int*             hash_seed(int i) { return &_hash_seed[i]; }
  YieldingFlexibleWorkGang* conc_workers() { return _conc_workers; }

  // Support for parallelizing Eden rescan in CMS remark phase
  void sample_eden(); // ... sample Eden space top

 private:
  // Support for parallelizing young gen rescan in CMS remark phase
  Generation* _young_gen;           // the younger gen
  HeapWord**  _top_addr;            // ... Top of Eden
  HeapWord**  _end_addr;            // ... End of Eden
  HeapWord**  _eden_chunk_array;    // ... Eden partitioning array
  size_t      _eden_chunk_index;    // ... top (exclusive) of array
  size_t      _eden_chunk_capacity; // ... max entries in array

  // Support for parallelizing survivor space rescan
  HeapWord**  _survivor_chunk_array;
  size_t      _survivor_chunk_index;
  size_t      _survivor_chunk_capacity;
  size_t*     _cursor;
  ChunkArray* _survivor_plab_array;

  // Support for marking stack overflow handling
  bool take_from_overflow_list(size_t num, CMSMarkStack* to_stack);
  bool par_take_from_overflow_list(size_t num, OopTaskQueue* to_work_q);
  void push_on_overflow_list(oop p);
  void par_push_on_overflow_list(oop p);
  // the following is, obviously, not, in general, "MT-stable"
  bool overflow_list_is_empty() const;

  void preserve_mark_if_necessary(oop p);
  void par_preserve_mark_if_necessary(oop p);
  void preserve_mark_work(oop p, markOop m);
  void restore_preserved_marks_if_any();
  NOT_PRODUCT(bool no_preserved_marks() const;)
  // in support of testing overflow code
  NOT_PRODUCT(int _overflow_counter;)
  NOT_PRODUCT(bool simulate_overflow();)     // sequential
  NOT_PRODUCT(bool par_simulate_overflow();) // MT version

  // CMS work methods
  void checkpointRootsInitialWork(bool asynch); // initial checkpoint work

  // a return value of false indicates failure due to stack overflow
  bool markFromRootsWork(bool asynch); // concurrent marking work

 public: // FIX ME!!! only for testing
  bool do_marking_st(bool asynch); // single-threaded marking
  bool do_marking_mt(bool asynch); // multi-threaded marking

 private:

  // concurrent precleaning work
  size_t preclean_mod_union_table(ConcurrentMarkSweepGeneration* gen,
                                  ScanMarkedObjectsAgainCarefullyClosure* cl);
  size_t preclean_card_table(ConcurrentMarkSweepGeneration* gen,
                             ScanMarkedObjectsAgainCarefullyClosure* cl);
  // Does precleaning work, returning a quantity indicative of
  // the amount of "useful work" done.
  size_t preclean_work(bool clean_refs, bool clean_survivors);
  void abortable_preclean(); // Preclean while looking for possible abort
  void initialize_sequential_subtasks_for_young_gen_rescan(int i);
  // Helper function for above; merge-sorts the per-thread plab samples
  void merge_survivor_plab_arrays(ContiguousSpace* surv);
  // Resets (i.e. clears) the per-thread plab sample vectors
  void reset_survivor_plab_arrays();

  // final (second) checkpoint work
  void checkpointRootsFinalWork(bool asynch, bool clear_all_soft_refs,
                                bool init_mark_was_synchronous);
  // work routine for parallel version of remark
  void do_remark_parallel();
  // work routine for non-parallel version of remark
  void do_remark_non_parallel();
  // reference processing work routine (during second checkpoint)
  void refProcessingWork(bool asynch, bool clear_all_soft_refs);

  // concurrent sweeping work
  void sweepWork(ConcurrentMarkSweepGeneration* gen, bool asynch);

  // (concurrent) resetting of support data structures
  void reset(bool asynch);

  // Clear _expansion_cause fields of constituent generations
  void clear_expansion_cause();

  // An auxiliary method used to record the ends of
  // used regions of each generation to limit the extent of sweep
  void save_sweep_limits();

  // Resize the generations included in the collector.
  void compute_new_size();

  // A work method used by foreground collection to determine
  // what type of collection (compacting or not, continuing or fresh)
  // it should do.
  void decide_foreground_collection_type(bool clear_all_soft_refs,
                                         bool* should_compact, bool* should_start_over);

  // A work method used by the foreground collector to do
  // a mark-sweep-compact.
  void do_compaction_work(bool clear_all_soft_refs);

  // A work method used by the foreground collector to do
  // a mark-sweep, after taking over from a possibly on-going
  // concurrent mark-sweep collection.
  void do_mark_sweep_work(bool clear_all_soft_refs,
                          CollectorState first_state, bool should_start_over);

  // If the background GC is active, acquire control from the background
  // GC and do the collection.
  void acquire_control_and_collect(bool full, bool clear_all_soft_refs);

  // For synchronizing passing of control from background to foreground
  // GC. waitForForegroundGC() is called by the background
  // collector. If it had to wait for a foreground collection,
  // it returns true and the background collection should assume
  // that the collection was finished by the foreground
  // collector.
  bool waitForForegroundGC();

  // Incremental mode triggering: recompute the icms duty cycle and set the
  // allocation limits in the young gen.
  void icms_update_allocation_limits();

  size_t block_size_using_printezis_bits(HeapWord* addr) const;
  size_t block_size_if_printezis_bits(HeapWord* addr) const;
  HeapWord* next_card_start_after_block(HeapWord* addr) const;

  void setup_cms_unloading_and_verification_state();
 public:
  CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
               ConcurrentMarkSweepGeneration* permGen,
               CardTableRS*                   ct,
               ConcurrentMarkSweepPolicy*     cp);
  ConcurrentMarkSweepThread* cmsThread() { return _cmsThread; }

  ReferenceProcessor* ref_processor() { return _ref_processor; }
  void ref_processor_init();

  Mutex* bitMapLock() const { return _markBitMap.lock(); }
  static CollectorState abstract_state() { return _collectorState; }

  bool should_abort_preclean() const; // Whether preclean should be aborted.
  size_t get_eden_used() const;
  size_t get_eden_capacity() const;

  ConcurrentMarkSweepGeneration* cmsGen() { return _cmsGen; }

  // locking checks
  NOT_PRODUCT(static bool have_cms_token();)

  // XXXPERM bool should_collect(bool full, size_t size, bool tlab);
  bool shouldConcurrentCollect();

  void collect(bool   full,
               bool   clear_all_soft_refs,
               size_t size,
               bool   tlab);
  void collect_in_background(bool clear_all_soft_refs);
  void collect_in_foreground(bool clear_all_soft_refs);

  // In support of ExplicitGCInvokesConcurrent
  static void request_full_gc(unsigned int full_gc_count);
  // Should we unload classes in a particular concurrent cycle?
  bool should_unload_classes() const {
    return _should_unload_classes;
  }
  bool update_should_unload_classes();

  void direct_allocated(HeapWord* start, size_t size);

  // Object is dead if not marked and current phase is sweeping.
  bool is_dead_obj(oop obj) const;

  // After a promotion (of "start"), do any necessary marking.
  // If "par", then it's being done by a parallel GC thread.
  // The last two args indicate if we need precise marking
  // and if so the size of the object so it can be dirtied
  // in its entirety.
  void promoted(bool par, HeapWord* start,
                bool is_obj_array, size_t obj_size);

  HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
                                     size_t word_size);

  void getFreelistLocks() const;
  void releaseFreelistLocks() const;
  bool haveFreelistLocks() const;

  // GC prologue and epilogue
  void gc_prologue(bool full);
  void gc_epilogue(bool full);

  jlong time_of_last_gc(jlong now) {
    if (_collectorState <= Idling) {
      // gc not in progress
      return _time_of_last_gc;
    } else {
      // collection in progress
      return now;
    }
  }

  // Support for parallel remark of survivor space
  void* get_data_recorder(int thr_num);

  CMSBitMap* markBitMap() { return &_markBitMap; }
  void directAllocated(HeapWord* start, size_t size);

  // main CMS steps and related support
  void checkpointRootsInitial(bool asynch);
  bool markFromRoots(bool asynch);  // a return value of false indicates failure
                                    // due to stack overflow
  void preclean();
  void checkpointRootsFinal(bool asynch, bool clear_all_soft_refs,
                            bool init_mark_was_synchronous);
  void sweep(bool asynch);

  // Check that the currently executing thread is the expected
  // one (foreground collector or background collector).
  void check_correct_thread_executing() PRODUCT_RETURN;
  // XXXPERM void print_statistics() PRODUCT_RETURN;

  bool is_cms_reachable(HeapWord* addr);

  // Performance Counter Support
  CollectorCounters* counters() { return _gc_counters; }

  // timer stuff
  void   startTimer() { assert(!_timer.is_active(), "Error"); _timer.start();   }
  void   stopTimer()  { assert( _timer.is_active(), "Error"); _timer.stop();    }
  void   resetTimer() { assert(!_timer.is_active(), "Error"); _timer.reset();   }
  double timerValue() { assert(!_timer.is_active(), "Error"); return _timer.seconds(); }

  int  yields()          { return _numYields; }
  void resetYields()     { _numYields = 0;    }
  void incrementYields() { _numYields++;      }
  void resetNumDirtyCards()               { _numDirtyCards = 0; }
  void incrementNumDirtyCards(size_t num) { _numDirtyCards += num; }
  size_t numDirtyCards()                  { return _numDirtyCards; }

  static bool foregroundGCShouldWait() { return _foregroundGCShouldWait; }
  static void set_foregroundGCShouldWait(bool v) { _foregroundGCShouldWait = v; }
  static bool foregroundGCIsActive() { return _foregroundGCIsActive; }
  static void set_foregroundGCIsActive(bool v) { _foregroundGCIsActive = v; }
  uint sweepCount() const    { return _sweepCount; }
  void incrementSweepCount() { _sweepCount++; }

  // Timers/stats for gc scheduling and incremental mode pacing.
  CMSStats& stats() { return _stats; }

  // Convenience methods that check whether CMSIncrementalMode is enabled and
  // forward to the corresponding methods in ConcurrentMarkSweepThread.
  static void start_icms();
  static void stop_icms();    // Called at the end of the cms cycle.
  static void disable_icms(); // Called before a foreground collection.
  static void enable_icms();  // Called after a foreground collection.
  void icms_wait();           // Called at yield points.

  // Adaptive size policy
  CMSAdaptiveSizePolicy* size_policy();
  CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();

  // debugging
  void verify(bool);
  bool verify_after_remark();
  void verify_ok_to_terminate() const PRODUCT_RETURN;
  void verify_work_stacks_empty() const PRODUCT_RETURN;
  void verify_overflow_empty() const PRODUCT_RETURN;

  // convenience methods in support of debugging
  static const size_t skip_header_HeapWords() PRODUCT_RETURN0;
  HeapWord* block_start(const void* p) const PRODUCT_RETURN0;

  // accessors
  CMSMarkStack* verification_mark_stack() { return &_markStack; }
  CMSBitMap* verification_mark_bm() { return &_verification_mark_bm; }

  // Get the bit map with perm gen "deadness" information.
  CMSBitMap* perm_gen_verify_bit_map() { return &_perm_gen_verify_bit_map; }

  // Initialization errors
  bool completed_initialization() { return _completed_initialization; }
};
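
// A hedged sketch (assumption, not original code) of the abstract state
// machine documented inside CMSCollector above. It encodes only the
// next_state edges listed in that comment; the enum itself is protected,
// so this sketch works on the documented numeric values. The function name
// is hypothetical.
static inline bool cms_transition_is_legal_sketch(int from, int to) {
  switch (from) {
    case 2 /* Idling */:            return to == 4;            // Marking
    case 4 /* Marking */:           return to == 5 || to == 8; // Precleaning, Sweeping
    case 5 /* Precleaning */:       return to == 6 || to == 7; // AbortablePreclean, FinalMarking
    case 6 /* AbortablePreclean */: return to == 7;            // FinalMarking
    case 7 /* FinalMarking */:      return to == 8;            // Sweeping
    case 8 /* Sweeping */:          return to == 0;            // Resizing
    case 0 /* Resizing */:          return to == 1;            // Resetting
    case 1 /* Resetting */:         return to == 2;            // Idling
    default:                        return false;
  }
}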

class CMSExpansionCause : public AllStatic {
 public:
  enum Cause {
    _no_expansion,
    _satisfy_free_ratio,
    _satisfy_promotion,
    _satisfy_allocation,
    _allocate_par_lab,
    _allocate_par_spooling_space,
    _adaptive_size_policy
  };
  // Return a string describing the cause of the expansion.
  static const char* to_string(CMSExpansionCause::Cause cause);
};

class ConcurrentMarkSweepGeneration: public CardGeneration {
  friend class VMStructs;
  friend class ConcurrentMarkSweepThread;
  friend class ConcurrentMarkSweep;
  friend class CMSCollector;
 protected:
  static CMSCollector*      _collector; // the collector that collects us
  CompactibleFreeListSpace* _cmsSpace;  // underlying space (only one for now)

  // Performance Counters
  GenerationCounters* _gen_counters;
  GSpaceCounters*     _space_counters;

  // Words directly allocated, used by CMSStats.
  size_t _direct_allocated_words;

  // Non-product stat counters
  NOT_PRODUCT(
    int _numObjectsPromoted;
    int _numWordsPromoted;
    int _numObjectsAllocated;
    int _numWordsAllocated;
  )

  // Used for sizing decisions
  bool _incremental_collection_failed;
  bool incremental_collection_failed() {
    return _incremental_collection_failed;
  }
  void set_incremental_collection_failed() {
    _incremental_collection_failed = true;
  }
  void clear_incremental_collection_failed() {
    _incremental_collection_failed = false;
  }

  // accessors
  void set_expansion_cause(CMSExpansionCause::Cause v) { _expansion_cause = v;}
  CMSExpansionCause::Cause expansion_cause() const { return _expansion_cause; }

 private:
  // For parallel young-gen GC support.
  CMSParGCThreadState** _par_gc_thread_states;

  // Reason generation was expanded
  CMSExpansionCause::Cause _expansion_cause;

  // In support of MinChunkSize being larger than min object size
  const double _dilatation_factor;

  enum CollectionTypes {
    Concurrent_collection_type     = 0,
    MS_foreground_collection_type  = 1,
    MSC_foreground_collection_type = 2,
    Unknown_collection_type        = 3
  };

  CollectionTypes _debug_collection_type;

  // Fraction of current occupancy at which to start a CMS collection which
  // will collect this generation (at least).
  double _initiating_occupancy;

 protected:
  // Shrink generation by specified size
  virtual void shrink_by(size_t bytes);

  // Update statistics for GC
  virtual void update_gc_stats(int level, bool full);

  // Maximum available space in the generation (including uncommitted)
  // space.
  size_t max_available() const;

  // getter and initializer for _initiating_occupancy field.
  double initiating_occupancy() const { return _initiating_occupancy; }
  void   init_initiating_occupancy(intx io, intx tr);

 public:
  ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
                                int level, CardTableRS* ct,
                                bool use_adaptive_freelists,
                                FreeBlockDictionary::DictionaryChoice);

  // Accessors
  CMSCollector* collector() const { return _collector; }
  static void set_collector(CMSCollector* collector) {
    assert(_collector == NULL, "already set");
    _collector = collector;
  }
  CompactibleFreeListSpace* cmsSpace() const { return _cmsSpace; }

  Mutex* freelistLock() const;

  virtual Generation::Name kind() { return Generation::ConcurrentMarkSweep; }

  // Adaptive size policy
  CMSAdaptiveSizePolicy* size_policy();

  bool refs_discovery_is_atomic() const { return false; }
  bool refs_discovery_is_mt()     const {
    // Note: CMS does MT-discovery during the parallel-remark
    // phases. Use ReferenceProcessorMTMutator to make refs
    // discovery MT-safe during such phases or other parallel
    // discovery phases in the future. This may all go away
    // if/when we decide that refs discovery is sufficiently
    // rare that the cost of the CAS's involved is in the
    // noise. That's a measurement that should be done, and
    // the code simplified if that turns out to be the case.
    return false;
  }

  // Override
  virtual void ref_processor_init();

  // Grow generation by specified size (returns false if unable to grow)
  bool grow_by(size_t bytes);
  // Grow generation to reserved size.
  bool grow_to_reserved();

  void clear_expansion_cause() { _expansion_cause = CMSExpansionCause::_no_expansion; }

  // Space enquiries
  size_t capacity() const;
  size_t used() const;
  size_t free() const;
  double occupancy() const { return ((double)used())/((double)capacity()); }
  size_t contiguous_available() const;
  size_t unsafe_max_alloc_nogc() const;

  // over-rides
  MemRegion used_region() const;
  MemRegion used_region_at_save_marks() const;

  // Does a "full" (forced) collection invoked on this generation collect
  // all younger generations as well? Note that the second conjunct is a
  // hack to allow the collection of the younger gen first if the flag is
  // set. This is better than using the policy's should_collect_gen0_first(),
  // since that causes us to do an extra unnecessary pair of restart-&-stop-world.
  virtual bool full_collects_younger_generations() const {
    return UseCMSCompactAtFullCollection && !CollectGen0First;
  }

  void space_iterate(SpaceClosure* blk, bool usedOnly = false);

  // Support for compaction
  CompactibleSpace* first_compaction_space() const;
  // Adjust quantities in the generation affected by
  // the compaction.
  void reset_after_compaction();

  // Allocation support
  HeapWord* allocate(size_t size, bool tlab);
  HeapWord* have_lock_and_allocate(size_t size, bool tlab);
  oop       promote(oop obj, size_t obj_size);
  HeapWord* par_allocate(size_t size, bool tlab) {
    return allocate(size, tlab);
  }

  // Incremental mode triggering.
  HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
                                     size_t word_size);

  // Used by CMSStats to track direct allocation. The value is sampled and
  // reset after each young gen collection.
  size_t direct_allocated_words() const { return _direct_allocated_words; }
  void reset_direct_allocated_words()   { _direct_allocated_words = 0; }

  // Overrides for parallel promotion.
  virtual oop par_promote(int thread_num,
                          oop obj, markOop m, size_t word_sz);
  // This one should not be called for CMS.
  virtual void par_promote_alloc_undo(int thread_num,
                                      HeapWord* obj, size_t word_sz);
  virtual void par_promote_alloc_done(int thread_num);
  virtual void par_oop_since_save_marks_iterate_done(int thread_num);

  virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes,
    bool younger_handles_promotion_failure) const;

  bool should_collect(bool full, size_t size, bool tlab);
  virtual bool should_concurrent_collect() const;
  virtual bool is_too_full() const;
  void collect(bool   full,
               bool   clear_all_soft_refs,
               size_t size,
               bool   tlab);

  HeapWord* expand_and_allocate(size_t word_size,
                                bool tlab,
                                bool parallel = false);

  // GC prologue and epilogue
  void gc_prologue(bool full);
  void gc_prologue_work(bool full, bool registerClosure,
                        ModUnionClosure* modUnionClosure);
  void gc_epilogue(bool full);
  void gc_epilogue_work(bool full);

  // Time since last GC of this generation
  jlong time_of_last_gc(jlong now) {
    return collector()->time_of_last_gc(now);
  }
  void update_time_of_last_gc(jlong now) {
    collector()->update_time_of_last_gc(now);
  }

  // Allocation failure
  void expand(size_t bytes, size_t expand_bytes,
              CMSExpansionCause::Cause cause);
  virtual bool expand(size_t bytes, size_t expand_bytes);
  void shrink(size_t bytes);
  HeapWord* expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz);
  bool expand_and_ensure_spooling_space(PromotionInfo* promo);

  // Iteration support and related enquiries
  void save_marks();
  bool no_allocs_since_save_marks();
  void object_iterate_since_last_GC(ObjectClosure* cl);
  void younger_refs_iterate(OopsInGenClosure* cl);

  // Iteration support specific to CMS generations
  void save_sweep_limit();

  // More iteration support
  virtual void oop_iterate(MemRegion mr, OopClosure* cl);
  virtual void oop_iterate(OopClosure* cl);
  virtual void safe_object_iterate(ObjectClosure* cl);
  virtual void object_iterate(ObjectClosure* cl);

  // Need to declare the full complement of closures, whether we'll
  // override them or not, or get a message from the compiler:
  // oop_since_save_marks_iterate_nv hides virtual function...
  #define CMS_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
    void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
  ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DECL)

  // Smart allocation  XXX -- move to CFLSpace?
  void setNearLargestChunk();
  bool isNearLargestChunk(HeapWord* addr);

  // Get the chunk at the end of the space. Delegates to
  // the space.
  FreeChunk* find_chunk_at_end();

  // Overriding of unused functionality (sharing not yet supported with CMS)
  void pre_adjust_pointers();
  void post_compact();

  // Debugging
  void prepare_for_verify();
  void verify(bool allow_dirty);
  void print_statistics() PRODUCT_RETURN;

  // Performance Counters support
  virtual void update_counters();
  virtual void update_counters(size_t used);
  void initialize_performance_counters();
  CollectorCounters* counters() { return collector()->counters(); }

  // Support for parallel remark of survivor space
  void* get_data_recorder(int thr_num) {
    // Delegate to collector
    return collector()->get_data_recorder(thr_num);
  }

  // Printing
  const char* name() const;
  virtual const char* short_name() const { return "CMS"; }
  void print() const;
  void printOccupancy(const char* s);
  bool must_be_youngest() const { return false; }
  bool must_be_oldest()   const { return true; }

  void compute_new_size();

  CollectionTypes debug_collection_type() { return _debug_collection_type; }
  void rotate_debug_collection_type();
};
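
// A minimal sketch (assumption, not original code) combining the occupancy()
// and initiating_occupancy() accessors above: a collection of this generation
// becomes desirable once current occupancy reaches the initiating occupancy
// fraction. The helper name is hypothetical.
static inline bool occupancy_warrants_cms_sketch(double occupancy,
                                                 double initiating_occupancy) {
  return occupancy >= initiating_occupancy;
}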

class ASConcurrentMarkSweepGeneration : public ConcurrentMarkSweepGeneration {

  // Return the size policy from the heap's collector
  // policy cast to CMSAdaptiveSizePolicy*.
  CMSAdaptiveSizePolicy* cms_size_policy() const;

  // Resize the generation based on the adaptive size
  // policy.
  void resize(size_t cur_promo, size_t desired_promo);

  // Return the GC counters from the collector policy
  CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();

  virtual void shrink_by(size_t bytes);

 public:
  virtual void compute_new_size();
  ASConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
                                  int level, CardTableRS* ct,
                                  bool use_adaptive_freelists,
                                  FreeBlockDictionary::DictionaryChoice
                                    dictionaryChoice) :
    ConcurrentMarkSweepGeneration(rs, initial_byte_size, level, ct,
      use_adaptive_freelists, dictionaryChoice) {}

  virtual const char* short_name() const { return "ASCMS"; }
  virtual Generation::Name kind() { return Generation::ASConcurrentMarkSweep; }

  virtual void update_counters();
  virtual void update_counters(size_t used);
};

//
// Closures of various sorts used by CMS to accomplish its work
//

// This closure is used to check that a certain set of oops is empty.
class FalseClosure: public OopClosure {
 public:
  void do_oop(oop* p)       { guarantee(false, "Should be an empty set"); }
  void do_oop(narrowOop* p) { guarantee(false, "Should be an empty set"); }
};

// This closure is used to do concurrent marking from the roots
// following the first checkpoint.
class MarkFromRootsClosure: public BitMapClosure {
  CMSCollector*  _collector;
  MemRegion      _span;
  CMSBitMap*     _bitMap;
  CMSBitMap*     _mut;
  CMSMarkStack*  _markStack;
  CMSMarkStack*  _revisitStack;
  bool           _yield;
  int            _skipBits;
  HeapWord*      _finger;
  HeapWord*      _threshold;
  DEBUG_ONLY(bool _verifying;)

 public:
  MarkFromRootsClosure(CMSCollector* collector, MemRegion span,
                       CMSBitMap* bitMap,
                       CMSMarkStack* markStack,
                       CMSMarkStack* revisitStack,
                       bool should_yield, bool verifying = false);
  bool do_bit(size_t offset);
  void reset(HeapWord* addr);
  inline void do_yield_check();

 private:
  void scanOopsInOop(HeapWord* ptr);
  void do_yield_work();
};

// This closure is used to do concurrent multi-threaded
// marking from the roots following the first checkpoint.
// XXX This should really be a subclass of the serial version
// above, but I have not had the time to refactor things cleanly.
// That will be done for Dolphin.
class Par_MarkFromRootsClosure: public BitMapClosure {
  CMSCollector*  _collector;
  MemRegion      _whole_span;
  MemRegion      _span;
  CMSBitMap*     _bit_map;
  CMSBitMap*     _mut;
  OopTaskQueue*  _work_queue;
  CMSMarkStack*  _overflow_stack;
  CMSMarkStack*  _revisit_stack;
  bool           _yield;
  int            _skip_bits;
  HeapWord*      _finger;
  HeapWord*      _threshold;
  CMSConcMarkingTask* _task;
 public:
  Par_MarkFromRootsClosure(CMSConcMarkingTask* task, CMSCollector* collector,
                           MemRegion span,
                           CMSBitMap* bit_map,
                           OopTaskQueue* work_queue,
                           CMSMarkStack* overflow_stack,
                           CMSMarkStack* revisit_stack,
                           bool should_yield);
  bool do_bit(size_t offset);
  inline void do_yield_check();

 private:
  void scan_oops_in_oop(HeapWord* ptr);
  void do_yield_work();
  bool get_work_from_overflow_stack();
};

// The following closures are used to do certain kinds of verification of
// CMS marking.
class PushAndMarkVerifyClosure: public OopClosure {
  CMSCollector*  _collector;
  MemRegion      _span;
  CMSBitMap*     _verification_bm;
  CMSBitMap*     _cms_bm;
  CMSMarkStack*  _mark_stack;
 protected:
  void do_oop(oop p);
  template <class T> inline void do_oop_work(T *p) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    do_oop(obj);
  }
 public:
  PushAndMarkVerifyClosure(CMSCollector* cms_collector,
                           MemRegion span,
                           CMSBitMap* verification_bm,
                           CMSBitMap* cms_bm,
                           CMSMarkStack* mark_stack);
  void do_oop(oop* p);
  void do_oop(narrowOop* p);
  // Deal with a stack overflow condition
  void handle_stack_overflow(HeapWord* lost);
};

class MarkFromRootsVerifyClosure: public BitMapClosure {
  CMSCollector*  _collector;
  MemRegion      _span;
  CMSBitMap*     _verification_bm;
  CMSBitMap*     _cms_bm;
  CMSMarkStack*  _mark_stack;
  HeapWord*      _finger;
  PushAndMarkVerifyClosure _pam_verify_closure;
 public:
  MarkFromRootsVerifyClosure(CMSCollector* collector, MemRegion span,
                             CMSBitMap* verification_bm,
                             CMSBitMap* cms_bm,
                             CMSMarkStack* mark_stack);
  bool do_bit(size_t offset);
  void reset(HeapWord* addr);
};


// This closure is used to check that a certain set of bits is
// "empty" (i.e. the bit vector doesn't have any 1-bits).
class FalseBitMapClosure: public BitMapClosure {
 public:
  bool do_bit(size_t offset) {
    guarantee(false, "Should not have a 1 bit");
    return true;
  }
};

// This closure is used during the second checkpointing phase
// to rescan the marked objects on the dirty cards in the mod
// union table and the card table proper. It's invoked via
// MarkFromDirtyCardsClosure below. It uses either
// [Par_]MarkRefsIntoAndScanClosure (Par_ in the parallel case)
// declared in genOopClosures.hpp to accomplish some of its work.
// In the parallel case the bitMap is shared, so access to
// it needs to be suitably synchronized for updates by embedded
// closures that update it; however, this closure itself only
// reads the bit_map and because it is idempotent, is immune to
// reading stale values.
class ScanMarkedObjectsAgainClosure: public UpwardsObjectClosure {
  #ifdef ASSERT
    CMSCollector*   _collector;
    MemRegion       _span;
    union {
      CMSMarkStack* _mark_stack;
      OopTaskQueue* _work_queue;
    };
  #endif // ASSERT
  bool       _parallel;
  CMSBitMap* _bit_map;
  union {
    MarkRefsIntoAndScanClosure*     _scan_closure;
    Par_MarkRefsIntoAndScanClosure* _par_scan_closure;
  };

 public:
  ScanMarkedObjectsAgainClosure(CMSCollector* collector,
                                MemRegion span,
                                ReferenceProcessor* rp,
                                CMSBitMap* bit_map,
                                CMSMarkStack* mark_stack,
                                CMSMarkStack* revisit_stack,
                                MarkRefsIntoAndScanClosure* cl):
    #ifdef ASSERT
      _collector(collector),
      _span(span),
      _mark_stack(mark_stack),
    #endif // ASSERT
    _parallel(false),
    _bit_map(bit_map),
    _scan_closure(cl) { }

  ScanMarkedObjectsAgainClosure(CMSCollector* collector,
                                MemRegion span,
                                ReferenceProcessor* rp,
                                CMSBitMap* bit_map,
                                OopTaskQueue* work_queue,
                                CMSMarkStack* revisit_stack,
                                Par_MarkRefsIntoAndScanClosure* cl):
    #ifdef ASSERT
      _collector(collector),
      _span(span),
      _work_queue(work_queue),
    #endif // ASSERT
    _parallel(true),
    _bit_map(bit_map),
    _par_scan_closure(cl) { }

  void do_object(oop obj) {
    guarantee(false, "Call do_object_b(oop, MemRegion) instead");
  }
  bool do_object_b(oop obj) {
    guarantee(false, "Call do_object_b(oop, MemRegion) form instead");
    return false;
  }
  bool do_object_bm(oop p, MemRegion mr);
};

// This closure is used during the second checkpointing phase
// to rescan the marked objects on the dirty cards in the mod
// union table and the card table proper. It invokes
// ScanMarkedObjectsAgainClosure above to accomplish much of its work.
// In the parallel case, the bit map is shared and requires
// synchronized access.
class MarkFromDirtyCardsClosure: public MemRegionClosure {
  CompactibleFreeListSpace*     _space;
  ScanMarkedObjectsAgainClosure _scan_cl;
  size_t                        _num_dirty_cards;

 public:
  MarkFromDirtyCardsClosure(CMSCollector* collector,
                            MemRegion span,
                            CompactibleFreeListSpace* space,
                            CMSBitMap* bit_map,
                            CMSMarkStack* mark_stack,
                            CMSMarkStack* revisit_stack,
                            MarkRefsIntoAndScanClosure* cl):
    _space(space),
    _num_dirty_cards(0),
    _scan_cl(collector, span, collector->ref_processor(), bit_map,
             mark_stack, revisit_stack, cl) { }

  MarkFromDirtyCardsClosure(CMSCollector* collector,
                            MemRegion span,
                            CompactibleFreeListSpace* space,
                            CMSBitMap* bit_map,
                            OopTaskQueue* work_queue,
                            CMSMarkStack* revisit_stack,
                            Par_MarkRefsIntoAndScanClosure* cl):
    _space(space),
    _num_dirty_cards(0),
    _scan_cl(collector, span, collector->ref_processor(), bit_map,
             work_queue, revisit_stack, cl) { }

  void do_MemRegion(MemRegion mr);
  void set_space(CompactibleFreeListSpace* space) { _space = space; }
  size_t num_dirty_cards() { return _num_dirty_cards; }
};

// This closure is used in the non-product build to check
// that there are no MemRegions with a certain property.
class FalseMemRegionClosure: public MemRegionClosure {
  void do_MemRegion(MemRegion mr) {
    guarantee(!mr.is_empty(), "Shouldn't be empty");
    guarantee(false, "Should never be here");
  }
};

// This closure is used during the precleaning phase
// to "carefully" rescan marked objects on dirty cards.
// It uses MarkRefsIntoAndScanClosure declared in genOopClosures.hpp
// to accomplish some of its work.
class ScanMarkedObjectsAgainCarefullyClosure: public ObjectClosureCareful {
  CMSCollector*               _collector;
  MemRegion                   _span;
  bool                        _yield;
  Mutex*                      _freelistLock;
  CMSBitMap*                  _bitMap;
  CMSMarkStack*               _markStack;
  MarkRefsIntoAndScanClosure* _scanningClosure;

 public:
  ScanMarkedObjectsAgainCarefullyClosure(CMSCollector* collector,
                                         MemRegion     span,
                                         CMSBitMap* bitMap,
                                         CMSMarkStack* markStack,
                                         CMSMarkStack* revisitStack,
                                         MarkRefsIntoAndScanClosure* cl,
                                         bool should_yield):
    _collector(collector),
    _span(span),
    _yield(should_yield),
    _bitMap(bitMap),
    _markStack(markStack),
    _scanningClosure(cl) {
  }

  void do_object(oop p) {
    guarantee(false, "call do_object_careful instead");
  }

  size_t do_object_careful(oop p) {
    guarantee(false, "Unexpected caller");
    return 0;
  }

  size_t do_object_careful_m(oop p, MemRegion mr);

  void setFreelistLock(Mutex* m) {
    _freelistLock = m;
    _scanningClosure->set_freelistLock(m);
  }

 private:
  inline bool do_yield_check();

  void do_yield_work();
};
// Like ScanMarkedObjectsAgainCarefullyClosure above, but used to
// preclean objects in the survivor spaces.
class SurvivorSpacePrecleanClosure: public ObjectClosureCareful {
  CMSCollector*       _collector;
  MemRegion           _span;
  bool                _yield;
  CMSBitMap*          _bit_map;
  CMSMarkStack*       _mark_stack;
  PushAndMarkClosure* _scanning_closure;
  unsigned int        _before_count;

 public:
  SurvivorSpacePrecleanClosure(CMSCollector* collector,
                               MemRegion span,
                               CMSBitMap* bit_map,
                               CMSMarkStack* mark_stack,
                               PushAndMarkClosure* cl,
                               unsigned int before_count,
                               bool should_yield):
    _collector(collector),
    _span(span),
    _yield(should_yield),
    _bit_map(bit_map),
    _mark_stack(mark_stack),
    _scanning_closure(cl),
    _before_count(before_count)
  { }

  void do_object(oop p) {
    guarantee(false, "call do_object_careful instead");
  }

  size_t do_object_careful(oop p);

  size_t do_object_careful_m(oop p, MemRegion mr) {
    guarantee(false, "Unexpected caller");
    return 0;
  }

 private:
  inline void do_yield_check();
  void do_yield_work();
};
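
// Illustrative sketch (not part of this header): the shape of the yield
// protocol these closures share; a minimal sketch assuming the out-of-line
// do_yield_work() drops and re-takes the relevant locks before resuming.
//
//   inline void SurvivorSpacePrecleanClosure::do_yield_check() {
//     if (ConcurrentMarkSweepThread::should_yield() &&
//         !_collector->foregroundGCIsActive() &&
//         _yield) {
//       do_yield_work();
//     }
//   }
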
// This closure is used to accomplish the sweeping work
// after the second checkpoint but before the concurrent reset
// phase.
//
// Terminology
//   left hand chunk (LHC) - block of one or more chunks currently being
//     coalesced. The LHC is available for coalescing with a new chunk.
//   right hand chunk (RHC) - block that is currently being swept that is
//     free or garbage that can be coalesced with the LHC.
// _inFreeRange is true if there is currently a LHC.
// _lastFreeRangeCoalesced is true if the LHC consists of more than one chunk.
// _freeRangeInFreeLists is true if the LHC is in the free lists.
// _freeFinger is the address of the current LHC.
class SweepClosure: public BlkClosureCareful {
  CMSCollector*                  _collector;    // collector doing the work
  ConcurrentMarkSweepGeneration* _g;            // generation being swept
  CompactibleFreeListSpace*      _sp;           // space being swept
  HeapWord*                      _limit;
  Mutex*                         _freelistLock; // free list lock (in space)
  CMSBitMap*                     _bitMap;       // marking bit map (in generation)
  bool                           _inFreeRange;  // in the midst of a free run
  bool                           _freeRangeInFreeLists;
                                 // Often, we have just found a free chunk
                                 // and started a new free range; we do not
                                 // eagerly remove this chunk from the free
                                 // lists unless there is a possibility of
                                 // coalescing. When true, this flag
                                 // indicates that _freeFinger below points
                                 // to a potentially free chunk that may
                                 // still be in the free lists.
  bool                           _lastFreeRangeCoalesced;
                                 // free range contains coalesced chunks
  bool                           _yield;
                                 // whether sweeping should be done with
                                 // yields; e.g. when done by the foreground
                                 // collector we shouldn't yield
  HeapWord*                      _freeFinger;   // when _inFreeRange is set,
                                                // points to the "left hand chunk"
  size_t                         _freeRangeSize;
                                 // when _inFreeRange is set, the accumulated
                                 // size of the "left hand chunk"
  NOT_PRODUCT(
    size_t                       _numObjectsFreed;
    size_t                       _numWordsFreed;
    size_t                       _numObjectsLive;
    size_t                       _numWordsLive;
    size_t                       _numObjectsAlreadyFree;
    size_t                       _numWordsAlreadyFree;
    FreeChunk*                   _last_fc;
  )
 private:
  // Code that is common to a free chunk or garbage when
  // encountered during sweeping.
  void doPostIsFreeOrGarbageChunk(FreeChunk* fc, size_t chunkSize);
  // Process a free chunk during sweeping.
  void doAlreadyFreeChunk(FreeChunk* fc);
  // Process a garbage chunk during sweeping.
  size_t doGarbageChunk(FreeChunk* fc);
  // Process a live chunk during sweeping.
  size_t doLiveChunk(FreeChunk* fc);

  // Accessors.
  HeapWord* freeFinger() const            { return _freeFinger; }
  void set_freeFinger(HeapWord* v)        { _freeFinger = v; }
  size_t freeRangeSize() const            { return _freeRangeSize; }
  void set_freeRangeSize(size_t v)        { _freeRangeSize = v; }
  bool inFreeRange() const                { return _inFreeRange; }
  void set_inFreeRange(bool v)            { _inFreeRange = v; }
  bool lastFreeRangeCoalesced() const     { return _lastFreeRangeCoalesced; }
  void set_lastFreeRangeCoalesced(bool v) { _lastFreeRangeCoalesced = v; }
  bool freeRangeInFreeLists() const       { return _freeRangeInFreeLists; }
  void set_freeRangeInFreeLists(bool v)   { _freeRangeInFreeLists = v; }

  // Initialize a free range.
  void initialize_free_range(HeapWord* freeFinger, bool freeRangeInFreeLists);
  // Return this chunk to the free lists.
  void flushCurFreeChunk(HeapWord* chunk, size_t size);

  // Check if we should yield and do so when necessary.
  inline void do_yield_check(HeapWord* addr);
  // Yield.
  void do_yield_work(HeapWord* addr);

  // Debugging/Printing
  void record_free_block_coalesced(FreeChunk* fc) const PRODUCT_RETURN;

 public:
  SweepClosure(CMSCollector* collector, ConcurrentMarkSweepGeneration* g,
               CMSBitMap* bitMap, bool should_yield);
  ~SweepClosure();

  size_t do_blk_careful(HeapWord* addr);
};
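
// Illustrative sketch (not part of this header): how do_blk_careful() might
// drive the LHC/RHC coalescing described above; a minimal sketch under
// stated assumptions, not the actual implementation.
//
//   size_t SweepClosure::do_blk_careful(HeapWord* addr) {
//     FreeChunk* fc = (FreeChunk*)addr;
//     if (fc->isFree()) {            // RHC is already on a free list
//       doAlreadyFreeChunk(fc);      // possibly coalesce it into the LHC
//       return fc->size();
//     } else if (!_bitMap->isMarked(addr)) {
//       return doGarbageChunk(fc);   // dead object: reclaim, maybe coalesce
//     } else {
//       return doLiveChunk(fc);      // live object: close any open free range
//     }
//   }
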
// Closures related to weak references processing.

// During CMS' weak reference processing, this is a
// work-routine/closure used to complete transitive
// marking of objects as live after an initial set
// has been completely accumulated.
// This closure is currently used both during the final
// remark stop-world phase and during the concurrent
// precleaning of the discovered reference lists.
class CMSDrainMarkingStackClosure: public VoidClosure {
  CMSCollector*        _collector;
  MemRegion            _span;
  CMSMarkStack*        _mark_stack;
  CMSBitMap*           _bit_map;
  CMSKeepAliveClosure* _keep_alive;
  bool                 _concurrent_precleaning;
 public:
  CMSDrainMarkingStackClosure(CMSCollector* collector, MemRegion span,
                              CMSBitMap* bit_map, CMSMarkStack* mark_stack,
                              CMSKeepAliveClosure* keep_alive,
                              bool cpc):
    _collector(collector),
    _span(span),
    _mark_stack(mark_stack),
    _bit_map(bit_map),
    _keep_alive(keep_alive),
    _concurrent_precleaning(cpc) {
    assert(_concurrent_precleaning == _keep_alive->concurrent_precleaning(),
           "Mismatch");
  }

  void do_void();
};
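
// Illustrative sketch (not part of this header): how the reference
// processor might plug this closure in as its "complete gc" step; the
// keep-alive closure and the processor call shape are assumptions.
//
//   CMSDrainMarkingStackClosure complete_gc(collector, span, bit_map,
//                                           mark_stack, &keep_alive,
//                                           /* cpc */ false);
//   rp->process_discovered_references(&is_alive, &keep_alive,
//                                     &complete_gc, NULL);
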
// A parallel version of CMSDrainMarkingStackClosure above.
class CMSParDrainMarkingStackClosure: public VoidClosure {
  CMSCollector*                 _collector;
  MemRegion                     _span;
  OopTaskQueue*                 _work_queue;
  CMSBitMap*                    _bit_map;
  CMSInnerParMarkAndPushClosure _mark_and_push;

 public:
  CMSParDrainMarkingStackClosure(CMSCollector* collector,
                                 MemRegion span, CMSBitMap* bit_map,
                                 CMSMarkStack* revisit_stack,
                                 OopTaskQueue* work_queue):
    _collector(collector),
    _span(span),
    _work_queue(work_queue),
    _bit_map(bit_map),
    _mark_and_push(collector, span, bit_map, revisit_stack, work_queue) { }

  void trim_queue(uint max);
  void do_void();
};
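
// Illustrative sketch (not part of this header): trim_queue() drains the
// worker's local queue down to at most `max' entries, applying the
// mark-and-push closure to each popped oop; a sketch of the likely
// shape, not the actual implementation.
//
//   void CMSParDrainMarkingStackClosure::trim_queue(uint max) {
//     while (_work_queue->size() > max) {
//       oop obj;
//       if (_work_queue->pop_local(obj)) {
//         obj->oop_iterate(&_mark_and_push);
//       }
//     }
//   }
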
// Allow yielding or short-circuiting of reference list
// precleaning work.
class CMSPrecleanRefsYieldClosure: public YieldClosure {
  CMSCollector* _collector;
  void do_yield_work();
 public:
  CMSPrecleanRefsYieldClosure(CMSCollector* collector):
    _collector(collector) {}
  virtual bool should_return();
};
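
// Illustrative sketch (not part of this header): during concurrent
// precleaning the reference processor periodically consults the yield
// closure and abandons the pass when it returns true; a hypothetical
// consumer loop, not the actual one.
//
//   CMSPrecleanRefsYieldClosure yield_cl(collector);
//   while (iter.has_next()) {                // hypothetical iterator
//     if (yield_cl.should_return()) break;   // short-circuit the pass
//     ... preclean the next reference ...
//   }
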
// Convenience class that locks free list locks for a given CMS collector.
class FreelistLocker: public StackObj {
 private:
  CMSCollector* _collector;
 public:
  FreelistLocker(CMSCollector* collector):
    _collector(collector) {
    _collector->getFreelistLocks();
  }

  ~FreelistLocker() {
    _collector->releaseFreelistLocks();
  }
};
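
// Illustrative sketch (not part of this header): RAII usage; the locks
// are held for exactly the scope of the locker object.
//
//   {
//     FreelistLocker fll(collector);  // takes all free list locks
//     ... walk or mutate the free lists safely ...
//   }                                 // destructor releases the locks
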
// Mark all dead objects in a given space.
class MarkDeadObjectsClosure: public BlkClosure {
  const CMSCollector*             _collector;
  const CompactibleFreeListSpace* _sp;
  CMSBitMap*                      _live_bit_map;
  CMSBitMap*                      _dead_bit_map;
 public:
  MarkDeadObjectsClosure(const CMSCollector* collector,
                         const CompactibleFreeListSpace* sp,
                         CMSBitMap* live_bit_map,
                         CMSBitMap* dead_bit_map) :
    _collector(collector),
    _sp(sp),
    _live_bit_map(live_bit_map),
    _dead_bit_map(dead_bit_map) {}
  size_t do_blk(HeapWord* addr);
};
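
// Illustrative sketch (not part of this header): the likely shape of
// do_blk() - walk each block and record unmarked objects in the dead
// bit map; a sketch under stated assumptions, not the implementation.
//
//   size_t MarkDeadObjectsClosure::do_blk(HeapWord* addr) {
//     size_t sz = _sp->block_size(addr);
//     if (_sp->block_is_obj(addr) && !_live_bit_map->isMarked(addr)) {
//       _dead_bit_map->mark(addr);  // object is unreachable
//     }
//     return sz;
//   }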