Tue, 23 Nov 2010 13:22:55 -0800
6989984: Use standard include model for Hotspot
Summary: Replaced MakeDeps and the includeDB files with more standardized solutions.
Reviewed-by: coleenp, kvn, kamg

/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP
#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP

#include "gc_implementation/concurrentMarkSweep/freeBlockDictionary.hpp"
#include "gc_implementation/shared/gSpaceCounters.hpp"
#include "gc_implementation/shared/gcStats.hpp"
#include "gc_implementation/shared/generationCounters.hpp"
#include "memory/generation.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/virtualspace.hpp"
#include "services/memoryService.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/stack.inline.hpp"
#include "utilities/taskqueue.hpp"
#include "utilities/yieldingWorkgroup.hpp"

// ConcurrentMarkSweepGeneration is in support of a concurrent
// mark-sweep old generation in the Detlefs-Printezis--Boehm-Demers-Schenker
// style. We assume, for now, that this generation is always the
// seniormost generation (modulo the PermGeneration), and for simplicity
// in the first implementation, that this generation is a single compactible
// space. Neither of these restrictions appears essential; both will be
// relaxed in the future when more time is available to implement the
// greater generality (and there's a need for it).
//
// Concurrent mode failures are currently handled by
// means of a sliding mark-compact.

class CMSAdaptiveSizePolicy;
class CMSConcMarkingTask;
class CMSGCAdaptivePolicyCounters;
class ConcurrentMarkSweepGeneration;
class ConcurrentMarkSweepPolicy;
class ConcurrentMarkSweepThread;
class CompactibleFreeListSpace;
class FreeChunk;
class PromotionInfo;
class ScanMarkedObjectsAgainCarefullyClosure;

// A generic CMS bit map. It's the basis for both the CMS marking bit map
// as well as for the mod union table (in each case only a subset of the
// methods are used). This is essentially a wrapper around the BitMap class,
// with one bit per (1<<_shifter) HeapWords. (i.e. for the marking bit map,
// we have _shifter == 0, and for the mod union table we have
// _shifter == CardTableModRefBS::card_shift - LogHeapWordSize.)
// XXX 64-bit issues in BitMap?
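//
// An illustrative sketch (not part of the class) of how the shifter relates
// addresses and bit offsets, matching the conversion utilities declared
// below:
//   offset = (addr - _bmStartWord) >> _shifter;   // heapWordToOffset
//   addr   = _bmStartWord + (offset << _shifter); // offsetToHeapWord
// For example, for the mod union table on a 64-bit VM (card_shift == 9,
// LogHeapWordSize == 3) we get _shifter == 6, so one bit covers
// 1 << 6 == 64 HeapWords, i.e. one 512-byte card.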
class CMSBitMap VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;

  HeapWord*    _bmStartWord;   // base address of range covered by map
  size_t       _bmWordSize;    // map size (in #HeapWords covered)
  const int    _shifter;       // shifts to convert HeapWord to bit position
  VirtualSpace _virtual_space; // underlying storage for the bit map
  BitMap       _bm;            // the bit map itself
 public:
  Mutex* const _lock;          // mutex protecting _bm;

 public:
  // constructor
  CMSBitMap(int shifter, int mutex_rank, const char* mutex_name);

  // allocates the actual storage for the map
  bool allocate(MemRegion mr);
  // field getter
  Mutex* lock() const { return _lock; }
  // locking verifier convenience function
  void assert_locked() const PRODUCT_RETURN;

  // inquiries
  HeapWord* startWord()   const { return _bmStartWord; }
  size_t    sizeInWords() const { return _bmWordSize; }
  size_t    sizeInBits()  const { return _bm.size(); }
  // the following is one past the last word in space
  HeapWord* endWord()     const { return _bmStartWord + _bmWordSize; }

  // reading marks
  bool isMarked(HeapWord* addr) const;
  bool par_isMarked(HeapWord* addr) const; // does not do locking checks
  bool isUnmarked(HeapWord* addr) const;
  bool isAllClear() const;

  // writing marks
  void mark(HeapWord* addr);
  // For marking by parallel GC threads;
  // returns true if we did, false if another thread did
  bool par_mark(HeapWord* addr);

  void mark_range(MemRegion mr);
  void par_mark_range(MemRegion mr);
  void mark_large_range(MemRegion mr);
  void par_mark_large_range(MemRegion mr);
  void par_clear(HeapWord* addr);  // For unmarking by parallel GC threads.
  void clear_range(MemRegion mr);
  void par_clear_range(MemRegion mr);
  void clear_large_range(MemRegion mr);
  void par_clear_large_range(MemRegion mr);
  void clear_all();
  void clear_all_incrementally();  // Not yet implemented!!

  NOT_PRODUCT(
    // checks the memory region for validity
    void region_invariant(MemRegion mr);
  )

  // iteration
  void iterate(BitMapClosure* cl) {
    _bm.iterate(cl);
  }
  void iterate(BitMapClosure* cl, HeapWord* left, HeapWord* right);
  void dirty_range_iterate_clear(MemRegionClosure* cl);
  void dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl);

  // auxiliary support for iteration
  HeapWord* getNextMarkedWordAddress(HeapWord* addr) const;
  HeapWord* getNextMarkedWordAddress(HeapWord* start_addr,
                                     HeapWord* end_addr) const;
  HeapWord* getNextUnmarkedWordAddress(HeapWord* addr) const;
  HeapWord* getNextUnmarkedWordAddress(HeapWord* start_addr,
                                       HeapWord* end_addr) const;
  MemRegion getAndClearMarkedRegion(HeapWord* addr);
  MemRegion getAndClearMarkedRegion(HeapWord* start_addr,
                                    HeapWord* end_addr);

  // conversion utilities
  HeapWord* offsetToHeapWord(size_t offset) const;
  size_t    heapWordToOffset(HeapWord* addr) const;
  size_t    heapWordDiffToOffsetDiff(size_t diff) const;

  // debugging
  // is this address range covered by the bit-map?
  NOT_PRODUCT(
    bool covers(MemRegion mr) const;
    bool covers(HeapWord* start, size_t size = 0) const;
  )
  void verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) PRODUCT_RETURN;
};

// Represents a marking stack used by the CMS collector.
// Ideally this should be GrowableArray<> just like MSC's marking stack(s).
class CMSMarkStack: public CHeapObj {
  //
  friend class CMSCollector;   // to get at expansion stats further below
  //

  VirtualSpace _virtual_space;  // space for the stack
  oop*   _base;      // bottom of stack
  size_t _index;     // one more than last occupied index
  size_t _capacity;  // max #elements
  Mutex  _par_lock;  // an advisory lock used in case of parallel access
  NOT_PRODUCT(size_t _max_depth;)  // max depth plumbed during run

 protected:
  size_t _hit_limit;      // we hit max stack size limit
  size_t _failed_double;  // we failed expansion before hitting limit

 public:
  CMSMarkStack():
    _par_lock(Mutex::event, "CMSMarkStack._par_lock", true),
    _hit_limit(0),
    _failed_double(0) {}

  bool allocate(size_t size);

  size_t capacity() const { return _capacity; }

  oop pop() {
    if (!isEmpty()) {
      return _base[--_index];
    }
    return NULL;
  }

  bool push(oop ptr) {
    if (isFull()) {
      return false;
    } else {
      _base[_index++] = ptr;
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, _index));
      return true;
    }
  }

  bool isEmpty() const { return _index == 0; }
  bool isFull()  const {
    assert(_index <= _capacity, "buffer overflow");
    return _index == _capacity;
  }

  size_t length() { return _index; }

  // "Parallel versions" of some of the above
  oop par_pop() {
    // lock and pop
    MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag);
    return pop();
  }

  bool par_push(oop ptr) {
    // lock and push
    MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag);
    return push(ptr);
  }

  // Forcibly reset the stack, losing all of its contents.
  void reset() {
    _index = 0;
  }

  // Expand the stack, typically in response to an overflow condition.
  void expand();

  // Compute the least valued stack element.
  oop least_value(HeapWord* low) {
    oop least = (oop)low;
    for (size_t i = 0; i < _index; i++) {
      least = MIN2(least, _base[i]);
    }
    return least;
  }

  // Exposed here to allow stack expansion in || case
  Mutex* par_lock() { return &_par_lock; }
};
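
// A minimal usage sketch (illustrative only; the overflow handling shown
// is an assumption about typical use, not this collector's actual policy):
//
//   CMSMarkStack stack;
//   if (stack.allocate(some_capacity)) {      // some_capacity: hypothetical
//     if (!stack.par_push(obj)) {
//       // Stack was full. A caller would typically grab par_lock(),
//       // call expand() and retry, or divert the object to the
//       // collector's overflow list (see CMSCollector below).
//     }
//   }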

class CardTableRS;
class CMSParGCThreadState;

class ModUnionClosure: public MemRegionClosure {
 protected:
  CMSBitMap* _t;
 public:
  ModUnionClosure(CMSBitMap* t): _t(t) { }
  void do_MemRegion(MemRegion mr);
};

class ModUnionClosurePar: public ModUnionClosure {
 public:
  ModUnionClosurePar(CMSBitMap* t): ModUnionClosure(t) { }
  void do_MemRegion(MemRegion mr);
};

// Survivor Chunk Array in support of parallelization of
// Survivor Space rescan.
class ChunkArray: public CHeapObj {
  size_t _index;
  size_t _capacity;
  size_t _overflows;
  HeapWord** _array;   // storage for array

 public:
  ChunkArray() : _index(0), _capacity(0), _overflows(0), _array(NULL) {}
  ChunkArray(HeapWord** a, size_t c):
    _index(0), _capacity(c), _overflows(0), _array(a) {}

  HeapWord** array() { return _array; }
  void set_array(HeapWord** a) { _array = a; }

  size_t capacity() { return _capacity; }
  void set_capacity(size_t c) { _capacity = c; }

  size_t end() {
    assert(_index <= capacity(),
           err_msg("_index (" SIZE_FORMAT ") > _capacity (" SIZE_FORMAT "): out of bounds",
                   _index, _capacity));
    return _index;
  }  // exclusive

  HeapWord* nth(size_t n) {
    assert(n < end(), "Out of bounds access");
    return _array[n];
  }

  void reset() {
    _index = 0;
    if (_overflows > 0 && PrintCMSStatistics > 1) {
      warning("CMS: ChunkArray[" SIZE_FORMAT "] overflowed " SIZE_FORMAT " times",
              _capacity, _overflows);
    }
    _overflows = 0;
  }

  void record_sample(HeapWord* p, size_t sz) {
    // For now we do not do anything with the size
    if (_index < _capacity) {
      _array[_index++] = p;
    } else {
      ++_overflows;
      assert(_index == _capacity,
             err_msg("_index (" SIZE_FORMAT ") > _capacity (" SIZE_FORMAT
                     "): out of bounds at overflow#" SIZE_FORMAT,
                     _index, _capacity, _overflows));
    }
  }
};

//
// Timing, allocation and promotion statistics for gc scheduling and incremental
// mode pacing. Most statistics are exponential averages.
//
class CMSStats VALUE_OBJ_CLASS_SPEC {
 private:
  ConcurrentMarkSweepGeneration* const _cms_gen;   // The cms (old) gen.

  // The following are exponential averages with factor alpha:
  //   avg = (100 - alpha) * avg + alpha * cur_sample
  //
  //   The durations measure:  end_time[n] - start_time[n]
  //   The periods measure:    start_time[n] - start_time[n-1]
  //
  // The cms period and duration include only concurrent collections; time spent
  // in foreground cms collections due to System.gc() or because of a failure to
  // keep up is not included.
  //
  // There are 3 alphas to "bootstrap" the statistics. The _saved_alpha is the
  // real value, but is used only after the first period. A value of 100 is
  // used for the first sample so it gets the entire weight.
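  //
  // A worked example (illustrative numbers): with alpha == 25, a current
  // average of 4.0s and a new sample of 8.0s,
  //   avg = (100 - 25)/100 * 4.0 + 25/100 * 8.0 = 3.0 + 2.0 = 5.0s
  // (alpha is a percentage, so both terms are scaled by 1/100). With
  // alpha == 100, as used for the first sample, the new sample replaces
  // the average entirely.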
  unsigned int _saved_alpha; // 0-100
  unsigned int _gc0_alpha;
  unsigned int _cms_alpha;

  double _gc0_duration;
  double _gc0_period;
  size_t _gc0_promoted;           // bytes promoted per gc0
  double _cms_duration;
  double _cms_duration_pre_sweep; // time from initiation to start of sweep
  double _cms_duration_per_mb;
  double _cms_period;
  size_t _cms_allocated;          // bytes of direct allocation per gc0 period

  // Timers.
  elapsedTimer _cms_timer;
  TimeStamp    _gc0_begin_time;
  TimeStamp    _cms_begin_time;
  TimeStamp    _cms_end_time;

  // Snapshots of the amount used in the CMS generation.
  size_t _cms_used_at_gc0_begin;
  size_t _cms_used_at_gc0_end;
  size_t _cms_used_at_cms_begin;

  // Used to prevent the duty cycle from being reduced in the middle of a cms
  // cycle.
  bool _allow_duty_cycle_reduction;

  enum {
    _GC0_VALID = 0x1,
    _CMS_VALID = 0x2,
    _ALL_VALID = _GC0_VALID | _CMS_VALID
  };

  unsigned int _valid_bits;

  unsigned int _icms_duty_cycle;  // icms duty cycle (0-100).

 protected:

  // Return a duty cycle that avoids wild oscillations, by limiting the amount
  // of change between old_duty_cycle and new_duty_cycle (the latter is treated
  // as a recommended value).
  static unsigned int icms_damped_duty_cycle(unsigned int old_duty_cycle,
                                             unsigned int new_duty_cycle);
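  // One plausible shape for the damping (a sketch only; the real
  // icms_damped_duty_cycle may differ):
  //   const unsigned int max_delta = 10;   // hypothetical cap on change
  //   if (new_dc > old_dc) return MIN2(old_dc + max_delta, new_dc);
  //   else                 return old_dc - MIN2(max_delta, old_dc - new_dc);
  // i.e. the recommended value is approached in bounded steps rather than
  // adopted outright.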
  unsigned int icms_update_duty_cycle_impl();

  // In support of adjusting cms trigger ratios based on history
  // of concurrent mode failure.
  double cms_free_adjustment_factor(size_t free) const;
  void   adjust_cms_free_adjustment_factor(bool fail, size_t free);

 public:
  CMSStats(ConcurrentMarkSweepGeneration* cms_gen,
           unsigned int alpha = CMSExpAvgFactor);

  // Whether or not the statistics contain valid data; higher level statistics
  // cannot be called until this returns true (they require at least one young
  // gen and one cms cycle to have completed).
  bool valid() const;

  // Record statistics.
  void record_gc0_begin();
  void record_gc0_end(size_t cms_gen_bytes_used);
  void record_cms_begin();
  void record_cms_end();

  // Allow management of the cms timer, which must be stopped/started around
  // yield points.
  elapsedTimer& cms_timer()     { return _cms_timer; }
  void start_cms_timer()        { _cms_timer.start(); }
  void stop_cms_timer()         { _cms_timer.stop(); }

  // Basic statistics; units are seconds or bytes.
  double gc0_period() const     { return _gc0_period; }
  double gc0_duration() const   { return _gc0_duration; }
  size_t gc0_promoted() const   { return _gc0_promoted; }
  double cms_period() const     { return _cms_period; }
  double cms_duration() const   { return _cms_duration; }
  double cms_duration_per_mb() const { return _cms_duration_per_mb; }
  size_t cms_allocated() const  { return _cms_allocated; }

  size_t cms_used_at_gc0_end() const { return _cms_used_at_gc0_end;}

  // Seconds since the last background cms cycle began or ended.
  double cms_time_since_begin() const;
  double cms_time_since_end() const;

  // Higher level statistics--caller must check that valid() returns true before
  // calling.

  // Returns bytes promoted per second of wall clock time.
  double promotion_rate() const;

  // Returns bytes directly allocated per second of wall clock time.
  double cms_allocation_rate() const;

  // Rate at which space in the cms generation is being consumed (sum of the
  // above two).
  double cms_consumption_rate() const;

  // Returns an estimate of the number of seconds until the cms generation will
  // fill up, assuming no collection work is done.
  double time_until_cms_gen_full() const;

  // Returns an estimate of the number of seconds remaining until
  // the cms generation collection should start.
  double time_until_cms_start() const;
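
  // A back-of-the-envelope sketch of how these estimates relate (the
  // actual computation may apply additional padding and adjustments):
  //   time_until_cms_gen_full ~ cms_gen_free_bytes / cms_consumption_rate()
  //   time_until_cms_start    ~ time_until_cms_gen_full - expected cms
  //                             duration, so that a cycle started now would
  //                             finish before the generation fills up.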

  // End of higher level statistics.

  // Returns the cms incremental mode duty cycle, as a percentage (0-100).
  unsigned int icms_duty_cycle() const { return _icms_duty_cycle; }

  // Update the duty cycle and return the new value.
  unsigned int icms_update_duty_cycle();

  // Debugging.
  void print_on(outputStream* st) const PRODUCT_RETURN;
  void print() const { print_on(gclog_or_tty); }
};

// A closure related to weak references processing which
// we embed in the CMSCollector, since we need to pass
// it to the reference processor for secondary filtering
// of references based on reachability of referent;
// see role of _is_alive_non_header closure in the
// ReferenceProcessor class.
// For objects in the CMS generation, this closure checks
// if the object is "live" (reachable). Used in weak
// reference processing.
class CMSIsAliveClosure: public BoolObjectClosure {
  const MemRegion  _span;
  const CMSBitMap* _bit_map;

  friend class CMSCollector;
 public:
  CMSIsAliveClosure(MemRegion span,
                    CMSBitMap* bit_map):
    _span(span),
    _bit_map(bit_map) {
    assert(!span.is_empty(), "Empty span could spell trouble");
  }

  void do_object(oop obj) {
    assert(false, "not to be invoked");
  }

  bool do_object_b(oop obj);
};

// Implements AbstractRefProcTaskExecutor for CMS.
class CMSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
 public:

  CMSRefProcTaskExecutor(CMSCollector& collector)
    : _collector(collector)
  { }

  // Executes a task using worker threads.
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
 private:
  CMSCollector& _collector;
};

class CMSCollector: public CHeapObj {
  friend class VMStructs;
  friend class ConcurrentMarkSweepThread;
  friend class ConcurrentMarkSweepGeneration;
  friend class CompactibleFreeListSpace;
  friend class CMSParRemarkTask;
  friend class CMSConcMarkingTask;
  friend class CMSRefProcTaskProxy;
  friend class CMSRefProcTaskExecutor;
  friend class ScanMarkedObjectsAgainCarefullyClosure;  // for sampling eden
  friend class SurvivorSpacePrecleanClosure;            // --- ditto -------
  friend class PushOrMarkClosure;             // to access _restart_addr
  friend class Par_PushOrMarkClosure;         // to access _restart_addr
  friend class MarkFromRootsClosure;          //  -- ditto --
                                              // ... and for clearing cards
  friend class Par_MarkFromRootsClosure;      // to access _restart_addr
                                              // ... and for clearing cards
  friend class Par_ConcMarkingClosure;        // to access _restart_addr etc.
  friend class MarkFromRootsVerifyClosure;    // to access _restart_addr
  friend class PushAndMarkVerifyClosure;      //  -- ditto --
  friend class MarkRefsIntoAndScanClosure;    // to access _overflow_list
  friend class PushAndMarkClosure;            //  -- ditto --
  friend class Par_PushAndMarkClosure;        //  -- ditto --
  friend class CMSKeepAliveClosure;           //  -- ditto --
  friend class CMSDrainMarkingStackClosure;   //  -- ditto --
  friend class CMSInnerParMarkAndPushClosure; //  -- ditto --
  NOT_PRODUCT(friend class ScanMarkedObjectsAgainClosure;) // assertion on _overflow_list
  friend class ReleaseForegroundGC;  // to access _foregroundGCShouldWait
  friend class VM_CMS_Operation;
  friend class VM_CMS_Initial_Mark;
  friend class VM_CMS_Final_Remark;
  friend class TraceCMSMemoryManagerStats;

 private:
  jlong _time_of_last_gc;
  void update_time_of_last_gc(jlong now) {
    _time_of_last_gc = now;
  }

  OopTaskQueueSet* _task_queues;

  // Overflow list of grey objects, threaded through mark-word
  // Manipulated with CAS in the parallel/multi-threaded case.
  oop _overflow_list;
  // The following array-pair keeps track of mark words
  // displaced for accommodating overflow list above.
  // This code will likely be revisited under RFE#4922830.
  Stack<oop>     _preserved_oop_stack;
  Stack<markOop> _preserved_mark_stack;

  int* _hash_seed;

  // In support of multi-threaded concurrent phases
  YieldingFlexibleWorkGang* _conc_workers;

  // Performance Counters
  CollectorCounters* _gc_counters;

  // Initialization Errors
  bool _completed_initialization;

  // In support of ExplicitGCInvokesConcurrent
  static bool  _full_gc_requested;
  unsigned int _collection_count_start;

  // Should we unload classes this concurrent cycle?
  bool _should_unload_classes;
  unsigned int _concurrent_cycles_since_last_unload;
  unsigned int concurrent_cycles_since_last_unload() const {
    return _concurrent_cycles_since_last_unload;
  }
  // Did we (allow) unload classes in the previous concurrent cycle?
  bool unloaded_classes_last_cycle() const {
    return concurrent_cycles_since_last_unload() == 0;
  }
  // Root scanning options for perm gen
  int _roots_scanning_options;
  int roots_scanning_options() const      { return _roots_scanning_options; }
  void add_root_scanning_option(int o)    { _roots_scanning_options |= o;  }
  void remove_root_scanning_option(int o) { _roots_scanning_options &= ~o; }

  // Verification support
  CMSBitMap _verification_mark_bm;
  void verify_after_remark_work_1();
  void verify_after_remark_work_2();

  // true if any verification flag is on.
  bool _verifying;
  bool verifying() const { return _verifying; }
  void set_verifying(bool v) { _verifying = v; }

  // Collector policy
  ConcurrentMarkSweepPolicy* _collector_policy;
  ConcurrentMarkSweepPolicy* collector_policy() { return _collector_policy; }

  // XXX Move these to CMSStats ??? FIX ME !!!
  elapsedTimer _inter_sweep_timer;   // time between sweeps
  elapsedTimer _intra_sweep_timer;   // time _in_ sweeps
  // padded decaying average estimates of the above
  AdaptivePaddedAverage _inter_sweep_estimate;
  AdaptivePaddedAverage _intra_sweep_estimate;

 protected:
  ConcurrentMarkSweepGeneration* _cmsGen;  // old gen (CMS)
  ConcurrentMarkSweepGeneration* _permGen; // perm gen
  MemRegion                      _span;    // span covering above two
  CardTableRS*                   _ct;      // card table

  // CMS marking support structures
  CMSBitMap     _markBitMap;
  CMSBitMap     _modUnionTable;
  CMSMarkStack  _markStack;
  CMSMarkStack  _revisitStack;            // used to keep track of klassKlass objects
                                          // to revisit
  CMSBitMap     _perm_gen_verify_bit_map; // Mark bit map for perm gen verification support.

  HeapWord*     _restart_addr; // in support of marking stack overflow
  void          lower_restart_addr(HeapWord* low);

  // Counters in support of marking stack / work queue overflow handling:
  // a non-zero value indicates certain types of overflow events during
  // the current CMS cycle and could lead to stack resizing efforts at
  // an opportune future time.
  size_t        _ser_pmc_preclean_ovflw;
  size_t        _ser_pmc_remark_ovflw;
  size_t        _par_pmc_remark_ovflw;
  size_t        _ser_kac_preclean_ovflw;
  size_t        _ser_kac_ovflw;
  size_t        _par_kac_ovflw;
  NOT_PRODUCT(ssize_t _num_par_pushes;)

  // ("Weak") Reference processing support
  ReferenceProcessor* _ref_processor;
  CMSIsAliveClosure   _is_alive_closure;
      // keep this textually after _markBitMap and _span; c'tor dependency

  ConcurrentMarkSweepThread* _cmsThread;  // the thread doing the work
  ModUnionClosure    _modUnionClosure;
  ModUnionClosurePar _modUnionClosurePar;

  // CMS abstract state machine
  // initial_state: Idling
  // next_state(Idling) = {Marking}
  // next_state(Marking) = {Precleaning, Sweeping}
  // next_state(Precleaning) = {AbortablePreclean, FinalMarking}
  // next_state(AbortablePreclean) = {FinalMarking}
  // next_state(FinalMarking) = {Sweeping}
  // next_state(Sweeping) = {Resizing}
  // next_state(Resizing) = {Resetting}
  // next_state(Resetting) = {Idling}
  // The numeric values below are chosen so that:
  // . _collectorState <= Idling == post-sweep && pre-mark
  // . _collectorState in (Idling, Sweeping) == {initial,final}marking ||
  //                                            precleaning || abortablePreclean
 public:
  enum CollectorState {
    Resizing          = 0,
    Resetting         = 1,
    Idling            = 2,
    InitialMarking    = 3,
    Marking           = 4,
    Precleaning       = 5,
    AbortablePreclean = 6,
    FinalMarking      = 7,
    Sweeping          = 8
  };
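  // For example (an illustrative consequence of the numbering above, not a
  // method of this class), the second predicate from the comment preceding
  // the enum can be written as a simple range check:
  //   bool marking_or_precleaning = _collectorState > Idling &&
  //                                 _collectorState < Sweeping;
  // which covers InitialMarking through FinalMarking.
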
 protected:
  static CollectorState _collectorState;

  // State related to prologue/epilogue invocation for my generations
  bool _between_prologue_and_epilogue;

  // Signalling/State related to coordination between fore- and background GC
  // Note: When the baton has been passed from background GC to foreground GC,
  // _foregroundGCIsActive is true and _foregroundGCShouldWait is false.
  static bool _foregroundGCIsActive;    // true iff foreground collector is active or
                                        // wants to go active
  static bool _foregroundGCShouldWait;  // true iff background GC is active and has not
                                        // yet passed the baton to the foreground GC

  // Support for CMSScheduleRemark (abortable preclean)
  bool _abort_preclean;
  bool _start_sampling;

  int    _numYields;
  size_t _numDirtyCards;
  size_t _sweep_count;
  // number of full gc's since the last concurrent gc.
  uint   _full_gcs_since_conc_gc;

  // occupancy used for bootstrapping stats
  double _bootstrap_occupancy;

  // timer
  elapsedTimer _timer;

  // Timing, allocation and promotion statistics, used for scheduling.
  CMSStats _stats;

  // Allocation limits installed in the young gen, used only in
  // CMSIncrementalMode. When an allocation in the young gen would cross one of
  // these limits, the cms generation is notified and the cms thread is started
  // or stopped, respectively.
  HeapWord* _icms_start_limit;
  HeapWord* _icms_stop_limit;

  enum CMS_op_type {
    CMS_op_checkpointRootsInitial,
    CMS_op_checkpointRootsFinal
  };

  void do_CMS_operation(CMS_op_type op);
  bool stop_world_and_do(CMS_op_type op);

  OopTaskQueueSet* task_queues()  { return _task_queues; }
  int*             hash_seed(int i) { return &_hash_seed[i]; }
  YieldingFlexibleWorkGang* conc_workers() { return _conc_workers; }

  // Support for parallelizing Eden rescan in CMS remark phase
  void sample_eden(); // ... sample Eden space top

 private:
  // Support for parallelizing young gen rescan in CMS remark phase
  Generation* _young_gen;       // the younger gen
  HeapWord**  _top_addr;        // ... Top of Eden
  HeapWord**  _end_addr;        // ... End of Eden
  HeapWord**  _eden_chunk_array;    // ... Eden partitioning array
  size_t      _eden_chunk_index;    // ... top (exclusive) of array
  size_t      _eden_chunk_capacity; // ... max entries in array

  // Support for parallelizing survivor space rescan
  HeapWord**  _survivor_chunk_array;
  size_t      _survivor_chunk_index;
  size_t      _survivor_chunk_capacity;
  size_t*     _cursor;
  ChunkArray* _survivor_plab_array;

  // Support for marking stack overflow handling
  bool take_from_overflow_list(size_t num, CMSMarkStack* to_stack);
  bool par_take_from_overflow_list(size_t num,
                                   OopTaskQueue* to_work_q,
                                   int no_of_gc_threads);
  void push_on_overflow_list(oop p);
  void par_push_on_overflow_list(oop p);
  // the following is, obviously, not, in general, "MT-stable"
  bool overflow_list_is_empty() const;

  void preserve_mark_if_necessary(oop p);
  void par_preserve_mark_if_necessary(oop p);
  void preserve_mark_work(oop p, markOop m);
  void restore_preserved_marks_if_any();
  NOT_PRODUCT(bool no_preserved_marks() const;)
  // in support of testing overflow code
  NOT_PRODUCT(int _overflow_counter;)
  NOT_PRODUCT(bool simulate_overflow();)      // sequential
  NOT_PRODUCT(bool par_simulate_overflow();)  // MT version

  // CMS work methods
  void checkpointRootsInitialWork(bool asynch); // initial checkpoint work

  // a return value of false indicates failure due to stack overflow
  bool markFromRootsWork(bool asynch); // concurrent marking work

 public:   // FIX ME!!! only for testing
  bool do_marking_st(bool asynch);     // single-threaded marking
  bool do_marking_mt(bool asynch);     // multi-threaded marking

 private:

  // concurrent precleaning work
  size_t preclean_mod_union_table(ConcurrentMarkSweepGeneration* gen,
                                  ScanMarkedObjectsAgainCarefullyClosure* cl);
  size_t preclean_card_table(ConcurrentMarkSweepGeneration* gen,
                             ScanMarkedObjectsAgainCarefullyClosure* cl);
  // Does precleaning work, returning a quantity indicative of
  // the amount of "useful work" done.
  size_t preclean_work(bool clean_refs, bool clean_survivors);
  void abortable_preclean(); // Preclean while looking for possible abort
  void initialize_sequential_subtasks_for_young_gen_rescan(int i);
  // Helper function for above; merge-sorts the per-thread plab samples
  void merge_survivor_plab_arrays(ContiguousSpace* surv, int no_of_gc_threads);
  // Resets (i.e. clears) the per-thread plab sample vectors
  void reset_survivor_plab_arrays();

  // final (second) checkpoint work
  void checkpointRootsFinalWork(bool asynch, bool clear_all_soft_refs,
                                bool init_mark_was_synchronous);
  // work routine for parallel version of remark
  void do_remark_parallel();
  // work routine for non-parallel version of remark
  void do_remark_non_parallel();
  // reference processing work routine (during second checkpoint)
  void refProcessingWork(bool asynch, bool clear_all_soft_refs);

  // concurrent sweeping work
  void sweepWork(ConcurrentMarkSweepGeneration* gen, bool asynch);

  // (concurrent) resetting of support data structures
  void reset(bool asynch);

  // Clear _expansion_cause fields of constituent generations
  void clear_expansion_cause();

  // An auxiliary method used to record the ends of
  // used regions of each generation to limit the extent of sweep
  void save_sweep_limits();

  // Resize the generations included in the collector.
  void compute_new_size();

  // A work method used by foreground collection to determine
  // what type of collection (compacting or not, continuing or fresh)
  // it should do.
  void decide_foreground_collection_type(bool clear_all_soft_refs,
                                         bool* should_compact, bool* should_start_over);

  // A work method used by the foreground collector to do
  // a mark-sweep-compact.
  void do_compaction_work(bool clear_all_soft_refs);

  // A work method used by the foreground collector to do
  // a mark-sweep, after taking over from a possibly on-going
  // concurrent mark-sweep collection.
  void do_mark_sweep_work(bool clear_all_soft_refs,
                          CollectorState first_state, bool should_start_over);

  // If the background GC is active, acquire control from the background
  // GC and do the collection.
  void acquire_control_and_collect(bool full, bool clear_all_soft_refs);

  // For synchronizing passing of control from background to foreground
  // GC. waitForForegroundGC() is called by the background
  // collector. If it had to wait for a foreground collection,
  // it returns true and the background collection should assume
  // that the collection was finished by the foreground
  // collector.
  bool waitForForegroundGC();

  // Incremental mode triggering: recompute the icms duty cycle and set the
  // allocation limits in the young gen.
  void icms_update_allocation_limits();

  size_t block_size_using_printezis_bits(HeapWord* addr) const;
  size_t block_size_if_printezis_bits(HeapWord* addr) const;
  HeapWord* next_card_start_after_block(HeapWord* addr) const;

  void setup_cms_unloading_and_verification_state();
 public:
  CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
               ConcurrentMarkSweepGeneration* permGen,
               CardTableRS*                   ct,
               ConcurrentMarkSweepPolicy*     cp);
  ConcurrentMarkSweepThread* cmsThread() { return _cmsThread; }

  ReferenceProcessor* ref_processor() { return _ref_processor; }
  void ref_processor_init();

  Mutex* bitMapLock() const { return _markBitMap.lock(); }
  static CollectorState abstract_state() { return _collectorState; }

  bool should_abort_preclean() const; // Whether preclean should be aborted.
  size_t get_eden_used() const;
  size_t get_eden_capacity() const;

  ConcurrentMarkSweepGeneration* cmsGen() { return _cmsGen; }

  // locking checks
  NOT_PRODUCT(static bool have_cms_token();)

  // XXXPERM bool should_collect(bool full, size_t size, bool tlab);
  bool shouldConcurrentCollect();

  void collect(bool   full,
               bool   clear_all_soft_refs,
               size_t size,
               bool   tlab);
  void collect_in_background(bool clear_all_soft_refs);
  void collect_in_foreground(bool clear_all_soft_refs);

  // In support of ExplicitGCInvokesConcurrent
  static void request_full_gc(unsigned int full_gc_count);
  // Should we unload classes in a particular concurrent cycle?
  bool should_unload_classes() const {
    return _should_unload_classes;
  }
  bool update_should_unload_classes();

  void direct_allocated(HeapWord* start, size_t size);

  // Object is dead if not marked and current phase is sweeping.
  bool is_dead_obj(oop obj) const;

  // After a promotion (of "start"), do any necessary marking.
  // If "par", then it's being done by a parallel GC thread.
  // The last two args indicate if we need precise marking
  // and if so the size of the object so it can be dirtied
  // in its entirety.
  void promoted(bool par, HeapWord* start,
                bool is_obj_array, size_t obj_size);

  HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
                                     size_t word_size);

  void getFreelistLocks() const;
  void releaseFreelistLocks() const;
  bool haveFreelistLocks() const;

  // GC prologue and epilogue
  void gc_prologue(bool full);
  void gc_epilogue(bool full);

  jlong time_of_last_gc(jlong now) {
    if (_collectorState <= Idling) {
      // gc not in progress
      return _time_of_last_gc;
    } else {
      // collection in progress
      return now;
    }
  }

  // Support for parallel remark of survivor space
  void* get_data_recorder(int thr_num);

  CMSBitMap* markBitMap() { return &_markBitMap; }
  void directAllocated(HeapWord* start, size_t size);

  // main CMS steps and related support
  void checkpointRootsInitial(bool asynch);
  bool markFromRoots(bool asynch);  // a return value of false indicates failure
                                    // due to stack overflow
  void preclean();
  void checkpointRootsFinal(bool asynch, bool clear_all_soft_refs,
                            bool init_mark_was_synchronous);
  void sweep(bool asynch);

  // Check that the currently executing thread is the expected
  // one (foreground collector or background collector).
  static void check_correct_thread_executing() PRODUCT_RETURN;
  // XXXPERM void print_statistics() PRODUCT_RETURN;

  bool is_cms_reachable(HeapWord* addr);

  // Performance Counter Support
  CollectorCounters* counters() { return _gc_counters; }

  // timer stuff
  void    startTimer() { assert(!_timer.is_active(), "Error"); _timer.start(); }
  void    stopTimer()  { assert( _timer.is_active(), "Error"); _timer.stop();  }
  void    resetTimer() { assert(!_timer.is_active(), "Error"); _timer.reset(); }
  double  timerValue() { assert(!_timer.is_active(), "Error"); return _timer.seconds(); }

  int  yields()          { return _numYields; }
  void resetYields()     { _numYields = 0;    }
  void incrementYields() { _numYields++;      }
  void resetNumDirtyCards()               { _numDirtyCards = 0; }
  void incrementNumDirtyCards(size_t num) { _numDirtyCards += num; }
  size_t numDirtyCards()                  { return _numDirtyCards; }

  static bool foregroundGCShouldWait() { return _foregroundGCShouldWait; }
  static void set_foregroundGCShouldWait(bool v) { _foregroundGCShouldWait = v; }
  static bool foregroundGCIsActive() { return _foregroundGCIsActive; }
  static void set_foregroundGCIsActive(bool v) { _foregroundGCIsActive = v; }
  size_t sweep_count() const     { return _sweep_count; }
  void   increment_sweep_count() { _sweep_count++; }

  // Timers/stats for gc scheduling and incremental mode pacing.
  CMSStats& stats() { return _stats; }

  // Convenience methods that check whether CMSIncrementalMode is enabled and
  // forward to the corresponding methods in ConcurrentMarkSweepThread.
  static void start_icms();
  static void stop_icms();    // Called at the end of the cms cycle.
  static void disable_icms(); // Called before a foreground collection.
  static void enable_icms();  // Called after a foreground collection.
  void icms_wait();           // Called at yield points.

  // Adaptive size policy
  CMSAdaptiveSizePolicy* size_policy();
  CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();

  // debugging
  void verify(bool);
  bool verify_after_remark();
  void verify_ok_to_terminate() const PRODUCT_RETURN;
  void verify_work_stacks_empty() const PRODUCT_RETURN;
  void verify_overflow_empty() const PRODUCT_RETURN;

  // convenience methods in support of debugging
  static const size_t skip_header_HeapWords() PRODUCT_RETURN0;
  HeapWord* block_start(const void* p) const PRODUCT_RETURN0;

  // accessors
  CMSMarkStack* verification_mark_stack() { return &_markStack; }
  CMSBitMap*    verification_mark_bm()    { return &_verification_mark_bm; }

  // Get the bit map containing perm gen "deadness" information.
  CMSBitMap* perm_gen_verify_bit_map() { return &_perm_gen_verify_bit_map; }

  // Initialization errors
  bool completed_initialization() { return _completed_initialization; }
};

class CMSExpansionCause : public AllStatic {
 public:
  enum Cause {
    _no_expansion,
    _satisfy_free_ratio,
    _satisfy_promotion,
    _satisfy_allocation,
    _allocate_par_lab,
    _allocate_par_spooling_space,
    _adaptive_size_policy
  };
  // Return a string describing the cause of the expansion.
  static const char* to_string(CMSExpansionCause::Cause cause);
};

class ConcurrentMarkSweepGeneration: public CardGeneration {
  friend class VMStructs;
  friend class ConcurrentMarkSweepThread;
  friend class ConcurrentMarkSweep;
  friend class CMSCollector;
 protected:
  static CMSCollector*      _collector; // the collector that collects us
  CompactibleFreeListSpace* _cmsSpace;  // underlying space (only one for now)

  // Performance Counters
  GenerationCounters* _gen_counters;
  GSpaceCounters*     _space_counters;

  // Words directly allocated, used by CMSStats.
  size_t _direct_allocated_words;

  // Non-product stat counters
  NOT_PRODUCT(
    size_t _numObjectsPromoted;
    size_t _numWordsPromoted;
    size_t _numObjectsAllocated;
    size_t _numWordsAllocated;
  )

  // Used for sizing decisions
  bool _incremental_collection_failed;
  bool incremental_collection_failed() {
    return _incremental_collection_failed;
  }
  void set_incremental_collection_failed() {
    _incremental_collection_failed = true;
  }
  void clear_incremental_collection_failed() {
    _incremental_collection_failed = false;
  }

  // accessors
  void set_expansion_cause(CMSExpansionCause::Cause v) { _expansion_cause = v;}
  CMSExpansionCause::Cause expansion_cause() const { return _expansion_cause; }

 private:
  // For parallel young-gen GC support.
  CMSParGCThreadState** _par_gc_thread_states;

  // Reason generation was expanded
  CMSExpansionCause::Cause _expansion_cause;

  // In support of MinChunkSize being larger than min object size
  const double _dilatation_factor;

  enum CollectionTypes {
    Concurrent_collection_type     = 0,
    MS_foreground_collection_type  = 1,
    MSC_foreground_collection_type = 2,
    Unknown_collection_type        = 3
  };

  CollectionTypes _debug_collection_type;

  // Fraction of current occupancy at which to start a CMS collection which
  // will collect this generation (at least).
  double _initiating_occupancy;

 protected:
  // Shrink generation by specified size (returns false if unable to shrink)
  virtual void shrink_by(size_t bytes);

  // Update statistics for GC
  virtual void update_gc_stats(int level, bool full);

  // Maximum available space in the generation (including uncommitted)
  // space.
  size_t max_available() const;

  // getter and initializer for _initiating_occupancy field.
  double initiating_occupancy() const { return _initiating_occupancy; }
  void   init_initiating_occupancy(intx io, intx tr);

 public:
  ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
                                int level, CardTableRS* ct,
                                bool use_adaptive_freelists,
                                FreeBlockDictionary::DictionaryChoice);

  // Accessors
  CMSCollector* collector() const { return _collector; }
  static void set_collector(CMSCollector* collector) {
    assert(_collector == NULL, "already set");
    _collector = collector;
  }
  CompactibleFreeListSpace* cmsSpace() const { return _cmsSpace; }

  Mutex* freelistLock() const;

  virtual Generation::Name kind() { return Generation::ConcurrentMarkSweep; }

  // Adaptive size policy
  CMSAdaptiveSizePolicy* size_policy();

  bool refs_discovery_is_atomic() const { return false; }
  bool refs_discovery_is_mt()     const {
    // Note: CMS does MT-discovery during the parallel-remark
    // phases. Use ReferenceProcessorMTMutator to make refs
    // discovery MT-safe during such phases or other parallel
    // discovery phases in the future. This may all go away
    // if/when we decide that refs discovery is sufficiently
    // rare that the cost of the CAS's involved is in the
    // noise. That's a measurement that should be done, and
    // the code simplified if that turns out to be the case.
    return false;
  }

  // Override
  virtual void ref_processor_init();

  // Grow generation by specified size (returns false if unable to grow)
  bool grow_by(size_t bytes);
  // Grow generation to reserved size.
  bool grow_to_reserved();

  void clear_expansion_cause() { _expansion_cause = CMSExpansionCause::_no_expansion; }

  // Space enquiries
  size_t capacity() const;
  size_t used() const;
  size_t free() const;
  double occupancy() const { return ((double)used())/((double)capacity()); }
  size_t contiguous_available() const;
  size_t unsafe_max_alloc_nogc() const;

  // over-rides
  MemRegion used_region() const;
  MemRegion used_region_at_save_marks() const;

  // Does a "full" (forced) collection invoked on this generation collect
  // all younger generations as well? Note that the second conjunct is a
  // hack to allow the collection of the younger gen first if the flag is
  // set. This is better than using the policy's should_collect_gen0_first(),
  // since that causes us to do an extra unnecessary pair of restart-&-stop-world.
  virtual bool full_collects_younger_generations() const {
    return UseCMSCompactAtFullCollection && !CollectGen0First;
  }

  void space_iterate(SpaceClosure* blk, bool usedOnly = false);

  // Support for compaction
  CompactibleSpace* first_compaction_space() const;
  // Adjust quantities in the generation affected by
  // the compaction.
  void reset_after_compaction();

  // Allocation support
  HeapWord* allocate(size_t size, bool tlab);
  HeapWord* have_lock_and_allocate(size_t size, bool tlab);
  oop       promote(oop obj, size_t obj_size);
  HeapWord* par_allocate(size_t size, bool tlab) {
    return allocate(size, tlab);
  }

  // Incremental mode triggering.
  HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
                                     size_t word_size);

  // Used by CMSStats to track direct allocation. The value is sampled and
  // reset after each young gen collection.
  size_t direct_allocated_words() const { return _direct_allocated_words; }
  void reset_direct_allocated_words()   { _direct_allocated_words = 0; }

  // Overrides for parallel promotion.
  virtual oop par_promote(int thread_num,
                          oop obj, markOop m, size_t word_sz);
  // This one should not be called for CMS.
  virtual void par_promote_alloc_undo(int thread_num,
                                      HeapWord* obj, size_t word_sz);
  virtual void par_promote_alloc_done(int thread_num);
  virtual void par_oop_since_save_marks_iterate_done(int thread_num);

  virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes) const;

  // Inform this (non-young) generation that a promotion failure was
  // encountered during a collection of a younger generation that
  // promotes into this generation.
  virtual void promotion_failure_occurred();

  bool should_collect(bool full, size_t size, bool tlab);
  virtual bool should_concurrent_collect() const;
  virtual bool is_too_full() const;
  void collect(bool   full,
               bool   clear_all_soft_refs,
               size_t size,
               bool   tlab);

  HeapWord* expand_and_allocate(size_t word_size,
                                bool tlab,
                                bool parallel = false);

  // GC prologue and epilogue
  void gc_prologue(bool full);
  void gc_prologue_work(bool full, bool registerClosure,
                        ModUnionClosure* modUnionClosure);
  void gc_epilogue(bool full);
  void gc_epilogue_work(bool full);

  // Time since last GC of this generation
  jlong time_of_last_gc(jlong now) {
    return collector()->time_of_last_gc(now);
  }
  void update_time_of_last_gc(jlong now) {
    collector()->update_time_of_last_gc(now);
  }

  // Allocation failure
  void expand(size_t bytes, size_t expand_bytes,
              CMSExpansionCause::Cause cause);
  virtual bool expand(size_t bytes, size_t expand_bytes);
  void shrink(size_t bytes);
  HeapWord* expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz);
  bool expand_and_ensure_spooling_space(PromotionInfo* promo);

  // Iteration support and related enquiries
  void save_marks();
  bool no_allocs_since_save_marks();
  void object_iterate_since_last_GC(ObjectClosure* cl);
  void younger_refs_iterate(OopsInGenClosure* cl);

  // Iteration support specific to CMS generations
  void save_sweep_limit();

  // More iteration support
  virtual void oop_iterate(MemRegion mr, OopClosure* cl);
  virtual void oop_iterate(OopClosure* cl);
  virtual void safe_object_iterate(ObjectClosure* cl);
  virtual void object_iterate(ObjectClosure* cl);

  // Need to declare the full complement of closures, whether we'll
  // override them or not, or get message from the compiler:
  //   oop_since_save_marks_iterate_nv hides virtual function...
  #define CMS_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
    void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
  ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DECL)
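
  // For instance, instantiating the macro by hand for one illustrative
  // (closure type, suffix) pair would declare:
  //   void oop_since_save_marks_iterate_nv(ScanClosure* cl);
  // with one such declaration generated per pair listed in
  // ALL_SINCE_SAVE_MARKS_CLOSURES.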

  // Smart allocation XXX -- move to CFLSpace?
  void setNearLargestChunk();
  bool isNearLargestChunk(HeapWord* addr);

  // Get the chunk at the end of the space. Delegates to
  // the space.
  FreeChunk* find_chunk_at_end();

  // Overriding of unused functionality (sharing not yet supported with CMS)
  void pre_adjust_pointers();
  void post_compact();

  // Debugging
  void prepare_for_verify();
  void verify(bool allow_dirty);
  void print_statistics() PRODUCT_RETURN;

  // Performance Counters support
  virtual void update_counters();
  virtual void update_counters(size_t used);
  void initialize_performance_counters();
  CollectorCounters* counters() { return collector()->counters(); }

  // Support for parallel remark of survivor space
  void* get_data_recorder(int thr_num) {
    // delegate to collector
    return collector()->get_data_recorder(thr_num);
  }

  // Printing
  const char* name() const;
  virtual const char* short_name() const { return "CMS"; }
  void print() const;
  void printOccupancy(const char* s);
  bool must_be_youngest() const { return false; }
  bool must_be_oldest()   const { return true; }

  void compute_new_size();

  CollectionTypes debug_collection_type() { return _debug_collection_type; }
  void rotate_debug_collection_type();
};

class ASConcurrentMarkSweepGeneration : public ConcurrentMarkSweepGeneration {

  // Return the size policy from the heap's collector
  // policy casted to CMSAdaptiveSizePolicy*.
  CMSAdaptiveSizePolicy* cms_size_policy() const;

  // Resize the generation based on the adaptive size
  // policy.
  void resize(size_t cur_promo, size_t desired_promo);

  // Return the GC counters from the collector policy
  CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();

  virtual void shrink_by(size_t bytes);

 public:
  virtual void compute_new_size();
  ASConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
                                  int level, CardTableRS* ct,
                                  bool use_adaptive_freelists,
                                  FreeBlockDictionary::DictionaryChoice
                                    dictionaryChoice) :
    ConcurrentMarkSweepGeneration(rs, initial_byte_size, level, ct,
      use_adaptive_freelists, dictionaryChoice) {}

  virtual const char* short_name() const { return "ASCMS"; }
  virtual Generation::Name kind() { return Generation::ASConcurrentMarkSweep; }

  virtual void update_counters();
  virtual void update_counters(size_t used);
};

//
// Closures of various sorts used by CMS to accomplish its work
//

// This closure is used to check that a certain set of oops is empty.
class FalseClosure: public OopClosure {
 public:
  void do_oop(oop* p)       { guarantee(false, "Should be an empty set"); }
  void do_oop(narrowOop* p) { guarantee(false, "Should be an empty set"); }
};

// This closure is used to do concurrent marking from the roots
// following the first checkpoint.
class MarkFromRootsClosure: public BitMapClosure {
  CMSCollector*  _collector;
  MemRegion      _span;
  CMSBitMap*     _bitMap;
  CMSBitMap*     _mut;
  CMSMarkStack*  _markStack;
  CMSMarkStack*  _revisitStack;
  bool           _yield;
  int            _skipBits;
  HeapWord*      _finger;
  HeapWord*      _threshold;
  DEBUG_ONLY(bool _verifying;)

 public:
  MarkFromRootsClosure(CMSCollector* collector, MemRegion span,
                       CMSBitMap* bitMap,
                       CMSMarkStack* markStack,
                       CMSMarkStack* revisitStack,
                       bool should_yield, bool verifying = false);
  bool do_bit(size_t offset);
  void reset(HeapWord* addr);
  inline void do_yield_check();

 private:
  void scanOopsInOop(HeapWord* ptr);
  void do_yield_work();
};

// This closure is used to do concurrent multi-threaded
// marking from the roots following the first checkpoint.
// XXX This should really be a subclass of the serial version
// above, but I have not had the time to refactor things cleanly.
// That will be done for Dolphin.
class Par_MarkFromRootsClosure: public BitMapClosure {
  CMSCollector*  _collector;
  MemRegion      _whole_span;
  MemRegion      _span;
  CMSBitMap*     _bit_map;
  CMSBitMap*     _mut;
  OopTaskQueue*  _work_queue;
  CMSMarkStack*  _overflow_stack;
  CMSMarkStack*  _revisit_stack;
  bool           _yield;
  int            _skip_bits;
  HeapWord*      _finger;
  HeapWord*      _threshold;
  CMSConcMarkingTask* _task;
 public:
  Par_MarkFromRootsClosure(CMSConcMarkingTask* task, CMSCollector* collector,
                           MemRegion span,
                           CMSBitMap* bit_map,
                           OopTaskQueue* work_queue,
                           CMSMarkStack* overflow_stack,
                           CMSMarkStack* revisit_stack,
                           bool should_yield);
  bool do_bit(size_t offset);
  inline void do_yield_check();

 private:
  void scan_oops_in_oop(HeapWord* ptr);
  void do_yield_work();
  bool get_work_from_overflow_stack();
};

// The following closures are used to do certain kinds of verification of
// CMS marking.
class PushAndMarkVerifyClosure: public OopClosure {
  CMSCollector*  _collector;
  MemRegion      _span;
  CMSBitMap*     _verification_bm;
  CMSBitMap*     _cms_bm;
  CMSMarkStack*  _mark_stack;
 protected:
  void do_oop(oop p);
  template <class T> inline void do_oop_work(T *p) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    do_oop(obj);
  }
 public:
  PushAndMarkVerifyClosure(CMSCollector* cms_collector,
                           MemRegion span,
                           CMSBitMap* verification_bm,
                           CMSBitMap* cms_bm,
                           CMSMarkStack* mark_stack);
  void do_oop(oop* p);
  void do_oop(narrowOop* p);
  // Deal with a stack overflow condition
  void handle_stack_overflow(HeapWord* lost);
};

class MarkFromRootsVerifyClosure: public BitMapClosure {
  CMSCollector*  _collector;
  MemRegion      _span;
  CMSBitMap*     _verification_bm;
  CMSBitMap*     _cms_bm;
  CMSMarkStack*  _mark_stack;
  HeapWord*      _finger;
  PushAndMarkVerifyClosure _pam_verify_closure;
 public:
  MarkFromRootsVerifyClosure(CMSCollector* collector, MemRegion span,
                             CMSBitMap* verification_bm,
                             CMSBitMap* cms_bm,
                             CMSMarkStack* mark_stack);
  bool do_bit(size_t offset);
  void reset(HeapWord* addr);
};

// This closure is used to check that a certain set of bits is
// "empty" (i.e. the bit vector doesn't have any 1-bits).
class FalseBitMapClosure: public BitMapClosure {
 public:
  bool do_bit(size_t offset) {
    guarantee(false, "Should not have a 1 bit");
    return true;
  }
};

// This closure is used during the second checkpointing phase
// to rescan the marked objects on the dirty cards in the mod
// union table and the card table proper. It's invoked via
// MarkFromDirtyCardsClosure below. It uses either
// [Par_]MarkRefsIntoAndScanClosure (Par_ in the parallel case)
// declared in genOopClosures.hpp to accomplish some of its work.
// In the parallel case the bitMap is shared, so access to
// it needs to be suitably synchronized for updates by embedded
// closures that update it; however, this closure itself only
// reads the bit_map and because it is idempotent, is immune to
// reading stale values.
class ScanMarkedObjectsAgainClosure: public UpwardsObjectClosure {
  #ifdef ASSERT
    CMSCollector*   _collector;
    MemRegion       _span;
    union {
      CMSMarkStack* _mark_stack;
      OopTaskQueue* _work_queue;
    };
  #endif // ASSERT
  bool       _parallel;
  CMSBitMap* _bit_map;
  union {
    MarkRefsIntoAndScanClosure*     _scan_closure;
    Par_MarkRefsIntoAndScanClosure* _par_scan_closure;
  };

 public:
  ScanMarkedObjectsAgainClosure(CMSCollector* collector,
                                MemRegion span,
                                ReferenceProcessor* rp,
                                CMSBitMap* bit_map,
                                CMSMarkStack* mark_stack,
                                CMSMarkStack* revisit_stack,
                                MarkRefsIntoAndScanClosure* cl):
    #ifdef ASSERT
      _collector(collector),
      _span(span),
      _mark_stack(mark_stack),
    #endif // ASSERT
    _parallel(false),
    _bit_map(bit_map),
    _scan_closure(cl) { }

  ScanMarkedObjectsAgainClosure(CMSCollector* collector,
                                MemRegion span,
                                ReferenceProcessor* rp,
                                CMSBitMap* bit_map,
                                OopTaskQueue* work_queue,
                                CMSMarkStack* revisit_stack,
                                Par_MarkRefsIntoAndScanClosure* cl):
    #ifdef ASSERT
      _collector(collector),
      _span(span),
      _work_queue(work_queue),
    #endif // ASSERT
    _parallel(true),
    _bit_map(bit_map),
    _par_scan_closure(cl) { }

  void do_object(oop obj) {
    guarantee(false, "Call do_object_b(oop, MemRegion) instead");
  }
  bool do_object_b(oop obj) {
    guarantee(false, "Call do_object_b(oop, MemRegion) form instead");
    return false;
  }
  bool do_object_bm(oop p, MemRegion mr);
};
1544 // This closure is used during the second checkpointing phase
1545 // to rescan the marked objects on the dirty cards in the mod
1546 // union table and the card table proper. It invokes
1547 // ScanMarkedObjectsAgainClosure above to accomplish much of its work.
1548 // In the parallel case, the bit map is shared and requires
1549 // synchronized access.
1550 class MarkFromDirtyCardsClosure: public MemRegionClosure {
1551 CompactibleFreeListSpace* _space;
1552 ScanMarkedObjectsAgainClosure _scan_cl;
1553 size_t _num_dirty_cards;
1555 public:
1556 MarkFromDirtyCardsClosure(CMSCollector* collector,
1557 MemRegion span,
1558 CompactibleFreeListSpace* space,
1559 CMSBitMap* bit_map,
1560 CMSMarkStack* mark_stack,
1561 CMSMarkStack* revisit_stack,
1562 MarkRefsIntoAndScanClosure* cl):
1563 _space(space),
1564 _num_dirty_cards(0),
1565 _scan_cl(collector, span, collector->ref_processor(), bit_map,
1566 mark_stack, revisit_stack, cl) { }
1568 MarkFromDirtyCardsClosure(CMSCollector* collector,
1569 MemRegion span,
1570 CompactibleFreeListSpace* space,
1571 CMSBitMap* bit_map,
1572 OopTaskQueue* work_queue,
1573 CMSMarkStack* revisit_stack,
1574 Par_MarkRefsIntoAndScanClosure* cl):
1575 _space(space),
1576 _num_dirty_cards(0),
1577 _scan_cl(collector, span, collector->ref_processor(), bit_map,
1578 work_queue, revisit_stack, cl) { }
1580 void do_MemRegion(MemRegion mr);
1581 void set_space(CompactibleFreeListSpace* space) { _space = space; }
1582 size_t num_dirty_cards() { return _num_dirty_cards; }
1583 };
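
// Usage sketch (illustrative; assumes the collector's mod union table
// exposes dirty_range_iterate_clear(MemRegion, MemRegionClosure*), which
// hands each run of dirty cards to the closure and then clears them):
//
//   MarkFromDirtyCardsClosure mfdc(collector, span, cms_space, bit_map,
//                                  mark_stack, revisit_stack, &mrias_cl);
//   mod_union_table->dirty_range_iterate_clear(span, &mfdc);
//   // mfdc.num_dirty_cards() reports how many dirty cards were rescanned.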
1585 // This closure is used in the non-product build as a sentinel:
1586 // it asserts that it is never invoked with any MemRegion at all.
1587 class FalseMemRegionClosure: public MemRegionClosure {
1588 void do_MemRegion(MemRegion mr) {
1589 guarantee(!mr.is_empty(), "Shouldn't be empty");
1590 guarantee(false, "Should never be here");
1591 }
1592 };
1594 // This closure is used during the precleaning phase
1595 // to "carefully" rescan marked objects on dirty cards.
1596 // It uses MarkRefsIntoAndScanClosure declared in genOopClosures.hpp
1597 // to accomplish some of its work.
1598 class ScanMarkedObjectsAgainCarefullyClosure: public ObjectClosureCareful {
1599 CMSCollector* _collector;
1600 MemRegion _span;
1601 bool _yield;
1602 Mutex* _freelistLock;
1603 CMSBitMap* _bitMap;
1604 CMSMarkStack* _markStack;
1605 MarkRefsIntoAndScanClosure* _scanningClosure;
1607 public:
1608 ScanMarkedObjectsAgainCarefullyClosure(CMSCollector* collector,
1609 MemRegion span,
1610 CMSBitMap* bitMap,
1611 CMSMarkStack* markStack,
1612 CMSMarkStack* revisitStack,
1613 MarkRefsIntoAndScanClosure* cl,
1614 bool should_yield):
1615 _collector(collector),
1616 _span(span),
1617 _yield(should_yield),
1618 _bitMap(bitMap),
1619 _markStack(markStack),
1620 _scanningClosure(cl) {
1621 }
1623 void do_object(oop p) {
1624     guarantee(false, "Call do_object_careful_m(oop, MemRegion) instead");
1625 }
1627 size_t do_object_careful(oop p) {
1628     guarantee(false, "Call do_object_careful_m(oop, MemRegion) instead");
1629 return 0;
1630 }
1632 size_t do_object_careful_m(oop p, MemRegion mr);
1634 void setFreelistLock(Mutex* m) {
1635 _freelistLock = m;
1636 _scanningClosure->set_freelistLock(m);
1637 }
1639 private:
1640 inline bool do_yield_check();
1642 void do_yield_work();
1643 };
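
// Yield-protocol sketch (simplified; the real do_yield_check() and
// do_yield_work() bodies live in the .cpp file). The inline check is
// typically of this shape:
//
//   inline bool ScanMarkedObjectsAgainCarefullyClosure::do_yield_check() {
//     if (ConcurrentMarkSweepThread::should_yield() &&
//         !_collector->foregroundGCIsActive() &&
//         _yield) {
//       do_yield_work();  // drop _freelistLock and the bit map lock,
//                         // sleep briefly, then reacquire both
//       return true;
//     }
//     return false;
//   }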
1645 class SurvivorSpacePrecleanClosure: public ObjectClosureCareful {
1646 CMSCollector* _collector;
1647 MemRegion _span;
1648 bool _yield;
1649 CMSBitMap* _bit_map;
1650 CMSMarkStack* _mark_stack;
1651 PushAndMarkClosure* _scanning_closure;
1652 unsigned int _before_count;
1654 public:
1655 SurvivorSpacePrecleanClosure(CMSCollector* collector,
1656 MemRegion span,
1657 CMSBitMap* bit_map,
1658 CMSMarkStack* mark_stack,
1659 PushAndMarkClosure* cl,
1660 unsigned int before_count,
1661 bool should_yield):
1662 _collector(collector),
1663 _span(span),
1664 _yield(should_yield),
1665 _bit_map(bit_map),
1666 _mark_stack(mark_stack),
1667 _scanning_closure(cl),
1668 _before_count(before_count)
1669 { }
1671 void do_object(oop p) {
1672     guarantee(false, "Call do_object_careful(oop) instead");
1673 }
1675 size_t do_object_careful(oop p);
1677 size_t do_object_careful_m(oop p, MemRegion mr) {
1678     guarantee(false, "Call do_object_careful(oop) instead");
1679 return 0;
1680 }
1682 private:
1683 inline void do_yield_check();
1684 void do_yield_work();
1685 };
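
// Usage sketch (illustrative; assumes before_count is the heap's
// total_collections() captured just before precleaning, so a scavenge
// that rearranges the survivor spaces can be detected and the preclean
// abandoned):
//
//   unsigned int before_count =
//     GenCollectedHeap::heap()->total_collections();
//   SurvivorSpacePrecleanClosure sss_cl(collector, span, &mark_bit_map,
//                                       &mark_stack, &pam_cl,
//                                       before_count, CMSYield);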
1687 // This closure is used to accomplish the sweeping work
1688 // after the second checkpoint but before the concurrent reset
1689 // phase.
1690 //
1691 // Terminology
1692 //   left hand chunk (LHC) - block of one or more chunks currently being
1693 //     coalesced. The LHC is available for coalescing with a new chunk.
1694 //   right hand chunk (RHC) - the block currently being swept, which is
1695 //     free or garbage and can be coalesced with the LHC.
1696 // _inFreeRange is true if there is currently an LHC.
1697 // _lastFreeRangeCoalesced is true if the LHC consists of more than one chunk.
1698 // _freeRangeInFreeLists is true if the LHC is in the free lists.
1699 // _freeFinger is the address of the current LHC.
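//
// Worked example (illustrative): sweeping the block sequence F G F L,
// where F is a chunk already on the free lists, G is garbage, L is live:
//   1st F: start a free range; _inFreeRange = true, _freeFinger = F,
//          _freeRangeInFreeLists = true (F is left on the free lists)
//   G:     coalesce into the LHC; _lastFreeRangeCoalesced = true
//   2nd F: remove it from the free lists, then coalesce it into the LHC
//   L:     the free run ends; flush the coalesced LHC back to the free
//          lists and reset _inFreeRange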
1700 class SweepClosure: public BlkClosureCareful {
1701 CMSCollector* _collector; // collector doing the work
1702 ConcurrentMarkSweepGeneration* _g; // Generation being swept
1703 CompactibleFreeListSpace* _sp; // Space being swept
1704   HeapWord*                      _limit;      // Address at or above which
1705                                               // the sweep stops
1705 Mutex* _freelistLock; // Free list lock (in space)
1706 CMSBitMap* _bitMap; // Marking bit map (in
1707 // generation)
1708 bool _inFreeRange; // Indicates if we are in the
1709 // midst of a free run
1710 bool _freeRangeInFreeLists;
1711 // Often, we have just found
1712 // a free chunk and started
1713 // a new free range; we do not
1714 // eagerly remove this chunk from
1715 // the free lists unless there is
1716 // a possibility of coalescing.
1717 // When true, this flag indicates
1718 // that the _freeFinger below
1719 // points to a potentially free chunk
1720 // that may still be in the free lists
1721 bool _lastFreeRangeCoalesced;
1722 // free range contains chunks
1723 // coalesced
1724 bool _yield;
1725 // Whether sweeping should be
1726 // done with yields. For instance
1727 // when done by the foreground
1728 // collector we shouldn't yield.
1729 HeapWord* _freeFinger; // When _inFreeRange is set, the
1730 // pointer to the "left hand
1731 // chunk"
1732 size_t _freeRangeSize;
1733 // When _inFreeRange is set, this
1734 // indicates the accumulated size
1735 // of the "left hand chunk"
1736 NOT_PRODUCT(
1737 size_t _numObjectsFreed;
1738 size_t _numWordsFreed;
1739 size_t _numObjectsLive;
1740 size_t _numWordsLive;
1741 size_t _numObjectsAlreadyFree;
1742 size_t _numWordsAlreadyFree;
1743 FreeChunk* _last_fc;
1744 )
1745 private:
1746 // Code that is common to a free chunk or garbage when
1747 // encountered during sweeping.
1748 void doPostIsFreeOrGarbageChunk(FreeChunk *fc,
1749 size_t chunkSize);
1750 // Process a free chunk during sweeping.
1751 void doAlreadyFreeChunk(FreeChunk *fc);
1752 // Process a garbage chunk during sweeping.
1753 size_t doGarbageChunk(FreeChunk *fc);
1754 // Process a live chunk during sweeping.
1755 size_t doLiveChunk(FreeChunk* fc);
1757 // Accessors.
1758 HeapWord* freeFinger() const { return _freeFinger; }
1759 void set_freeFinger(HeapWord* v) { _freeFinger = v; }
1760 size_t freeRangeSize() const { return _freeRangeSize; }
1761 void set_freeRangeSize(size_t v) { _freeRangeSize = v; }
1762 bool inFreeRange() const { return _inFreeRange; }
1763 void set_inFreeRange(bool v) { _inFreeRange = v; }
1764 bool lastFreeRangeCoalesced() const { return _lastFreeRangeCoalesced; }
1765 void set_lastFreeRangeCoalesced(bool v) { _lastFreeRangeCoalesced = v; }
1766 bool freeRangeInFreeLists() const { return _freeRangeInFreeLists; }
1767 void set_freeRangeInFreeLists(bool v) { _freeRangeInFreeLists = v; }
1769 // Initialize a free range.
1770 void initialize_free_range(HeapWord* freeFinger, bool freeRangeInFreeLists);
1771 // Return this chunk to the free lists.
1772 void flushCurFreeChunk(HeapWord* chunk, size_t size);
1774 // Check if we should yield and do so when necessary.
1775 inline void do_yield_check(HeapWord* addr);
1777   // Yield: release locks, allow other threads to run, then reacquire.
1778 void do_yield_work(HeapWord* addr);
1780 // Debugging/Printing
1781 void record_free_block_coalesced(FreeChunk* fc) const PRODUCT_RETURN;
1783 public:
1784 SweepClosure(CMSCollector* collector, ConcurrentMarkSweepGeneration* g,
1785 CMSBitMap* bitMap, bool should_yield);
1786 ~SweepClosure();
1788 size_t do_blk_careful(HeapWord* addr);
1789 };
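
// Driving sketch (illustrative; assumes CompactibleFreeListSpace's
// careful block iterator, blk_iterate_careful(BlkClosureCareful*), as
// the sweep phase's entry point):
//
//   SweepClosure sweep_cl(collector, gen, &mark_bit_map, CMSYield);
//   gen->cmsSpace()->blk_iterate_careful(&sweep_cl);
//   // ~SweepClosure() flushes any free range still open at the end.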
1791 // Closures related to weak references processing
1793 // During CMS' weak reference processing, this is a
1794 // work-routine/closure used to complete transitive
1795 // marking of objects as live after an initial set has
1796 // been completely accumulated.
1797 // This closure is currently used both during the final
1798 // remark stop-world phase and during the concurrent
1799 // precleaning of the discovered reference lists.
1800 class CMSDrainMarkingStackClosure: public VoidClosure {
1801 CMSCollector* _collector;
1802 MemRegion _span;
1803 CMSMarkStack* _mark_stack;
1804 CMSBitMap* _bit_map;
1805 CMSKeepAliveClosure* _keep_alive;
1806 bool _concurrent_precleaning;
1807 public:
1808 CMSDrainMarkingStackClosure(CMSCollector* collector, MemRegion span,
1809 CMSBitMap* bit_map, CMSMarkStack* mark_stack,
1810 CMSKeepAliveClosure* keep_alive,
1811 bool cpc):
1812 _collector(collector),
1813 _span(span),
1814 _bit_map(bit_map),
1815 _mark_stack(mark_stack),
1816 _keep_alive(keep_alive),
1817 _concurrent_precleaning(cpc) {
1818 assert(_concurrent_precleaning == _keep_alive->concurrent_precleaning(),
1819 "Mismatch");
1820 }
1822 void do_void();
1823 };
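
// Sketch of the drain loop do_void() implements (simplified; the real
// implementation also refills the stack from the collector's overflow
// list when it runs dry):
//
//   void CMSDrainMarkingStackClosure::do_void() {
//     while (!_mark_stack->isEmpty()) {
//       oop obj = _mark_stack->pop();
//       assert(_bit_map->isMarked((HeapWord*)obj), "only marked objects");
//       obj->oop_iterate(_keep_alive);  // keep referents transitively alive
//     }
//   }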
1825 // A parallel version of CMSDrainMarkingStackClosure above.
1826 class CMSParDrainMarkingStackClosure: public VoidClosure {
1827 CMSCollector* _collector;
1828 MemRegion _span;
1829 OopTaskQueue* _work_queue;
1830 CMSBitMap* _bit_map;
1831 CMSInnerParMarkAndPushClosure _mark_and_push;
1833 public:
1834 CMSParDrainMarkingStackClosure(CMSCollector* collector,
1835 MemRegion span, CMSBitMap* bit_map,
1836 CMSMarkStack* revisit_stack,
1837 OopTaskQueue* work_queue):
1838 _collector(collector),
1839 _span(span),
1840 _bit_map(bit_map),
1841 _work_queue(work_queue),
1842 _mark_and_push(collector, span, bit_map, revisit_stack, work_queue) { }
1845 void trim_queue(uint max);
1846 void do_void();
1847 };
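
// Sketch of trim_queue(max) (simplified): pop and process local entries
// until the work queue is no longer above max:
//
//   void CMSParDrainMarkingStackClosure::trim_queue(uint max) {
//     while (_work_queue->size() > max) {
//       oop obj;
//       if (_work_queue->pop_local(obj)) {
//         obj->oop_iterate(&_mark_and_push);
//       }
//     }
//   }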
1849 // Allow yielding or short-circuiting of reference list
1850 // precleaning work.
1851 class CMSPrecleanRefsYieldClosure: public YieldClosure {
1852 CMSCollector* _collector;
1853 void do_yield_work();
1854 public:
1855 CMSPrecleanRefsYieldClosure(CMSCollector* collector):
1856 _collector(collector) {}
1857 virtual bool should_return();
1858 };
1861 // Convenience class that acquires the free list locks of a given CMS collector
1862 class FreelistLocker: public StackObj {
1863 private:
1864 CMSCollector* _collector;
1865 public:
1866 FreelistLocker(CMSCollector* collector):
1867 _collector(collector) {
1868 _collector->getFreelistLocks();
1869 }
1871 ~FreelistLocker() {
1872 _collector->releaseFreelistLocks();
1873 }
1874 };
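
// Usage sketch: FreelistLocker is a scoped (RAII) lock, so a typical
// call site (hypothetical) simply brackets the free-list work:
//
//   {
//     FreelistLocker fll(collector);  // acquires all free list locks
//     /* inspect or update the free lists */
//   }                                 // destructor releases the locks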
1876 // Mark all dead objects in a given space.
1877 class MarkDeadObjectsClosure: public BlkClosure {
1878 const CMSCollector* _collector;
1879 const CompactibleFreeListSpace* _sp;
1880 CMSBitMap* _live_bit_map;
1881 CMSBitMap* _dead_bit_map;
1882 public:
1883 MarkDeadObjectsClosure(const CMSCollector* collector,
1884 const CompactibleFreeListSpace* sp,
1885 CMSBitMap *live_bit_map,
1886 CMSBitMap *dead_bit_map) :
1887 _collector(collector),
1888 _sp(sp),
1889 _live_bit_map(live_bit_map),
1890 _dead_bit_map(dead_bit_map) {}
1891 size_t do_blk(HeapWord* addr);
1892 };
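
// Sketch of what do_blk() computes (simplified; the real implementation
// is in the .cpp file): a block that is an object but is not marked in
// the live bit map is recorded in the dead bit map, and the block size
// is returned so iteration steps to the next block:
//
//   size_t MarkDeadObjectsClosure::do_blk(HeapWord* addr) {
//     if (_sp->block_is_obj(addr) && !_live_bit_map->isMarked(addr)) {
//       _dead_bit_map->mark(addr);   // record a dead object
//     }
//     return _sp->block_size(addr);  // advance past this block
//   }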
1894 class TraceCMSMemoryManagerStats : public TraceMemoryManagerStats {
1896 public:
1897 TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase);
1898 TraceCMSMemoryManagerStats();
1899 };
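
// Usage sketch (illustrative): like its TraceMemoryManagerStats base
// class, this is a scoped tracer; constructing one at the start of a
// collection phase notifies the memory service and the destructor
// closes the record ("phase" below stands for any
// CMSCollector::CollectorState value):
//
//   {
//     TraceCMSMemoryManagerStats tms(phase);
//     /* do the phase's work */
//   }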
1902 #endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP