Mon, 07 Jul 2014 10:12:40 +0200
8049421: G1 Class Unloading after completing a concurrent mark cycle
Reviewed-by: tschatzl, ehelin, brutisso, coleenp, roland, iveresov
Contributed-by: stefan.karlsson@oracle.com, mikael.gerdin@oracle.com
/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP
#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP

#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gSpaceCounters.hpp"
#include "gc_implementation/shared/gcStats.hpp"
#include "gc_implementation/shared/gcWhen.hpp"
#include "gc_implementation/shared/generationCounters.hpp"
#include "memory/freeBlockDictionary.hpp"
#include "memory/generation.hpp"
#include "memory/iterator.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/virtualspace.hpp"
#include "services/memoryService.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/stack.inline.hpp"
#include "utilities/taskqueue.hpp"
#include "utilities/yieldingWorkgroup.hpp"
// ConcurrentMarkSweepGeneration is in support of a concurrent
// mark-sweep old generation in the Detlefs-Printezis--Boehm-Demers-Schenker
// style. We assume, for now, that this generation is always the
// seniormost generation and, for simplicity in this first implementation,
// that it is a single compactible space. Neither of these restrictions
// appears essential; both will be relaxed in the future when more time is
// available to implement the greater generality (and there's a need for it).
//
// Concurrent mode failures are currently handled by
// means of a sliding mark-compact.
class CMSAdaptiveSizePolicy;
class CMSConcMarkingTask;
class CMSGCAdaptivePolicyCounters;
class CMSTracer;
class ConcurrentGCTimer;
class ConcurrentMarkSweepGeneration;
class ConcurrentMarkSweepPolicy;
class ConcurrentMarkSweepThread;
class CompactibleFreeListSpace;
class FreeChunk;
class PromotionInfo;
class ScanMarkedObjectsAgainCarefullyClosure;
class TenuredGeneration;
class SerialOldTracer;
// A generic CMS bit map. It's the basis for both the CMS marking bit map
// as well as for the mod union table (in each case only a subset of the
// methods are used). This is essentially a wrapper around the BitMap class,
// with one bit per (1<<_shifter) HeapWords. (i.e. for the marking bit map,
// we have _shifter == 0, and for the mod union table we have
// _shifter == CardTableModRefBS::card_shift - LogHeapWordSize.)
// XXX 64-bit issues in BitMap?
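//
// Illustrative sketch (not part of the interface): the address<->bit
// conversions implied by _shifter. On a 64-bit VM, LogHeapWordSize == 3 and
// CardTableModRefBS::card_shift == 9, so the mod union table uses
// _shifter == 6, i.e. one bit per 64 HeapWords (one 512-byte card):
//
//   size_t    bit  = pointer_delta(addr, _bmStartWord) >> _shifter;
//   HeapWord* word = _bmStartWord + (bit << _shifter);
//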
class CMSBitMap VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;

  HeapWord*    _bmStartWord;   // base address of range covered by map
  size_t       _bmWordSize;    // map size (in #HeapWords covered)
  const int    _shifter;       // shifts to convert HeapWord to bit position
  VirtualSpace _virtual_space; // underlying the bit map
  BitMap       _bm;            // the bit map itself
 public:
  Mutex* const _lock;          // mutex protecting _bm;

 public:
  // constructor
  CMSBitMap(int shifter, int mutex_rank, const char* mutex_name);

  // allocates the actual storage for the map
  bool allocate(MemRegion mr);
  // field getter
  Mutex* lock() const { return _lock; }
  // locking verifier convenience function
  void assert_locked() const PRODUCT_RETURN;

  // inquiries
  HeapWord* startWord()   const { return _bmStartWord; }
  size_t    sizeInWords() const { return _bmWordSize; }
  size_t    sizeInBits()  const { return _bm.size(); }
  // the following is one past the last word in space
  HeapWord* endWord()     const { return _bmStartWord + _bmWordSize; }

  // reading marks
  bool isMarked(HeapWord* addr) const;
  bool par_isMarked(HeapWord* addr) const; // do not lock checks
  bool isUnmarked(HeapWord* addr) const;
  bool isAllClear() const;

  // writing marks
  void mark(HeapWord* addr);
  // For marking by parallel GC threads;
  // returns true if we did, false if another thread did
  bool par_mark(HeapWord* addr);

  void mark_range(MemRegion mr);
  void par_mark_range(MemRegion mr);
  void mark_large_range(MemRegion mr);
  void par_mark_large_range(MemRegion mr);
  void par_clear(HeapWord* addr);  // For unmarking by parallel GC threads.
  void clear_range(MemRegion mr);
  void par_clear_range(MemRegion mr);
  void clear_large_range(MemRegion mr);
  void par_clear_large_range(MemRegion mr);
  void clear_all();
  void clear_all_incrementally();  // Not yet implemented!!

  NOT_PRODUCT(
    // checks the memory region for validity
    void region_invariant(MemRegion mr);
  )

  // iteration
  void iterate(BitMapClosure* cl) {
    _bm.iterate(cl);
  }
  void iterate(BitMapClosure* cl, HeapWord* left, HeapWord* right);
  void dirty_range_iterate_clear(MemRegionClosure* cl);
  void dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl);

  // auxiliary support for iteration
  HeapWord* getNextMarkedWordAddress(HeapWord* addr) const;
  HeapWord* getNextMarkedWordAddress(HeapWord* start_addr,
                                     HeapWord* end_addr) const;
  HeapWord* getNextUnmarkedWordAddress(HeapWord* addr) const;
  HeapWord* getNextUnmarkedWordAddress(HeapWord* start_addr,
                                       HeapWord* end_addr) const;
  MemRegion getAndClearMarkedRegion(HeapWord* addr);
  MemRegion getAndClearMarkedRegion(HeapWord* start_addr,
                                    HeapWord* end_addr);

  // conversion utilities
  HeapWord* offsetToHeapWord(size_t offset) const;
  size_t    heapWordToOffset(HeapWord* addr) const;
  size_t    heapWordDiffToOffsetDiff(size_t diff) const;

  void print_on_error(outputStream* st, const char* prefix) const;

  // debugging
  // is this address range covered by the bit-map?
  NOT_PRODUCT(
    bool covers(MemRegion mr) const;
    bool covers(HeapWord* start, size_t size = 0) const;
  )
  void verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) PRODUCT_RETURN;
};
// Represents a marking stack used by the CMS collector.
// Ideally this should be GrowableArray<> just like MSC's marking stack(s).
class CMSMarkStack: public CHeapObj<mtGC> {
  //
  friend class CMSCollector;   // to get at expansion stats further below
  //

  VirtualSpace _virtual_space;  // space for the stack
  oop*   _base;      // bottom of stack
  size_t _index;     // one more than last occupied index
  size_t _capacity;  // max #elements
  Mutex  _par_lock;  // an advisory lock used in case of parallel access
  NOT_PRODUCT(size_t _max_depth;)  // max depth plumbed during run

 protected:
  size_t _hit_limit;      // we hit max stack size limit
  size_t _failed_double;  // we failed expansion before hitting limit

 public:
  CMSMarkStack():
    _par_lock(Mutex::event, "CMSMarkStack._par_lock", true),
    _hit_limit(0),
    _failed_double(0) {}

  bool allocate(size_t size);

  size_t capacity() const { return _capacity; }

  oop pop() {
    if (!isEmpty()) {
      return _base[--_index];
    }
    return NULL;
  }

  bool push(oop ptr) {
    if (isFull()) {
      return false;
    } else {
      _base[_index++] = ptr;
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, _index));
      return true;
    }
  }

  bool isEmpty() const { return _index == 0; }
  bool isFull()  const {
    assert(_index <= _capacity, "buffer overflow");
    return _index == _capacity;
  }

  size_t length() { return _index; }

  // "Parallel versions" of some of the above
  oop par_pop() {
    // lock and pop
    MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag);
    return pop();
  }

  bool par_push(oop ptr) {
    // lock and push
    MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag);
    return push(ptr);
  }

  // Forcibly reset the stack, losing all of its contents.
  void reset() {
    _index = 0;
  }

  // Expand the stack, typically in response to an overflow condition.
  void expand();

  // Compute the least valued stack element.
  oop least_value(HeapWord* low) {
    oop least = (oop)low;
    for (size_t i = 0; i < _index; i++) {
      least = MIN2(least, _base[i]);
    }
    return least;
  }

  // Exposed here to allow stack expansion in || case.
  Mutex* par_lock() { return &_par_lock; }
};
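
// Usage sketch (hypothetical caller; the overflow counter and restart
// address shown mirror fields declared on CMSCollector further below):
// a marker pushes grey objects and, on overflow, records the event and
// where to restart, leaving expand() to be invoked at a safe point:
//
//   if (!_markStack.push(obj)) {
//     _ser_pmc_remark_ovflw++;             // note the overflow event ...
//     lower_restart_addr((HeapWord*)obj);  // ... and where to restart marking
//   }
//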
class CardTableRS;
class CMSParGCThreadState;

class ModUnionClosure: public MemRegionClosure {
 protected:
  CMSBitMap* _t;
 public:
  ModUnionClosure(CMSBitMap* t): _t(t) { }
  void do_MemRegion(MemRegion mr);
};

class ModUnionClosurePar: public ModUnionClosure {
 public:
  ModUnionClosurePar(CMSBitMap* t): ModUnionClosure(t) { }
  void do_MemRegion(MemRegion mr);
};
// Survivor Chunk Array in support of parallelization of
// Survivor Space rescan.
class ChunkArray: public CHeapObj<mtGC> {
  size_t _index;
  size_t _capacity;
  size_t _overflows;
  HeapWord** _array;  // storage for array

 public:
  ChunkArray() : _index(0), _capacity(0), _overflows(0), _array(NULL) {}
  ChunkArray(HeapWord** a, size_t c):
    _index(0), _capacity(c), _overflows(0), _array(a) {}

  HeapWord** array() { return _array; }
  void set_array(HeapWord** a) { _array = a; }

  size_t capacity() { return _capacity; }
  void set_capacity(size_t c) { _capacity = c; }

  size_t end() {
    assert(_index <= capacity(),
           err_msg("_index (" SIZE_FORMAT ") > _capacity (" SIZE_FORMAT "): out of bounds",
                   _index, _capacity));
    return _index;
  }  // exclusive

  HeapWord* nth(size_t n) {
    assert(n < end(), "Out of bounds access");
    return _array[n];
  }

  void reset() {
    _index = 0;
    if (_overflows > 0 && PrintCMSStatistics > 1) {
      warning("CMS: ChunkArray[" SIZE_FORMAT "] overflowed " SIZE_FORMAT " times",
              _capacity, _overflows);
    }
    _overflows = 0;
  }

  void record_sample(HeapWord* p, size_t sz) {
    // For now we do not do anything with the size
    if (_index < _capacity) {
      _array[_index++] = p;
    } else {
      ++_overflows;
      assert(_index == _capacity,
             err_msg("_index (" SIZE_FORMAT ") > _capacity (" SIZE_FORMAT
                     "): out of bounds at overflow#" SIZE_FORMAT,
                     _index, _capacity, _overflows));
    }
  }
};
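
// Usage sketch (hypothetical caller): each parallel GC thread records the
// boundaries of the survivor-space chunks it fills during a scavenge, so
// that CMS remark can later carve the survivor spaces into roughly equal
// rescan tasks (see _survivor_plab_array and merge_survivor_plab_arrays
// in CMSCollector below):
//
//   ChunkArray* ca = &_survivor_plab_array[thread_num];
//   ca->record_sample(chunk_boundary, chunk_word_size); // size unused for now
//   ...
//   ca->reset();  // once the samples have been merged and consumed
//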
//
// Timing, allocation and promotion statistics for gc scheduling and incremental
// mode pacing. Most statistics are exponential averages.
//
class CMSStats VALUE_OBJ_CLASS_SPEC {
 private:
  ConcurrentMarkSweepGeneration* const _cms_gen;   // The cms (old) gen.

  // The following are exponential averages with factor alpha:
  //   avg = (100 - alpha) * avg + alpha * cur_sample
  //
  // The durations measure:  end_time[n] - start_time[n]
  // The periods measure:    start_time[n] - start_time[n-1]
  //
  // The cms period and duration include only concurrent collections; time spent
  // in foreground cms collections due to System.gc() or because of a failure to
  // keep up is not included.
  //
  // There are 3 alphas to "bootstrap" the statistics. The _saved_alpha is the
  // real value, but is used only after the first period. A value of 100 is
  // used for the first sample so it gets the entire weight.
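  //
  // Worked example (illustrative only): with alpha == 25, a running
  // average of 80 ms and a new sample of 120 ms, the update above gives
  //   avg = (75 * 80 + 25 * 120) / 100 = 90 ms
  // (alpha is a percentage, so the weighted sum is normalized by 100).
  // The first sample is taken with alpha == 100 and so replaces avg entirely.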
  unsigned int _saved_alpha; // 0-100
  unsigned int _gc0_alpha;
  unsigned int _cms_alpha;

  double _gc0_duration;
  double _gc0_period;
  size_t _gc0_promoted;           // bytes promoted per gc0
  double _cms_duration;
  double _cms_duration_pre_sweep; // time from initiation to start of sweep
  double _cms_duration_per_mb;
  double _cms_period;
  size_t _cms_allocated;          // bytes of direct allocation per gc0 period

  // Timers.
  elapsedTimer _cms_timer;
  TimeStamp    _gc0_begin_time;
  TimeStamp    _cms_begin_time;
  TimeStamp    _cms_end_time;

  // Snapshots of the amount used in the CMS generation.
  size_t _cms_used_at_gc0_begin;
  size_t _cms_used_at_gc0_end;
  size_t _cms_used_at_cms_begin;

  // Used to prevent the duty cycle from being reduced in the middle of a cms
  // cycle.
  bool _allow_duty_cycle_reduction;

  enum {
    _GC0_VALID = 0x1,
    _CMS_VALID = 0x2,
    _ALL_VALID = _GC0_VALID | _CMS_VALID
  };

  unsigned int _valid_bits;

  unsigned int _icms_duty_cycle;  // icms duty cycle (0-100).

 protected:

  // Return a duty cycle that avoids wild oscillations, by limiting the amount
  // of change between old_duty_cycle and new_duty_cycle (the latter is treated
  // as a recommended value).
  static unsigned int icms_damped_duty_cycle(unsigned int old_duty_cycle,
                                             unsigned int new_duty_cycle);
  unsigned int icms_update_duty_cycle_impl();

  // In support of adjusting of cms trigger ratios based on history
  // of concurrent mode failure.
  double cms_free_adjustment_factor(size_t free) const;
  void   adjust_cms_free_adjustment_factor(bool fail, size_t free);

 public:
  CMSStats(ConcurrentMarkSweepGeneration* cms_gen,
           unsigned int alpha = CMSExpAvgFactor);

  // Whether or not the statistics contain valid data; higher level statistics
  // cannot be called until this returns true (they require at least one young
  // gen and one cms cycle to have completed).
  bool valid() const;

  // Record statistics.
  void record_gc0_begin();
  void record_gc0_end(size_t cms_gen_bytes_used);
  void record_cms_begin();
  void record_cms_end();

  // Allow management of the cms timer, which must be stopped/started around
  // yield points.
  elapsedTimer& cms_timer()     { return _cms_timer; }
  void start_cms_timer()        { _cms_timer.start(); }
  void stop_cms_timer()         { _cms_timer.stop(); }

  // Basic statistics; units are seconds or bytes.
  double gc0_period() const     { return _gc0_period; }
  double gc0_duration() const   { return _gc0_duration; }
  size_t gc0_promoted() const   { return _gc0_promoted; }
  double cms_period() const     { return _cms_period; }
  double cms_duration() const   { return _cms_duration; }
  double cms_duration_per_mb() const { return _cms_duration_per_mb; }
  size_t cms_allocated() const  { return _cms_allocated; }

  size_t cms_used_at_gc0_end() const { return _cms_used_at_gc0_end;}

  // Seconds since the last background cms cycle began or ended.
  double cms_time_since_begin() const;
  double cms_time_since_end() const;

  // Higher level statistics--caller must check that valid() returns true before
  // calling.

  // Returns bytes promoted per second of wall clock time.
  double promotion_rate() const;

  // Returns bytes directly allocated per second of wall clock time.
  double cms_allocation_rate() const;

  // Rate at which space in the cms generation is being consumed (sum of the
  // above two).
  double cms_consumption_rate() const;

  // Returns an estimate of the number of seconds until the cms generation will
  // fill up, assuming no collection work is done.
  double time_until_cms_gen_full() const;

  // Returns an estimate of the number of seconds remaining until
  // the cms generation collection should start.
  double time_until_cms_start() const;

  // End of higher level statistics.

  // Returns the cms incremental mode duty cycle, as a percentage (0-100).
  unsigned int icms_duty_cycle() const { return _icms_duty_cycle; }

  // Update the duty cycle and return the new value.
  unsigned int icms_update_duty_cycle();

  // Debugging.
  void print_on(outputStream* st) const PRODUCT_RETURN;
  void print() const { print_on(gclog_or_tty); }
};
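
// Consumption-model sketch (illustrative; the actual implementations live
// in the .cpp file): the scheduling estimates above compose as
//
//   cms_consumption_rate()    ~= promotion_rate() + cms_allocation_rate()
//   time_until_cms_gen_full() ~= cms_gen_free_bytes / cms_consumption_rate()
//
// e.g. 100 MB free and a combined consumption rate of 10 MB/s gives an
// estimate of roughly 10 seconds until the generation fills up.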
// A closure related to weak references processing which
// we embed in the CMSCollector, since we need to pass
// it to the reference processor for secondary filtering
// of references based on reachability of referent;
// see role of _is_alive_non_header closure in the
// ReferenceProcessor class.
// For objects in the CMS generation, this closure checks
// if the object is "live" (reachable). Used in weak
// reference processing.
class CMSIsAliveClosure: public BoolObjectClosure {
  const MemRegion  _span;
  const CMSBitMap* _bit_map;

  friend class CMSCollector;
 public:
  CMSIsAliveClosure(MemRegion span,
                    CMSBitMap* bit_map):
    _span(span),
    _bit_map(bit_map) {
    assert(!span.is_empty(), "Empty span could spell trouble");
  }

  bool do_object_b(oop obj);
};
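
// Sketch of the liveness test (the body lives in the .cpp file; this is
// the natural reading of the comment above, not a verbatim copy): objects
// outside the span are conservatively treated as live, while objects
// inside the span are live iff their start is marked in the bit map:
//
//   bool CMSIsAliveClosure::do_object_b(oop obj) {
//     HeapWord* addr = (HeapWord*)obj;
//     return addr != NULL &&
//            (!_span.contains(addr) || _bit_map->isMarked(addr));
//   }
//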
// Implements AbstractRefProcTaskExecutor for CMS.
class CMSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
 public:

  CMSRefProcTaskExecutor(CMSCollector& collector)
    : _collector(collector)
  { }

  // Executes a task using worker threads.
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
 private:
  CMSCollector& _collector;
};
class CMSCollector: public CHeapObj<mtGC> {
  friend class VMStructs;
  friend class ConcurrentMarkSweepThread;
  friend class ConcurrentMarkSweepGeneration;
  friend class CompactibleFreeListSpace;
  friend class CMSParMarkTask;
  friend class CMSParInitialMarkTask;
  friend class CMSParRemarkTask;
  friend class CMSConcMarkingTask;
  friend class CMSRefProcTaskProxy;
  friend class CMSRefProcTaskExecutor;
  friend class ScanMarkedObjectsAgainCarefullyClosure;  // for sampling eden
  friend class SurvivorSpacePrecleanClosure;            // --- ditto -------
  friend class PushOrMarkClosure;             // to access _restart_addr
  friend class Par_PushOrMarkClosure;         // to access _restart_addr
  friend class MarkFromRootsClosure;          //  -- ditto --
                                              // ... and for clearing cards
  friend class Par_MarkFromRootsClosure;      //  to access _restart_addr
                                              // ... and for clearing cards
  friend class Par_ConcMarkingClosure;        //  to access _restart_addr etc.
  friend class MarkFromRootsVerifyClosure;    // to access _restart_addr
  friend class PushAndMarkVerifyClosure;      //  -- ditto --
  friend class MarkRefsIntoAndScanClosure;    //  to access _overflow_list
  friend class PushAndMarkClosure;            //  -- ditto --
  friend class Par_PushAndMarkClosure;        //  -- ditto --
  friend class CMSKeepAliveClosure;           //  -- ditto --
  friend class CMSDrainMarkingStackClosure;   //  -- ditto --
  friend class CMSInnerParMarkAndPushClosure; //  -- ditto --
  NOT_PRODUCT(friend class ScanMarkedObjectsAgainClosure;) // assertion on _overflow_list
  friend class ReleaseForegroundGC;  // to access _foregroundGCShouldWait
  friend class VM_CMS_Operation;
  friend class VM_CMS_Initial_Mark;
  friend class VM_CMS_Final_Remark;
  friend class TraceCMSMemoryManagerStats;
 private:
  jlong _time_of_last_gc;
  void update_time_of_last_gc(jlong now) {
    _time_of_last_gc = now;
  }

  OopTaskQueueSet* _task_queues;

  // Overflow list of grey objects, threaded through mark-word
  // Manipulated with CAS in the parallel/multi-threaded case.
  oop _overflow_list;
  // The following array-pair keeps track of mark words
  // displaced for accommodating overflow list above.
  // This code will likely be revisited under RFE#4922830.
  Stack<oop, mtGC>     _preserved_oop_stack;
  Stack<markOop, mtGC> _preserved_mark_stack;

  int* _hash_seed;

  // In support of multi-threaded concurrent phases
  YieldingFlexibleWorkGang* _conc_workers;

  // Performance Counters
  CollectorCounters* _gc_counters;

  // Initialization Errors
  bool _completed_initialization;

  // In support of ExplicitGCInvokesConcurrent
  static bool _full_gc_requested;
  static GCCause::Cause _full_gc_cause;
  unsigned int _collection_count_start;

  // Should we unload classes this concurrent cycle?
  bool _should_unload_classes;
  unsigned int _concurrent_cycles_since_last_unload;
  unsigned int concurrent_cycles_since_last_unload() const {
    return _concurrent_cycles_since_last_unload;
  }
  // Did we (allow) unload classes in the previous concurrent cycle?
  bool unloaded_classes_last_cycle() const {
    return concurrent_cycles_since_last_unload() == 0;
  }
  // Root scanning options for perm gen
  int _roots_scanning_options;
  int roots_scanning_options() const      { return _roots_scanning_options; }
  void add_root_scanning_option(int o)    { _roots_scanning_options |= o;   }
  void remove_root_scanning_option(int o) { _roots_scanning_options &= ~o;  }

  // Verification support
  CMSBitMap     _verification_mark_bm;
  void verify_after_remark_work_1();
  void verify_after_remark_work_2();

  // true if any verification flag is on.
  bool _verifying;
  bool verifying() const { return _verifying; }
  void set_verifying(bool v) { _verifying = v; }

  // Collector policy
  ConcurrentMarkSweepPolicy* _collector_policy;
  ConcurrentMarkSweepPolicy* collector_policy() { return _collector_policy; }

  void set_did_compact(bool v);

  // XXX Move these to CMSStats ??? FIX ME !!!
  elapsedTimer _inter_sweep_timer;   // time between sweeps
  elapsedTimer _intra_sweep_timer;   // time _in_ sweeps
  // padded decaying average estimates of the above
  AdaptivePaddedAverage _inter_sweep_estimate;
  AdaptivePaddedAverage _intra_sweep_estimate;

  CMSTracer* _gc_tracer_cm;
  ConcurrentGCTimer* _gc_timer_cm;

  bool _cms_start_registered;

  GCHeapSummary _last_heap_summary;
  MetaspaceSummary _last_metaspace_summary;

  void register_foreground_gc_start(GCCause::Cause cause);
  void register_gc_start(GCCause::Cause cause);
  void register_gc_end();
  void save_heap_summary();
  void report_heap_summary(GCWhen::Type when);

 protected:
  ConcurrentMarkSweepGeneration* _cmsGen;  // old gen (CMS)
  MemRegion                      _span;    // span covering the CMS generation
  CardTableRS*                   _ct;      // card table

  // CMS marking support structures
  CMSBitMap     _markBitMap;
  CMSBitMap     _modUnionTable;
  CMSMarkStack  _markStack;

  HeapWord*     _restart_addr; // in support of marking stack overflow
  void          lower_restart_addr(HeapWord* low);

  // Counters in support of marking stack / work queue overflow handling:
  // a non-zero value indicates certain types of overflow events during
  // the current CMS cycle and could lead to stack resizing efforts at
  // an opportune future time.
  size_t        _ser_pmc_preclean_ovflw;
  size_t        _ser_pmc_remark_ovflw;
  size_t        _par_pmc_remark_ovflw;
  size_t        _ser_kac_preclean_ovflw;
  size_t        _ser_kac_ovflw;
  size_t        _par_kac_ovflw;
  NOT_PRODUCT(ssize_t _num_par_pushes;)

  // ("Weak") Reference processing support
  ReferenceProcessor* _ref_processor;
  CMSIsAliveClosure   _is_alive_closure;
  // keep this textually after _markBitMap and _span; c'tor dependency

  ConcurrentMarkSweepThread* _cmsThread;  // the thread doing the work
  ModUnionClosure    _modUnionClosure;
  ModUnionClosurePar _modUnionClosurePar;

  // CMS abstract state machine
  // initial_state: Idling
  // next_state(Idling) = {Marking}
  // next_state(Marking) = {Precleaning, Sweeping}
  // next_state(Precleaning) = {AbortablePreclean, FinalMarking}
  // next_state(AbortablePreclean) = {FinalMarking}
  // next_state(FinalMarking) = {Sweeping}
  // next_state(Sweeping) = {Resizing}
  // next_state(Resizing) = {Resetting}
  // next_state(Resetting) = {Idling}
  // The numeric values below are chosen so that:
  // . _collectorState <= Idling == post-sweep && pre-mark
  // . _collectorState in (Idling, Sweeping) == {initial,final}marking ||
  //                                            precleaning || abortablePreclean
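  //
  // Illustrative trace (derived directly from the transition function
  // above): a cycle that takes the abortable-preclean path visits
  //   Idling -> Marking -> Precleaning -> AbortablePreclean
  //          -> FinalMarking -> Sweeping -> Resizing -> Resetting -> Idling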
 public:
  enum CollectorState {
    Resizing          = 0,
    Resetting         = 1,
    Idling            = 2,
    InitialMarking    = 3,
    Marking           = 4,
    Precleaning       = 5,
    AbortablePreclean = 6,
    FinalMarking      = 7,
    Sweeping          = 8
  };
 protected:
  static CollectorState _collectorState;

  // State related to prologue/epilogue invocation for my generations
  bool _between_prologue_and_epilogue;

  // Signalling/State related to coordination between fore- and background GC
  // Note: When the baton has been passed from background GC to foreground GC,
  // _foregroundGCIsActive is true and _foregroundGCShouldWait is false.
  static bool _foregroundGCIsActive;    // true iff foreground collector is active or
                                        // wants to go active
  static bool _foregroundGCShouldWait;  // true iff background GC is active and has not
                                        // yet passed the baton to the foreground GC
  // Support for CMSScheduleRemark (abortable preclean)
  bool _abort_preclean;
  bool _start_sampling;

  int    _numYields;
  size_t _numDirtyCards;
  size_t _sweep_count;
  // number of full gc's since the last concurrent gc.
  uint   _full_gcs_since_conc_gc;

  // occupancy used for bootstrapping stats
  double _bootstrap_occupancy;

  // timer
  elapsedTimer _timer;

  // Timing, allocation and promotion statistics, used for scheduling.
  CMSStats _stats;

  // Allocation limits installed in the young gen, used only in
  // CMSIncrementalMode. When an allocation in the young gen would cross one of
  // these limits, the cms generation is notified and the cms thread is started
  // or stopped, respectively.
  HeapWord* _icms_start_limit;
  HeapWord* _icms_stop_limit;

  enum CMS_op_type {
    CMS_op_checkpointRootsInitial,
    CMS_op_checkpointRootsFinal
  };

  void do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause);
  bool stop_world_and_do(CMS_op_type op);

  OopTaskQueueSet* task_queues()    { return _task_queues; }
  int*             hash_seed(int i) { return &_hash_seed[i]; }
  YieldingFlexibleWorkGang* conc_workers() { return _conc_workers; }

  // Support for parallelizing Eden rescan in CMS remark phase
  void sample_eden(); // ... sample Eden space top

 private:
  // Support for parallelizing young gen rescan in CMS remark phase
  Generation* _young_gen;           // the younger gen
  HeapWord**  _top_addr;            // ... Top of Eden
  HeapWord**  _end_addr;            // ... End of Eden
  Mutex*      _eden_chunk_lock;
  HeapWord**  _eden_chunk_array;    // ... Eden partitioning array
  size_t      _eden_chunk_index;    // ... top (exclusive) of array
  size_t      _eden_chunk_capacity; // ... max entries in array

  // Support for parallelizing survivor space rescan
  HeapWord**  _survivor_chunk_array;
  size_t      _survivor_chunk_index;
  size_t      _survivor_chunk_capacity;
  size_t*     _cursor;
  ChunkArray* _survivor_plab_array;

  // Support for marking stack overflow handling
  bool take_from_overflow_list(size_t num, CMSMarkStack* to_stack);
  bool par_take_from_overflow_list(size_t num,
                                   OopTaskQueue* to_work_q,
                                   int no_of_gc_threads);
  void push_on_overflow_list(oop p);
  void par_push_on_overflow_list(oop p);
  // the following is, obviously, not, in general, "MT-stable"
  bool overflow_list_is_empty() const;

  void preserve_mark_if_necessary(oop p);
  void par_preserve_mark_if_necessary(oop p);
  void preserve_mark_work(oop p, markOop m);
  void restore_preserved_marks_if_any();
  NOT_PRODUCT(bool no_preserved_marks() const;)
  // in support of testing overflow code
  NOT_PRODUCT(int _overflow_counter;)
  NOT_PRODUCT(bool simulate_overflow();)      // sequential
  NOT_PRODUCT(bool par_simulate_overflow();)  // MT version
  // CMS work methods
  void checkpointRootsInitialWork(bool asynch);  // initial checkpoint work

  // a return value of false indicates failure due to stack overflow
  bool markFromRootsWork(bool asynch);  // concurrent marking work

 public:   // FIX ME!!! only for testing
  bool do_marking_st(bool asynch);  // single-threaded marking
  bool do_marking_mt(bool asynch);  // multi-threaded marking

 private:

  // concurrent precleaning work
  size_t preclean_mod_union_table(ConcurrentMarkSweepGeneration* gen,
                                  ScanMarkedObjectsAgainCarefullyClosure* cl);
  size_t preclean_card_table(ConcurrentMarkSweepGeneration* gen,
                             ScanMarkedObjectsAgainCarefullyClosure* cl);
  // Does precleaning work, returning a quantity indicative of
  // the amount of "useful work" done.
  size_t preclean_work(bool clean_refs, bool clean_survivors);
  void preclean_klasses(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock);
  void abortable_preclean();  // Preclean while looking for possible abort
  void initialize_sequential_subtasks_for_young_gen_rescan(int i);
  // Helper function for above; merge-sorts the per-thread plab samples
  void merge_survivor_plab_arrays(ContiguousSpace* surv, int no_of_gc_threads);
  // Resets (i.e. clears) the per-thread plab sample vectors
  void reset_survivor_plab_arrays();

  // final (second) checkpoint work
  void checkpointRootsFinalWork(bool asynch, bool clear_all_soft_refs,
                                bool init_mark_was_synchronous);
  // work routine for parallel version of remark
  void do_remark_parallel();
  // work routine for non-parallel version of remark
  void do_remark_non_parallel();
  // reference processing work routine (during second checkpoint)
  void refProcessingWork(bool asynch, bool clear_all_soft_refs);

  // concurrent sweeping work
  void sweepWork(ConcurrentMarkSweepGeneration* gen, bool asynch);

  // (concurrent) resetting of support data structures
  void reset(bool asynch);

  // Clear _expansion_cause fields of constituent generations
  void clear_expansion_cause();

  // An auxiliary method used to record the ends of
  // used regions of each generation to limit the extent of sweep
  void save_sweep_limits();

  // A work method used by foreground collection to determine
  // what type of collection (compacting or not, continuing or fresh)
  // it should do.
  void decide_foreground_collection_type(bool clear_all_soft_refs,
                                         bool* should_compact, bool* should_start_over);

  // A work method used by the foreground collector to do
  // a mark-sweep-compact.
  void do_compaction_work(bool clear_all_soft_refs);

  // A work method used by the foreground collector to do
  // a mark-sweep, after taking over from a possibly on-going
  // concurrent mark-sweep collection.
  void do_mark_sweep_work(bool clear_all_soft_refs,
                          CollectorState first_state, bool should_start_over);

  // Work methods for reporting concurrent mode interruption or failure
  bool is_external_interruption();
  void report_concurrent_mode_interruption();

  // If the background GC is active, acquire control from the background
  // GC and do the collection.
  void acquire_control_and_collect(bool full, bool clear_all_soft_refs);

  // For synchronizing passing of control from background to foreground
  // GC. waitForForegroundGC() is called by the background
  // collector. If it had to wait for a foreground collection,
  // it returns true and the background collection should assume
  // that the collection was finished by the foreground
  // collector.
  bool waitForForegroundGC();

  // Incremental mode triggering: recompute the icms duty cycle and set the
  // allocation limits in the young gen.
  void icms_update_allocation_limits();

  size_t block_size_using_printezis_bits(HeapWord* addr) const;
  size_t block_size_if_printezis_bits(HeapWord* addr) const;
  HeapWord* next_card_start_after_block(HeapWord* addr) const;

  void setup_cms_unloading_and_verification_state();
 public:
  CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
               CardTableRS*                   ct,
               ConcurrentMarkSweepPolicy*     cp);
  ConcurrentMarkSweepThread* cmsThread() { return _cmsThread; }

  ReferenceProcessor* ref_processor() { return _ref_processor; }
  void ref_processor_init();

  Mutex* bitMapLock() const { return _markBitMap.lock(); }
  static CollectorState abstract_state() { return _collectorState; }

  bool should_abort_preclean() const; // Whether preclean should be aborted.
  size_t get_eden_used() const;
  size_t get_eden_capacity() const;

  ConcurrentMarkSweepGeneration* cmsGen() { return _cmsGen; }

  // locking checks
  NOT_PRODUCT(static bool have_cms_token();)

  // XXXPERM bool should_collect(bool full, size_t size, bool tlab);
  bool shouldConcurrentCollect();

  void collect(bool   full,
               bool   clear_all_soft_refs,
               size_t size,
               bool   tlab);
  void collect_in_background(bool clear_all_soft_refs, GCCause::Cause cause);
  void collect_in_foreground(bool clear_all_soft_refs, GCCause::Cause cause);

  // In support of ExplicitGCInvokesConcurrent
  static void request_full_gc(unsigned int full_gc_count, GCCause::Cause cause);
  // Should we unload classes in a particular concurrent cycle?
  bool should_unload_classes() const {
    return _should_unload_classes;
  }
  void update_should_unload_classes();

  void direct_allocated(HeapWord* start, size_t size);

  // Object is dead if not marked and current phase is sweeping.
  bool is_dead_obj(oop obj) const;

  // After a promotion (of "start"), do any necessary marking.
  // If "par", then it's being done by a parallel GC thread.
  // The last two args indicate if we need precise marking
  // and if so the size of the object so it can be dirtied
  // in its entirety.
  void promoted(bool par, HeapWord* start,
                bool is_obj_array, size_t obj_size);

  HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
                                     size_t word_size);

  void getFreelistLocks() const;
  void releaseFreelistLocks() const;
  bool haveFreelistLocks() const;

  // Adjust size of underlying generation
  void compute_new_size();

  // GC prologue and epilogue
  void gc_prologue(bool full);
  void gc_epilogue(bool full);

  jlong time_of_last_gc(jlong now) {
    if (_collectorState <= Idling) {
      // gc not in progress
      return _time_of_last_gc;
    } else {
      // collection in progress
      return now;
    }
  }
  // Support for parallel remark of survivor space
  void* get_data_recorder(int thr_num);
  void sample_eden_chunk();

  CMSBitMap* markBitMap() { return &_markBitMap; }
  void directAllocated(HeapWord* start, size_t size);

  // main CMS steps and related support
  void checkpointRootsInitial(bool asynch);
  bool markFromRoots(bool asynch);  // a return value of false indicates failure
                                    // due to stack overflow
  void preclean();
  void checkpointRootsFinal(bool asynch, bool clear_all_soft_refs,
                            bool init_mark_was_synchronous);
  void sweep(bool asynch);

  // Check that the currently executing thread is the expected
  // one (foreground collector or background collector).
  static void check_correct_thread_executing() PRODUCT_RETURN;
  // XXXPERM void print_statistics() PRODUCT_RETURN;

  bool is_cms_reachable(HeapWord* addr);

  // Performance Counter Support
  CollectorCounters* counters() { return _gc_counters; }

  // timer stuff
  void   startTimer() { assert(!_timer.is_active(), "Error"); _timer.start(); }
  void   stopTimer()  { assert( _timer.is_active(), "Error"); _timer.stop();  }
  void   resetTimer() { assert(!_timer.is_active(), "Error"); _timer.reset(); }
  double timerValue() { assert(!_timer.is_active(), "Error"); return _timer.seconds(); }

  int  yields()          { return _numYields; }
  void resetYields()     { _numYields = 0;    }
  void incrementYields() { _numYields++;      }
  void resetNumDirtyCards()               { _numDirtyCards = 0; }
  void incrementNumDirtyCards(size_t num) { _numDirtyCards += num; }
  size_t numDirtyCards()                  { return _numDirtyCards; }

  static bool foregroundGCShouldWait() { return _foregroundGCShouldWait; }
  static void set_foregroundGCShouldWait(bool v) { _foregroundGCShouldWait = v; }
  static bool foregroundGCIsActive() { return _foregroundGCIsActive; }
  static void set_foregroundGCIsActive(bool v) { _foregroundGCIsActive = v; }
  size_t sweep_count() const     { return _sweep_count; }
  void   increment_sweep_count() { _sweep_count++; }

  // Timers/stats for gc scheduling and incremental mode pacing.
  CMSStats& stats() { return _stats; }

  // Convenience methods that check whether CMSIncrementalMode is enabled and
  // forward to the corresponding methods in ConcurrentMarkSweepThread.
  static void start_icms();
  static void stop_icms();    // Called at the end of the cms cycle.
  static void disable_icms(); // Called before a foreground collection.
  static void enable_icms();  // Called after a foreground collection.
  void icms_wait();           // Called at yield points.

  // Adaptive size policy
  CMSAdaptiveSizePolicy* size_policy();
  CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();

  static void print_on_error(outputStream* st);

  // debugging
  void verify();
  bool verify_after_remark(bool silent = VerifySilently);
  void verify_ok_to_terminate() const PRODUCT_RETURN;
  void verify_work_stacks_empty() const PRODUCT_RETURN;
  void verify_overflow_empty() const PRODUCT_RETURN;

  // convenience methods in support of debugging
  static const size_t skip_header_HeapWords() PRODUCT_RETURN0;
  HeapWord* block_start(const void* p) const PRODUCT_RETURN0;

  // accessors
  CMSMarkStack* verification_mark_stack() { return &_markStack; }
  CMSBitMap*    verification_mark_bm()    { return &_verification_mark_bm; }

  // Initialization errors
  bool completed_initialization() { return _completed_initialization; }

  void print_eden_and_survivor_chunk_arrays();
};
class CMSExpansionCause : public AllStatic {
 public:
  enum Cause {
    _no_expansion,
    _satisfy_free_ratio,
    _satisfy_promotion,
    _satisfy_allocation,
    _allocate_par_lab,
    _allocate_par_spooling_space,
    _adaptive_size_policy
  };
  // Return a string describing the cause of the expansion.
  static const char* to_string(CMSExpansionCause::Cause cause);
};
class ConcurrentMarkSweepGeneration: public CardGeneration {
  friend class VMStructs;
  friend class ConcurrentMarkSweepThread;
  friend class ConcurrentMarkSweep;
  friend class CMSCollector;
 protected:
  static CMSCollector*      _collector; // the collector that collects us
  CompactibleFreeListSpace* _cmsSpace;  // underlying space (only one for now)

  // Performance Counters
  GenerationCounters* _gen_counters;
  GSpaceCounters*     _space_counters;

  // Words directly allocated, used by CMSStats.
  size_t _direct_allocated_words;

  // Non-product stat counters
  NOT_PRODUCT(
    size_t _numObjectsPromoted;
    size_t _numWordsPromoted;
    size_t _numObjectsAllocated;
    size_t _numWordsAllocated;
  )

  // Used for sizing decisions
  bool _incremental_collection_failed;
  bool incremental_collection_failed() {
    return _incremental_collection_failed;
  }
  void set_incremental_collection_failed() {
    _incremental_collection_failed = true;
  }
  void clear_incremental_collection_failed() {
    _incremental_collection_failed = false;
  }

  // accessors
  void set_expansion_cause(CMSExpansionCause::Cause v) { _expansion_cause = v;}
  CMSExpansionCause::Cause expansion_cause() const { return _expansion_cause; }

 private:
  // For parallel young-gen GC support.
  CMSParGCThreadState** _par_gc_thread_states;

  // Reason generation was expanded
  CMSExpansionCause::Cause _expansion_cause;

  // In support of MinChunkSize being larger than min object size
  const double _dilatation_factor;

  enum CollectionTypes {
    Concurrent_collection_type     = 0,
    MS_foreground_collection_type  = 1,
    MSC_foreground_collection_type = 2,
    Unknown_collection_type        = 3
  };

  CollectionTypes _debug_collection_type;

  // True if a compacting collection was done.
  bool _did_compact;
  bool did_compact() { return _did_compact; }

  // Fraction of current occupancy at which to start a CMS collection which
  // will collect this generation (at least).
  double _initiating_occupancy;

 protected:
  // Shrink generation by specified size (a no-op if unable to shrink)
  void shrink_free_list_by(size_t bytes);

  // Update statistics for GC
  virtual void update_gc_stats(int level, bool full);

  // Maximum available space in the generation (including uncommitted)
  // space.
  size_t max_available() const;

  // getter and initializer for _initiating_occupancy field.
  double initiating_occupancy() const { return _initiating_occupancy; }
  void   init_initiating_occupancy(intx io, uintx tr);
 public:
  ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
                                int level, CardTableRS* ct,
                                bool use_adaptive_freelists,
                                FreeBlockDictionary<FreeChunk>::DictionaryChoice);

  // Accessors
  CMSCollector* collector() const { return _collector; }
  static void set_collector(CMSCollector* collector) {
    assert(_collector == NULL, "already set");
    _collector = collector;
  }
  CompactibleFreeListSpace* cmsSpace() const { return _cmsSpace; }

  Mutex* freelistLock() const;

  virtual Generation::Name kind() { return Generation::ConcurrentMarkSweep; }

  // Adaptive size policy
  CMSAdaptiveSizePolicy* size_policy();

  void set_did_compact(bool v) { _did_compact = v; }

  bool refs_discovery_is_atomic() const { return false; }
  bool refs_discovery_is_mt()     const {
    // Note: CMS does MT-discovery during the parallel-remark
    // phases. Use ReferenceProcessorMTMutator to make refs
    // discovery MT-safe during such phases or other parallel
    // discovery phases in the future. This may all go away
    // if/when we decide that refs discovery is sufficiently
    // rare that the cost of the CAS's involved is in the
    // noise. That's a measurement that should be done, and
    // the code simplified if that turns out to be the case.
    return ConcGCThreads > 1;
  }

  // Override
  virtual void ref_processor_init();

  // Grow generation by specified size (returns false if unable to grow)
  bool grow_by(size_t bytes);
  // Grow generation to reserved size.
  bool grow_to_reserved();

  void clear_expansion_cause() { _expansion_cause = CMSExpansionCause::_no_expansion; }

  // Space enquiries
  size_t capacity() const;
  size_t used() const;
  size_t free() const;
  double occupancy() const { return ((double)used())/((double)capacity()); }
  size_t contiguous_available() const;
  size_t unsafe_max_alloc_nogc() const;

  // over-rides
  MemRegion used_region() const;
  MemRegion used_region_at_save_marks() const;

  // Does a "full" (forced) collection invoked on this generation collect
  // all younger generations as well? Note that the second conjunct is a
  // hack to allow the collection of the younger gen first if the flag is
  // set. This is better than using the policy's should_collect_gen0_first(),
  // since that causes us to do an extra unnecessary pair of restart-&-stop-world.
  virtual bool full_collects_younger_generations() const {
    return UseCMSCompactAtFullCollection && !CollectGen0First;
  }
  void space_iterate(SpaceClosure* blk, bool usedOnly = false);

  // Support for compaction
  CompactibleSpace* first_compaction_space() const;
  // Adjust quantities in the generation affected by
  // the compaction.
  void reset_after_compaction();

  // Allocation support
  HeapWord* allocate(size_t size, bool tlab);
  HeapWord* have_lock_and_allocate(size_t size, bool tlab);
  oop       promote(oop obj, size_t obj_size);
  HeapWord* par_allocate(size_t size, bool tlab) {
    return allocate(size, tlab);
  }

  // Incremental mode triggering.
  HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
                                     size_t word_size);

  // Used by CMSStats to track direct allocation. The value is sampled and
  // reset after each young gen collection.
  size_t direct_allocated_words() const { return _direct_allocated_words; }
  void reset_direct_allocated_words()   { _direct_allocated_words = 0; }

  // Overrides for parallel promotion.
  virtual oop par_promote(int thread_num,
                          oop obj, markOop m, size_t word_sz);
  // This one should not be called for CMS.
  virtual void par_promote_alloc_undo(int thread_num,
                                      HeapWord* obj, size_t word_sz);
  virtual void par_promote_alloc_done(int thread_num);
  virtual void par_oop_since_save_marks_iterate_done(int thread_num);

  virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes) const;

  // Inform this (non-young) generation that a promotion failure was
  // encountered during a collection of a younger generation that
  // promotes into this generation.
  virtual void promotion_failure_occurred();

  bool should_collect(bool full, size_t size, bool tlab);
  virtual bool should_concurrent_collect() const;
  virtual bool is_too_full() const;
  void collect(bool   full,
               bool   clear_all_soft_refs,
               size_t size,
               bool   tlab);

  HeapWord* expand_and_allocate(size_t word_size,
                                bool tlab,
                                bool parallel = false);

  // GC prologue and epilogue
  void gc_prologue(bool full);
  void gc_prologue_work(bool full, bool registerClosure,
                        ModUnionClosure* modUnionClosure);
  void gc_epilogue(bool full);
  void gc_epilogue_work(bool full);

  // Time since last GC of this generation
  jlong time_of_last_gc(jlong now) {
    return collector()->time_of_last_gc(now);
  }
  void update_time_of_last_gc(jlong now) {
    collector()->update_time_of_last_gc(now);
  }

  // Allocation failure
  void expand(size_t bytes, size_t expand_bytes,
              CMSExpansionCause::Cause cause);
  virtual bool expand(size_t bytes, size_t expand_bytes);
  void shrink(size_t bytes);
  void shrink_by(size_t bytes);
  HeapWord* expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz);
  bool expand_and_ensure_spooling_space(PromotionInfo* promo);

  // Iteration support and related enquiries
  void save_marks();
  bool no_allocs_since_save_marks();
  void younger_refs_iterate(OopsInGenClosure* cl);

  // Iteration support specific to CMS generations
  void save_sweep_limit();

  // More iteration support
  virtual void oop_iterate(ExtendedOopClosure* cl);
  virtual void safe_object_iterate(ObjectClosure* cl);
  virtual void object_iterate(ObjectClosure* cl);

  // Need to declare the full complement of closures, whether we'll
  // override them or not, or get a message from the compiler:
  //   oop_since_save_marks_iterate_nv hides virtual function...
#define CMS_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
  void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
  ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DECL)

  // Smart allocation XXX -- move to CFLSpace?
  void setNearLargestChunk();
  bool isNearLargestChunk(HeapWord* addr);

  // Get the chunk at the end of the space. Delegates to
  // the space.
  FreeChunk* find_chunk_at_end();
  void post_compact();

  // Debugging
  void prepare_for_verify();
  void verify();
  void print_statistics() PRODUCT_RETURN;

  // Performance Counters support
  virtual void update_counters();
  virtual void update_counters(size_t used);
  void initialize_performance_counters();
  CollectorCounters* counters() { return collector()->counters(); }

  // Support for parallel remark of survivor space
  void* get_data_recorder(int thr_num) {
    // Delegate to collector
    return collector()->get_data_recorder(thr_num);
  }
  void sample_eden_chunk() {
    // Delegate to collector
    return collector()->sample_eden_chunk();
  }

  // Printing
  const char* name() const;
  virtual const char* short_name() const { return "CMS"; }
  void print() const;
  void printOccupancy(const char* s);
  bool must_be_youngest() const { return false; }
  bool must_be_oldest()   const { return true; }

  // Resize the generation after a compacting GC. The
  // generation can be treated as a contiguous space
  // after the compaction.
  virtual void compute_new_size();
  // Resize the generation after a non-compacting
  // collection.
  void compute_new_size_free_list();

  CollectionTypes debug_collection_type() { return _debug_collection_type; }
  void rotate_debug_collection_type();
};
class ASConcurrentMarkSweepGeneration : public ConcurrentMarkSweepGeneration {

  // Return the size policy from the heap's collector
  // policy cast to CMSAdaptiveSizePolicy*.
  CMSAdaptiveSizePolicy* cms_size_policy() const;

  // Resize the generation based on the adaptive size
  // policy.
  void resize(size_t cur_promo, size_t desired_promo);

  // Return the GC counters from the collector policy
  CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();

  virtual void shrink_by(size_t bytes);

 public:
  ASConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
                                  int level, CardTableRS* ct,
                                  bool use_adaptive_freelists,
                                  FreeBlockDictionary<FreeChunk>::DictionaryChoice
                                    dictionaryChoice) :
    ConcurrentMarkSweepGeneration(rs, initial_byte_size, level, ct,
      use_adaptive_freelists, dictionaryChoice) {}

  virtual const char* short_name() const { return "ASCMS"; }
  virtual Generation::Name kind() { return Generation::ASConcurrentMarkSweep; }

  virtual void update_counters();
  virtual void update_counters(size_t used);
};
//
// Closures of various sorts used by CMS to accomplish its work
//

// This closure is used to do concurrent marking from the roots
// following the first checkpoint.
class MarkFromRootsClosure: public BitMapClosure {
  CMSCollector*  _collector;
  MemRegion      _span;
  CMSBitMap*     _bitMap;
  CMSBitMap*     _mut;
  CMSMarkStack*  _markStack;
  bool           _yield;
  int            _skipBits;
  HeapWord*      _finger;
  HeapWord*      _threshold;
  DEBUG_ONLY(bool _verifying;)

 public:
  MarkFromRootsClosure(CMSCollector* collector, MemRegion span,
                       CMSBitMap* bitMap,
                       CMSMarkStack* markStack,
                       bool should_yield, bool verifying = false);
  bool do_bit(size_t offset);
  void reset(HeapWord* addr);
  inline void do_yield_check();

 private:
  void scanOopsInOop(HeapWord* ptr);
  void do_yield_work();
};
// This closure is used to do concurrent multi-threaded
// marking from the roots following the first checkpoint.
// XXX This should really be a subclass of the serial version
// above, but I have not had the time to refactor things cleanly.
// That will be done for Dolphin.
class Par_MarkFromRootsClosure: public BitMapClosure {
  CMSCollector*  _collector;
  MemRegion      _whole_span;
  MemRegion      _span;
  CMSBitMap*     _bit_map;
  CMSBitMap*     _mut;
  OopTaskQueue*  _work_queue;
  CMSMarkStack*  _overflow_stack;
  bool           _yield;
  int            _skip_bits;
  HeapWord*      _finger;
  HeapWord*      _threshold;
  CMSConcMarkingTask* _task;
 public:
  Par_MarkFromRootsClosure(CMSConcMarkingTask* task, CMSCollector* collector,
                           MemRegion span,
                           CMSBitMap* bit_map,
                           OopTaskQueue* work_queue,
                           CMSMarkStack* overflow_stack,
                           bool should_yield);
  bool do_bit(size_t offset);
  inline void do_yield_check();

 private:
  void scan_oops_in_oop(HeapWord* ptr);
  void do_yield_work();
  bool get_work_from_overflow_stack();
};
// The following closures are used to do certain kinds of verification of
// CMS marking.
class PushAndMarkVerifyClosure: public MetadataAwareOopClosure {
  CMSCollector*  _collector;
  MemRegion      _span;
  CMSBitMap*     _verification_bm;
  CMSBitMap*     _cms_bm;
  CMSMarkStack*  _mark_stack;
 protected:
  void do_oop(oop p);
  template <class T> inline void do_oop_work(T *p) {
    oop obj = oopDesc::load_decode_heap_oop(p);
    do_oop(obj);
  }
 public:
  PushAndMarkVerifyClosure(CMSCollector* cms_collector,
                           MemRegion span,
                           CMSBitMap* verification_bm,
                           CMSBitMap* cms_bm,
                           CMSMarkStack* mark_stack);
  void do_oop(oop* p);
  void do_oop(narrowOop* p);

  // Deal with a stack overflow condition
  void handle_stack_overflow(HeapWord* lost);
};

class MarkFromRootsVerifyClosure: public BitMapClosure {
  CMSCollector*  _collector;
  MemRegion      _span;
  CMSBitMap*     _verification_bm;
  CMSBitMap*     _cms_bm;
  CMSMarkStack*  _mark_stack;
  HeapWord*      _finger;
  PushAndMarkVerifyClosure _pam_verify_closure;
 public:
  MarkFromRootsVerifyClosure(CMSCollector* collector, MemRegion span,
                             CMSBitMap* verification_bm,
                             CMSBitMap* cms_bm,
                             CMSMarkStack* mark_stack);
  bool do_bit(size_t offset);
  void reset(HeapWord* addr);
};
// This closure is used to check that a certain set of bits is
// "empty" (i.e. the bit vector doesn't have any 1-bits).
class FalseBitMapClosure: public BitMapClosure {
 public:
  bool do_bit(size_t offset) {
    guarantee(false, "Should not have a 1 bit");
    return true;
  }
};

// A version of ObjectClosure with "memory" (see _previous_address below)
class UpwardsObjectClosure: public BoolObjectClosure {
  HeapWord* _previous_address;
 public:
  UpwardsObjectClosure() : _previous_address(NULL) { }
  void set_previous(HeapWord* addr) { _previous_address = addr; }
  HeapWord* previous()              { return _previous_address; }
  // A return value of "true" can be used by the caller to decide
  // if this object's end should *NOT* be recorded in
  // _previous_address above.
  virtual bool do_object_bm(oop obj, MemRegion mr) = 0;
};
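
// Driver sketch (hypothetical pseudocode): walk objects upwards through a
// region, letting the closure remember where the previous object ended,
// honoring the return-value convention documented above:
//
//   cl->set_previous(region.start());
//   for (/* each object obj with footprint mr, in ascending order */) {
//     if (!cl->do_object_bm(obj, mr)) {
//       cl->set_previous(mr.end());   // record this object's end
//     }
//   }
//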
// This closure is used during the second checkpointing phase
// to rescan the marked objects on the dirty cards in the mod
// union table and the card table proper. It's invoked via
// MarkFromDirtyCardsClosure below. It uses either
// [Par_]MarkRefsIntoAndScanClosure (Par_ in the parallel case)
// declared in genOopClosures.hpp to accomplish some of its work.
// In the parallel case the bitMap is shared, so access to
// it needs to be suitably synchronized for updates by embedded
// closures that update it; however, this closure itself only
// reads the bit_map and because it is idempotent, is immune to
// reading stale values.
class ScanMarkedObjectsAgainClosure: public UpwardsObjectClosure {
  #ifdef ASSERT
    CMSCollector*   _collector;
    MemRegion       _span;
    union {
      CMSMarkStack* _mark_stack;
      OopTaskQueue* _work_queue;
    };
  #endif // ASSERT
  bool       _parallel;
  CMSBitMap* _bit_map;
  union {
    MarkRefsIntoAndScanClosure*     _scan_closure;
    Par_MarkRefsIntoAndScanClosure* _par_scan_closure;
  };

 public:
  ScanMarkedObjectsAgainClosure(CMSCollector* collector,
                                MemRegion span,
                                ReferenceProcessor* rp,
                                CMSBitMap* bit_map,
                                CMSMarkStack* mark_stack,
                                MarkRefsIntoAndScanClosure* cl):
    #ifdef ASSERT
      _collector(collector),
      _span(span),
      _mark_stack(mark_stack),
    #endif // ASSERT
    _parallel(false),
    _bit_map(bit_map),
    _scan_closure(cl) { }

  ScanMarkedObjectsAgainClosure(CMSCollector* collector,
                                MemRegion span,
                                ReferenceProcessor* rp,
                                CMSBitMap* bit_map,
                                OopTaskQueue* work_queue,
                                Par_MarkRefsIntoAndScanClosure* cl):
    #ifdef ASSERT
      _collector(collector),
      _span(span),
      _work_queue(work_queue),
    #endif // ASSERT
    _parallel(true),
    _bit_map(bit_map),
    _par_scan_closure(cl) { }

  bool do_object_b(oop obj) {
    guarantee(false, "Call do_object_bm(oop, MemRegion) form instead");
    return false;
  }
  bool do_object_bm(oop p, MemRegion mr);
};
1581 // This closure is used during the second checkpointing phase
1582 // to rescan the marked objects on the dirty cards in the mod
1583 // union table and the card table proper. It invokes
1584 // ScanMarkedObjectsAgainClosure above to accomplish much of its work.
1585 // In the parallel case, the bit map is shared and requires
1586 // synchronized access.
1587 class MarkFromDirtyCardsClosure: public MemRegionClosure {
1588 CompactibleFreeListSpace* _space;
1589 ScanMarkedObjectsAgainClosure _scan_cl;
1590 size_t _num_dirty_cards;
1592 public:
1593 MarkFromDirtyCardsClosure(CMSCollector* collector,
1594 MemRegion span,
1595 CompactibleFreeListSpace* space,
1596 CMSBitMap* bit_map,
1597 CMSMarkStack* mark_stack,
1598 MarkRefsIntoAndScanClosure* cl):
1599 _space(space),
1600 _num_dirty_cards(0),
1601 _scan_cl(collector, span, collector->ref_processor(), bit_map,
1602 mark_stack, cl) { }
1604 MarkFromDirtyCardsClosure(CMSCollector* collector,
1605 MemRegion span,
1606 CompactibleFreeListSpace* space,
1607 CMSBitMap* bit_map,
1608 OopTaskQueue* work_queue,
1609 Par_MarkRefsIntoAndScanClosure* cl):
1610 _space(space),
1611 _num_dirty_cards(0),
1612 _scan_cl(collector, span, collector->ref_processor(), bit_map,
1613 work_queue, cl) { }
1615 void do_MemRegion(MemRegion mr);
1616 void set_space(CompactibleFreeListSpace* space) { _space = space; }
1617 size_t num_dirty_cards() { return _num_dirty_cards; }
1618 };
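// Editorial call-site sketch (a hedged reconstruction; the variable names
// are illustrative): during the final remark, dirty ranges harvested from
// the mod union table are handed to MarkFromDirtyCardsClosure, whose
// do_MemRegion() drives the embedded ScanMarkedObjectsAgainClosure:
//
//   MarkFromDirtyCardsClosure dirty_cards_cl(this, _span, cms_space,
//                                            &_markBitMap, &_markStack,
//                                            &mrias_cl /* serial scan */);
//   _modUnionTable.dirty_range_iterate_clear(cms_span, &dirty_cards_cl);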
1620 // This closure is used in the non-product build to check that
1621 // an iteration never produces a MemRegion: any invocation fails a guarantee.
1622 class FalseMemRegionClosure: public MemRegionClosure {
1623 void do_MemRegion(MemRegion mr) {
1624 guarantee(!mr.is_empty(), "Shouldn't be empty");
1625 guarantee(false, "Should never be here");
1626 }
1627 };
1629 // This closure is used during the precleaning phase
1630 // to "carefully" rescan marked objects on dirty cards.
1631 // It uses MarkRefsIntoAndScanClosure declared in genOopClosures.hpp
1632 // to accomplish some of its work.
1633 class ScanMarkedObjectsAgainCarefullyClosure: public ObjectClosureCareful {
1634 CMSCollector* _collector;
1635 MemRegion _span;
1636 bool _yield;
1637 Mutex* _freelistLock;
1638 CMSBitMap* _bitMap;
1639 CMSMarkStack* _markStack;
1640 MarkRefsIntoAndScanClosure* _scanningClosure;
1642 public:
1643 ScanMarkedObjectsAgainCarefullyClosure(CMSCollector* collector,
1644 MemRegion span,
1645 CMSBitMap* bitMap,
1646 CMSMarkStack* markStack,
1647 MarkRefsIntoAndScanClosure* cl,
1648 bool should_yield):
1649 _collector(collector),
1650 _span(span),
1651 _yield(should_yield), _freelistLock(NULL), // NULL until setFreelistLock()
1652 _bitMap(bitMap),
1653 _markStack(markStack),
1654 _scanningClosure(cl) {
1655 }
1657 void do_object(oop p) {
1658 guarantee(false, "call do_object_careful instead");
1659 }
1661 size_t do_object_careful(oop p) {
1662 guarantee(false, "Unexpected caller");
1663 return 0;
1664 }
1666 size_t do_object_careful_m(oop p, MemRegion mr);
1668 void setFreelistLock(Mutex* m) {
1669 _freelistLock = m;
1670 _scanningClosure->set_freelistLock(m);
1671 }
1673 private:
1674 inline bool do_yield_check();
1676 void do_yield_work();
1677 };
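// Editorial sketch of the yield-check pattern that such "careful" closures
// typically use (the real body of do_yield_check() lives in the
// corresponding .inline.hpp file and may differ in detail):
//
//   inline bool ScanMarkedObjectsAgainCarefullyClosure::do_yield_check() {
//     if (ConcurrentMarkSweepThread::should_yield() &&
//         !_collector->foregroundGCIsActive() &&
//         _yield) {
//       do_yield_work();  // drop locks, yield to the foreground collector
//       return true;
//     }
//     return false;
//   }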
1679 class SurvivorSpacePrecleanClosure: public ObjectClosureCareful {
1680 CMSCollector* _collector;
1681 MemRegion _span;
1682 bool _yield;
1683 CMSBitMap* _bit_map;
1684 CMSMarkStack* _mark_stack;
1685 PushAndMarkClosure* _scanning_closure;
1686 unsigned int _before_count;
1688 public:
1689 SurvivorSpacePrecleanClosure(CMSCollector* collector,
1690 MemRegion span,
1691 CMSBitMap* bit_map,
1692 CMSMarkStack* mark_stack,
1693 PushAndMarkClosure* cl,
1694 unsigned int before_count,
1695 bool should_yield):
1696 _collector(collector),
1697 _span(span),
1698 _yield(should_yield),
1699 _bit_map(bit_map),
1700 _mark_stack(mark_stack),
1701 _scanning_closure(cl),
1702 _before_count(before_count)
1703 { }
1705 void do_object(oop p) {
1706 guarantee(false, "call do_object_careful instead");
1707 }
1709 size_t do_object_careful(oop p);
1711 size_t do_object_careful_m(oop p, MemRegion mr) {
1712 guarantee(false, "Unexpected caller");
1713 return 0;
1714 }
1716 private:
1717 inline void do_yield_check();
1718 void do_yield_work();
1719 };
1721 // This closure is used to accomplish the sweeping work
1722 // after the second checkpoint but before the concurrent reset
1723 // phase.
1724 //
1725 // Terminology
1726 // left hand chunk (LHC) - block of one or more chunks currently being
1727 // coalesced. The LHC is available for coalescing with a new chunk.
1728 // right hand chunk (RHC) - block currently being swept that is
1729 // free or garbage and can be coalesced with the LHC.
1730 // _inFreeRange is true if there is currently an LHC.
1731 // _lastFreeRangeCoalesced is true if the LHC consists of more than one chunk.
1732 // _freeRangeInFreeLists is true if the LHC is in the free lists.
1733 // _freeFinger is the address of the current LHC
1734 class SweepClosure: public BlkClosureCareful {
1735 CMSCollector* _collector; // collector doing the work
1736 ConcurrentMarkSweepGeneration* _g; // Generation being swept
1737 CompactibleFreeListSpace* _sp; // Space being swept
1738 HeapWord* _limit; // the address at or above which the sweep should stop
1739 // because we do not expect blocks that become newly garbage
1740 // (and hence eligible for sweeping) at or past that address.
1741 Mutex* _freelistLock; // Free list lock (in space)
1742 CMSBitMap* _bitMap; // Marking bit map (in
1743 // generation)
1744 bool _inFreeRange; // Indicates if we are in the
1745 // midst of a free run
1746 bool _freeRangeInFreeLists;
1747 // Often, we have just found
1748 // a free chunk and started
1749 // a new free range; we do not
1750 // eagerly remove this chunk from
1751 // the free lists unless there is
1752 // a possibility of coalescing.
1753 // When true, this flag indicates
1754 // that the _freeFinger below
1755 // points to a potentially free chunk
1756 // that may still be in the free lists
1757 bool _lastFreeRangeCoalesced;
1758 // free range contains chunks
1759 // coalesced
1760 bool _yield;
1761 // Whether sweeping should be
1762 // done with yields. For instance,
1763 // when done by the foreground
1764 // collector we should not yield.
1765 HeapWord* _freeFinger; // When _inFreeRange is set, the
1766 // pointer to the "left hand
1767 // chunk"
1768 size_t _freeRangeSize;
1769 // When _inFreeRange is set, this
1770 // indicates the accumulated size
1771 // of the "left hand chunk"
1772 NOT_PRODUCT(
1773 size_t _numObjectsFreed;
1774 size_t _numWordsFreed;
1775 size_t _numObjectsLive;
1776 size_t _numWordsLive;
1777 size_t _numObjectsAlreadyFree;
1778 size_t _numWordsAlreadyFree;
1779 FreeChunk* _last_fc;
1780 )
1781 private:
1782 // Code common to processing an already-free chunk or a
1783 // garbage chunk encountered during sweeping.
1784 void do_post_free_or_garbage_chunk(FreeChunk *fc, size_t chunkSize);
1785 // Process a free chunk during sweeping.
1786 void do_already_free_chunk(FreeChunk *fc);
1787 // Work method called when processing an already-free or a
1788 // freshly-garbage chunk to do a lookahead and possibly a
1789 // preemptive flush if crossing over _limit.
1790 void lookahead_and_flush(FreeChunk* fc, size_t chunkSize);
1791 // Process a garbage chunk during sweeping.
1792 size_t do_garbage_chunk(FreeChunk *fc);
1793 // Process a live chunk during sweeping.
1794 size_t do_live_chunk(FreeChunk* fc);
1796 // Accessors.
1797 HeapWord* freeFinger() const { return _freeFinger; }
1798 void set_freeFinger(HeapWord* v) { _freeFinger = v; }
1799 bool inFreeRange() const { return _inFreeRange; }
1800 void set_inFreeRange(bool v) { _inFreeRange = v; }
1801 bool lastFreeRangeCoalesced() const { return _lastFreeRangeCoalesced; }
1802 void set_lastFreeRangeCoalesced(bool v) { _lastFreeRangeCoalesced = v; }
1803 bool freeRangeInFreeLists() const { return _freeRangeInFreeLists; }
1804 void set_freeRangeInFreeLists(bool v) { _freeRangeInFreeLists = v; }
1806 // Initialize a free range.
1807 void initialize_free_range(HeapWord* freeFinger, bool freeRangeInFreeLists);
1808 // Return this chunk to the free lists.
1809 void flush_cur_free_chunk(HeapWord* chunk, size_t size);
1811 // Check if we should yield and do so when necessary.
1812 inline void do_yield_check(HeapWord* addr);
1814 // Yield
1815 void do_yield_work(HeapWord* addr);
1817 // Debugging/Printing
1818 void print_free_block_coalesced(FreeChunk* fc) const;
1820 public:
1821 SweepClosure(CMSCollector* collector, ConcurrentMarkSweepGeneration* g,
1822 CMSBitMap* bitMap, bool should_yield);
1823 ~SweepClosure() PRODUCT_RETURN;
1825 size_t do_blk_careful(HeapWord* addr);
1826 void print() const { print_on(tty); }
1827 void print_on(outputStream *st) const;
1828 };
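// Editorial call-site sketch (a hedged reconstruction of the sweep phase
// the comments above describe): the collector walks the space block by
// block, "carefully" because the closure may yield between blocks:
//
//   SweepClosure sweepClosure(this, gen, &_markBitMap,
//                             should_yield /* e.g. CMSYield && asynch */);
//   gen->cmsSpace()->blk_iterate_careful(&sweepClosure);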
1830 // Closures related to weak references processing
1832 // During CMS' weak reference processing, this is a
1833 // work-routine/closure used to complete transitive
1834 // marking of objects as live after a certain point
1835 // at which an initial set has been completely accumulated.
1836 // This closure is currently used both during the final
1837 // remark stop-world phase, as well as during the concurrent
1838 // precleaning of the discovered reference lists.
1839 class CMSDrainMarkingStackClosure: public VoidClosure {
1840 CMSCollector* _collector;
1841 MemRegion _span;
1842 CMSMarkStack* _mark_stack;
1843 CMSBitMap* _bit_map;
1844 CMSKeepAliveClosure* _keep_alive;
1845 bool _concurrent_precleaning;
1846 public:
1847 CMSDrainMarkingStackClosure(CMSCollector* collector, MemRegion span,
1848 CMSBitMap* bit_map, CMSMarkStack* mark_stack,
1849 CMSKeepAliveClosure* keep_alive,
1850 bool cpc):
1851 _collector(collector),
1852 _span(span),
1853 _mark_stack(mark_stack),
1854 _bit_map(bit_map),
1855 _keep_alive(keep_alive),
1856 _concurrent_precleaning(cpc) {
1857 assert(_concurrent_precleaning == _keep_alive->concurrent_precleaning(),
1858 "Mismatch");
1859 }
1861 void do_void();
1862 };
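// Editorial call-site sketch (hedged; the argument lists are a
// reconstruction and are abbreviated with "..." where this header does
// not pin them down): the keep-alive and drain closures are paired, with
// matching cpc flags as the assert above requires, and handed to the
// reference processor, which uses the drain closure to complete
// transitive marking:
//
//   CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
//                                  &_markStack, false /* !preclean */);
//   CMSDrainMarkingStackClosure drain(this, _span, &_markBitMap,
//                                     &_markStack, &keep_alive,
//                                     false /* !preclean */);
//   rp->process_discovered_references(&_is_alive_closure, &keep_alive,
//                                     &drain, ...);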
1864 // A parallel version of CMSDrainMarkingStackClosure above.
1865 class CMSParDrainMarkingStackClosure: public VoidClosure {
1866 CMSCollector* _collector;
1867 MemRegion _span;
1868 OopTaskQueue* _work_queue;
1869 CMSBitMap* _bit_map;
1870 CMSInnerParMarkAndPushClosure _mark_and_push;
1872 public:
1873 CMSParDrainMarkingStackClosure(CMSCollector* collector,
1874 MemRegion span, CMSBitMap* bit_map,
1875 OopTaskQueue* work_queue):
1876 _collector(collector),
1877 _span(span),
1878 _work_queue(work_queue),
1879 _bit_map(bit_map),
1880 _mark_and_push(collector, span, bit_map, work_queue) { }
1883 void trim_queue(uint max);
1884 void do_void();
1885 };
1887 // Allow yielding or short-circuiting of reference list
1888 // precleaning work.
1889 class CMSPrecleanRefsYieldClosure: public YieldClosure {
1890 CMSCollector* _collector;
1891 void do_yield_work();
1892 public:
1893 CMSPrecleanRefsYieldClosure(CMSCollector* collector):
1894 _collector(collector) {}
1895 virtual bool should_return();
1896 };
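// Editorial usage note (hedged; names and trailing arguments are a
// reconstruction): an instance of this closure is passed as the
// YieldClosure* argument of the reference processor's precleaning pass,
// letting that pass yield to the foreground collector:
//
//   CMSPrecleanRefsYieldClosure yield_cl(this);
//   rp->preclean_discovered_references(&_is_alive_closure, &keep_alive,
//                                      &complete_trace, &yield_cl, ...);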
1899 // Convenience (RAII) class that acquires and releases the free list locks of a given CMS collector.
1900 class FreelistLocker: public StackObj {
1901 private:
1902 CMSCollector* _collector;
1903 public:
1904 FreelistLocker(CMSCollector* collector):
1905 _collector(collector) {
1906 _collector->getFreelistLocks();
1907 }
1909 ~FreelistLocker() {
1910 _collector->releaseFreelistLocks();
1911 }
1912 };
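// Editorial usage sketch (the enclosing scope is hypothetical): being a
// StackObj, FreelistLocker holds the free list locks for exactly the
// dynamic extent of its scope:
//
//   {
//     FreelistLocker fll(_collector);  // getFreelistLocks() in constructor
//     // ... mutate the free lists safely ...
//   }                                  // releaseFreelistLocks() in destructor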
1914 // Mark all dead objects in a given space.
1915 class MarkDeadObjectsClosure: public BlkClosure {
1916 const CMSCollector* _collector;
1917 const CompactibleFreeListSpace* _sp;
1918 CMSBitMap* _live_bit_map;
1919 CMSBitMap* _dead_bit_map;
1920 public:
1921 MarkDeadObjectsClosure(const CMSCollector* collector,
1922 const CompactibleFreeListSpace* sp,
1923 CMSBitMap *live_bit_map,
1924 CMSBitMap *dead_bit_map) :
1925 _collector(collector),
1926 _sp(sp),
1927 _live_bit_map(live_bit_map),
1928 _dead_bit_map(dead_bit_map) {}
1929 size_t do_blk(HeapWord* addr);
1930 };
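// Editorial usage sketch (hedged; assumes the space's block iteration
// interface, e.g. CompactibleFreeListSpace::blk_iterate(BlkClosure*)):
// driving the closure over the space records objects not marked in the
// live bit map into the dead bit map:
//
//   MarkDeadObjectsClosure mdo(collector, space, live_map, dead_map);
//   space->blk_iterate(&mdo);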
1932 class TraceCMSMemoryManagerStats : public TraceMemoryManagerStats {
1934 public:
1935 TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase, GCCause::Cause cause);
1936 };
1939 #endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP