Wed, 03 Mar 2010 08:10:41 -0800
6910182: CMS: assert(_cursor[j] == _survivor_plab_array[j].end(),"Ctl pt invariant")
Summary: Calculation of the slicing of survivor spaces for MT was incorrect.
Reviewed-by: ysr
1 /*
2 * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
25 # include "incls/_precompiled.incl"
26 # include "incls/_concurrentMarkSweepGeneration.cpp.incl"
28 // statics
29 CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
30 bool CMSCollector::_full_gc_requested = false;
32 //////////////////////////////////////////////////////////////////
33 // In support of CMS/VM thread synchronization
34 //////////////////////////////////////////////////////////////////
35 // We split use of the CGC_lock into 2 "levels".
36 // The low-level locking is of the usual CGC_lock monitor. We introduce
37 // a higher level "token" (hereafter "CMS token") built on top of the
38 // low level monitor (hereafter "CGC lock").
39 // The token-passing protocol gives priority to the VM thread. The
40 // CMS-lock doesn't provide any fairness guarantees, but clients
41 // should ensure that it is only held for very short, bounded
42 // durations.
43 //
44 // When either of the CMS thread or the VM thread is involved in
45 // collection operations during which it does not want the other
46 // thread to interfere, it obtains the CMS token.
47 //
48 // If either thread tries to get the token while the other has
49 // it, that thread waits. However, if the VM thread and CMS thread
50 // both want the token, then the VM thread gets priority while the
51 // CMS thread waits. This ensures, for instance, that the "concurrent"
52 // phases of the CMS thread's work do not block out the VM thread
53 // for long periods of time as the CMS thread continues to hog
54 // the token. (See bug 4616232).
55 //
56 // The baton-passing functions are, however, controlled by the
57 // flags _foregroundGCShouldWait and _foregroundGCIsActive,
58 // and here the low-level CMS lock, not the high level token,
59 // ensures mutual exclusion.
60 //
61 // Two important conditions that we have to satisfy:
62 // 1. if a thread does a low-level wait on the CMS lock, then it
63 // relinquishes the CMS token if it were holding that token
64 // when it acquired the low-level CMS lock.
65 // 2. any low-level notifications on the low-level lock
66 // should only be sent when a thread has relinquished the token.
67 //
68 // In the absence of either property, we'd have potential deadlock.
69 //
70 // We protect each of the CMS (concurrent and sequential) phases
71 // with the CMS _token_, not the CMS _lock_.
72 //
73 // The only code protected by CMS lock is the token acquisition code
74 // itself, see ConcurrentMarkSweepThread::[de]synchronize(), and the
75 // baton-passing code.
76 //
77 // Unfortunately, I couldn't come up with a good abstraction to factor and
78 // hide the naked CGC_lock manipulation in the baton-passing code
79 // further below. That's something we should try to do. Also, the proof
80 // of correctness of this 2-level locking scheme is far from obvious,
81 // and potentially quite slippery. We have an uneasy suspicion, for instance,
82 // that there may be a theoretical possibility of delay/starvation in the
83 // low-level lock/wait/notify scheme used for the baton-passing because of
84 // potential interference with the priority scheme embodied in the
85 // CMS-token-passing protocol. See related comments at a CGC_lock->wait()
86 // invocation further below and marked with "XXX 20011219YSR".
87 // Indeed, as we note elsewhere, this may become yet more slippery
88 // in the presence of multiple CMS and/or multiple VM threads. XXX
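// For illustration only (not part of this file): the acquisition protocol
// described above can be sketched roughly as follows; the real logic lives
// in ConcurrentMarkSweepThread::synchronize()/desynchronize():
//
//   acquire_token(is_vm_thread):
//     lock CGC_lock                        // low-level monitor
//     while (token is held by the other thread ||
//            (!is_vm_thread && the VM thread is waiting for the token)) {
//       wait on CGC_lock                   // CMS thread defers to VM thread
//     }
//     record token as held; unlock CGC_lock
//
//   release_token():
//     lock CGC_lock; clear token; notify waiters; unlock CGC_lock
//
// This is a hedged sketch of the priority scheme, not the actual code.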
90 class CMSTokenSync: public StackObj {
91 private:
92 bool _is_cms_thread;
93 public:
94 CMSTokenSync(bool is_cms_thread):
95 _is_cms_thread(is_cms_thread) {
96 assert(is_cms_thread == Thread::current()->is_ConcurrentGC_thread(),
97 "Incorrect argument to constructor");
98 ConcurrentMarkSweepThread::synchronize(_is_cms_thread);
99 }
101 ~CMSTokenSync() {
102 assert(_is_cms_thread ?
103 ConcurrentMarkSweepThread::cms_thread_has_cms_token() :
104 ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
105 "Incorrect state");
106 ConcurrentMarkSweepThread::desynchronize(_is_cms_thread);
107 }
108 };
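// Illustrative usage sketch (hypothetical caller, not taken from this file):
// a CMS phase that must not be interleaved with the VM thread would be
// bracketed as
//
//   {
//     CMSTokenSync ts(true /* is_cms_thread */);
//     ... work that requires the CMS token ...
//   }  // token relinquished by the destructor
//
// The constructor blocks in synchronize() until the token is acquired, and
// the destructor calls desynchronize(), so the token is held exactly for the
// dynamic extent of the scope.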
110 // Convenience class that does a CMSTokenSync, and then acquires
111 // up to three locks.
112 class CMSTokenSyncWithLocks: public CMSTokenSync {
113 private:
114 // Note: locks are acquired in textual declaration order
115 // and released in the opposite order
116 MutexLockerEx _locker1, _locker2, _locker3;
117 public:
118 CMSTokenSyncWithLocks(bool is_cms_thread, Mutex* mutex1,
119 Mutex* mutex2 = NULL, Mutex* mutex3 = NULL):
120 CMSTokenSync(is_cms_thread),
121 _locker1(mutex1, Mutex::_no_safepoint_check_flag),
122 _locker2(mutex2, Mutex::_no_safepoint_check_flag),
123 _locker3(mutex3, Mutex::_no_safepoint_check_flag)
124 { }
125 };
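// Illustrative usage sketch (hypothetical mutex name): to take the CMS token
// and then one or more locks in declaration order, a caller might write
//
//   CMSTokenSyncWithLocks ts(true /* is_cms_thread */, some_lock);
//
// The unused mutex arguments default to NULL (no lock is taken for those
// slots); all locks are taken with the no-safepoint-check flag.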
128 // Wrapper class to temporarily disable icms during a foreground cms collection.
129 class ICMSDisabler: public StackObj {
130 public:
131 // The ctor disables icms and wakes up the thread so it notices the change;
132 // the dtor re-enables icms. Note that the CMSCollector methods will check
133 // CMSIncrementalMode.
134 ICMSDisabler() { CMSCollector::disable_icms(); CMSCollector::start_icms(); }
135 ~ICMSDisabler() { CMSCollector::enable_icms(); }
136 };
138 //////////////////////////////////////////////////////////////////
139 // Concurrent Mark-Sweep Generation /////////////////////////////
140 //////////////////////////////////////////////////////////////////
142 NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;)
144 // This struct contains per-thread things necessary to support parallel
145 // young-gen collection.
146 class CMSParGCThreadState: public CHeapObj {
147 public:
148 CFLS_LAB lab;
149 PromotionInfo promo;
151 // Constructor.
152 CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
153 promo.setSpace(cfls);
154 }
155 };
157 ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
158 ReservedSpace rs, size_t initial_byte_size, int level,
159 CardTableRS* ct, bool use_adaptive_freelists,
160 FreeBlockDictionary::DictionaryChoice dictionaryChoice) :
161 CardGeneration(rs, initial_byte_size, level, ct),
162 _dilatation_factor(((double)MinChunkSize)/((double)(oopDesc::header_size()))),
163 _debug_collection_type(Concurrent_collection_type)
164 {
165 HeapWord* bottom = (HeapWord*) _virtual_space.low();
166 HeapWord* end = (HeapWord*) _virtual_space.high();
168 _direct_allocated_words = 0;
169 NOT_PRODUCT(
170 _numObjectsPromoted = 0;
171 _numWordsPromoted = 0;
172 _numObjectsAllocated = 0;
173 _numWordsAllocated = 0;
174 )
176 _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end),
177 use_adaptive_freelists,
178 dictionaryChoice);
179 NOT_PRODUCT(debug_cms_space = _cmsSpace;)
180 if (_cmsSpace == NULL) {
181 vm_exit_during_initialization(
182 "CompactibleFreeListSpace allocation failure");
183 }
184 _cmsSpace->_gen = this;
186 _gc_stats = new CMSGCStats();
188 // Verify the assumption that FreeChunk::_prev and OopDesc::_klass
189 // offsets match. The ability to tell free chunks from objects
190 // depends on this property.
191 debug_only(
192 FreeChunk* junk = NULL;
193 assert(UseCompressedOops ||
194 junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
195 "Offset of FreeChunk::_prev within FreeChunk must match"
196 " that of OopDesc::_klass within OopDesc");
197 )
198 if (ParallelGCThreads > 0) {
199 typedef CMSParGCThreadState* CMSParGCThreadStatePtr;
200 _par_gc_thread_states =
201 NEW_C_HEAP_ARRAY(CMSParGCThreadStatePtr, ParallelGCThreads);
202 if (_par_gc_thread_states == NULL) {
203 vm_exit_during_initialization("Could not allocate par gc structs");
204 }
205 for (uint i = 0; i < ParallelGCThreads; i++) {
206 _par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace());
207 if (_par_gc_thread_states[i] == NULL) {
208 vm_exit_during_initialization("Could not allocate par gc structs");
209 }
210 }
211 } else {
212 _par_gc_thread_states = NULL;
213 }
214 _incremental_collection_failed = false;
215 // The "dilatation_factor" is the expansion that can occur on
216 // account of the fact that the minimum object size in the CMS
217 // generation may be larger than that in, say, a contiguous young
218 // generation.
219 // Ideally, in the calculation below, we'd compute the dilatation
220 // factor as: MinChunkSize/(promoting_gen's min object size)
221 // Since we do not have such a general query interface for the
222 // promoting generation, we'll instead just use the minimum
223 // object size (which today is a header's worth of space);
224 // note that all arithmetic is in units of HeapWords.
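// For illustration (hypothetical sizes, not actual values): if MinChunkSize
// were 4 HeapWords and the object header 2 HeapWords, the dilatation factor
// would be 4/2 = 2.0, i.e. a minimally-sized young-gen object could need up
// to twice as many words once promoted into this generation.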
225 assert(MinChunkSize >= oopDesc::header_size(), "just checking");
226 assert(_dilatation_factor >= 1.0, "from previous assert");
227 }
230 // The field "_initiating_occupancy" represents the occupancy percentage
231 // at which we trigger a new collection cycle. Unless explicitly specified
232 // via CMSInitiating[Perm]OccupancyFraction (argument "io" below), it
233 // is calculated by:
234 //
235 // Let "f" be MinHeapFreeRatio in
236 //
237 // _initiating_occupancy = 100-f +
238 // f * (CMSTrigger[Perm]Ratio/100)
239 // where CMSTrigger[Perm]Ratio is the argument "tr" below.
240 //
241 // That is, if we assume the heap is at its desired maximum occupancy at the
242 // end of a collection, we let CMSTrigger[Perm]Ratio of the (purported) free
243 // space be allocated before initiating a new collection cycle.
244 //
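// Worked example (hypothetical flag values): with MinHeapFreeRatio = 40 and
// CMSTriggerRatio = 80, and no explicit CMSInitiatingOccupancyFraction,
//   _initiating_occupancy = ((100 - 40) + 80 * 40 / 100) / 100
//                         = (60 + 32) / 100 = 0.92
// i.e. a new cycle would be initiated at roughly 92% occupancy.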
245 void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, intx tr) {
246 assert(io <= 100 && tr >= 0 && tr <= 100, "Check the arguments");
247 if (io >= 0) {
248 _initiating_occupancy = (double)io / 100.0;
249 } else {
250 _initiating_occupancy = ((100 - MinHeapFreeRatio) +
251 (double)(tr * MinHeapFreeRatio) / 100.0)
252 / 100.0;
253 }
254 }
256 void ConcurrentMarkSweepGeneration::ref_processor_init() {
257 assert(collector() != NULL, "no collector");
258 collector()->ref_processor_init();
259 }
261 void CMSCollector::ref_processor_init() {
262 if (_ref_processor == NULL) {
263 // Allocate and initialize a reference processor
264 _ref_processor = ReferenceProcessor::create_ref_processor(
265 _span, // span
266 _cmsGen->refs_discovery_is_atomic(), // atomic_discovery
267 _cmsGen->refs_discovery_is_mt(), // mt_discovery
268 &_is_alive_closure,
269 ParallelGCThreads,
270 ParallelRefProcEnabled);
271 // Initialize the _ref_processor field of CMSGen
272 _cmsGen->set_ref_processor(_ref_processor);
274 // Allocate a dummy ref processor for perm gen.
275 ReferenceProcessor* rp2 = new ReferenceProcessor();
276 if (rp2 == NULL) {
277 vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
278 }
279 _permGen->set_ref_processor(rp2);
280 }
281 }
283 CMSAdaptiveSizePolicy* CMSCollector::size_policy() {
284 GenCollectedHeap* gch = GenCollectedHeap::heap();
285 assert(gch->kind() == CollectedHeap::GenCollectedHeap,
286 "Wrong type of heap");
287 CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
288 gch->gen_policy()->size_policy();
289 assert(sp->is_gc_cms_adaptive_size_policy(),
290 "Wrong type of size policy");
291 return sp;
292 }
294 CMSGCAdaptivePolicyCounters* CMSCollector::gc_adaptive_policy_counters() {
295 CMSGCAdaptivePolicyCounters* results =
296 (CMSGCAdaptivePolicyCounters*) collector_policy()->counters();
297 assert(
298 results->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
299 "Wrong gc policy counter kind");
300 return results;
301 }
304 void ConcurrentMarkSweepGeneration::initialize_performance_counters() {
306 const char* gen_name = "old";
308 // Generation Counters - generation 1, 1 subspace
309 _gen_counters = new GenerationCounters(gen_name, 1, 1, &_virtual_space);
311 _space_counters = new GSpaceCounters(gen_name, 0,
312 _virtual_space.reserved_size(),
313 this, _gen_counters);
314 }
316 CMSStats::CMSStats(ConcurrentMarkSweepGeneration* cms_gen, unsigned int alpha):
317 _cms_gen(cms_gen)
318 {
319 assert(alpha <= 100, "bad value");
320 _saved_alpha = alpha;
322 // Initialize the alphas to the bootstrap value of 100.
323 _gc0_alpha = _cms_alpha = 100;
325 _cms_begin_time.update();
326 _cms_end_time.update();
328 _gc0_duration = 0.0;
329 _gc0_period = 0.0;
330 _gc0_promoted = 0;
332 _cms_duration = 0.0;
333 _cms_period = 0.0;
334 _cms_allocated = 0;
336 _cms_used_at_gc0_begin = 0;
337 _cms_used_at_gc0_end = 0;
338 _allow_duty_cycle_reduction = false;
339 _valid_bits = 0;
340 _icms_duty_cycle = CMSIncrementalDutyCycle;
341 }
343 double CMSStats::cms_free_adjustment_factor(size_t free) const {
344 // TBD: CR 6909490
345 return 1.0;
346 }
348 void CMSStats::adjust_cms_free_adjustment_factor(bool fail, size_t free) {
349 }
351 // If promotion failure handling is on, use
352 // the padded average size of the promotion for each
353 // young generation collection.
354 double CMSStats::time_until_cms_gen_full() const {
355 size_t cms_free = _cms_gen->cmsSpace()->free();
356 GenCollectedHeap* gch = GenCollectedHeap::heap();
357 size_t expected_promotion = gch->get_gen(0)->capacity();
358 if (HandlePromotionFailure) {
359 expected_promotion = MIN2(
360 (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average(),
361 expected_promotion);
362 }
363 if (cms_free > expected_promotion) {
364 // Start a cms collection if there isn't enough space to promote
365 // for the next minor collection. Use the padded average as
366 // a safety factor.
367 cms_free -= expected_promotion;
369 // Adjust by the safety factor.
370 double cms_free_dbl = (double)cms_free;
371 double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor)/100.0;
372 // Apply a further correction factor which tries to adjust
373 // for recent occurrence of concurrent mode failures.
374 cms_adjustment = cms_adjustment * cms_free_adjustment_factor(cms_free);
375 cms_free_dbl = cms_free_dbl * cms_adjustment;
377 if (PrintGCDetails && Verbose) {
378 gclog_or_tty->print_cr("CMSStats::time_until_cms_gen_full: cms_free "
379 SIZE_FORMAT " expected_promotion " SIZE_FORMAT,
380 cms_free, expected_promotion);
381 gclog_or_tty->print_cr(" cms_free_dbl %f cms_consumption_rate %f",
382 cms_free_dbl, cms_consumption_rate() + 1.0);
383 }
384 // Add 1 in case the consumption rate goes to zero.
385 return cms_free_dbl / (cms_consumption_rate() + 1.0);
386 }
387 return 0.0;
388 }
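// Worked example (hypothetical, unit-agnostic numbers): with cms_free = 1000,
// expected_promotion = 200, CMSIncrementalSafetyFactor = 10 and a consumption
// rate of 99, the estimate above is
//   (1000 - 200) * ((100 - 10)/100) * 1.0 / (99 + 1) = 7.2
// time units until the cms generation is expected to be full.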
390 // Compare the duration of the cms collection to the
391 // time remaining before the cms generation is empty.
392 // Note that the time from the start of the cms collection
393 // to the start of the cms sweep (less than the total
394 // duration of the cms collection) can be used. This
395 // has been tried and some applications experienced
396 // promotion failures early in execution. This was
397 // possibly because the averages were not accurate
398 // enough at the beginning.
399 double CMSStats::time_until_cms_start() const {
400 // We add "gc0_period" to the "work" calculation
401 // below because this query is done (mostly) at the
402 // end of a scavenge, so we need to conservatively
403 // account for that much possible delay
404 // in the query so as to avoid concurrent mode failures
405 // due to starting the collection just a wee bit too
406 // late.
407 double work = cms_duration() + gc0_period();
408 double deadline = time_until_cms_gen_full();
409 // If a concurrent mode failure occurred recently, we want to be
410 // more conservative and halve our expected time_until_cms_gen_full()
411 if (work > deadline) {
412 if (Verbose && PrintGCDetails) {
413 gclog_or_tty->print(
414 " CMSCollector: collect because of anticipated promotion "
415 "before full %3.7f + %3.7f > %3.7f ", cms_duration(),
416 gc0_period(), time_until_cms_gen_full());
417 }
418 return 0.0;
419 }
420 return work - deadline;
421 }
423 // Return a duty cycle based on old_duty_cycle and new_duty_cycle, limiting the
424 // amount of change to prevent wild oscillation.
425 unsigned int CMSStats::icms_damped_duty_cycle(unsigned int old_duty_cycle,
426 unsigned int new_duty_cycle) {
427 assert(old_duty_cycle <= 100, "bad input value");
428 assert(new_duty_cycle <= 100, "bad input value");
430 // Note: use subtraction with caution since it may underflow (values are
431 // unsigned). Addition is safe since we're in the range 0-100.
432 unsigned int damped_duty_cycle = new_duty_cycle;
433 if (new_duty_cycle < old_duty_cycle) {
434 const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 5U);
435 if (new_duty_cycle + largest_delta < old_duty_cycle) {
436 damped_duty_cycle = old_duty_cycle - largest_delta;
437 }
438 } else if (new_duty_cycle > old_duty_cycle) {
439 const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 15U);
440 if (new_duty_cycle > old_duty_cycle + largest_delta) {
441 damped_duty_cycle = MIN2(old_duty_cycle + largest_delta, 100U);
442 }
443 }
444 assert(damped_duty_cycle <= 100, "invalid duty cycle computed");
446 if (CMSTraceIncrementalPacing) {
447 gclog_or_tty->print(" [icms_damped_duty_cycle(%d,%d) = %d] ",
448 old_duty_cycle, new_duty_cycle, damped_duty_cycle);
449 }
450 return damped_duty_cycle;
451 }
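// Worked examples of the damping above (hypothetical duty cycles):
//   decreasing: old = 60, new = 10 -> largest_delta = MAX2(60/4, 5) = 15;
//               10 + 15 < 60, so the result is 60 - 15 = 45, not 10.
//   increasing: old = 20, new = 80 -> largest_delta = MAX2(20/4, 15) = 15;
//               80 > 20 + 15, so the result is MIN2(35, 100) = 35, not 80.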
453 unsigned int CMSStats::icms_update_duty_cycle_impl() {
454 assert(CMSIncrementalPacing && valid(),
455 "should be handled in icms_update_duty_cycle()");
457 double cms_time_so_far = cms_timer().seconds();
458 double scaled_duration = cms_duration_per_mb() * _cms_used_at_gc0_end / M;
459 double scaled_duration_remaining = fabsd(scaled_duration - cms_time_so_far);
461 // Avoid division by 0.
462 double time_until_full = MAX2(time_until_cms_gen_full(), 0.01);
463 double duty_cycle_dbl = 100.0 * scaled_duration_remaining / time_until_full;
465 unsigned int new_duty_cycle = MIN2((unsigned int)duty_cycle_dbl, 100U);
466 if (new_duty_cycle > _icms_duty_cycle) {
467 // Avoid very small duty cycles (1 or 2); 0 is allowed.
468 if (new_duty_cycle > 2) {
469 _icms_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle,
470 new_duty_cycle);
471 }
472 } else if (_allow_duty_cycle_reduction) {
473 // The duty cycle is reduced only once per cms cycle (see record_cms_end()).
474 new_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle, new_duty_cycle);
475 // Respect the minimum duty cycle.
476 unsigned int min_duty_cycle = (unsigned int)CMSIncrementalDutyCycleMin;
477 _icms_duty_cycle = MAX2(new_duty_cycle, min_duty_cycle);
478 }
480 if (PrintGCDetails || CMSTraceIncrementalPacing) {
481 gclog_or_tty->print(" icms_dc=%d ", _icms_duty_cycle);
482 }
484 _allow_duty_cycle_reduction = false;
485 return _icms_duty_cycle;
486 }
488 #ifndef PRODUCT
489 void CMSStats::print_on(outputStream *st) const {
490 st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
491 st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
492 gc0_duration(), gc0_period(), gc0_promoted());
493 st->print(",cms_dur=%g,cms_dur_per_mb=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
494 cms_duration(), cms_duration_per_mb(),
495 cms_period(), cms_allocated());
496 st->print(",cms_since_beg=%g,cms_since_end=%g",
497 cms_time_since_begin(), cms_time_since_end());
498 st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
499 _cms_used_at_gc0_begin, _cms_used_at_gc0_end);
500 if (CMSIncrementalMode) {
501 st->print(",dc=%d", icms_duty_cycle());
502 }
504 if (valid()) {
505 st->print(",promo_rate=%g,cms_alloc_rate=%g",
506 promotion_rate(), cms_allocation_rate());
507 st->print(",cms_consumption_rate=%g,time_until_full=%g",
508 cms_consumption_rate(), time_until_cms_gen_full());
509 }
510 st->print(" ");
511 }
512 #endif // #ifndef PRODUCT
514 CMSCollector::CollectorState CMSCollector::_collectorState =
515 CMSCollector::Idling;
516 bool CMSCollector::_foregroundGCIsActive = false;
517 bool CMSCollector::_foregroundGCShouldWait = false;
519 CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
520 ConcurrentMarkSweepGeneration* permGen,
521 CardTableRS* ct,
522 ConcurrentMarkSweepPolicy* cp):
523 _cmsGen(cmsGen),
524 _permGen(permGen),
525 _ct(ct),
526 _ref_processor(NULL), // will be set later
527 _conc_workers(NULL), // may be set later
528 _abort_preclean(false),
529 _start_sampling(false),
530 _between_prologue_and_epilogue(false),
531 _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
532 _perm_gen_verify_bit_map(0, -1 /* no mutex */, "No_lock"),
533 _modUnionTable((CardTableModRefBS::card_shift - LogHeapWordSize),
534 -1 /* lock-free */, "No_lock" /* dummy */),
535 _modUnionClosure(&_modUnionTable),
536 _modUnionClosurePar(&_modUnionTable),
537 // Adjust my span to cover old (cms) gen and perm gen
538 _span(cmsGen->reserved()._union(permGen->reserved())),
539 // Construct the is_alive_closure with _span & markBitMap
540 _is_alive_closure(_span, &_markBitMap),
541 _restart_addr(NULL),
542 _overflow_list(NULL),
543 _preserved_oop_stack(NULL),
544 _preserved_mark_stack(NULL),
545 _stats(cmsGen),
546 _eden_chunk_array(NULL), // may be set in ctor body
547 _eden_chunk_capacity(0), // -- ditto --
548 _eden_chunk_index(0), // -- ditto --
549 _survivor_plab_array(NULL), // -- ditto --
550 _survivor_chunk_array(NULL), // -- ditto --
551 _survivor_chunk_capacity(0), // -- ditto --
552 _survivor_chunk_index(0), // -- ditto --
553 _ser_pmc_preclean_ovflw(0),
554 _ser_kac_preclean_ovflw(0),
555 _ser_pmc_remark_ovflw(0),
556 _par_pmc_remark_ovflw(0),
557 _ser_kac_ovflw(0),
558 _par_kac_ovflw(0),
559 #ifndef PRODUCT
560 _num_par_pushes(0),
561 #endif
562 _collection_count_start(0),
563 _verifying(false),
564 _icms_start_limit(NULL),
565 _icms_stop_limit(NULL),
566 _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
567 _completed_initialization(false),
568 _collector_policy(cp),
569 _should_unload_classes(false),
570 _concurrent_cycles_since_last_unload(0),
571 _roots_scanning_options(0),
572 _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
573 _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding)
574 {
575 if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
576 ExplicitGCInvokesConcurrent = true;
577 }
578 // Now expand the span and allocate the collection support structures
579 // (MUT, marking bit map etc.) to cover both generations subject to
580 // collection.
582 // First check that _permGen is adjacent to _cmsGen and above it.
583 assert( _cmsGen->reserved().word_size() > 0
584 && _permGen->reserved().word_size() > 0,
585 "generations should not be of zero size");
586 assert(_cmsGen->reserved().intersection(_permGen->reserved()).is_empty(),
587 "_cmsGen and _permGen should not overlap");
588 assert(_cmsGen->reserved().end() == _permGen->reserved().start(),
589 "_cmsGen->end() different from _permGen->start()");
591 // For use by dirty card to oop closures.
592 _cmsGen->cmsSpace()->set_collector(this);
593 _permGen->cmsSpace()->set_collector(this);
595 // Allocate MUT and marking bit map
596 {
597 MutexLockerEx x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag);
598 if (!_markBitMap.allocate(_span)) {
599 warning("Failed to allocate CMS Bit Map");
600 return;
601 }
602 assert(_markBitMap.covers(_span), "_markBitMap inconsistency?");
603 }
604 {
605 _modUnionTable.allocate(_span);
606 assert(_modUnionTable.covers(_span), "_modUnionTable inconsistency?");
607 }
609 if (!_markStack.allocate(MarkStackSize)) {
610 warning("Failed to allocate CMS Marking Stack");
611 return;
612 }
613 if (!_revisitStack.allocate(CMSRevisitStackSize)) {
614 warning("Failed to allocate CMS Revisit Stack");
615 return;
616 }
618 // Support for multi-threaded concurrent phases
619 if (ParallelGCThreads > 0 && CMSConcurrentMTEnabled) {
620 if (FLAG_IS_DEFAULT(ConcGCThreads)) {
621 // just for now
622 FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3)/4);
623 }
624 if (ConcGCThreads > 1) {
625 _conc_workers = new YieldingFlexibleWorkGang("Parallel CMS Threads",
626 ConcGCThreads, true);
627 if (_conc_workers == NULL) {
628 warning("GC/CMS: _conc_workers allocation failure: "
629 "forcing -CMSConcurrentMTEnabled");
630 CMSConcurrentMTEnabled = false;
631 }
632 } else {
633 CMSConcurrentMTEnabled = false;
634 }
635 }
636 if (!CMSConcurrentMTEnabled) {
637 ConcGCThreads = 0;
638 } else {
639 // Turn off CMSCleanOnEnter optimization temporarily for
640 // the MT case where it's not fixed yet; see 6178663.
641 CMSCleanOnEnter = false;
642 }
643 assert((_conc_workers != NULL) == (ConcGCThreads > 1),
644 "Inconsistency");
646 // Parallel task queues; these are shared for the
647 // concurrent and stop-world phases of CMS, but
648 // are not shared with parallel scavenge (ParNew).
649 {
650 uint i;
651 uint num_queues = (uint) MAX2(ParallelGCThreads, ConcGCThreads);
653 if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled
654 || ParallelRefProcEnabled)
655 && num_queues > 0) {
656 _task_queues = new OopTaskQueueSet(num_queues);
657 if (_task_queues == NULL) {
658 warning("task_queues allocation failure.");
659 return;
660 }
661 _hash_seed = NEW_C_HEAP_ARRAY(int, num_queues);
662 if (_hash_seed == NULL) {
663 warning("_hash_seed array allocation failure");
664 return;
665 }
667 // XXX use a global constant instead of 64!
668 typedef struct OopTaskQueuePadded {
669 OopTaskQueue work_queue;
670 char pad[64 - sizeof(OopTaskQueue)]; // prevent false sharing
671 } OopTaskQueuePadded;
673 for (i = 0; i < num_queues; i++) {
674 OopTaskQueuePadded *q_padded = new OopTaskQueuePadded();
675 if (q_padded == NULL) {
676 warning("work_queue allocation failure.");
677 return;
678 }
679 _task_queues->register_queue(i, &q_padded->work_queue);
680 }
681 for (i = 0; i < num_queues; i++) {
682 _task_queues->queue(i)->initialize();
683 _hash_seed[i] = 17; // copied from ParNew
684 }
685 }
686 }
688 _cmsGen ->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);
689 _permGen->init_initiating_occupancy(CMSInitiatingPermOccupancyFraction, CMSTriggerPermRatio);
691 // Clip CMSBootstrapOccupancy between 0 and 100.
692 _bootstrap_occupancy = ((double)MIN2((uintx)100, MAX2((uintx)0, CMSBootstrapOccupancy)))
693 /(double)100;
695 _full_gcs_since_conc_gc = 0;
697 // Now tell CMS generations the identity of their collector
698 ConcurrentMarkSweepGeneration::set_collector(this);
700 // Create & start a CMS thread for this CMS collector
701 _cmsThread = ConcurrentMarkSweepThread::start(this);
702 assert(cmsThread() != NULL, "CMS Thread should have been created");
703 assert(cmsThread()->collector() == this,
704 "CMS Thread should refer to this gen");
705 assert(CGC_lock != NULL, "Where's the CGC_lock?");
707 // Support for parallelizing young gen rescan
708 GenCollectedHeap* gch = GenCollectedHeap::heap();
709 _young_gen = gch->prev_gen(_cmsGen);
710 if (gch->supports_inline_contig_alloc()) {
711 _top_addr = gch->top_addr();
712 _end_addr = gch->end_addr();
713 assert(_young_gen != NULL, "no _young_gen");
714 _eden_chunk_index = 0;
715 _eden_chunk_capacity = (_young_gen->max_capacity()+CMSSamplingGrain)/CMSSamplingGrain;
716 _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity);
717 if (_eden_chunk_array == NULL) {
718 _eden_chunk_capacity = 0;
719 warning("GC/CMS: _eden_chunk_array allocation failure");
720 }
721 }
722 assert(_eden_chunk_array != NULL || _eden_chunk_capacity == 0, "Error");
724 // Support for parallelizing survivor space rescan
725 if (CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) {
726 const size_t max_plab_samples =
727 ((DefNewGeneration*)_young_gen)->max_survivor_size()/MinTLABSize;
729 _survivor_plab_array = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads);
730 _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, 2*max_plab_samples);
731 _cursor = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads);
732 if (_survivor_plab_array == NULL || _survivor_chunk_array == NULL
733 || _cursor == NULL) {
734 warning("Failed to allocate survivor plab/chunk array");
735 if (_survivor_plab_array != NULL) {
736 FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array);
737 _survivor_plab_array = NULL;
738 }
739 if (_survivor_chunk_array != NULL) {
740 FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array);
741 _survivor_chunk_array = NULL;
742 }
743 if (_cursor != NULL) {
744 FREE_C_HEAP_ARRAY(size_t, _cursor);
745 _cursor = NULL;
746 }
747 } else {
748 _survivor_chunk_capacity = 2*max_plab_samples;
749 for (uint i = 0; i < ParallelGCThreads; i++) {
750 HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples);
751 if (vec == NULL) {
752 warning("Failed to allocate survivor plab array");
753 for (int j = i; j > 0; j--) {
754 FREE_C_HEAP_ARRAY(HeapWord*, _survivor_plab_array[j-1].array());
755 }
756 FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array);
757 FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array);
758 _survivor_plab_array = NULL;
759 _survivor_chunk_array = NULL;
760 _survivor_chunk_capacity = 0;
761 break;
762 } else {
763 ChunkArray* cur =
764 ::new (&_survivor_plab_array[i]) ChunkArray(vec,
765 max_plab_samples);
766 assert(cur->end() == 0, "Should be 0");
767 assert(cur->array() == vec, "Should be vec");
768 assert(cur->capacity() == max_plab_samples, "Error");
769 }
770 }
771 }
772 }
773 assert( ( _survivor_plab_array != NULL
774 && _survivor_chunk_array != NULL)
775 || ( _survivor_chunk_capacity == 0
776 && _survivor_chunk_index == 0),
777 "Error");
779 // Choose what strong roots should be scanned depending on verification options
780 // and perm gen collection mode.
781 if (!CMSClassUnloadingEnabled) {
782 // If class unloading is disabled we want to include all classes into the root set.
783 add_root_scanning_option(SharedHeap::SO_AllClasses);
784 } else {
785 add_root_scanning_option(SharedHeap::SO_SystemClasses);
786 }
788 NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
789 _gc_counters = new CollectorCounters("CMS", 1);
790 _completed_initialization = true;
791 _inter_sweep_timer.start(); // start of time
792 }
794 const char* ConcurrentMarkSweepGeneration::name() const {
795 return "concurrent mark-sweep generation";
796 }
797 void ConcurrentMarkSweepGeneration::update_counters() {
798 if (UsePerfData) {
799 _space_counters->update_all();
800 _gen_counters->update_all();
801 }
802 }
804 // this is an optimized version of update_counters(). it takes the
805 // used value as a parameter rather than computing it.
806 //
807 void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
808 if (UsePerfData) {
809 _space_counters->update_used(used);
810 _space_counters->update_capacity();
811 _gen_counters->update_all();
812 }
813 }
815 void ConcurrentMarkSweepGeneration::print() const {
816 Generation::print();
817 cmsSpace()->print();
818 }
820 #ifndef PRODUCT
821 void ConcurrentMarkSweepGeneration::print_statistics() {
822 cmsSpace()->printFLCensus(0);
823 }
824 #endif
826 void ConcurrentMarkSweepGeneration::printOccupancy(const char *s) {
827 GenCollectedHeap* gch = GenCollectedHeap::heap();
828 if (PrintGCDetails) {
829 if (Verbose) {
830 gclog_or_tty->print(" [%d %s-%s: "SIZE_FORMAT"("SIZE_FORMAT")]",
831 level(), short_name(), s, used(), capacity());
832 } else {
833 gclog_or_tty->print(" [%d %s-%s: "SIZE_FORMAT"K("SIZE_FORMAT"K)]",
834 level(), short_name(), s, used() / K, capacity() / K);
835 }
836 }
837 if (Verbose) {
838 gclog_or_tty->print(" "SIZE_FORMAT"("SIZE_FORMAT")",
839 gch->used(), gch->capacity());
840 } else {
841 gclog_or_tty->print(" "SIZE_FORMAT"K("SIZE_FORMAT"K)",
842 gch->used() / K, gch->capacity() / K);
843 }
844 }
846 size_t
847 ConcurrentMarkSweepGeneration::contiguous_available() const {
848 // dld proposes an improvement in precision here. If the committed
849 // part of the space ends in a free block we should add that to
850 // uncommitted size in the calculation below. Will make this
851 // change later, staying with the approximation below for the
852 // time being. -- ysr.
853 return MAX2(_virtual_space.uncommitted_size(), unsafe_max_alloc_nogc());
854 }
856 size_t
857 ConcurrentMarkSweepGeneration::unsafe_max_alloc_nogc() const {
858 return _cmsSpace->max_alloc_in_words() * HeapWordSize;
859 }
861 size_t ConcurrentMarkSweepGeneration::max_available() const {
862 return free() + _virtual_space.uncommitted_size();
863 }
865 bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(
866 size_t max_promotion_in_bytes,
867 bool younger_handles_promotion_failure) const {
869 // This is the most conservative test. Full promotion is
870 // guaranteed if this is used. The multiplicative factor is to
871 // account for the worst case "dilatation".
872 double adjusted_max_promo_bytes = _dilatation_factor * max_promotion_in_bytes;
873 if (adjusted_max_promo_bytes > (double)max_uintx) { // larger than size_t
874 adjusted_max_promo_bytes = (double)max_uintx;
875 }
876 bool result = (max_contiguous_available() >= (size_t)adjusted_max_promo_bytes);
878 if (younger_handles_promotion_failure && !result) {
879 // Full promotion is not guaranteed because fragmentation
880 // of the cms generation can prevent the full promotion.
881 result = (max_available() >= (size_t)adjusted_max_promo_bytes);
883 if (!result) {
884 // With promotion failure handling the test for the ability
885 // to support the promotion does not have to be guaranteed.
886 // Use an average of the amount promoted.
887 result = max_available() >= (size_t)
888 gc_stats()->avg_promoted()->padded_average();
889 if (PrintGC && Verbose && result) {
890 gclog_or_tty->print_cr(
891 "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe"
892 " max_available: " SIZE_FORMAT
893 " avg_promoted: " SIZE_FORMAT,
894 max_available(), (size_t)
895 gc_stats()->avg_promoted()->padded_average());
896 }
897 } else {
898 if (PrintGC && Verbose) {
899 gclog_or_tty->print_cr(
900 "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe"
901 " max_available: " SIZE_FORMAT
902 " adj_max_promo_bytes: " SIZE_FORMAT,
903 max_available(), (size_t)adjusted_max_promo_bytes);
904 }
905 }
906 } else {
907 if (PrintGC && Verbose) {
908 gclog_or_tty->print_cr(
909 "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe"
910 " contiguous_available: " SIZE_FORMAT
911 " adj_max_promo_bytes: " SIZE_FORMAT,
912 max_contiguous_available(), (size_t)adjusted_max_promo_bytes);
913 }
914 }
915 return result;
916 }
918 // At a promotion failure dump information on block layout in heap
919 // (cms old generation).
920 void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
921 if (CMSDumpAtPromotionFailure) {
922 cmsSpace()->dump_at_safepoint_with_locks(collector(), gclog_or_tty);
923 }
924 }
926 CompactibleSpace*
927 ConcurrentMarkSweepGeneration::first_compaction_space() const {
928 return _cmsSpace;
929 }
931 void ConcurrentMarkSweepGeneration::reset_after_compaction() {
932 // Clear the promotion information. These pointers can be adjusted
933 // along with all the other pointers into the heap, but
934 // compaction is expected to be a rare event with
935 // a heap using CMS, so don't do it without seeing the need.
936 if (ParallelGCThreads > 0) {
937 for (uint i = 0; i < ParallelGCThreads; i++) {
938 _par_gc_thread_states[i]->promo.reset();
939 }
940 }
941 }
943 void ConcurrentMarkSweepGeneration::space_iterate(SpaceClosure* blk, bool usedOnly) {
944 blk->do_space(_cmsSpace);
945 }
947 void ConcurrentMarkSweepGeneration::compute_new_size() {
948 assert_locked_or_safepoint(Heap_lock);
950 // If incremental collection failed, we just want to expand
951 // to the limit.
952 if (incremental_collection_failed()) {
953 clear_incremental_collection_failed();
954 grow_to_reserved();
955 return;
956 }
958 size_t expand_bytes = 0;
959 double free_percentage = ((double) free()) / capacity();
960 double desired_free_percentage = (double) MinHeapFreeRatio / 100;
961 double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;
963 // compute expansion delta needed for reaching desired free percentage
964 if (free_percentage < desired_free_percentage) {
965 size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
966 assert(desired_capacity >= capacity(), "invalid expansion size");
967 expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
968 }
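// Worked example (hypothetical sizes): with capacity() = 100M, used() = 80M
// and MinHeapFreeRatio = 40, free_percentage = 0.20 < 0.40, so
//   desired_capacity = 80M / (1 - 0.40) ~= 133M
// and expand_bytes = MAX2(133M - 100M, MinHeapDeltaBytes) ~= 33M.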
969 if (expand_bytes > 0) {
970 if (PrintGCDetails && Verbose) {
971 size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
972 gclog_or_tty->print_cr("\nFrom compute_new_size: ");
973 gclog_or_tty->print_cr(" Free fraction %f", free_percentage);
974 gclog_or_tty->print_cr(" Desired free fraction %f",
975 desired_free_percentage);
976 gclog_or_tty->print_cr(" Maximum free fraction %f",
977 maximum_free_percentage);
978 gclog_or_tty->print_cr(" Capactiy "SIZE_FORMAT, capacity()/1000);
979 gclog_or_tty->print_cr(" Desired capacity "SIZE_FORMAT,
980 desired_capacity/1000);
981 int prev_level = level() - 1;
982 if (prev_level >= 0) {
983 size_t prev_size = 0;
984 GenCollectedHeap* gch = GenCollectedHeap::heap();
985 Generation* prev_gen = gch->_gens[prev_level];
986 prev_size = prev_gen->capacity();
987 gclog_or_tty->print_cr(" Younger gen size "SIZE_FORMAT,
988 prev_size/1000);
989 }
990 gclog_or_tty->print_cr(" unsafe_max_alloc_nogc "SIZE_FORMAT,
991 unsafe_max_alloc_nogc()/1000);
992 gclog_or_tty->print_cr(" contiguous available "SIZE_FORMAT,
993 contiguous_available()/1000);
994 gclog_or_tty->print_cr(" Expand by "SIZE_FORMAT" (bytes)",
995 expand_bytes);
996 }
997 // safe if expansion fails
998 expand(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
999 if (PrintGCDetails && Verbose) {
1000 gclog_or_tty->print_cr(" Expanded free fraction %f",
1001 ((double) free()) / capacity());
1002 }
1003 }
1004 }
1006 Mutex* ConcurrentMarkSweepGeneration::freelistLock() const {
1007 return cmsSpace()->freelistLock();
1008 }
1010 HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size,
1011 bool tlab) {
1012 CMSSynchronousYieldRequest yr;
1013 MutexLockerEx x(freelistLock(),
1014 Mutex::_no_safepoint_check_flag);
1015 return have_lock_and_allocate(size, tlab);
1016 }
1018 HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
1019 bool tlab) {
1020 assert_lock_strong(freelistLock());
1021 size_t adjustedSize = CompactibleFreeListSpace::adjustObjectSize(size);
1022 HeapWord* res = cmsSpace()->allocate(adjustedSize);
1023 // Allocate the object live (grey) if the background collector has
1024 // started marking. This is necessary because the marker may
1025 // have passed this address and consequently this object will
1026 // not otherwise be greyed and would be incorrectly swept up.
1027 // Note that if this object contains references, the writing
1028 // of those references will dirty the card containing this object
1029 // allowing the object to be blackened (and its references scanned)
1030 // either during a preclean phase or at the final checkpoint.
1031 if (res != NULL) {
1032 collector()->direct_allocated(res, adjustedSize);
1033 _direct_allocated_words += adjustedSize;
1034 // allocation counters
1035 NOT_PRODUCT(
1036 _numObjectsAllocated++;
1037 _numWordsAllocated += (int)adjustedSize;
1038 )
1039 }
1040 return res;
1041 }
1043 // In the case of direct allocation by mutators in a generation that
1044 // is being concurrently collected, the object must be allocated
1045 // live (grey) if the background collector has started marking.
1046 // This is necessary because the marker may
1047 // have passed this address and consequently this object will
1048 // not otherwise be greyed and would be incorrectly swept up.
1049 // Note that if this object contains references, the writing
1050 // of those references will dirty the card containing this object
1051 // allowing the object to be blackened (and its references scanned)
1052 // either during a preclean phase or at the final checkpoint.
1053 void CMSCollector::direct_allocated(HeapWord* start, size_t size) {
1054 assert(_markBitMap.covers(start, size), "Out of bounds");
1055 if (_collectorState >= Marking) {
1056 MutexLockerEx y(_markBitMap.lock(),
1057 Mutex::_no_safepoint_check_flag);
1058 // [see comments preceding SweepClosure::do_blk() below for details]
1059 // 1. need to mark the object as live so it isn't collected
1060 // 2. need to mark the 2nd bit to indicate the object may be uninitialized
1061 // 3. need to mark the end of the object so sweeper can skip over it
1062 // if it's uninitialized when the sweeper reaches it.
1063 _markBitMap.mark(start); // object is live
1064 _markBitMap.mark(start + 1); // object is potentially uninitialized?
1065 _markBitMap.mark(start + size - 1);
1066 // mark end of object
1067 }
1068 // check that oop looks uninitialized
1069 assert(oop(start)->klass_or_null() == NULL, "_klass should be NULL");
1070 }
1072 void CMSCollector::promoted(bool par, HeapWord* start,
1073 bool is_obj_array, size_t obj_size) {
1074 assert(_markBitMap.covers(start), "Out of bounds");
1075 // See comment in direct_allocated() about when objects should
1076 // be allocated live.
1077 if (_collectorState >= Marking) {
1078 // we already hold the marking bit map lock, taken in
1079 // the prologue
1080 if (par) {
1081 _markBitMap.par_mark(start);
1082 } else {
1083 _markBitMap.mark(start);
1084 }
1085 // We don't need to mark the object as uninitialized (as
1086 // in direct_allocated above) because this is being done with the
1087 // world stopped and the object will be initialized by the
1088 // time the sweeper gets to look at it.
1089 assert(SafepointSynchronize::is_at_safepoint(),
1090 "expect promotion only at safepoints");
1092 if (_collectorState < Sweeping) {
1093 // Mark the appropriate cards in the modUnionTable, so that
1094 // this object gets scanned before the sweep. If this is
1095 // not done, CMS generation references in the object might
1096 // not get marked.
1097 // For the case of arrays, which are otherwise precisely
1098 // marked, we need to dirty the entire array, not just its head.
1099 if (is_obj_array) {
1100 // The [par_]mark_range() method expects mr.end() below to
1101 // be aligned to the granularity of a bit's representation
1102 // in the heap. In the case of the MUT below, that's a
1103 // card size.
1104 MemRegion mr(start,
1105 (HeapWord*)round_to((intptr_t)(start + obj_size),
1106 CardTableModRefBS::card_size /* bytes */));
1107 if (par) {
1108 _modUnionTable.par_mark_range(mr);
1109 } else {
1110 _modUnionTable.mark_range(mr);
1111 }
1112 } else { // not an obj array; we can just mark the head
1113 if (par) {
1114 _modUnionTable.par_mark(start);
1115 } else {
1116 _modUnionTable.mark(start);
1117 }
1118 }
1119 }
1120 }
1121 }
1123 static inline size_t percent_of_space(Space* space, HeapWord* addr)
1124 {
1125 size_t delta = pointer_delta(addr, space->bottom());
1126 return (size_t)(delta * 100.0 / (space->capacity() / HeapWordSize));
1127 }
1129 void CMSCollector::icms_update_allocation_limits()
1130 {
1131 Generation* gen0 = GenCollectedHeap::heap()->get_gen(0);
1132 EdenSpace* eden = gen0->as_DefNewGeneration()->eden();
1134 const unsigned int duty_cycle = stats().icms_update_duty_cycle();
1135 if (CMSTraceIncrementalPacing) {
1136 stats().print();
1137 }
1139 assert(duty_cycle <= 100, "invalid duty cycle");
1140 if (duty_cycle != 0) {
1141 // The duty_cycle is a percentage between 0 and 100; convert to words and
1142 // then compute the offset from the endpoints of the space.
1143 size_t free_words = eden->free() / HeapWordSize;
1144 double free_words_dbl = (double)free_words;
1145 size_t duty_cycle_words = (size_t)(free_words_dbl * duty_cycle / 100.0);
1146 size_t offset_words = (free_words - duty_cycle_words) / 2;
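// For illustration (hypothetical numbers): with free_words = 1000 and a duty
// cycle of 40, duty_cycle_words = 400 and offset_words = (1000 - 400) / 2 =
// 300, so the limits below bracket the middle 40% of the free part of eden.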
1148 _icms_start_limit = eden->top() + offset_words;
1149 _icms_stop_limit = eden->end() - offset_words;
1151 // The limits may be adjusted (shifted to the right) by
1152 // CMSIncrementalOffset, to allow the application more mutator time after a
1153 // young gen gc (when all mutators were stopped) and before CMS starts and
1154 // takes away one or more cpus.
1155 if (CMSIncrementalOffset != 0) {
1156 double adjustment_dbl = free_words_dbl * CMSIncrementalOffset / 100.0;
1157 size_t adjustment = (size_t)adjustment_dbl;
1158 HeapWord* tmp_stop = _icms_stop_limit + adjustment;
1159 if (tmp_stop > _icms_stop_limit && tmp_stop < eden->end()) {
1160 _icms_start_limit += adjustment;
1161 _icms_stop_limit = tmp_stop;
1162 }
1163 }
1164 }
1165 if (duty_cycle == 0 || (_icms_start_limit == _icms_stop_limit)) {
1166 _icms_start_limit = _icms_stop_limit = eden->end();
1167 }
1169 // Install the new start limit.
1170 eden->set_soft_end(_icms_start_limit);
1172 if (CMSTraceIncrementalMode) {
1173 gclog_or_tty->print(" icms alloc limits: "
1174 PTR_FORMAT "," PTR_FORMAT
1175 " (" SIZE_FORMAT "%%," SIZE_FORMAT "%%) ",
1176 _icms_start_limit, _icms_stop_limit,
1177 percent_of_space(eden, _icms_start_limit),
1178 percent_of_space(eden, _icms_stop_limit));
1179 if (Verbose) {
1180 gclog_or_tty->print("eden: ");
1181 eden->print_on(gclog_or_tty);
1182 }
1183 }
1184 }
1186 // Any changes here should try to maintain the invariant
1187 // that if this method is called with _icms_start_limit
1188 // and _icms_stop_limit both NULL, then it should return NULL
1189 // and not notify the icms thread.
1190 HeapWord*
1191 CMSCollector::allocation_limit_reached(Space* space, HeapWord* top,
1192 size_t word_size)
1193 {
1194 // A start_limit equal to end() means the duty cycle is 0, so treat that as a
1195 // nop.
1196 if (CMSIncrementalMode && _icms_start_limit != space->end()) {
1197 if (top <= _icms_start_limit) {
1198 if (CMSTraceIncrementalMode) {
1199 space->print_on(gclog_or_tty);
1200 gclog_or_tty->stamp();
1201 gclog_or_tty->print_cr(" start limit top=" PTR_FORMAT
1202 ", new limit=" PTR_FORMAT
1203 " (" SIZE_FORMAT "%%)",
1204 top, _icms_stop_limit,
1205 percent_of_space(space, _icms_stop_limit));
1206 }
1207 ConcurrentMarkSweepThread::start_icms();
1208 assert(top < _icms_stop_limit, "Tautology");
1209 if (word_size < pointer_delta(_icms_stop_limit, top)) {
1210 return _icms_stop_limit;
1211 }
1213 // The allocation will cross both the _start and _stop limits, so do the
1214 // stop notification also and return end().
1215 if (CMSTraceIncrementalMode) {
1216 space->print_on(gclog_or_tty);
1217 gclog_or_tty->stamp();
1218 gclog_or_tty->print_cr(" +stop limit top=" PTR_FORMAT
1219 ", new limit=" PTR_FORMAT
1220 " (" SIZE_FORMAT "%%)",
1221 top, space->end(),
1222 percent_of_space(space, space->end()));
1223 }
1224 ConcurrentMarkSweepThread::stop_icms();
1225 return space->end();
1226 }
1228 if (top <= _icms_stop_limit) {
1229 if (CMSTraceIncrementalMode) {
1230 space->print_on(gclog_or_tty);
1231 gclog_or_tty->stamp();
1232 gclog_or_tty->print_cr(" stop limit top=" PTR_FORMAT
1233 ", new limit=" PTR_FORMAT
1234 " (" SIZE_FORMAT "%%)",
1235 top, space->end(),
1236 percent_of_space(space, space->end()));
1237 }
1238 ConcurrentMarkSweepThread::stop_icms();
1239 return space->end();
1240 }
1242 if (CMSTraceIncrementalMode) {
1243 space->print_on(gclog_or_tty);
1244 gclog_or_tty->stamp();
1245 gclog_or_tty->print_cr(" end limit top=" PTR_FORMAT
1246 ", new limit=" PTR_FORMAT,
1247 top, NULL);
1248 }
1249 }
1251 return NULL;
1252 }
1254 oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
1255 assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1256 // allocate, copy and if necessary update promoinfo --
1257 // delegate to underlying space.
1258 assert_lock_strong(freelistLock());
1260 #ifndef PRODUCT
1261 if (Universe::heap()->promotion_should_fail()) {
1262 return NULL;
1263 }
1264 #endif // #ifndef PRODUCT
1266 oop res = _cmsSpace->promote(obj, obj_size);
1267 if (res == NULL) {
1268 // expand and retry
1269 size_t s = _cmsSpace->expansionSpaceRequired(obj_size); // HeapWords
1270 expand(s*HeapWordSize, MinHeapDeltaBytes,
1271 CMSExpansionCause::_satisfy_promotion);
1272 // Since there's currently no next generation, we don't try to promote
1273 // into a more senior generation.
1274 assert(next_gen() == NULL, "assumption, based upon which no attempt "
1275 "is made to pass on a possibly failing "
1276 "promotion to next generation");
1277 res = _cmsSpace->promote(obj, obj_size);
1278 }
1279 if (res != NULL) {
1280 // See comment in allocate() about when objects should
1281 // be allocated live.
1282 assert(obj->is_oop(), "Will dereference klass pointer below");
1283 collector()->promoted(false, // Not parallel
1284 (HeapWord*)res, obj->is_objArray(), obj_size);
1285 // promotion counters
1286 NOT_PRODUCT(
1287 _numObjectsPromoted++;
1288 _numWordsPromoted +=
1289 (int)(CompactibleFreeListSpace::adjustObjectSize(obj->size()));
1290 )
1291 }
1292 return res;
1293 }
1296 HeapWord*
1297 ConcurrentMarkSweepGeneration::allocation_limit_reached(Space* space,
1298 HeapWord* top,
1299 size_t word_sz)
1300 {
1301 return collector()->allocation_limit_reached(space, top, word_sz);
1302 }
1304 // Things to support parallel young-gen collection.
1305 oop
1306 ConcurrentMarkSweepGeneration::par_promote(int thread_num,
1307 oop old, markOop m,
1308 size_t word_sz) {
1309 #ifndef PRODUCT
1310 if (Universe::heap()->promotion_should_fail()) {
1311 return NULL;
1312 }
1313 #endif // #ifndef PRODUCT
1315 CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1316 PromotionInfo* promoInfo = &ps->promo;
1317 // if we are tracking promotions, then first ensure space for
1318 // promotion (including spooling space for saving header if necessary).
1319 // then allocate and copy, then track promoted info if needed.
1320 // When tracking (see PromotionInfo::track()), the mark word may
1321 // be displaced and in this case restoration of the mark word
1322 // occurs in the (oop_since_save_marks_)iterate phase.
1323 if (promoInfo->tracking() && !promoInfo->ensure_spooling_space()) {
1324 // Out of space for allocating spooling buffers;
1325 // try expanding and allocating spooling buffers.
1326 if (!expand_and_ensure_spooling_space(promoInfo)) {
1327 return NULL;
1328 }
1329 }
1330 assert(promoInfo->has_spooling_space(), "Control point invariant");
1331 HeapWord* obj_ptr = ps->lab.alloc(word_sz);
1332 if (obj_ptr == NULL) {
1333 obj_ptr = expand_and_par_lab_allocate(ps, word_sz);
1334 if (obj_ptr == NULL) {
1335 return NULL;
1336 }
1337 }
1338 oop obj = oop(obj_ptr);
1339 assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1340 // Otherwise, copy the object. Here we must be careful to insert the
1341 // klass pointer last, since this marks the block as an allocated object.
1342 // Except with compressed oops it's the mark word.
1343 HeapWord* old_ptr = (HeapWord*)old;
1344 if (word_sz > (size_t)oopDesc::header_size()) {
1345 Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(),
1346 obj_ptr + oopDesc::header_size(),
1347 word_sz - oopDesc::header_size());
1348 }
1350 if (UseCompressedOops) {
1351 // Copy gap missed by (aligned) header size calculation above
1352 obj->set_klass_gap(old->klass_gap());
1353 }
1355 // Restore the mark word copied above.
1356 obj->set_mark(m);
1358 // Now we can track the promoted object, if necessary. We take care
1359 // to delay the transition from uninitialized to full object
1360 // (i.e., insertion of klass pointer) until after, so that it
1361 // atomically becomes a promoted object.
1362 if (promoInfo->tracking()) {
1363 promoInfo->track((PromotedObject*)obj, old->klass());
1364 }
1366 // Finally, install the klass pointer (this should be volatile).
1367 obj->set_klass(old->klass());
1369 assert(old->is_oop(), "Will dereference klass ptr below");
1370 collector()->promoted(true, // parallel
1371 obj_ptr, old->is_objArray(), word_sz);
1373 NOT_PRODUCT(
1374 Atomic::inc(&_numObjectsPromoted);
1375 Atomic::add((jint)CompactibleFreeListSpace::adjustObjectSize(obj->size()),
1376 &_numWordsPromoted);
1377 )
1379 return obj;
1380 }
1382 void
1383 ConcurrentMarkSweepGeneration::
1384 par_promote_alloc_undo(int thread_num,
1385 HeapWord* obj, size_t word_sz) {
1386 // CMS does not support promotion undo.
1387 ShouldNotReachHere();
1388 }
1390 void
1391 ConcurrentMarkSweepGeneration::
1392 par_promote_alloc_done(int thread_num) {
1393 CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1394 ps->lab.retire(thread_num);
1395 }
1397 void
1398 ConcurrentMarkSweepGeneration::
1399 par_oop_since_save_marks_iterate_done(int thread_num) {
1400 CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1401 ParScanWithoutBarrierClosure* dummy_cl = NULL;
1402 ps->promo.promoted_oops_iterate_nv(dummy_cl);
1403 }
1405 // XXXPERM
1406 bool ConcurrentMarkSweepGeneration::should_collect(bool full,
1407 size_t size,
1408 bool tlab)
1409 {
1410 // We allow a STW collection only if a full
1411 // collection was requested.
1412 return full || should_allocate(size, tlab); // FIX ME !!!
1413 // This and promotion failure handling are connected at the
1414 // hip and should be fixed by untying them.
1415 }
1417 bool CMSCollector::shouldConcurrentCollect() {
1418 if (_full_gc_requested) {
1419 assert(ExplicitGCInvokesConcurrent, "Unexpected state");
1420 if (Verbose && PrintGCDetails) {
1421 gclog_or_tty->print_cr("CMSCollector: collect because of explicit "
1422 " gc request");
1423 }
1424 return true;
1425 }
1427 // For debugging purposes, change the type of collection.
1428 // If the rotation is not on the concurrent collection
1429 // type, don't start a concurrent collection.
1430 NOT_PRODUCT(
1431 if (RotateCMSCollectionTypes &&
1432 (_cmsGen->debug_collection_type() !=
1433 ConcurrentMarkSweepGeneration::Concurrent_collection_type)) {
1434 assert(_cmsGen->debug_collection_type() !=
1435 ConcurrentMarkSweepGeneration::Unknown_collection_type,
1436 "Bad cms collection type");
1437 return false;
1438 }
1439 )
1441 FreelistLocker x(this);
1442 // ------------------------------------------------------------------
1443 // Print out lots of information which affects the initiation of
1444 // a collection.
1445 if (PrintCMSInitiationStatistics && stats().valid()) {
1446 gclog_or_tty->print("CMSCollector shouldConcurrentCollect: ");
1447 gclog_or_tty->stamp();
1448 gclog_or_tty->print_cr("");
1449 stats().print_on(gclog_or_tty);
1450 gclog_or_tty->print_cr("time_until_cms_gen_full %3.7f",
1451 stats().time_until_cms_gen_full());
1452 gclog_or_tty->print_cr("free="SIZE_FORMAT, _cmsGen->free());
1453 gclog_or_tty->print_cr("contiguous_available="SIZE_FORMAT,
1454 _cmsGen->contiguous_available());
1455 gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate());
1456 gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate());
1457 gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy());
1458 gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
1459 gclog_or_tty->print_cr("initiatingPermOccupancy=%3.7f", _permGen->initiating_occupancy());
1460 }
1461 // ------------------------------------------------------------------
1463 // If the estimated time to complete a cms collection (cms_duration())
1464 // is less than the estimated time remaining until the cms generation
1465 // is full, start a collection.
1466 if (!UseCMSInitiatingOccupancyOnly) {
1467 if (stats().valid()) {
1468 if (stats().time_until_cms_start() == 0.0) {
1469 return true;
1470 }
1471 } else {
1472 // We want to conservatively collect somewhat early in order
1473 // to try and "bootstrap" our CMS/promotion statistics;
1474 // this branch will not fire after the first successful CMS
1475 // collection because the stats should then be valid.
1476 if (_cmsGen->occupancy() >= _bootstrap_occupancy) {
1477 if (Verbose && PrintGCDetails) {
1478 gclog_or_tty->print_cr(
1479 " CMSCollector: collect for bootstrapping statistics:"
1480 " occupancy = %f, boot occupancy = %f", _cmsGen->occupancy(),
1481 _bootstrap_occupancy);
1482 }
1483 return true;
1484 }
1485 }
1486 }
1488 // Otherwise, we start a collection cycle if either the perm gen or
1489 // old gen wants a collection cycle started. Each may use
1490 // an appropriate criterion for making this decision.
1491 // XXX We need to make sure that the gen expansion
1492 // criterion dovetails well with this. XXX NEED TO FIX THIS
1493 if (_cmsGen->should_concurrent_collect()) {
1494 if (Verbose && PrintGCDetails) {
1495 gclog_or_tty->print_cr("CMS old gen initiated");
1496 }
1497 return true;
1498 }
1500 // We start a collection if we believe an incremental collection may fail;
1501 // this is not likely to be productive in practice because it's probably too
1502 // late anyway.
1503 GenCollectedHeap* gch = GenCollectedHeap::heap();
1504 assert(gch->collector_policy()->is_two_generation_policy(),
1505 "You may want to check the correctness of the following");
1506 if (gch->incremental_collection_will_fail()) {
1507 if (PrintGCDetails && Verbose) {
1508 gclog_or_tty->print("CMSCollector: collect because incremental collection will fail ");
1509 }
1510 return true;
1511 }
1513 if (CMSClassUnloadingEnabled && _permGen->should_concurrent_collect()) {
1514 bool res = update_should_unload_classes();
1515 if (res) {
1516 if (Verbose && PrintGCDetails) {
1517 gclog_or_tty->print_cr("CMS perm gen initiated");
1518 }
1519 return true;
1520 }
1521 }
1522 return false;
1523 }
1525 // Clear _expansion_cause fields of constituent generations
1526 void CMSCollector::clear_expansion_cause() {
1527 _cmsGen->clear_expansion_cause();
1528 _permGen->clear_expansion_cause();
1529 }
1531 // We should be conservative in starting a collection cycle. Starting
1532 // too eagerly runs the risk of collecting too often in the
1533 // extreme; collecting too rarely falls back on full collections,
1534 // which works, even if not optimal in terms of concurrent work.
1535 // As a workaround for collecting too eagerly, use the flag
1536 // UseCMSInitiatingOccupancyOnly. This also has the advantage of
1537 // giving the user an easily understandable way of controlling the
1538 // collections.
1539 // We want to start a new collection cycle if any of the following
1540 // conditions hold:
1541 // . our current occupancy exceeds the configured initiating occupancy
1542 // for this generation, or
1543 // . we recently needed to expand this space and have not, since that
1544 // expansion, done a collection of this generation, or
1545 // . the underlying space believes that it may be a good idea to initiate
1546 // a concurrent collection (this may be based on criteria such as the
1547 // following: the space uses linear allocation and linear allocation is
1548 // going to fail, or there is believed to be excessive fragmentation in
1549 // the generation, etc... or ...
1550 // [.(currently done by CMSCollector::shouldConcurrentCollect() only for
1551 // the case of the old generation, not the perm generation; see CR 6543076):
1552 // we may be approaching a point at which allocation requests may fail because
1553 // we will be out of sufficient free space given allocation rate estimates.]
1554 bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const {
1556 assert_lock_strong(freelistLock());
1557 if (occupancy() > initiating_occupancy()) {
1558 if (PrintGCDetails && Verbose) {
1559 gclog_or_tty->print(" %s: collect because of occupancy %f / %f ",
1560 short_name(), occupancy(), initiating_occupancy());
1561 }
1562 return true;
1563 }
1564 if (UseCMSInitiatingOccupancyOnly) {
1565 return false;
1566 }
1567 if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
1568 if (PrintGCDetails && Verbose) {
1569 gclog_or_tty->print(" %s: collect because expanded for allocation ",
1570 short_name());
1571 }
1572 return true;
1573 }
1574 if (_cmsSpace->should_concurrent_collect()) {
1575 if (PrintGCDetails && Verbose) {
1576 gclog_or_tty->print(" %s: collect because cmsSpace says so ",
1577 short_name());
1578 }
1579 return true;
1580 }
1581 return false;
1582 }
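// For illustration (not exercised by the code above): the occupancy criterion
// alone can be made decisive from the command line, e.g. with
//
//   -XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=75 \
//   -XX:+UseCMSInitiatingOccupancyOnly
//
// in which case the expansion-cause and cmsSpace()->should_concurrent_collect()
// checks above are skipped entirely.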
1584 void ConcurrentMarkSweepGeneration::collect(bool full,
1585 bool clear_all_soft_refs,
1586 size_t size,
1587 bool tlab)
1588 {
1589 collector()->collect(full, clear_all_soft_refs, size, tlab);
1590 }
1592 void CMSCollector::collect(bool full,
1593 bool clear_all_soft_refs,
1594 size_t size,
1595 bool tlab)
1596 {
1597 if (!UseCMSCollectionPassing && _collectorState > Idling) {
1598 // For debugging purposes skip the collection if the state
1599 // is not currently idle
1600 if (TraceCMSState) {
1601 gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " skipped full:%d CMS state %d",
1602 Thread::current(), full, _collectorState);
1603 }
1604 return;
1605 }
1607 // The following "if" branch is present for defensive reasons.
1608 // In the current uses of this interface, it can be replaced with:
1609   //    assert(!GC_locker::is_active(), "Can't be called otherwise");
1610 // But I am not placing that assert here to allow future
1611 // generality in invoking this interface.
1612 if (GC_locker::is_active()) {
1613 // A consistency test for GC_locker
1614 assert(GC_locker::needs_gc(), "Should have been set already");
1615 // Skip this foreground collection, instead
1616 // expanding the heap if necessary.
1617 // Need the free list locks for the call to free() in compute_new_size()
1618 compute_new_size();
1619 return;
1620 }
1621 acquire_control_and_collect(full, clear_all_soft_refs);
1622 _full_gcs_since_conc_gc++;
1624 }
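// A brief note on how the explicit-GC path reaches the collector (illustrative):
// request_full_gc() below is called on behalf of an explicit collection request
// (typically a System.gc()) when ExplicitGCInvokesConcurrent is enabled; the CMS
// thread then observes _full_gc_requested in shouldConcurrentCollect() above and
// starts a concurrent cycle rather than a stop-world full collection.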
1626 void CMSCollector::request_full_gc(unsigned int full_gc_count) {
1627 GenCollectedHeap* gch = GenCollectedHeap::heap();
1628 unsigned int gc_count = gch->total_full_collections();
1629 if (gc_count == full_gc_count) {
1630 MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
1631 _full_gc_requested = true;
1632 CGC_lock->notify(); // nudge CMS thread
1633 }
1634 }
1637 // The foreground and background collectors need to coordinate in order
1638 // to make sure that they do not mutually interfere with CMS collections.
1639 // When a background collection is active,
1640 // the foreground collector may need to take over (preempt) and
1641 // synchronously complete an ongoing collection. Depending on the
1642 // frequency of the background collections and the heap usage
1643 // of the application, this preemption can be rare or frequent.
1644 // There are only certain
1645 // points in the background collection at which the "collection-baton"
1646 // can be passed to the foreground collector.
1647 //
1648 // The foreground collector will wait for the baton before
1649 // starting any part of the collection. The foreground collector
1650 // will only wait at one location.
1651 //
1652 // The background collector will yield the baton before starting a new
1653 // phase of the collection (e.g., before initial marking, marking from roots,
1654 // precleaning, final re-mark, sweep etc.) This is normally done at the head
1655 // of the loop which switches the phases. The background collector does some
1656 // of the phases (initial mark, final re-mark) with the world stopped.
1657 // Because of locking involved in stopping the world,
1658 // the foreground collector should not block waiting for the background
1659 // collector when it is doing a stop-the-world phase. The background
1660 // collector will yield the baton at an additional point just before
1661 // it enters a stop-the-world phase. Once the world is stopped, the
1662 // background collector checks the phase of the collection. If the
1663 // phase has not changed, it proceeds with the collection. If the
1664 // phase has changed, it skips that phase of the collection. See
1665 // the comments on the use of the Heap_lock in collect_in_background().
1666 //
1667 // Variable used in baton passing.
1668 // _foregroundGCIsActive - Set to true by the foreground collector when
1669 // it wants the baton. The foreground clears it when it has finished
1670 // the collection.
1671 // _foregroundGCShouldWait - Set to true by the background collector
1672 // when it is running. The foreground collector waits while
1673 // _foregroundGCShouldWait is true.
1674 // CGC_lock - monitor used to protect access to the above variables
1675 // and to notify the foreground and background collectors.
1676 // _collectorState - current state of the CMS collection.
1677 //
1678 // The foreground collector
1679 // acquires the CGC_lock
1680 // sets _foregroundGCIsActive
1681 // waits on the CGC_lock for _foregroundGCShouldWait to be false
1682 // various locks acquired in preparation for the collection
1683 // are released so as not to block the background collector
1684 // that is in the midst of a collection
1685 // proceeds with the collection
1686 // clears _foregroundGCIsActive
1687 // returns
1688 //
1689 // The background collector in a loop iterating on the phases of the
1690 // collection
1691 // acquires the CGC_lock
1692 // sets _foregroundGCShouldWait
1693 // if _foregroundGCIsActive is set
1694 //     clears _foregroundGCShouldWait, notifies CGC_lock
1695 //     waits on CGC_lock for _foregroundGCIsActive to become false
1696 // and exits the loop.
1697 // otherwise
1698 // proceed with that phase of the collection
1699 // if the phase is a stop-the-world phase,
1700 // yield the baton once more just before enqueueing
1701 // the stop-world CMS operation (executed by the VM thread).
1702 // returns after all phases of the collection are done
1703 //
1705 void CMSCollector::acquire_control_and_collect(bool full,
1706 bool clear_all_soft_refs) {
1707 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
1708 assert(!Thread::current()->is_ConcurrentGC_thread(),
1709 "shouldn't try to acquire control from self!");
1711 // Start the protocol for acquiring control of the
1712 // collection from the background collector (aka CMS thread).
1713 assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1714 "VM thread should have CMS token");
1715 // Remember the possibly interrupted state of an ongoing
1716 // concurrent collection
1717 CollectorState first_state = _collectorState;
1719 // Signal to a possibly ongoing concurrent collection that
1720 // we want to do a foreground collection.
1721 _foregroundGCIsActive = true;
1723 // Disable incremental mode during a foreground collection.
1724 ICMSDisabler icms_disabler;
1726 // release locks and wait for a notify from the background collector
1727   // releasing the locks is only necessary for phases which
1728   // yield, to improve the granularity of the collection.
1729 assert_lock_strong(bitMapLock());
1730 // We need to lock the Free list lock for the space that we are
1731 // currently collecting.
1732 assert(haveFreelistLocks(), "Must be holding free list locks");
1733 bitMapLock()->unlock();
1734 releaseFreelistLocks();
1735 {
1736 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1737 if (_foregroundGCShouldWait) {
1738 // We are going to be waiting for action for the CMS thread;
1739 // it had better not be gone (for instance at shutdown)!
1740 assert(ConcurrentMarkSweepThread::cmst() != NULL,
1741 "CMS thread must be running");
1742 // Wait here until the background collector gives us the go-ahead
1743 ConcurrentMarkSweepThread::clear_CMS_flag(
1744 ConcurrentMarkSweepThread::CMS_vm_has_token); // release token
1745 // Get a possibly blocked CMS thread going:
1746 // Note that we set _foregroundGCIsActive true above,
1747 // without protection of the CGC_lock.
1748 CGC_lock->notify();
1749 assert(!ConcurrentMarkSweepThread::vm_thread_wants_cms_token(),
1750 "Possible deadlock");
1751 while (_foregroundGCShouldWait) {
1752 // wait for notification
1753 CGC_lock->wait(Mutex::_no_safepoint_check_flag);
1754       // Possibility of delay/starvation here, since the CMS token does
1755       // not know to give priority to the VM thread? Actually, I think
1756 // there wouldn't be any delay/starvation, but the proof of
1757 // that "fact" (?) appears non-trivial. XXX 20011219YSR
1758 }
1759 ConcurrentMarkSweepThread::set_CMS_flag(
1760 ConcurrentMarkSweepThread::CMS_vm_has_token);
1761 }
1762 }
1763 // The CMS_token is already held. Get back the other locks.
1764 assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1765 "VM thread should have CMS token");
1766 getFreelistLocks();
1767 bitMapLock()->lock_without_safepoint_check();
1768 if (TraceCMSState) {
1769 gclog_or_tty->print_cr("CMS foreground collector has asked for control "
1770 INTPTR_FORMAT " with first state %d", Thread::current(), first_state);
1771 gclog_or_tty->print_cr(" gets control with state %d", _collectorState);
1772 }
1774 // Check if we need to do a compaction, or if not, whether
1775 // we need to start the mark-sweep from scratch.
1776 bool should_compact = false;
1777 bool should_start_over = false;
1778 decide_foreground_collection_type(clear_all_soft_refs,
1779 &should_compact, &should_start_over);
1781 NOT_PRODUCT(
1782 if (RotateCMSCollectionTypes) {
1783 if (_cmsGen->debug_collection_type() ==
1784 ConcurrentMarkSweepGeneration::MSC_foreground_collection_type) {
1785 should_compact = true;
1786 } else if (_cmsGen->debug_collection_type() ==
1787 ConcurrentMarkSweepGeneration::MS_foreground_collection_type) {
1788 should_compact = false;
1789 }
1790 }
1791 )
1793 if (PrintGCDetails && first_state > Idling) {
1794 GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause();
1795 if (GCCause::is_user_requested_gc(cause) ||
1796 GCCause::is_serviceability_requested_gc(cause)) {
1797 gclog_or_tty->print(" (concurrent mode interrupted)");
1798 } else {
1799 gclog_or_tty->print(" (concurrent mode failure)");
1800 }
1801 }
1803 if (should_compact) {
1804 // If the collection is being acquired from the background
1805 // collector, there may be references on the discovered
1806 // references lists that have NULL referents (being those
1807 // that were concurrently cleared by a mutator) or
1808 // that are no longer active (having been enqueued concurrently
1809 // by the mutator).
1810 // Scrub the list of those references because Mark-Sweep-Compact
1811 // code assumes referents are not NULL and that all discovered
1812 // Reference objects are active.
1813 ref_processor()->clean_up_discovered_references();
1815 do_compaction_work(clear_all_soft_refs);
1817 // Has the GC time limit been exceeded?
1818 check_gc_time_limit();
1820 } else {
1821 do_mark_sweep_work(clear_all_soft_refs, first_state,
1822 should_start_over);
1823 }
1824 // Reset the expansion cause, now that we just completed
1825 // a collection cycle.
1826 clear_expansion_cause();
1827 _foregroundGCIsActive = false;
1828 return;
1829 }
1831 void CMSCollector::check_gc_time_limit() {
1833 // Ignore explicit GC's. Exiting here does not set the flag and
1834 // does not reset the count. Updating of the averages for system
1835 // GC's is still controlled by UseAdaptiveSizePolicyWithSystemGC.
1836 GCCause::Cause gc_cause = GenCollectedHeap::heap()->gc_cause();
1837 if (GCCause::is_user_requested_gc(gc_cause) ||
1838 GCCause::is_serviceability_requested_gc(gc_cause)) {
1839 return;
1840 }
1842   // Calculate the fraction of the CMS generation that was freed during
1843 // the last collection.
1844 // Only consider the STW compacting cost for now.
1845 //
1846 // Note that the gc time limit test only works for the collections
1847 // of the young gen + tenured gen and not for collections of the
1848 // permanent gen. That is because the calculation of the space
1849 // freed by the collection is the free space in the young gen +
1850 // tenured gen.
1852 double fraction_free =
1853 ((double)_cmsGen->free())/((double)_cmsGen->max_capacity());
1854 if ((100.0 * size_policy()->compacting_gc_cost()) >
1855 ((double) GCTimeLimit) &&
1856 ((fraction_free * 100) < GCHeapFreeLimit)) {
1857 size_policy()->inc_gc_time_limit_count();
1858 if (UseGCOverheadLimit &&
1859 (size_policy()->gc_time_limit_count() >
1860 AdaptiveSizePolicyGCTimeLimitThreshold)) {
1861 size_policy()->set_gc_time_limit_exceeded(true);
1862 // Avoid consecutive OOM due to the gc time limit by resetting
1863 // the counter.
1864 size_policy()->reset_gc_time_limit_count();
1865 if (PrintGCDetails) {
1866 gclog_or_tty->print_cr(" GC is exceeding overhead limit "
1867 "of %d%%", GCTimeLimit);
1868 }
1869 } else {
1870 if (PrintGCDetails) {
1871 gclog_or_tty->print_cr(" GC would exceed overhead limit "
1872 "of %d%%", GCTimeLimit);
1873 }
1874 }
1875 } else {
1876 size_policy()->reset_gc_time_limit_count();
1877 }
1878 }
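// A worked example of the test above, assuming the usual defaults
// GCTimeLimit=98 and GCHeapFreeLimit=2: the limit count is bumped only when
// the (decaying) compacting gc cost exceeds 98% of total time *and* less
// than 2% of the CMS generation is free; with UseGCOverheadLimit on, once
// the count passes AdaptiveSizePolicyGCTimeLimitThreshold the policy's
// gc_time_limit_exceeded flag is set and the count is reset so that the
// limit does not trip on consecutive collections.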
1880 // Resize the perm generation and the tenured generation
1881 // after obtaining the free list locks for the
1882 // two generations.
1883 void CMSCollector::compute_new_size() {
1884 assert_locked_or_safepoint(Heap_lock);
1885 FreelistLocker z(this);
1886 _permGen->compute_new_size();
1887 _cmsGen->compute_new_size();
1888 }
1890 // A work method used by foreground collection to determine
1891 // what type of collection (compacting or not, continuing or fresh)
1892 // it should do.
1893 // NOTE: the intent is to make UseCMSCompactAtFullCollection
1894 // and CMSCompactWhenClearAllSoftRefs the default in the future
1895 // and do away with the flags after a suitable period.
1896 void CMSCollector::decide_foreground_collection_type(
1897 bool clear_all_soft_refs, bool* should_compact,
1898 bool* should_start_over) {
1899 // Normally, we'll compact only if the UseCMSCompactAtFullCollection
1900 // flag is set, and we have either requested a System.gc() or
1901 // the number of full gc's since the last concurrent cycle
1902 // has exceeded the threshold set by CMSFullGCsBeforeCompaction,
1903 // or if an incremental collection has failed
1904 GenCollectedHeap* gch = GenCollectedHeap::heap();
1905 assert(gch->collector_policy()->is_two_generation_policy(),
1906 "You may want to check the correctness of the following");
1907 // Inform cms gen if this was due to partial collection failing.
1908 // The CMS gen may use this fact to determine its expansion policy.
1909 if (gch->incremental_collection_will_fail()) {
1910 assert(!_cmsGen->incremental_collection_failed(),
1911 "Should have been noticed, reacted to and cleared");
1912 _cmsGen->set_incremental_collection_failed();
1913 }
1914 *should_compact =
1915 UseCMSCompactAtFullCollection &&
1916 ((_full_gcs_since_conc_gc >= CMSFullGCsBeforeCompaction) ||
1917 GCCause::is_user_requested_gc(gch->gc_cause()) ||
1918 gch->incremental_collection_will_fail());
1919 *should_start_over = false;
1920 if (clear_all_soft_refs && !*should_compact) {
1921 // We are about to do a last ditch collection attempt
1922 // so it would normally make sense to do a compaction
1923 // to reclaim as much space as possible.
1924 if (CMSCompactWhenClearAllSoftRefs) {
1925 // Default: The rationale is that in this case either
1926 // we are past the final marking phase, in which case
1927 // we'd have to start over, or so little has been done
1928 // that there's little point in saving that work. Compaction
1929 // appears to be the sensible choice in either case.
1930 *should_compact = true;
1931 } else {
1932 // We have been asked to clear all soft refs, but not to
1933 // compact. Make sure that we aren't past the final checkpoint
1934 // phase, for that is where we process soft refs. If we are already
1935 // past that phase, we'll need to redo the refs discovery phase and
1936 // if necessary clear soft refs that weren't previously
1937 // cleared. We do so by remembering the phase in which
1938 // we came in, and if we are past the refs processing
1939 // phase, we'll choose to just redo the mark-sweep
1940 // collection from scratch.
1941 if (_collectorState > FinalMarking) {
1942 // We are past the refs processing phase;
1943 // start over and do a fresh synchronous CMS cycle
1944 _collectorState = Resetting; // skip to reset to start new cycle
1945 reset(false /* == !asynch */);
1946 *should_start_over = true;
1947 } // else we can continue a possibly ongoing current cycle
1948 }
1949 }
1950 }
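// A sketch of the common case, assuming the usual defaults
// (UseCMSCompactAtFullCollection, CMSFullGCsBeforeCompaction=0,
// CMSCompactWhenClearAllSoftRefs): every foreground collection taken over
// from the background collector compacts, i.e. *should_compact comes out
// true and *should_start_over stays false.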
1952 // A work method used by the foreground collector to do
1953 // a mark-sweep-compact.
1954 void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
1955 GenCollectedHeap* gch = GenCollectedHeap::heap();
1956 TraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, gclog_or_tty);
1957 if (PrintGC && Verbose && !(GCCause::is_user_requested_gc(gch->gc_cause()))) {
1958 gclog_or_tty->print_cr("Compact ConcurrentMarkSweepGeneration after %d "
1959 "collections passed to foreground collector", _full_gcs_since_conc_gc);
1960 }
1962 // Sample collection interval time and reset for collection pause.
1963 if (UseAdaptiveSizePolicy) {
1964 size_policy()->msc_collection_begin();
1965 }
1967 // Temporarily widen the span of the weak reference processing to
1968 // the entire heap.
1969 MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
1970 ReferenceProcessorSpanMutator x(ref_processor(), new_span);
1972 // Temporarily, clear the "is_alive_non_header" field of the
1973 // reference processor.
1974 ReferenceProcessorIsAliveMutator y(ref_processor(), NULL);
1976 // Temporarily make reference _processing_ single threaded (non-MT).
1977 ReferenceProcessorMTProcMutator z(ref_processor(), false);
1979 // Temporarily make refs discovery atomic
1980 ReferenceProcessorAtomicMutator w(ref_processor(), true);
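 // The four stack-allocated mutators above (x, y, z, w) save the reference
 // processor's previous settings and restore them when this method returns,
 // so the widened span, cleared is_alive_non_header, single-threaded
 // processing and atomic discovery are in effect only for this compaction.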
1982 ref_processor()->set_enqueuing_is_done(false);
1983 ref_processor()->enable_discovery();
1984 ref_processor()->setup_policy(clear_all_soft_refs);
1985 // If an asynchronous collection finishes, the _modUnionTable is
1986   // all clear. If we are taking over the collection from an asynchronous
1987 // collection, clear the _modUnionTable.
1988 assert(_collectorState != Idling || _modUnionTable.isAllClear(),
1989 "_modUnionTable should be clear if the baton was not passed");
1990 _modUnionTable.clear_all();
1992 // We must adjust the allocation statistics being maintained
1993 // in the free list space. We do so by reading and clearing
1994 // the sweep timer and updating the block flux rate estimates below.
1995 assert(!_intra_sweep_timer.is_active(), "_intra_sweep_timer should be inactive");
1996 if (_inter_sweep_timer.is_active()) {
1997 _inter_sweep_timer.stop();
1998 // Note that we do not use this sample to update the _inter_sweep_estimate.
1999 _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
2000 _inter_sweep_estimate.padded_average(),
2001 _intra_sweep_estimate.padded_average());
2002 }
2004 GenMarkSweep::invoke_at_safepoint(_cmsGen->level(),
2005 ref_processor(), clear_all_soft_refs);
2006 #ifdef ASSERT
2007 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
2008 size_t free_size = cms_space->free();
2009 assert(free_size ==
2010 pointer_delta(cms_space->end(), cms_space->compaction_top())
2011 * HeapWordSize,
2012 "All the free space should be compacted into one chunk at top");
2013 assert(cms_space->dictionary()->totalChunkSize(
2014 debug_only(cms_space->freelistLock())) == 0 ||
2015 cms_space->totalSizeInIndexedFreeLists() == 0,
2016 "All the free space should be in a single chunk");
2017 size_t num = cms_space->totalCount();
2018 assert((free_size == 0 && num == 0) ||
2019 (free_size > 0 && (num == 1 || num == 2)),
2020 "There should be at most 2 free chunks after compaction");
2021 #endif // ASSERT
2022 _collectorState = Resetting;
2023 assert(_restart_addr == NULL,
2024 "Should have been NULL'd before baton was passed");
2025 reset(false /* == !asynch */);
2026 _cmsGen->reset_after_compaction();
2027 _concurrent_cycles_since_last_unload = 0;
2029 if (verifying() && !should_unload_classes()) {
2030 perm_gen_verify_bit_map()->clear_all();
2031 }
2033 // Clear any data recorded in the PLAB chunk arrays.
2034 if (_survivor_plab_array != NULL) {
2035 reset_survivor_plab_arrays();
2036 }
2038 // Adjust the per-size allocation stats for the next epoch.
2039 _cmsGen->cmsSpace()->endSweepFLCensus(sweep_count() /* fake */);
2040 // Restart the "inter sweep timer" for the next epoch.
2041 _inter_sweep_timer.reset();
2042 _inter_sweep_timer.start();
2044 // Sample collection pause time and reset for collection interval.
2045 if (UseAdaptiveSizePolicy) {
2046 size_policy()->msc_collection_end(gch->gc_cause());
2047 }
2049 // For a mark-sweep-compact, compute_new_size() will be called
2050 // in the heap's do_collection() method.
2051 }
2053 // A work method used by the foreground collector to do
2054 // a mark-sweep, after taking over from a possibly on-going
2055 // concurrent mark-sweep collection.
2056 void CMSCollector::do_mark_sweep_work(bool clear_all_soft_refs,
2057 CollectorState first_state, bool should_start_over) {
2058 if (PrintGC && Verbose) {
2059 gclog_or_tty->print_cr("Pass concurrent collection to foreground "
2060 "collector with count %d",
2061 _full_gcs_since_conc_gc);
2062 }
2063 switch (_collectorState) {
2064 case Idling:
2065 if (first_state == Idling || should_start_over) {
2066 // The background GC was not active, or should
2067       // be restarted from scratch; start the cycle.
2068 _collectorState = InitialMarking;
2069 }
2070 // If first_state was not Idling, then a background GC
2071 // was in progress and has now finished. No need to do it
2072 // again. Leave the state as Idling.
2073 break;
2074 case Precleaning:
2075 // In the foreground case don't do the precleaning since
2076 // it is not done concurrently and there is extra work
2077 // required.
2078 _collectorState = FinalMarking;
2079 }
2080 if (PrintGCDetails &&
2081 (_collectorState > Idling ||
2082 !GCCause::is_user_requested_gc(GenCollectedHeap::heap()->gc_cause()))) {
2083 gclog_or_tty->print(" (concurrent mode failure)");
2084 }
2085 collect_in_foreground(clear_all_soft_refs);
2087 // For a mark-sweep, compute_new_size() will be called
2088 // in the heap's do_collection() method.
2089 }
2092 void CMSCollector::getFreelistLocks() const {
2093 // Get locks for all free lists in all generations that this
2094 // collector is responsible for
2095 _cmsGen->freelistLock()->lock_without_safepoint_check();
2096 _permGen->freelistLock()->lock_without_safepoint_check();
2097 }
2099 void CMSCollector::releaseFreelistLocks() const {
2100 // Release locks for all free lists in all generations that this
2101 // collector is responsible for
2102 _cmsGen->freelistLock()->unlock();
2103 _permGen->freelistLock()->unlock();
2104 }
2106 bool CMSCollector::haveFreelistLocks() const {
2107 // Check locks for all free lists in all generations that this
2108 // collector is responsible for
2109 assert_lock_strong(_cmsGen->freelistLock());
2110 assert_lock_strong(_permGen->freelistLock());
2111 PRODUCT_ONLY(ShouldNotReachHere());
2112 return true;
2113 }
2115 // A utility class that is used by the CMS collector to
2116 // temporarily "release" the foreground collector from its
2117 // usual obligation to wait for the background collector to
2118 // complete an ongoing phase before proceeding.
2119 class ReleaseForegroundGC: public StackObj {
2120 private:
2121 CMSCollector* _c;
2122 public:
2123 ReleaseForegroundGC(CMSCollector* c) : _c(c) {
2124 assert(_c->_foregroundGCShouldWait, "Else should not need to call");
2125 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2126 // allow a potentially blocked foreground collector to proceed
2127 _c->_foregroundGCShouldWait = false;
2128 if (_c->_foregroundGCIsActive) {
2129 CGC_lock->notify();
2130 }
2131 assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2132 "Possible deadlock");
2133 }
2135 ~ReleaseForegroundGC() {
2136 assert(!_c->_foregroundGCShouldWait, "Usage protocol violation?");
2137 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2138 _c->_foregroundGCShouldWait = true;
2139 }
2140 };
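// Typical scoped use, as in collect_in_background() below (illustrative):
//
//   {
//     ReleaseForegroundGC x(this);          // let a waiting foreground GC proceed
//     VM_CMS_Initial_Mark initial_mark_op(this);
//     VMThread::execute(&initial_mark_op);
//   }                                       // destructor re-asserts _foregroundGCShouldWait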
2142 // There are separate collect_in_background and collect_in_foreground because of
2143 // the different locking requirements of the background collector and the
2144 // foreground collector. There was originally an attempt to share
2145 // one "collect" method between the background collector and the foreground
2146 // collector but the if-then-else required made it cleaner to have
2147 // separate methods.
2148 void CMSCollector::collect_in_background(bool clear_all_soft_refs) {
2149 assert(Thread::current()->is_ConcurrentGC_thread(),
2150 "A CMS asynchronous collection is only allowed on a CMS thread.");
2152 GenCollectedHeap* gch = GenCollectedHeap::heap();
2153 {
2154 bool safepoint_check = Mutex::_no_safepoint_check_flag;
2155 MutexLockerEx hl(Heap_lock, safepoint_check);
2156 FreelistLocker fll(this);
2157 MutexLockerEx x(CGC_lock, safepoint_check);
2158 if (_foregroundGCIsActive || !UseAsyncConcMarkSweepGC) {
2159 // The foreground collector is active or we're
2160 // not using asynchronous collections. Skip this
2161 // background collection.
2162 assert(!_foregroundGCShouldWait, "Should be clear");
2163 return;
2164 } else {
2165 assert(_collectorState == Idling, "Should be idling before start.");
2166 _collectorState = InitialMarking;
2167 // Reset the expansion cause, now that we are about to begin
2168 // a new cycle.
2169 clear_expansion_cause();
2170 }
2171 // Decide if we want to enable class unloading as part of the
2172 // ensuing concurrent GC cycle.
2173 update_should_unload_classes();
2174 _full_gc_requested = false; // acks all outstanding full gc requests
2175 // Signal that we are about to start a collection
2176 gch->increment_total_full_collections(); // ... starting a collection cycle
2177 _collection_count_start = gch->total_full_collections();
2178 }
2180 // Used for PrintGC
2181 size_t prev_used;
2182 if (PrintGC && Verbose) {
2183 prev_used = _cmsGen->used(); // XXXPERM
2184 }
2186 // The change of the collection state is normally done at this level;
2187 // the exceptions are phases that are executed while the world is
2188 // stopped. For those phases the change of state is done while the
2189 // world is stopped. For baton passing purposes this allows the
2190 // background collector to finish the phase and change state atomically.
2191 // The foreground collector cannot wait on a phase that is done
2192 // while the world is stopped because the foreground collector already
2193 // has the world stopped and would deadlock.
2194 while (_collectorState != Idling) {
2195 if (TraceCMSState) {
2196 gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
2197 Thread::current(), _collectorState);
2198 }
2199 // The foreground collector
2200 // holds the Heap_lock throughout its collection.
2201 // holds the CMS token (but not the lock)
2202 // except while it is waiting for the background collector to yield.
2203 //
2204 // The foreground collector should be blocked (not for long)
2205 // if the background collector is about to start a phase
2206 // executed with world stopped. If the background
2207 // collector has already started such a phase, the
2208 // foreground collector is blocked waiting for the
2209 // Heap_lock. The stop-world phases (InitialMarking and FinalMarking)
2210 // are executed in the VM thread.
2211 //
2212 // The locking order is
2213 // PendingListLock (PLL) -- if applicable (FinalMarking)
2214 // Heap_lock (both this & PLL locked in VM_CMS_Operation::prologue())
2215 // CMS token (claimed in
2216 // stop_world_and_do() -->
2217 // safepoint_synchronize() -->
2218 // CMSThread::synchronize())
2220 {
2221 // Check if the FG collector wants us to yield.
2222 CMSTokenSync x(true); // is cms thread
2223 if (waitForForegroundGC()) {
2224 // We yielded to a foreground GC, nothing more to be
2225 // done this round.
2226 assert(_foregroundGCShouldWait == false, "We set it to false in "
2227 "waitForForegroundGC()");
2228 if (TraceCMSState) {
2229 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2230 " exiting collection CMS state %d",
2231 Thread::current(), _collectorState);
2232 }
2233 return;
2234 } else {
2235 // The background collector can run but check to see if the
2236 // foreground collector has done a collection while the
2237 // background collector was waiting to get the CGC_lock
2238 // above. If yes, break so that _foregroundGCShouldWait
2239 // is cleared before returning.
2240 if (_collectorState == Idling) {
2241 break;
2242 }
2243 }
2244 }
2246 assert(_foregroundGCShouldWait, "Foreground collector, if active, "
2247 "should be waiting");
2249 switch (_collectorState) {
2250 case InitialMarking:
2251 {
2252 ReleaseForegroundGC x(this);
2253 stats().record_cms_begin();
2255 VM_CMS_Initial_Mark initial_mark_op(this);
2256 VMThread::execute(&initial_mark_op);
2257 }
2258 // The collector state may be any legal state at this point
2259 // since the background collector may have yielded to the
2260 // foreground collector.
2261 break;
2262 case Marking:
2263 // initial marking in checkpointRootsInitialWork has been completed
2264 if (markFromRoots(true)) { // we were successful
2265 assert(_collectorState == Precleaning, "Collector state should "
2266 "have changed");
2267 } else {
2268 assert(_foregroundGCIsActive, "Internal state inconsistency");
2269 }
2270 break;
2271 case Precleaning:
2272 if (UseAdaptiveSizePolicy) {
2273 size_policy()->concurrent_precleaning_begin();
2274 }
2275 // marking from roots in markFromRoots has been completed
2276 preclean();
2277 if (UseAdaptiveSizePolicy) {
2278 size_policy()->concurrent_precleaning_end();
2279 }
2280 assert(_collectorState == AbortablePreclean ||
2281 _collectorState == FinalMarking,
2282 "Collector state should have changed");
2283 break;
2284 case AbortablePreclean:
2285 if (UseAdaptiveSizePolicy) {
2286 size_policy()->concurrent_phases_resume();
2287 }
2288 abortable_preclean();
2289 if (UseAdaptiveSizePolicy) {
2290 size_policy()->concurrent_precleaning_end();
2291 }
2292 assert(_collectorState == FinalMarking, "Collector state should "
2293 "have changed");
2294 break;
2295 case FinalMarking:
2296 {
2297 ReleaseForegroundGC x(this);
2299 VM_CMS_Final_Remark final_remark_op(this);
2300 VMThread::execute(&final_remark_op);
2301 }
2302 assert(_foregroundGCShouldWait, "block post-condition");
2303 break;
2304 case Sweeping:
2305 if (UseAdaptiveSizePolicy) {
2306 size_policy()->concurrent_sweeping_begin();
2307 }
2308 // final marking in checkpointRootsFinal has been completed
2309 sweep(true);
2310 assert(_collectorState == Resizing, "Collector state change "
2311 "to Resizing must be done under the free_list_lock");
2312 _full_gcs_since_conc_gc = 0;
2314 // Stop the timers for adaptive size policy for the concurrent phases
2315 if (UseAdaptiveSizePolicy) {
2316 size_policy()->concurrent_sweeping_end();
2317 size_policy()->concurrent_phases_end(gch->gc_cause(),
2318 gch->prev_gen(_cmsGen)->capacity(),
2319 _cmsGen->free());
2320 }
2322 case Resizing: {
2323 // Sweeping has been completed...
2324 // At this point the background collection has completed.
2325 // Don't move the call to compute_new_size() down
2326 // into code that might be executed if the background
2327 // collection was preempted.
2328 {
2329 ReleaseForegroundGC x(this); // unblock FG collection
2330 MutexLockerEx y(Heap_lock, Mutex::_no_safepoint_check_flag);
2331 CMSTokenSync z(true); // not strictly needed.
2332 if (_collectorState == Resizing) {
2333 compute_new_size();
2334 _collectorState = Resetting;
2335 } else {
2336 assert(_collectorState == Idling, "The state should only change"
2337 " because the foreground collector has finished the collection");
2338 }
2339 }
2340 break;
2341 }
2342 case Resetting:
2343 // CMS heap resizing has been completed
2344 reset(true);
2345 assert(_collectorState == Idling, "Collector state should "
2346 "have changed");
2347 stats().record_cms_end();
2348 // Don't move the concurrent_phases_end() and compute_new_size()
2349 // calls to here because a preempted background collection
2350       // has its state set to "Resetting".
2351 break;
2352 case Idling:
2353 default:
2354 ShouldNotReachHere();
2355 break;
2356 }
2357 if (TraceCMSState) {
2358 gclog_or_tty->print_cr(" Thread " INTPTR_FORMAT " done - next CMS state %d",
2359 Thread::current(), _collectorState);
2360 }
2361 assert(_foregroundGCShouldWait, "block post-condition");
2362 }
2364 // Should this be in gc_epilogue?
2365 collector_policy()->counters()->update_counters();
2367 {
2368 // Clear _foregroundGCShouldWait and, in the event that the
2369 // foreground collector is waiting, notify it, before
2370 // returning.
2371 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2372 _foregroundGCShouldWait = false;
2373 if (_foregroundGCIsActive) {
2374 CGC_lock->notify();
2375 }
2376 assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2377 "Possible deadlock");
2378 }
2379 if (TraceCMSState) {
2380 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2381 " exiting collection CMS state %d",
2382 Thread::current(), _collectorState);
2383 }
2384 if (PrintGC && Verbose) {
2385 _cmsGen->print_heap_change(prev_used);
2386 }
2387 }
2389 void CMSCollector::collect_in_foreground(bool clear_all_soft_refs) {
2390 assert(_foregroundGCIsActive && !_foregroundGCShouldWait,
2391 "Foreground collector should be waiting, not executing");
2392 assert(Thread::current()->is_VM_thread(), "A foreground collection"
2393          " may only be done by the VM Thread with the world stopped");
2394 assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
2395 "VM thread should have CMS token");
2397 NOT_PRODUCT(TraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose,
2398 true, gclog_or_tty);)
2399 if (UseAdaptiveSizePolicy) {
2400 size_policy()->ms_collection_begin();
2401 }
2402 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
2404 HandleMark hm; // Discard invalid handles created during verification
2406 if (VerifyBeforeGC &&
2407 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2408 Universe::verify(true);
2409 }
2411 // Snapshot the soft reference policy to be used in this collection cycle.
2412 ref_processor()->setup_policy(clear_all_soft_refs);
2414 bool init_mark_was_synchronous = false; // until proven otherwise
2415 while (_collectorState != Idling) {
2416 if (TraceCMSState) {
2417 gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
2418 Thread::current(), _collectorState);
2419 }
2420 switch (_collectorState) {
2421 case InitialMarking:
2422 init_mark_was_synchronous = true; // fact to be exploited in re-mark
2423 checkpointRootsInitial(false);
2424 assert(_collectorState == Marking, "Collector state should have changed"
2425 " within checkpointRootsInitial()");
2426 break;
2427 case Marking:
2428 // initial marking in checkpointRootsInitialWork has been completed
2429 if (VerifyDuringGC &&
2430 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2431 gclog_or_tty->print("Verify before initial mark: ");
2432 Universe::verify(true);
2433 }
2434 {
2435 bool res = markFromRoots(false);
2436 assert(res && _collectorState == FinalMarking, "Collector state should "
2437 "have changed");
2438 break;
2439 }
2440 case FinalMarking:
2441 if (VerifyDuringGC &&
2442 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2443 gclog_or_tty->print("Verify before re-mark: ");
2444 Universe::verify(true);
2445 }
2446 checkpointRootsFinal(false, clear_all_soft_refs,
2447 init_mark_was_synchronous);
2448       assert(_collectorState == Sweeping, "Collector state should "
2449 "have changed within checkpointRootsFinal()");
2450 break;
2451 case Sweeping:
2452 // final marking in checkpointRootsFinal has been completed
2453 if (VerifyDuringGC &&
2454 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2455 gclog_or_tty->print("Verify before sweep: ");
2456 Universe::verify(true);
2457 }
2458 sweep(false);
2459 assert(_collectorState == Resizing, "Incorrect state");
2460 break;
2461 case Resizing: {
2462 // Sweeping has been completed; the actual resize in this case
2463 // is done separately; nothing to be done in this state.
2464 _collectorState = Resetting;
2465 break;
2466 }
2467 case Resetting:
2468 // The heap has been resized.
2469 if (VerifyDuringGC &&
2470 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2471 gclog_or_tty->print("Verify before reset: ");
2472 Universe::verify(true);
2473 }
2474 reset(false);
2475 assert(_collectorState == Idling, "Collector state should "
2476 "have changed");
2477 break;
2478 case Precleaning:
2479 case AbortablePreclean:
2480 // Elide the preclean phase
2481 _collectorState = FinalMarking;
2482 break;
2483 default:
2484 ShouldNotReachHere();
2485 }
2486 if (TraceCMSState) {
2487 gclog_or_tty->print_cr(" Thread " INTPTR_FORMAT " done - next CMS state %d",
2488 Thread::current(), _collectorState);
2489 }
2490 }
2492 if (UseAdaptiveSizePolicy) {
2493 GenCollectedHeap* gch = GenCollectedHeap::heap();
2494 size_policy()->ms_collection_end(gch->gc_cause());
2495 }
2497 if (VerifyAfterGC &&
2498 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2499 Universe::verify(true);
2500 }
2501 if (TraceCMSState) {
2502 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2503 " exiting collection CMS state %d",
2504 Thread::current(), _collectorState);
2505 }
2506 }
2508 bool CMSCollector::waitForForegroundGC() {
2509 bool res = false;
2510 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2511 "CMS thread should have CMS token");
2512 // Block the foreground collector until the
2513   // background collector decides whether to
2514 // yield.
2515 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2516 _foregroundGCShouldWait = true;
2517 if (_foregroundGCIsActive) {
2518 // The background collector yields to the
2519 // foreground collector and returns a value
2520 // indicating that it has yielded. The foreground
2521 // collector can proceed.
2522 res = true;
2523 _foregroundGCShouldWait = false;
2524 ConcurrentMarkSweepThread::clear_CMS_flag(
2525 ConcurrentMarkSweepThread::CMS_cms_has_token);
2526 ConcurrentMarkSweepThread::set_CMS_flag(
2527 ConcurrentMarkSweepThread::CMS_cms_wants_token);
2528 // Get a possibly blocked foreground thread going
2529 CGC_lock->notify();
2530 if (TraceCMSState) {
2531 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " waiting at CMS state %d",
2532 Thread::current(), _collectorState);
2533 }
2534 while (_foregroundGCIsActive) {
2535 CGC_lock->wait(Mutex::_no_safepoint_check_flag);
2536 }
2537 ConcurrentMarkSweepThread::set_CMS_flag(
2538 ConcurrentMarkSweepThread::CMS_cms_has_token);
2539 ConcurrentMarkSweepThread::clear_CMS_flag(
2540 ConcurrentMarkSweepThread::CMS_cms_wants_token);
2541 }
2542 if (TraceCMSState) {
2543 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " continuing at CMS state %d",
2544 Thread::current(), _collectorState);
2545 }
2546 return res;
2547 }
2549 // Because of the need to lock the free lists and other structures in
2550 // the collector, common to all the generations that the collector is
2551 // collecting, we need the gc_prologues of individual CMS generations
2552 // to delegate to their collector. It may have been simpler had the
2553 // current infrastructure allowed one to call a prologue on a
2554 // collector. In the absence of that we have the generation's
2555 // prologue delegate to the collector, which delegates back
2556 // some "local" work to a worker method in the individual generations
2557 // that it's responsible for collecting, while itself doing any
2558 // work common to all generations it's responsible for. A similar
2559 // comment applies to the gc_epilogue()'s.
2560 // The role of the variable _between_prologue_and_epilogue is to
2561 // enforce the invocation protocol.
2562 void CMSCollector::gc_prologue(bool full) {
2563 // Call gc_prologue_work() for each CMSGen and PermGen that
2564 // we are responsible for.
2566 // The following locking discipline assumes that we are only called
2567 // when the world is stopped.
2568 assert(SafepointSynchronize::is_at_safepoint(), "world is stopped assumption");
2570 // The CMSCollector prologue must call the gc_prologues for the
2571 // "generations" (including PermGen if any) that it's responsible
2572 // for.
2574 assert( Thread::current()->is_VM_thread()
2575 || ( CMSScavengeBeforeRemark
2576 && Thread::current()->is_ConcurrentGC_thread()),
2577 "Incorrect thread type for prologue execution");
2579 if (_between_prologue_and_epilogue) {
2580 // We have already been invoked; this is a gc_prologue delegation
2581 // from yet another CMS generation that we are responsible for, just
2582 // ignore it since all relevant work has already been done.
2583 return;
2584 }
2586 // set a bit saying prologue has been called; cleared in epilogue
2587 _between_prologue_and_epilogue = true;
2588 // Claim locks for common data structures, then call gc_prologue_work()
2589 // for each CMSGen and PermGen that we are responsible for.
2591 getFreelistLocks(); // gets free list locks on constituent spaces
2592 bitMapLock()->lock_without_safepoint_check();
2594 // Should call gc_prologue_work() for all cms gens we are responsible for
2595 bool registerClosure = _collectorState >= Marking
2596 && _collectorState < Sweeping;
2597 ModUnionClosure* muc = ParallelGCThreads > 0 ? &_modUnionClosurePar
2598 : &_modUnionClosure;
2599 _cmsGen->gc_prologue_work(full, registerClosure, muc);
2600 _permGen->gc_prologue_work(full, registerClosure, muc);
2602 if (!full) {
2603 stats().record_gc0_begin();
2604 }
2605 }
2607 void ConcurrentMarkSweepGeneration::gc_prologue(bool full) {
2608 // Delegate to CMScollector which knows how to coordinate between
2609 // this and any other CMS generations that it is responsible for
2610 // collecting.
2611 collector()->gc_prologue(full);
2612 }
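// The delegation described by the comment block above thus chains as follows
// (an illustrative sketch for the current two-generation + perm arrangement):
//
//   GenCollectedHeap::gc_prologue(full)
//     -> ConcurrentMarkSweepGeneration::gc_prologue(full)   // for each CMS gen
//       -> CMSCollector::gc_prologue(full)                  // real work done once,
//                                                           // guarded by _between_prologue_and_epilogue
//         -> gc_prologue_work(full, ...) on _cmsGen and _permGen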
2614 // This is a "private" interface for use by this generation's CMSCollector.
2615 // Not to be called directly by any other entity (for instance,
2616 // GenCollectedHeap, which calls the "public" gc_prologue method above).
2617 void ConcurrentMarkSweepGeneration::gc_prologue_work(bool full,
2618 bool registerClosure, ModUnionClosure* modUnionClosure) {
2619 assert(!incremental_collection_failed(), "Shouldn't be set yet");
2620 assert(cmsSpace()->preconsumptionDirtyCardClosure() == NULL,
2621 "Should be NULL");
2622 if (registerClosure) {
2623 cmsSpace()->setPreconsumptionDirtyCardClosure(modUnionClosure);
2624 }
2625 cmsSpace()->gc_prologue();
2626 // Clear stat counters
2627 NOT_PRODUCT(
2628 assert(_numObjectsPromoted == 0, "check");
2629 assert(_numWordsPromoted == 0, "check");
2630 if (Verbose && PrintGC) {
2631 gclog_or_tty->print("Allocated "SIZE_FORMAT" objects, "
2632 SIZE_FORMAT" bytes concurrently",
2633 _numObjectsAllocated, _numWordsAllocated*sizeof(HeapWord));
2634 }
2635 _numObjectsAllocated = 0;
2636 _numWordsAllocated = 0;
2637 )
2638 }
2640 void CMSCollector::gc_epilogue(bool full) {
2641 // The following locking discipline assumes that we are only called
2642 // when the world is stopped.
2643 assert(SafepointSynchronize::is_at_safepoint(),
2644 "world is stopped assumption");
2646 // Currently the CMS epilogue (see CompactibleFreeListSpace) merely checks
2647 // if linear allocation blocks need to be appropriately marked to allow the
2648 // the blocks to be parsable. We also check here whether we need to nudge the
2649 // CMS collector thread to start a new cycle (if it's not already active).
2650 assert( Thread::current()->is_VM_thread()
2651 || ( CMSScavengeBeforeRemark
2652 && Thread::current()->is_ConcurrentGC_thread()),
2653 "Incorrect thread type for epilogue execution");
2655 if (!_between_prologue_and_epilogue) {
2656 // We have already been invoked; this is a gc_epilogue delegation
2657 // from yet another CMS generation that we are responsible for, just
2658 // ignore it since all relevant work has already been done.
2659 return;
2660 }
2661 assert(haveFreelistLocks(), "must have freelist locks");
2662 assert_lock_strong(bitMapLock());
2664 _cmsGen->gc_epilogue_work(full);
2665 _permGen->gc_epilogue_work(full);
2667 if (_collectorState == AbortablePreclean || _collectorState == Precleaning) {
2668 // in case sampling was not already enabled, enable it
2669 _start_sampling = true;
2670 }
2671 // reset _eden_chunk_array so sampling starts afresh
2672 _eden_chunk_index = 0;
2674 size_t cms_used = _cmsGen->cmsSpace()->used();
2675 size_t perm_used = _permGen->cmsSpace()->used();
2677 // update performance counters - this uses a special version of
2678 // update_counters() that allows the utilization to be passed as a
2679 // parameter, avoiding multiple calls to used().
2680 //
2681 _cmsGen->update_counters(cms_used);
2682 _permGen->update_counters(perm_used);
2684 if (CMSIncrementalMode) {
2685 icms_update_allocation_limits();
2686 }
2688 bitMapLock()->unlock();
2689 releaseFreelistLocks();
2691 _between_prologue_and_epilogue = false; // ready for next cycle
2692 }
2694 void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) {
2695 collector()->gc_epilogue(full);
2697 // Also reset promotion tracking in par gc thread states.
2698 if (ParallelGCThreads > 0) {
2699 for (uint i = 0; i < ParallelGCThreads; i++) {
2700 _par_gc_thread_states[i]->promo.stopTrackingPromotions(i);
2701 }
2702 }
2703 }
2705 void ConcurrentMarkSweepGeneration::gc_epilogue_work(bool full) {
2706 assert(!incremental_collection_failed(), "Should have been cleared");
2707 cmsSpace()->setPreconsumptionDirtyCardClosure(NULL);
2708 cmsSpace()->gc_epilogue();
2709 // Print stat counters
2710 NOT_PRODUCT(
2711 assert(_numObjectsAllocated == 0, "check");
2712 assert(_numWordsAllocated == 0, "check");
2713 if (Verbose && PrintGC) {
2714 gclog_or_tty->print("Promoted "SIZE_FORMAT" objects, "
2715 SIZE_FORMAT" bytes",
2716 _numObjectsPromoted, _numWordsPromoted*sizeof(HeapWord));
2717 }
2718 _numObjectsPromoted = 0;
2719 _numWordsPromoted = 0;
2720 )
2722 if (PrintGC && Verbose) {
2723     // The call down the chain in contiguous_available needs the freelistLock
2724 // so print this out before releasing the freeListLock.
2725 gclog_or_tty->print(" Contiguous available "SIZE_FORMAT" bytes ",
2726 contiguous_available());
2727 }
2728 }
2730 #ifndef PRODUCT
2731 bool CMSCollector::have_cms_token() {
2732 Thread* thr = Thread::current();
2733 if (thr->is_VM_thread()) {
2734 return ConcurrentMarkSweepThread::vm_thread_has_cms_token();
2735 } else if (thr->is_ConcurrentGC_thread()) {
2736 return ConcurrentMarkSweepThread::cms_thread_has_cms_token();
2737 } else if (thr->is_GC_task_thread()) {
2738 return ConcurrentMarkSweepThread::vm_thread_has_cms_token() &&
2739 ParGCRareEvent_lock->owned_by_self();
2740 }
2741 return false;
2742 }
2743 #endif
2745 // Check reachability of the given heap address in CMS generation,
2746 // treating all other generations as roots.
2747 bool CMSCollector::is_cms_reachable(HeapWord* addr) {
2748   // We could "guarantee" below, rather than assert, but I'll
2749   // leave these as "asserts" so that an adventurous debugger
2750   // could try this in the product build provided some subset of
2751   // the conditions were met, and provided they were interested in the
2752 // results and knew that the computation below wouldn't interfere
2753 // with other concurrent computations mutating the structures
2754 // being read or written.
2755 assert(SafepointSynchronize::is_at_safepoint(),
2756 "Else mutations in object graph will make answer suspect");
2757 assert(have_cms_token(), "Should hold cms token");
2758 assert(haveFreelistLocks(), "must hold free list locks");
2759 assert_lock_strong(bitMapLock());
2761 // Clear the marking bit map array before starting, but, just
2762 // for kicks, first report if the given address is already marked
2763 gclog_or_tty->print_cr("Start: Address 0x%x is%s marked", addr,
2764 _markBitMap.isMarked(addr) ? "" : " not");
2766 if (verify_after_remark()) {
2767 MutexLockerEx x(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2768 bool result = verification_mark_bm()->isMarked(addr);
2769 gclog_or_tty->print_cr("TransitiveMark: Address 0x%x %s marked", addr,
2770 result ? "IS" : "is NOT");
2771 return result;
2772 } else {
2773 gclog_or_tty->print_cr("Could not compute result");
2774 return false;
2775 }
2776 }
2778 ////////////////////////////////////////////////////////
2779 // CMS Verification Support
2780 ////////////////////////////////////////////////////////
2781 // Following the remark phase, the following invariant
2782 // should hold -- each object in the CMS heap which is
2783 // marked in the verification_mark_bm() should also be marked in markBitMap().
2785 class VerifyMarkedClosure: public BitMapClosure {
2786 CMSBitMap* _marks;
2787 bool _failed;
2789 public:
2790 VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {}
2792 bool do_bit(size_t offset) {
2793 HeapWord* addr = _marks->offsetToHeapWord(offset);
2794 if (!_marks->isMarked(addr)) {
2795 oop(addr)->print_on(gclog_or_tty);
2796 gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr);
2797 _failed = true;
2798 }
2799 return true;
2800 }
2802 bool failed() { return _failed; }
2803 };
2805 bool CMSCollector::verify_after_remark() {
2806 gclog_or_tty->print(" [Verifying CMS Marking... ");
2807 MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2808 static bool init = false;
2810 assert(SafepointSynchronize::is_at_safepoint(),
2811 "Else mutations in object graph will make answer suspect");
2812 assert(have_cms_token(),
2813 "Else there may be mutual interference in use of "
2814 " verification data structures");
2815 assert(_collectorState > Marking && _collectorState <= Sweeping,
2816 "Else marking info checked here may be obsolete");
2817 assert(haveFreelistLocks(), "must hold free list locks");
2818 assert_lock_strong(bitMapLock());
2821 // Allocate marking bit map if not already allocated
2822 if (!init) { // first time
2823 if (!verification_mark_bm()->allocate(_span)) {
2824 return false;
2825 }
2826 init = true;
2827 }
2829 assert(verification_mark_stack()->isEmpty(), "Should be empty");
2831 // Turn off refs discovery -- so we will be tracing through refs.
2832 // This is as intended, because by this time
2833 // GC must already have cleared any refs that need to be cleared,
2834 // and traced those that need to be marked; moreover,
2835   // the marking done here is not going to interfere in any
2836 // way with the marking information used by GC.
2837 NoRefDiscovery no_discovery(ref_processor());
2839 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
2841 // Clear any marks from a previous round
2842 verification_mark_bm()->clear_all();
2843 assert(verification_mark_stack()->isEmpty(), "markStack should be empty");
2844 verify_work_stacks_empty();
2846 GenCollectedHeap* gch = GenCollectedHeap::heap();
2847 gch->ensure_parsability(false); // fill TLABs, but no need to retire them
2848 // Update the saved marks which may affect the root scans.
2849 gch->save_marks();
2851 if (CMSRemarkVerifyVariant == 1) {
2852 // In this first variant of verification, we complete
2853     // all marking, then check if the new marks-vector is
2854 // a subset of the CMS marks-vector.
2855 verify_after_remark_work_1();
2856 } else if (CMSRemarkVerifyVariant == 2) {
2857 // In this second variant of verification, we flag an error
2858 // (i.e. an object reachable in the new marks-vector not reachable
2859 // in the CMS marks-vector) immediately, also indicating the
2860     // identity of an object (A) that references the unmarked object (B) --
2861 // presumably, a mutation to A failed to be picked up by preclean/remark?
2862 verify_after_remark_work_2();
2863 } else {
2864 warning("Unrecognized value %d for CMSRemarkVerifyVariant",
2865 CMSRemarkVerifyVariant);
2866 }
2867 gclog_or_tty->print(" done] ");
2868 return true;
2869 }
2871 void CMSCollector::verify_after_remark_work_1() {
2872 ResourceMark rm;
2873 HandleMark hm;
2874 GenCollectedHeap* gch = GenCollectedHeap::heap();
2876 // Mark from roots one level into CMS
2877 MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
2878 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2880 gch->gen_process_strong_roots(_cmsGen->level(),
2881 true, // younger gens are roots
2882 true, // activate StrongRootsScope
2883 true, // collecting perm gen
2884 SharedHeap::ScanningOption(roots_scanning_options()),
2885                                 &notOlder,
2886 true, // walk code active on stacks
2887 NULL);
2889 // Now mark from the roots
2890 assert(_revisitStack.isEmpty(), "Should be empty");
2891 MarkFromRootsClosure markFromRootsClosure(this, _span,
2892 verification_mark_bm(), verification_mark_stack(), &_revisitStack,
2893 false /* don't yield */, true /* verifying */);
2894 assert(_restart_addr == NULL, "Expected pre-condition");
2895 verification_mark_bm()->iterate(&markFromRootsClosure);
2896 while (_restart_addr != NULL) {
2897 // Deal with stack overflow: by restarting at the indicated
2898 // address.
2899 HeapWord* ra = _restart_addr;
2900 markFromRootsClosure.reset(ra);
2901 _restart_addr = NULL;
2902 verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2903 }
2904 assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2905 verify_work_stacks_empty();
2906 // Should reset the revisit stack above, since no class tree
2907 // surgery is forthcoming.
2908 _revisitStack.reset(); // throwing away all contents
2910 // Marking completed -- now verify that each bit marked in
2911 // verification_mark_bm() is also marked in markBitMap(); flag all
2912 // errors by printing corresponding objects.
2913 VerifyMarkedClosure vcl(markBitMap());
2914 verification_mark_bm()->iterate(&vcl);
2915 if (vcl.failed()) {
2916 gclog_or_tty->print("Verification failed");
2917 Universe::heap()->print_on(gclog_or_tty);
2918 fatal("CMS: failed marking verification after remark");
2919 }
2920 }
2922 void CMSCollector::verify_after_remark_work_2() {
2923 ResourceMark rm;
2924 HandleMark hm;
2925 GenCollectedHeap* gch = GenCollectedHeap::heap();
2927 // Mark from roots one level into CMS
2928 MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
2929 markBitMap());
2930 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2931 gch->gen_process_strong_roots(_cmsGen->level(),
2932 true, // younger gens are roots
2933 true, // activate StrongRootsScope
2934 true, // collecting perm gen
2935 SharedHeap::ScanningOption(roots_scanning_options()),
2936 &notOlder,
2937 true, // walk code active on stacks
2938 NULL);
2940 // Now mark from the roots
2941 assert(_revisitStack.isEmpty(), "Should be empty");
2942 MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
2943 verification_mark_bm(), markBitMap(), verification_mark_stack());
2944 assert(_restart_addr == NULL, "Expected pre-condition");
2945 verification_mark_bm()->iterate(&markFromRootsClosure);
2946 while (_restart_addr != NULL) {
2947 // Deal with stack overflow: by restarting at the indicated
2948 // address.
2949 HeapWord* ra = _restart_addr;
2950 markFromRootsClosure.reset(ra);
2951 _restart_addr = NULL;
2952 verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2953 }
2954 assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2955 verify_work_stacks_empty();
2956 // Should reset the revisit stack above, since no class tree
2957 // surgery is forthcoming.
2958 _revisitStack.reset(); // throwing away all contents
2960 // Marking completed -- now verify that each bit marked in
2961 // verification_mark_bm() is also marked in markBitMap(); flag all
2962 // errors by printing corresponding objects.
2963 VerifyMarkedClosure vcl(markBitMap());
2964 verification_mark_bm()->iterate(&vcl);
2965 assert(!vcl.failed(), "Else verification above should not have succeeded");
2966 }
2968 void ConcurrentMarkSweepGeneration::save_marks() {
2969 // delegate to CMS space
2970 cmsSpace()->save_marks();
2971 for (uint i = 0; i < ParallelGCThreads; i++) {
2972 _par_gc_thread_states[i]->promo.startTrackingPromotions();
2973 }
2974 }
2976 bool ConcurrentMarkSweepGeneration::no_allocs_since_save_marks() {
2977 return cmsSpace()->no_allocs_since_save_marks();
2978 }
2980 #define CMS_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
2981 \
2982 void ConcurrentMarkSweepGeneration:: \
2983 oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
2984 cl->set_generation(this); \
2985 cmsSpace()->oop_since_save_marks_iterate##nv_suffix(cl); \
2986 cl->reset_generation(); \
2987 save_marks(); \
2988 }
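2989 // Instantiate the definition above once per (closure type, suffix) pair supplied by the macro.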
2990 ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DEFN)
2992 void
2993 ConcurrentMarkSweepGeneration::object_iterate_since_last_GC(ObjectClosure* blk)
2994 {
2995 // Not currently implemented; need to do the following. -- ysr.
2996 // dld -- I think that is used for some sort of allocation profiler. So it
2997 // really means the objects allocated by the mutator since the last
2998 // GC. We could potentially implement this cheaply by recording only
2999 // the direct allocations in a side data structure.
3000 //
3001 // I think we probably ought not to be required to support these
3002 // iterations at any arbitrary point; I think there ought to be some
3003 // call to enable/disable allocation profiling in a generation/space,
3004 // and the iterator ought to return the objects allocated in the
3005 // gen/space since the enable call, or the last iterator call (which
3006 // will probably be at a GC.) That way, for gens like CM&S that would
3007 // require some extra data structure to support this, we only pay the
3008 // cost when it's in use...
3009 cmsSpace()->object_iterate_since_last_GC(blk);
3010 }
3012 void
3013 ConcurrentMarkSweepGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
3014 cl->set_generation(this);
3015 younger_refs_in_space_iterate(_cmsSpace, cl);
3016 cl->reset_generation();
3017 }
3019 void
3020 ConcurrentMarkSweepGeneration::oop_iterate(MemRegion mr, OopClosure* cl) {
3021 if (freelistLock()->owned_by_self()) {
3022 Generation::oop_iterate(mr, cl);
3023 } else {
3024 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3025 Generation::oop_iterate(mr, cl);
3026 }
3027 }
3029 void
3030 ConcurrentMarkSweepGeneration::oop_iterate(OopClosure* cl) {
3031 if (freelistLock()->owned_by_self()) {
3032 Generation::oop_iterate(cl);
3033 } else {
3034 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3035 Generation::oop_iterate(cl);
3036 }
3037 }
3039 void
3040 ConcurrentMarkSweepGeneration::object_iterate(ObjectClosure* cl) {
3041 if (freelistLock()->owned_by_self()) {
3042 Generation::object_iterate(cl);
3043 } else {
3044 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3045 Generation::object_iterate(cl);
3046 }
3047 }
3049 void
3050 ConcurrentMarkSweepGeneration::safe_object_iterate(ObjectClosure* cl) {
3051 if (freelistLock()->owned_by_self()) {
3052 Generation::safe_object_iterate(cl);
3053 } else {
3054 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3055 Generation::safe_object_iterate(cl);
3056 }
3057 }
3059 void
3060 ConcurrentMarkSweepGeneration::pre_adjust_pointers() {
3061 }
3063 void
3064 ConcurrentMarkSweepGeneration::post_compact() {
3065 }
3067 void
3068 ConcurrentMarkSweepGeneration::prepare_for_verify() {
3069 // Fix the linear allocation blocks to look like free blocks.
3071 // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
3072 // are not called when the heap is verified during universe initialization and
3073 // at vm shutdown.
3074 if (freelistLock()->owned_by_self()) {
3075 cmsSpace()->prepare_for_verify();
3076 } else {
3077 MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
3078 cmsSpace()->prepare_for_verify();
3079 }
3080 }
3082 void
3083 ConcurrentMarkSweepGeneration::verify(bool allow_dirty /* ignored */) {
3084 // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
3085 // are not called when the heap is verified during universe initialization and
3086 // at vm shutdown.
3087 if (freelistLock()->owned_by_self()) {
3088 cmsSpace()->verify(false /* ignored */);
3089 } else {
3090 MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
3091 cmsSpace()->verify(false /* ignored */);
3092 }
3093 }
3095 void CMSCollector::verify(bool allow_dirty /* ignored */) {
3096 _cmsGen->verify(allow_dirty);
3097 _permGen->verify(allow_dirty);
3098 }
3100 #ifndef PRODUCT
3101 bool CMSCollector::overflow_list_is_empty() const {
3102 assert(_num_par_pushes >= 0, "Inconsistency");
3103 if (_overflow_list == NULL) {
3104 assert(_num_par_pushes == 0, "Inconsistency");
3105 }
3106 return _overflow_list == NULL;
3107 }
3109 // The methods verify_work_stacks_empty() and verify_overflow_empty()
3110 // merely consolidate assertion checks that appear to occur together frequently.
3111 void CMSCollector::verify_work_stacks_empty() const {
3112 assert(_markStack.isEmpty(), "Marking stack should be empty");
3113 assert(overflow_list_is_empty(), "Overflow list should be empty");
3114 }
3116 void CMSCollector::verify_overflow_empty() const {
3117 assert(overflow_list_is_empty(), "Overflow list should be empty");
3118 assert(no_preserved_marks(), "No preserved marks");
3119 }
3120 #endif // PRODUCT
3122 // Decide if we want to enable class unloading as part of the
3123 // ensuing concurrent GC cycle. We will collect the perm gen and
3124 // unload classes if it's the case that:
3125 // (1) an explicit gc request has been made and the flag
3126 // ExplicitGCInvokesConcurrentAndUnloadsClasses is set, OR
3127 // (2) (a) class unloading is enabled at the command line, and
3128 // (b) (i) perm gen threshold has been crossed, or
3129 // (ii) old gen is getting really full, or
3130 // (iii) the previous N CMS collections did not collect the
3131 // perm gen
3132 // NOTE: Provided there is no change in the state of the heap between
3133 // calls to this method, it should have idempotent results. Moreover,
3134 // its results should be monotonically increasing (i.e. going from 0 to 1,
3135 // but not 1 to 0) between successive calls between which the heap was
3136 // not collected. For the implementation below, it must thus rely on
3137 // the property that concurrent_cycles_since_last_unload()
3138 // will not decrease unless a collection cycle happened and that
3139 // _permGen->should_concurrent_collect() and _cmsGen->is_too_full() are
3140 // themselves also monotonic in that sense. See check_monotonicity()
3141 // below.
3142 bool CMSCollector::update_should_unload_classes() {
3143 _should_unload_classes = false;
3144 // Condition 1 above
3145 if (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) {
3146 _should_unload_classes = true;
3147 } else if (CMSClassUnloadingEnabled) { // Condition 2.a above
3148 // Disjuncts 2.b.(i,ii,iii) above
3149 _should_unload_classes = (concurrent_cycles_since_last_unload() >=
3150 CMSClassUnloadingMaxInterval)
3151 || _permGen->should_concurrent_collect()
3152 || _cmsGen->is_too_full();
3153 }
3154 return _should_unload_classes;
3155 }
3157 bool ConcurrentMarkSweepGeneration::is_too_full() const {
3158 bool res = should_concurrent_collect();
3159 res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0);
3160 return res;
3161 }
3163 void CMSCollector::setup_cms_unloading_and_verification_state() {
3164 const bool should_verify = VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
3165 || VerifyBeforeExit;
3166 const int rso = SharedHeap::SO_Symbols | SharedHeap::SO_Strings
3167 | SharedHeap::SO_CodeCache;
3169 if (should_unload_classes()) { // Should unload classes this cycle
3170 remove_root_scanning_option(rso); // Shrink the root set appropriately
3171 set_verifying(should_verify); // Set verification state for this cycle
3172 return; // Nothing else needs to be done at this time
3173 }
3175 // Not unloading classes this cycle
3176 assert(!should_unload_classes(), "Inconsistency!");
3177 if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
3178 // We were not verifying, or we _were_ unloading classes in the last cycle,
3179 // AND some verification options are enabled this cycle; in this case,
3180 // we must make sure that the deadness map is allocated if not already so,
3181 // and cleared (if already allocated previously --
3182 // CMSBitMap::sizeInBits() is used to determine if it's allocated).
3183 if (perm_gen_verify_bit_map()->sizeInBits() == 0) {
3184 if (!perm_gen_verify_bit_map()->allocate(_permGen->reserved())) {
3185 warning("Failed to allocate permanent generation verification CMS Bit Map;\n"
3186 "permanent generation verification disabled");
3187 return; // Note that we leave verification disabled, so we'll retry this
3188 // allocation next cycle. We _could_ remember this failure
3189 // and skip further attempts and permanently disable verification
3190 // attempts if that is considered more desirable.
3191 }
3192 assert(perm_gen_verify_bit_map()->covers(_permGen->reserved()),
3193 "_perm_gen_ver_bit_map inconsistency?");
3194 } else {
3195 perm_gen_verify_bit_map()->clear_all();
3196 }
3197 // Include symbols, strings and code cache elements to prevent their resurrection.
3198 add_root_scanning_option(rso);
3199 set_verifying(true);
3200 } else if (verifying() && !should_verify) {
3201 // We were verifying, but some verification flags got disabled.
3202 set_verifying(false);
3203 // Exclude symbols, strings and code cache elements from root scanning to
3204 // reduce IM and RM pauses.
3205 remove_root_scanning_option(rso);
3206 }
3207 }
3210 #ifndef PRODUCT
3211 HeapWord* CMSCollector::block_start(const void* p) const {
3212 const HeapWord* addr = (HeapWord*)p;
3213 if (_span.contains(p)) {
3214 if (_cmsGen->cmsSpace()->is_in_reserved(addr)) {
3215 return _cmsGen->cmsSpace()->block_start(p);
3216 } else {
3217 assert(_permGen->cmsSpace()->is_in_reserved(addr),
3218 "Inconsistent _span?");
3219 return _permGen->cmsSpace()->block_start(p);
3220 }
3221 }
3222 return NULL;
3223 }
3224 #endif
3226 HeapWord*
3227 ConcurrentMarkSweepGeneration::expand_and_allocate(size_t word_size,
3228 bool tlab,
3229 bool parallel) {
3230 assert(!tlab, "Can't deal with TLAB allocation");
3231 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3232 expand(word_size*HeapWordSize, MinHeapDeltaBytes,
3233 CMSExpansionCause::_satisfy_allocation);
3234 if (GCExpandToAllocateDelayMillis > 0) {
3235 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3236 }
3237 return have_lock_and_allocate(word_size, tlab);
3238 }
3240 // YSR: All of this generation expansion/shrinking stuff is an exact copy of
3241 // OneContigSpaceCardGeneration, which makes me wonder if we should move this
3242 // to CardGeneration and share it...
3243 bool ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes) {
3244 return CardGeneration::expand(bytes, expand_bytes);
3245 }
3247 void ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes,
3248 CMSExpansionCause::Cause cause)
3249 {
3251 bool success = expand(bytes, expand_bytes);
3253 // remember why we expanded; this information is used
3254 // by shouldConcurrentCollect() when making decisions on whether to start
3255 // a new CMS cycle.
3256 if (success) {
3257 set_expansion_cause(cause);
3258 if (PrintGCDetails && Verbose) {
3259 gclog_or_tty->print_cr("Expanded CMS gen for %s",
3260 CMSExpansionCause::to_string(cause));
3261 }
3262 }
3263 }
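3264 // Try to satisfy a PLAB allocation, expanding the generation (under ParGCRareEvent_lock) until it succeeds or no uncommitted space remains.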
3265 HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) {
3266 HeapWord* res = NULL;
3267 MutexLocker x(ParGCRareEvent_lock);
3268 while (true) {
3269 // Expansion by some other thread might make alloc OK now:
3270 res = ps->lab.alloc(word_sz);
3271 if (res != NULL) return res;
3272 // If there's not enough expansion space available, give up.
3273 if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) {
3274 return NULL;
3275 }
3276 // Otherwise, we try expansion.
3277 expand(word_sz*HeapWordSize, MinHeapDeltaBytes,
3278 CMSExpansionCause::_allocate_par_lab);
3279 // Now go around the loop and try alloc again;
3280 // A competing par_promote might beat us to the expansion space,
3281 // so we may go around the loop again if promotion fails again.
3282 if (GCExpandToAllocateDelayMillis > 0) {
3283 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3284 }
3285 }
3286 }
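3287 // As above, but expand until the promotion spooling space can be refilled,
3288 // or give up once no uncommitted space remains.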
3289 bool ConcurrentMarkSweepGeneration::expand_and_ensure_spooling_space(
3290 PromotionInfo* promo) {
3291 MutexLocker x(ParGCRareEvent_lock);
3292 size_t refill_size_bytes = promo->refillSize() * HeapWordSize;
3293 while (true) {
3294 // Expansion by some other thread might make alloc OK now:
3295 if (promo->ensure_spooling_space()) {
3296 assert(promo->has_spooling_space(),
3297 "Post-condition of successful ensure_spooling_space()");
3298 return true;
3299 }
3300 // If there's not enough expansion space available, give up.
3301 if (_virtual_space.uncommitted_size() < refill_size_bytes) {
3302 return false;
3303 }
3304 // Otherwise, we try expansion.
3305 expand(refill_size_bytes, MinHeapDeltaBytes,
3306 CMSExpansionCause::_allocate_par_spooling_space);
3307 // Now go around the loop and try alloc again;
3308 // A competing allocation might beat us to the expansion space,
3309 // so we may go around the loop again if allocation fails again.
3310 if (GCExpandToAllocateDelayMillis > 0) {
3311 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3312 }
3313 }
3314 }
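3315 // Note: shrink_by() below currently only warns (CMS shrinking is not yet
3316 // implemented), so this is effectively a no-op beyond the page alignment.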
3318 void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
3319 assert_locked_or_safepoint(Heap_lock);
3320 size_t size = ReservedSpace::page_align_size_down(bytes);
3321 if (size > 0) {
3322 shrink_by(size);
3323 }
3324 }
3326 bool ConcurrentMarkSweepGeneration::grow_by(size_t bytes) {
3327 assert_locked_or_safepoint(Heap_lock);
3328 bool result = _virtual_space.expand_by(bytes);
3329 if (result) {
3330 HeapWord* old_end = _cmsSpace->end();
3331 size_t new_word_size =
3332 heap_word_size(_virtual_space.committed_size());
3333 MemRegion mr(_cmsSpace->bottom(), new_word_size);
3334 _bts->resize(new_word_size); // resize the block offset shared array
3335 Universe::heap()->barrier_set()->resize_covered_region(mr);
3336 // Hmmmm... why doesn't CFLS::set_end verify locking?
3337 // This is quite ugly; FIX ME XXX
3338 _cmsSpace->assert_locked(freelistLock());
3339 _cmsSpace->set_end((HeapWord*)_virtual_space.high());
3341 // update the space and generation capacity counters
3342 if (UsePerfData) {
3343 _space_counters->update_capacity();
3344 _gen_counters->update_all();
3345 }
3347 if (Verbose && PrintGC) {
3348 size_t new_mem_size = _virtual_space.committed_size();
3349 size_t old_mem_size = new_mem_size - bytes;
3350 gclog_or_tty->print_cr("Expanding %s from %ldK by %ldK to %ldK",
3351 name(), old_mem_size/K, bytes/K, new_mem_size/K);
3352 }
3353 }
3354 return result;
3355 }
3357 bool ConcurrentMarkSweepGeneration::grow_to_reserved() {
3358 assert_locked_or_safepoint(Heap_lock);
3359 bool success = true;
3360 const size_t remaining_bytes = _virtual_space.uncommitted_size();
3361 if (remaining_bytes > 0) {
3362 success = grow_by(remaining_bytes);
3363 DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
3364 }
3365 return success;
3366 }
3368 void ConcurrentMarkSweepGeneration::shrink_by(size_t bytes) {
3369 assert_locked_or_safepoint(Heap_lock);
3370 assert_lock_strong(freelistLock());
3371 // XXX Fix when compaction is implemented.
3372 warning("Shrinking of CMS not yet implemented");
3373 return;
3374 }
3377 // Simple ctor/dtor wrapper for accounting & timer chores around concurrent
3378 // phases.
3379 class CMSPhaseAccounting: public StackObj {
3380 public:
3381 CMSPhaseAccounting(CMSCollector *collector,
3382 const char *phase,
3383 bool print_cr = true);
3384 ~CMSPhaseAccounting();
3386 private:
3387 CMSCollector *_collector;
3388 const char *_phase;
3389 elapsedTimer _wallclock;
3390 bool _print_cr;
3392 public:
3393 // Not MT-safe; so do not pass around these StackObj's
3394 // where they may be accessed by other threads.
3395 jlong wallclock_millis() {
3396 assert(_wallclock.is_active(), "Wall clock should not stop");
3397 _wallclock.stop(); // to record time
3398 jlong ret = _wallclock.milliseconds();
3399 _wallclock.start(); // restart
3400 return ret;
3401 }
3402 };
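3403 // Typical use: stack-allocate around a concurrent phase, e.g. CMSPhaseAccounting pa(this, "mark", !PrintGCDetails); timing is logged when it goes out of scope (see markFromRoots() below).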
3404 CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
3405 const char *phase,
3406 bool print_cr) :
3407 _collector(collector), _phase(phase), _print_cr(print_cr) {
3409 if (PrintCMSStatistics != 0) {
3410 _collector->resetYields();
3411 }
3412 if (PrintGCDetails && PrintGCTimeStamps) {
3413 gclog_or_tty->date_stamp(PrintGCDateStamps);
3414 gclog_or_tty->stamp();
3415 gclog_or_tty->print_cr(": [%s-concurrent-%s-start]",
3416 _collector->cmsGen()->short_name(), _phase);
3417 }
3418 _collector->resetTimer();
3419 _wallclock.start();
3420 _collector->startTimer();
3421 }
3423 CMSPhaseAccounting::~CMSPhaseAccounting() {
3424 assert(_wallclock.is_active(), "Wall clock should not have stopped");
3425 _collector->stopTimer();
3426 _wallclock.stop();
3427 if (PrintGCDetails) {
3428 gclog_or_tty->date_stamp(PrintGCDateStamps);
3429 if (PrintGCTimeStamps) {
3430 gclog_or_tty->stamp();
3431 gclog_or_tty->print(": ");
3432 }
3433 gclog_or_tty->print("[%s-concurrent-%s: %3.3f/%3.3f secs]",
3434 _collector->cmsGen()->short_name(),
3435 _phase, _collector->timerValue(), _wallclock.seconds());
3436 if (_print_cr) {
3437 gclog_or_tty->print_cr("");
3438 }
3439 if (PrintCMSStatistics != 0) {
3440 gclog_or_tty->print_cr(" (CMS-concurrent-%s yielded %d times)", _phase,
3441 _collector->yields());
3442 }
3443 }
3444 }
3446 // CMS work
3448 // Checkpoint the roots into this generation from outside
3449 // this generation. [Note this initial checkpoint need only
3450 // be approximate -- we'll do a catch up phase subsequently.]
3451 void CMSCollector::checkpointRootsInitial(bool asynch) {
3452 assert(_collectorState == InitialMarking, "Wrong collector state");
3453 check_correct_thread_executing();
3454 ReferenceProcessor* rp = ref_processor();
3455 SpecializationStats::clear();
3456 assert(_restart_addr == NULL, "Control point invariant");
3457 if (asynch) {
3458 // acquire locks for subsequent manipulations
3459 MutexLockerEx x(bitMapLock(),
3460 Mutex::_no_safepoint_check_flag);
3461 checkpointRootsInitialWork(asynch);
3462 rp->verify_no_references_recorded();
3463 rp->enable_discovery(); // enable ("weak") refs discovery
3464 _collectorState = Marking;
3465 } else {
3466 // (Weak) Refs discovery: this is controlled from genCollectedHeap::do_collection
3467 // which recognizes if we are a CMS generation, and doesn't try to turn on
3468 // discovery; verify that they aren't meddling.
3469 assert(!rp->discovery_is_atomic(),
3470 "incorrect setting of discovery predicate");
3471 assert(!rp->discovery_enabled(), "genCollectedHeap shouldn't control "
3472 "ref discovery for this generation kind");
3473 // already have locks
3474 checkpointRootsInitialWork(asynch);
3475 rp->enable_discovery(); // now enable ("weak") refs discovery
3476 _collectorState = Marking;
3477 }
3478 SpecializationStats::print();
3479 }
3481 void CMSCollector::checkpointRootsInitialWork(bool asynch) {
3482 assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
3483 assert(_collectorState == InitialMarking, "just checking");
3485 // If there has not been a GC[n-1] since last GC[n] cycle completed,
3486 // precede our marking with a collection of all
3487 // younger generations to keep floating garbage to a minimum.
3488 // XXX: we won't do this for now -- it's an optimization to be done later.
3490 // already have locks
3491 assert_lock_strong(bitMapLock());
3492 assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");
3494 // Setup the verification and class unloading state for this
3495 // CMS collection cycle.
3496 setup_cms_unloading_and_verification_state();
3498 NOT_PRODUCT(TraceTime t("\ncheckpointRootsInitialWork",
3499 PrintGCDetails && Verbose, true, gclog_or_tty);)
3500 if (UseAdaptiveSizePolicy) {
3501 size_policy()->checkpoint_roots_initial_begin();
3502 }
3504 // Reset all the PLAB chunk arrays if necessary.
3505 if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
3506 reset_survivor_plab_arrays();
3507 }
3509 ResourceMark rm;
3510 HandleMark hm;
3512 FalseClosure falseClosure;
3513 // In the case of a synchronous collection, we will elide the
3514 // remark step, so it's important to catch all the nmethod oops
3515 // in this step.
3516 // The final 'true' flag to gen_process_strong_roots will ensure this.
3517 // If 'async' is true, we can relax the nmethod tracing.
3518 MarkRefsIntoClosure notOlder(_span, &_markBitMap);
3519 GenCollectedHeap* gch = GenCollectedHeap::heap();
3521 verify_work_stacks_empty();
3522 verify_overflow_empty();
3524 gch->ensure_parsability(false); // fill TLABs, but no need to retire them
3525 // Update the saved marks which may affect the root scans.
3526 gch->save_marks();
3528 // weak reference processing has not started yet.
3529 ref_processor()->set_enqueuing_is_done(false);
3531 {
3532 // This is not needed. DEBUG_ONLY(RememberKlassesChecker imx(true);)
3533 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
3534 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3535 gch->gen_process_strong_roots(_cmsGen->level(),
3536 true, // younger gens are roots
3537 true, // activate StrongRootsScope
3538 true, // collecting perm gen
3539 SharedHeap::ScanningOption(roots_scanning_options()),
3540 &notOlder,
3541 true, // walk all of code cache if (so & SO_CodeCache)
3542 NULL);
3543 }
3545 // Clear mod-union table; it will be dirtied in the prologue of
3546 // CMS generation per each younger generation collection.
3548 assert(_modUnionTable.isAllClear(),
3549 "Was cleared in most recent final checkpoint phase"
3550 " or no bits are set in the gc_prologue before the start of the next "
3551 "subsequent marking phase.");
3553 // Temporarily disabled, since pre/post-consumption closures don't
3554 // care about precleaned cards
3555 #if 0
3556 {
3557 MemRegion mr = MemRegion((HeapWord*)_virtual_space.low(),
3558 (HeapWord*)_virtual_space.high());
3559 _ct->ct_bs()->preclean_dirty_cards(mr);
3560 }
3561 #endif
3563 // Save the end of the used_region of the constituent generations
3564 // to be used to limit the extent of sweep in each generation.
3565 save_sweep_limits();
3566 if (UseAdaptiveSizePolicy) {
3567 size_policy()->checkpoint_roots_initial_end(gch->gc_cause());
3568 }
3569 verify_overflow_empty();
3570 }
3572 bool CMSCollector::markFromRoots(bool asynch) {
3573 // we might be tempted to assert that:
3574 // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
3575 // "inconsistent argument?");
3576 // However that wouldn't be right, because it's possible that
3577 // a safepoint is indeed in progress as a younger generation
3578 // stop-the-world GC happens even as we mark in this generation.
3579 assert(_collectorState == Marking, "inconsistent state?");
3580 check_correct_thread_executing();
3581 verify_overflow_empty();
3583 bool res;
3584 if (asynch) {
3586 // Start the timers for adaptive size policy for the concurrent phases
3587 // Do it here so that the foreground MS can use the concurrent
3588 // timer since a foreground MS might have the sweep done concurrently
3589 // or STW.
3590 if (UseAdaptiveSizePolicy) {
3591 size_policy()->concurrent_marking_begin();
3592 }
3594 // Weak ref discovery note: We may be discovering weak
3595 // refs in this generation concurrently (but interleaved) with
3596 // weak ref discovery by a younger generation collector.
3598 CMSTokenSyncWithLocks ts(true, bitMapLock());
3599 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
3600 CMSPhaseAccounting pa(this, "mark", !PrintGCDetails);
3601 res = markFromRootsWork(asynch);
3602 if (res) {
3603 _collectorState = Precleaning;
3604 } else { // We failed and a foreground collection wants to take over
3605 assert(_foregroundGCIsActive, "internal state inconsistency");
3606 assert(_restart_addr == NULL, "foreground will restart from scratch");
3607 if (PrintGCDetails) {
3608 gclog_or_tty->print_cr("bailing out to foreground collection");
3609 }
3610 }
3611 if (UseAdaptiveSizePolicy) {
3612 size_policy()->concurrent_marking_end();
3613 }
3614 } else {
3615 assert(SafepointSynchronize::is_at_safepoint(),
3616 "inconsistent with asynch == false");
3617 if (UseAdaptiveSizePolicy) {
3618 size_policy()->ms_collection_marking_begin();
3619 }
3620 // already have locks
3621 res = markFromRootsWork(asynch);
3622 _collectorState = FinalMarking;
3623 if (UseAdaptiveSizePolicy) {
3624 GenCollectedHeap* gch = GenCollectedHeap::heap();
3625 size_policy()->ms_collection_marking_end(gch->gc_cause());
3626 }
3627 }
3628 verify_overflow_empty();
3629 return res;
3630 }
3632 bool CMSCollector::markFromRootsWork(bool asynch) {
3633 // iterate over marked bits in bit map, doing a full scan and mark
3634 // from these roots using the following algorithm:
3635 // . if oop is to the right of the current scan pointer,
3636 // mark corresponding bit (we'll process it later)
3637 // . else (oop is to left of current scan pointer)
3638 // push oop on marking stack
3639 // . drain the marking stack
3641 // Note that when we do a marking step we need to hold the
3642 // bit map lock -- recall that direct allocation (by mutators)
3643 // and promotion (by younger generation collectors) is also
3644 // marking the bit map. [the so-called allocate live policy.]
3645 // Because the implementation of bit map marking is not
3646 // robust wrt simultaneous marking of bits in the same word,
3647 // we need to make sure that there is no such interference
3648 // between concurrent such updates.
3650 // already have locks
3651 assert_lock_strong(bitMapLock());
3653 // Clear the revisit stack, just in case there are any
3654 // obsolete contents from a short-circuited previous CMS cycle.
3655 _revisitStack.reset();
3656 verify_work_stacks_empty();
3657 verify_overflow_empty();
3658 assert(_revisitStack.isEmpty(), "tabula rasa");
3659 DEBUG_ONLY(RememberKlassesChecker cmx(should_unload_classes());)
3660 bool result = false;
3661 if (CMSConcurrentMTEnabled && ConcGCThreads > 0) {
3662 result = do_marking_mt(asynch);
3663 } else {
3664 result = do_marking_st(asynch);
3665 }
3666 return result;
3667 }
3669 // Forward decl
3670 class CMSConcMarkingTask;
3672 class CMSConcMarkingTerminator: public ParallelTaskTerminator {
3673 CMSCollector* _collector;
3674 CMSConcMarkingTask* _task;
3675 bool _yield;
3676 protected:
3677 virtual void yield();
3678 public:
3679 // "n_threads" is the number of threads to be terminated.
3680 // "queue_set" is a set of work queues of other threads.
3681 // "collector" is the CMS collector associated with this task terminator.
3682 // "yield" indicates whether we need the gang as a whole to yield.
3683 CMSConcMarkingTerminator(int n_threads, TaskQueueSetSuper* queue_set,
3684 CMSCollector* collector, bool yield) :
3685 ParallelTaskTerminator(n_threads, queue_set),
3686 _collector(collector),
3687 _yield(yield) { }
3689 void set_task(CMSConcMarkingTask* task) {
3690 _task = task;
3691 }
3692 };
3694 // MT Concurrent Marking Task
3695 class CMSConcMarkingTask: public YieldingFlexibleGangTask {
3696 CMSCollector* _collector;
3697 YieldingFlexibleWorkGang* _workers; // the whole gang
3698 int _n_workers; // requested/desired # workers
3699 bool _asynch;
3700 bool _result;
3701 CompactibleFreeListSpace* _cms_space;
3702 CompactibleFreeListSpace* _perm_space;
3703 HeapWord* _global_finger;
3704 HeapWord* _restart_addr;
3706 // Exposed here for yielding support
3707 Mutex* const _bit_map_lock;
3709 // The per thread work queues, available here for stealing
3710 OopTaskQueueSet* _task_queues;
3711 CMSConcMarkingTerminator _term;
3713 public:
3714 CMSConcMarkingTask(CMSCollector* collector,
3715 CompactibleFreeListSpace* cms_space,
3716 CompactibleFreeListSpace* perm_space,
3717 bool asynch, int n_workers,
3718 YieldingFlexibleWorkGang* workers,
3719 OopTaskQueueSet* task_queues):
3720 YieldingFlexibleGangTask("Concurrent marking done multi-threaded"),
3721 _collector(collector),
3722 _cms_space(cms_space),
3723 _perm_space(perm_space),
3724 _asynch(asynch), _n_workers(n_workers), _result(true),
3725 _workers(workers), _task_queues(task_queues),
3726 _term(n_workers, task_queues, _collector, asynch),
3727 _bit_map_lock(collector->bitMapLock())
3728 {
3729 assert(n_workers <= workers->total_workers(),
3730 "Else termination won't work correctly today"); // XXX FIX ME!
3731 _requested_size = n_workers;
3732 _term.set_task(this);
3733 assert(_cms_space->bottom() < _perm_space->bottom(),
3734 "Finger incorrectly initialized below");
3735 _restart_addr = _global_finger = _cms_space->bottom();
3736 }
3739 OopTaskQueueSet* task_queues() { return _task_queues; }
3741 OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
3743 HeapWord** global_finger_addr() { return &_global_finger; }
3745 CMSConcMarkingTerminator* terminator() { return &_term; }
3747 void work(int i);
3749 virtual void coordinator_yield(); // stuff done by coordinator
3750 bool result() { return _result; }
3752 void reset(HeapWord* ra) {
3753 assert(_global_finger >= _cms_space->end(), "Postcondition of ::work(i)");
3754 assert(_global_finger >= _perm_space->end(), "Postcondition of ::work(i)");
3755 assert(ra < _perm_space->end(), "ra too large");
3756 _restart_addr = _global_finger = ra;
3757 _term.reset_for_reuse();
3758 }
3760 static bool get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3761 OopTaskQueue* work_q);
3763 private:
3764 void do_scan_and_mark(int i, CompactibleFreeListSpace* sp);
3765 void do_work_steal(int i);
3766 void bump_global_finger(HeapWord* f);
3767 };
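3768 // Yield via the owning task when a concurrent yield is requested and no foreground GC is active; otherwise fall back to the base-class yield.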
3769 void CMSConcMarkingTerminator::yield() {
3770 if (ConcurrentMarkSweepThread::should_yield() &&
3771 !_collector->foregroundGCIsActive() &&
3772 _yield) {
3773 _task->yield();
3774 } else {
3775 ParallelTaskTerminator::yield();
3776 }
3777 }
3779 ////////////////////////////////////////////////////////////////
3780 // Concurrent Marking Algorithm Sketch
3781 ////////////////////////////////////////////////////////////////
3782 // Until all tasks exhausted (both spaces):
3783 // -- claim next available chunk
3784 // -- bump global finger via CAS
3785 // -- find first object that starts in this chunk
3786 // and start scanning bitmap from that position
3787 // -- scan marked objects for oops
3788 // -- CAS-mark target, and if successful:
3789 // . if target oop is above global finger (volatile read)
3790 // nothing to do
3791 // . if target oop is in chunk and above local finger
3792 // then nothing to do
3793 // . else push on work-queue
3794 // -- Deal with possible overflow issues:
3795 // . local work-queue overflow causes stuff to be pushed on
3796 // global (common) overflow queue
3797 // . always first empty local work queue
3798 // . then get a batch of oops from global work queue if any
3799 // . then do work stealing
3800 // -- When all tasks claimed (both spaces)
3801 // and local work queue empty,
3802 // then in a loop do:
3803 // . check global overflow stack; steal a batch of oops and trace
3804 // . try to steal from other threads if GOS is empty
3805 // . if neither is available, offer termination
3806 // -- Terminate and return result
3807 //
3808 void CMSConcMarkingTask::work(int i) {
3809 elapsedTimer _timer;
3810 ResourceMark rm;
3811 HandleMark hm;
3813 DEBUG_ONLY(_collector->verify_overflow_empty();)
3815 // Before we begin work, our work queue should be empty
3816 assert(work_queue(i)->size() == 0, "Expected to be empty");
3817 // Scan the bitmap covering _cms_space, tracing through grey objects.
3818 _timer.start();
3819 do_scan_and_mark(i, _cms_space);
3820 _timer.stop();
3821 if (PrintCMSStatistics != 0) {
3822 gclog_or_tty->print_cr("Finished cms space scanning in %dth thread: %3.3f sec",
3823 i, _timer.seconds()); // XXX: need xxx/xxx type of notation, two timers
3824 }
3826 // ... do the same for the _perm_space
3827 _timer.reset();
3828 _timer.start();
3829 do_scan_and_mark(i, _perm_space);
3830 _timer.stop();
3831 if (PrintCMSStatistics != 0) {
3832 gclog_or_tty->print_cr("Finished perm space scanning in %dth thread: %3.3f sec",
3833 i, _timer.seconds()); // XXX: need xxx/xxx type of notation, two timers
3834 }
3836 // ... do work stealing
3837 _timer.reset();
3838 _timer.start();
3839 do_work_steal(i);
3840 _timer.stop();
3841 if (PrintCMSStatistics != 0) {
3842 gclog_or_tty->print_cr("Finished work stealing in %dth thread: %3.3f sec",
3843 i, _timer.seconds()); // XXX: need xxx/xxx type of notation, two timers
3844 }
3845 assert(_collector->_markStack.isEmpty(), "Should have been emptied");
3846 assert(work_queue(i)->size() == 0, "Should have been emptied");
3847 // Note that under the current task protocol, the
3848 // following assertion is true even of the spaces
3849 // expanded since the completion of the concurrent
3850 // marking. XXX This will likely change under a strict
3851 // ABORT semantics.
3852 assert(_global_finger > _cms_space->end() &&
3853 _global_finger >= _perm_space->end(),
3854 "All tasks have been completed");
3855 DEBUG_ONLY(_collector->verify_overflow_empty();)
3856 }
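3857 // Lock-free, monotonic advance of _global_finger to at least f, using a CAS retry loop.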
3858 void CMSConcMarkingTask::bump_global_finger(HeapWord* f) {
3859 HeapWord* read = _global_finger;
3860 HeapWord* cur = read;
3861 while (f > read) {
3862 cur = read;
3863 read = (HeapWord*) Atomic::cmpxchg_ptr(f, &_global_finger, cur);
3864 if (cur == read) {
3865 // our cas succeeded
3866 assert(_global_finger >= f, "protocol consistency");
3867 break;
3868 }
3869 }
3870 }
3872 // This is really inefficient, and should be redone by
3873 // using (not yet available) block-read and -write interfaces to the
3874 // stack and the work_queue. XXX FIX ME !!!
3875 bool CMSConcMarkingTask::get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3876 OopTaskQueue* work_q) {
3877 // Fast lock-free check
3878 if (ovflw_stk->length() == 0) {
3879 return false;
3880 }
3881 assert(work_q->size() == 0, "Shouldn't steal");
3882 MutexLockerEx ml(ovflw_stk->par_lock(),
3883 Mutex::_no_safepoint_check_flag);
3884 // Grab up to 1/4 the size of the work queue
3885 size_t num = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
3886 (size_t)ParGCDesiredObjsFromOverflowList);
3887 num = MIN2(num, ovflw_stk->length());
3888 for (int i = (int) num; i > 0; i--) {
3889 oop cur = ovflw_stk->pop();
3890 assert(cur != NULL, "Counted wrong?");
3891 work_q->push(cur);
3892 }
3893 return num > 0;
3894 }
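3895 // Claim successive chunk-sized tasks in "sp", bumping the global finger before scanning each claimed chunk.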
3896 void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
3897 SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
3898 int n_tasks = pst->n_tasks();
3899 // We allow that there may be no tasks to do here because
3900 // we are restarting after a stack overflow.
3901 assert(pst->valid() || n_tasks == 0, "Uninitialized use?");
3902 int nth_task = 0;
3904 HeapWord* aligned_start = sp->bottom();
3905 if (sp->used_region().contains(_restart_addr)) {
3906 // Align down to a card boundary for the start of 0th task
3907 // for this space.
3908 aligned_start =
3909 (HeapWord*)align_size_down((uintptr_t)_restart_addr,
3910 CardTableModRefBS::card_size);
3911 }
3913 size_t chunk_size = sp->marking_task_size();
3914 while (!pst->is_task_claimed(/* reference */ nth_task)) {
3915 // Having claimed the nth task in this space,
3916 // compute the chunk that it corresponds to:
3917 MemRegion span = MemRegion(aligned_start + nth_task*chunk_size,
3918 aligned_start + (nth_task+1)*chunk_size);
3919 // Try and bump the global finger via a CAS;
3920 // note that we need to do the global finger bump
3921 // _before_ taking the intersection below, because
3922 // the task corresponding to that region will be
3923 // deemed done even if the used_region() expands
3924 // because of allocation -- as it almost certainly will
3925 // during start-up while the threads yield in the
3926 // closure below.
3927 HeapWord* finger = span.end();
3928 bump_global_finger(finger); // atomically
3929 // There are null tasks here corresponding to chunks
3930 // beyond the "top" address of the space.
3931 span = span.intersection(sp->used_region());
3932 if (!span.is_empty()) { // Non-null task
3933 HeapWord* prev_obj;
3934 assert(!span.contains(_restart_addr) || nth_task == 0,
3935 "Inconsistency");
3936 if (nth_task == 0) {
3937 // For the 0th task, we'll not need to compute a block_start.
3938 if (span.contains(_restart_addr)) {
3939 // In the case of a restart because of stack overflow,
3940 // we might additionally skip a chunk prefix.
3941 prev_obj = _restart_addr;
3942 } else {
3943 prev_obj = span.start();
3944 }
3945 } else {
3946 // We want to skip the first object because
3947 // the protocol is to scan any object in its entirety
3948 // that _starts_ in this span; a fortiori, any
3949 // object starting in an earlier span is scanned
3950 // as part of an earlier claimed task.
3951 // Below we use the "careful" version of block_start
3952 // so we do not try to navigate uninitialized objects.
3953 prev_obj = sp->block_start_careful(span.start());
3954 // Below we use a variant of block_size that uses the
3955 // Printezis bits to avoid waiting for allocated
3956 // objects to become initialized/parsable.
3957 while (prev_obj < span.start()) {
3958 size_t sz = sp->block_size_no_stall(prev_obj, _collector);
3959 if (sz > 0) {
3960 prev_obj += sz;
3961 } else {
3962 // In this case we may end up doing a bit of redundant
3963 // scanning, but that appears unavoidable, short of
3964 // locking the free list locks; see bug 6324141.
3965 break;
3966 }
3967 }
3968 }
3969 if (prev_obj < span.end()) {
3970 MemRegion my_span = MemRegion(prev_obj, span.end());
3971 // Do the marking work within a non-empty span --
3972 // the last argument to the constructor indicates whether the
3973 // iteration should be incremental with periodic yields.
3974 Par_MarkFromRootsClosure cl(this, _collector, my_span,
3975 &_collector->_markBitMap,
3976 work_queue(i),
3977 &_collector->_markStack,
3978 &_collector->_revisitStack,
3979 _asynch);
3980 _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end());
3981 } // else nothing to do for this task
3982 } // else nothing to do for this task
3983 }
3984 // We'd be tempted to assert here that since there are no
3985 // more tasks left to claim in this space, the global_finger
3986 // must exceed space->top() and a fortiori space->end(). However,
3987 // that would not quite be correct because the bumping of
3988 // global_finger occurs strictly after the claiming of a task,
3989 // so by the time we reach here the global finger may not yet
3990 // have been bumped up by the thread that claimed the last
3991 // task.
3992 pst->all_tasks_completed();
3993 }
3995 class Par_ConcMarkingClosure: public Par_KlassRememberingOopClosure {
3996 private:
3997 MemRegion _span;
3998 CMSBitMap* _bit_map;
3999 CMSMarkStack* _overflow_stack;
4000 OopTaskQueue* _work_queue;
4001 protected:
4002 DO_OOP_WORK_DEFN
4003 public:
4004 Par_ConcMarkingClosure(CMSCollector* collector, OopTaskQueue* work_queue,
4005 CMSBitMap* bit_map, CMSMarkStack* overflow_stack,
4006 CMSMarkStack* revisit_stack):
4007 Par_KlassRememberingOopClosure(collector, NULL, revisit_stack),
4008 _span(_collector->_span),
4009 _work_queue(work_queue),
4010 _bit_map(bit_map),
4011 _overflow_stack(overflow_stack)
4012 { }
4013 virtual void do_oop(oop* p);
4014 virtual void do_oop(narrowOop* p);
4015 void trim_queue(size_t max);
4016 void handle_stack_overflow(HeapWord* lost);
4017 };
4019 // Grey object scanning during work stealing phase --
4020 // the salient assumption here is that any references
4021 // that are in these stolen objects being scanned must
4022 // already have been initialized (else they would not have
4023 // been published), so we do not need to check for
4024 // uninitialized objects before pushing here.
4025 void Par_ConcMarkingClosure::do_oop(oop obj) {
4026 assert(obj->is_oop_or_null(true), "expected an oop or NULL");
4027 HeapWord* addr = (HeapWord*)obj;
4028 // Check if oop points into the CMS generation
4029 // and is not marked
4030 if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
4031 // a white object ...
4032 // If we manage to "claim" the object, by being the
4033 // first thread to mark it, then we push it on our
4034 // marking stack
4035 if (_bit_map->par_mark(addr)) { // ... now grey
4036 // push on work queue (grey set)
4037 bool simulate_overflow = false;
4038 NOT_PRODUCT(
4039 if (CMSMarkStackOverflowALot &&
4040 _collector->simulate_overflow()) {
4041 // simulate a stack overflow
4042 simulate_overflow = true;
4043 }
4044 )
4045 if (simulate_overflow ||
4046 !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
4047 // stack overflow
4048 if (PrintCMSStatistics != 0) {
4049 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
4050 SIZE_FORMAT, _overflow_stack->capacity());
4051 }
4052 // We cannot assert that the overflow stack is full because
4053 // it may have been emptied since.
4054 assert(simulate_overflow ||
4055 _work_queue->size() == _work_queue->max_elems(),
4056 "Else push should have succeeded");
4057 handle_stack_overflow(addr);
4058 }
4059 } // Else, some other thread got there first
4060 }
4061 }
4063 void Par_ConcMarkingClosure::do_oop(oop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
4064 void Par_ConcMarkingClosure::do_oop(narrowOop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
4066 void Par_ConcMarkingClosure::trim_queue(size_t max) {
4067 while (_work_queue->size() > max) {
4068 oop new_oop;
4069 if (_work_queue->pop_local(new_oop)) {
4070 assert(new_oop->is_oop(), "Should be an oop");
4071 assert(_bit_map->isMarked((HeapWord*)new_oop), "Grey object");
4072 assert(_span.contains((HeapWord*)new_oop), "Not in span");
4073 assert(new_oop->is_parsable(), "Should be parsable");
4074 new_oop->oop_iterate(this); // do_oop() above
4075 }
4076 }
4077 }
4079 // Upon stack overflow, we discard (part of) the stack,
4080 // remembering the least address amongst those discarded
4081 // in CMSCollector's _restart_address.
4082 void Par_ConcMarkingClosure::handle_stack_overflow(HeapWord* lost) {
4083 // We need to do this under a mutex to prevent other
4084 // workers from interfering with the work done below.
4085 MutexLockerEx ml(_overflow_stack->par_lock(),
4086 Mutex::_no_safepoint_check_flag);
4087 // Remember the least grey address discarded
4088 HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
4089 _collector->lower_restart_addr(ra);
4090 _overflow_stack->reset(); // discard stack contents
4091 _overflow_stack->expand(); // expand the stack if possible
4092 }
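4093 // Work stealing loop: drain the local queue, refill it from the shared
4094 // overflow stack, steal from peers, and offer termination when all are empty.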
4095 void CMSConcMarkingTask::do_work_steal(int i) {
4096 OopTaskQueue* work_q = work_queue(i);
4097 oop obj_to_scan;
4098 CMSBitMap* bm = &(_collector->_markBitMap);
4099 CMSMarkStack* ovflw = &(_collector->_markStack);
4100 CMSMarkStack* revisit = &(_collector->_revisitStack);
4101 int* seed = _collector->hash_seed(i);
4102 Par_ConcMarkingClosure cl(_collector, work_q, bm, ovflw, revisit);
4103 while (true) {
4104 cl.trim_queue(0);
4105 assert(work_q->size() == 0, "Should have been emptied above");
4106 if (get_work_from_overflow_stack(ovflw, work_q)) {
4107 // Can't assert below because the work obtained from the
4108 // overflow stack may already have been stolen from us.
4109 // assert(work_q->size() > 0, "Work from overflow stack");
4110 continue;
4111 } else if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
4112 assert(obj_to_scan->is_oop(), "Should be an oop");
4113 assert(bm->isMarked((HeapWord*)obj_to_scan), "Grey object");
4114 obj_to_scan->oop_iterate(&cl);
4115 } else if (terminator()->offer_termination()) {
4116 assert(work_q->size() == 0, "Impossible!");
4117 break;
4118 }
4119 }
4120 }
4122 // This is run by the CMS (coordinator) thread.
4123 void CMSConcMarkingTask::coordinator_yield() {
4124 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
4125 "CMS thread should hold CMS token");
4126 DEBUG_ONLY(RememberKlassesChecker mux(false);)
4127 // First give up the locks, then yield, then re-lock
4128 // We should probably use a constructor/destructor idiom to
4129 // do this unlock/lock or modify the MutexUnlocker class to
4130 // serve our purpose. XXX
4131 assert_lock_strong(_bit_map_lock);
4132 _bit_map_lock->unlock();
4133 ConcurrentMarkSweepThread::desynchronize(true);
4134 ConcurrentMarkSweepThread::acknowledge_yield_request();
4135 _collector->stopTimer();
4136 if (PrintCMSStatistics != 0) {
4137 _collector->incrementYields();
4138 }
4139 _collector->icms_wait();
4141 // It is possible for whichever thread initiated the yield request
4142 // not to get a chance to wake up and take the bitmap lock between
4143 // this thread releasing it and reacquiring it. So, while the
4144 // should_yield() flag is on, let's sleep for a bit to give the
4145 // other thread a chance to wake up. The limit imposed on the number
4146 // of iterations is defensive, to avoid any unforeseen circumstances
4147 // putting us into an infinite loop. Since it's always been this
4148 // (coordinator_yield()) method that was observed to cause the
4149 // problem, we are using a parameter (CMSCoordinatorYieldSleepCount)
4150 // which is by default non-zero. For the other seven methods that
4151 // also perform the yield operation, we are using a different
4152 // parameter (CMSYieldSleepCount) which is by default zero. This way we
4153 // can enable the sleeping for those methods too, if necessary.
4154 // See 6442774.
4155 //
4156 // We really need to reconsider the synchronization between the GC
4157 // thread and the yield-requesting threads in the future and we
4158 // should really use wait/notify, which is the recommended
4159 // way of doing this type of interaction. Additionally, we should
4160 // consolidate the eight methods that do the yield operation, which
4161 // are almost identical, into one for better maintainability and
4162 // readability. See 6445193.
4163 //
4164 // Tony 2006.06.29
4165 for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
4166 ConcurrentMarkSweepThread::should_yield() &&
4167 !CMSCollector::foregroundGCIsActive(); ++i) {
4168 os::sleep(Thread::current(), 1, false);
4169 ConcurrentMarkSweepThread::acknowledge_yield_request();
4170 }
4172 ConcurrentMarkSweepThread::synchronize(true);
4173 _bit_map_lock->lock_without_safepoint_check();
4174 _collector->startTimer();
4175 }
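4176 // Multi-threaded concurrent marking; restarts the task from _restart_addr on marking stack overflow, or bails out to the foreground collector.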
4177 bool CMSCollector::do_marking_mt(bool asynch) {
4178 assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition");
4179 // In the future this would be determined ergonomically, based
4180 // on #cpu's, # active mutator threads (and load), and mutation rate.
4181 int num_workers = ConcGCThreads;
4183 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
4184 CompactibleFreeListSpace* perm_space = _permGen->cmsSpace();
4186 CMSConcMarkingTask tsk(this, cms_space, perm_space,
4187 asynch, num_workers /* number requested XXX */,
4188 conc_workers(), task_queues());
4190 // Since the actual number of workers we get may be different
4191 // from the number we requested above, do we need to do anything different
4192 // below? In particular, maybe we need to subclass the SequentialSubTasksDone
4193 // class?? XXX
4194 cms_space ->initialize_sequential_subtasks_for_marking(num_workers);
4195 perm_space->initialize_sequential_subtasks_for_marking(num_workers);
4197 // Refs discovery is already non-atomic.
4198 assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic");
4199 // Mutate the Refs discovery so it is MT during the
4200 // multi-threaded marking phase.
4201 ReferenceProcessorMTMutator mt(ref_processor(), num_workers > 1);
4202 DEBUG_ONLY(RememberKlassesChecker cmx(should_unload_classes());)
4203 conc_workers()->start_task(&tsk);
4204 while (tsk.yielded()) {
4205 tsk.coordinator_yield();
4206 conc_workers()->continue_task(&tsk);
4207 }
4208 // If the task was aborted, _restart_addr will be non-NULL
4209 assert(tsk.completed() || _restart_addr != NULL, "Inconsistency");
4210 while (_restart_addr != NULL) {
4211 // XXX For now we do not make use of ABORTED state and have not
4212 // yet implemented the right abort semantics (even in the original
4213 // single-threaded CMS case). That needs some more investigation
4214 // and is deferred for now; see CR# TBF. 07252005YSR. XXX
4215 assert(!CMSAbortSemantics || tsk.aborted(), "Inconsistency");
4216 // If _restart_addr is non-NULL, a marking stack overflow
4217 // occurred; we need to do a fresh marking iteration from the
4218 // indicated restart address.
4219 if (_foregroundGCIsActive && asynch) {
4220 // We may be running into repeated stack overflows, having
4221 // reached the limit of the stack size, while making very
4222 // slow forward progress. It may be best to bail out and
4223 // let the foreground collector do its job.
4224 // Clear _restart_addr, so that foreground GC
4225 // works from scratch. This avoids the headache of
4226 // a "rescan" which would otherwise be needed because
4227 // of the dirty mod union table & card table.
4228 _restart_addr = NULL;
4229 return false;
4230 }
4231 // Adjust the task to restart from _restart_addr
4232 tsk.reset(_restart_addr);
4233 cms_space ->initialize_sequential_subtasks_for_marking(num_workers,
4234 _restart_addr);
4235 perm_space->initialize_sequential_subtasks_for_marking(num_workers,
4236 _restart_addr);
4237 _restart_addr = NULL;
4238 // Get the workers going again
4239 conc_workers()->start_task(&tsk);
4240 while (tsk.yielded()) {
4241 tsk.coordinator_yield();
4242 conc_workers()->continue_task(&tsk);
4243 }
4244 }
4245 assert(tsk.completed(), "Inconsistency");
4246 assert(tsk.result() == true, "Inconsistency");
4247 return true;
4248 }
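4249 // Single-threaded concurrent marking, with the same restart-on-overflow handling as above.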
4250 bool CMSCollector::do_marking_st(bool asynch) {
4251 ResourceMark rm;
4252 HandleMark hm;
4254 MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap,
4255 &_markStack, &_revisitStack, CMSYield && asynch);
4256 // the last argument to iterate indicates whether the iteration
4257 // should be incremental with periodic yields.
4258 _markBitMap.iterate(&markFromRootsClosure);
4259 // If _restart_addr is non-NULL, a marking stack overflow
4260 // occurred; we need to do a fresh iteration from the
4261 // indicated restart address.
4262 while (_restart_addr != NULL) {
4263 if (_foregroundGCIsActive && asynch) {
4264 // We may be running into repeated stack overflows, having
4265 // reached the limit of the stack size, while making very
4266 // slow forward progress. It may be best to bail out and
4267 // let the foreground collector do its job.
4268 // Clear _restart_addr, so that foreground GC
4269 // works from scratch. This avoids the headache of
4270 // a "rescan" which would otherwise be needed because
4271 // of the dirty mod union table & card table.
4272 _restart_addr = NULL;
4273 return false; // indicating failure to complete marking
4274 }
4275 // Deal with stack overflow:
4276 // we restart marking from _restart_addr
4277 HeapWord* ra = _restart_addr;
4278 markFromRootsClosure.reset(ra);
4279 _restart_addr = NULL;
4280 _markBitMap.iterate(&markFromRootsClosure, ra, _span.end());
4281 }
4282 return true;
4283 }
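4284 // Concurrent precleaning ahead of the remark pause; may transition to AbortablePreclean when precleaning is enabled.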
4285 void CMSCollector::preclean() {
4286 check_correct_thread_executing();
4287 assert(Thread::current()->is_ConcurrentGC_thread(), "Wrong thread");
4288 verify_work_stacks_empty();
4289 verify_overflow_empty();
4290 _abort_preclean = false;
4291 if (CMSPrecleaningEnabled) {
4292 _eden_chunk_index = 0;
4293 size_t used = get_eden_used();
4294 size_t capacity = get_eden_capacity();
4295 // Don't start sampling unless we will get sufficiently
4296 // many samples.
4297 if (used < (capacity/(CMSScheduleRemarkSamplingRatio * 100)
4298 * CMSScheduleRemarkEdenPenetration)) {
4299 _start_sampling = true;
4300 } else {
4301 _start_sampling = false;
4302 }
4303 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
4304 CMSPhaseAccounting pa(this, "preclean", !PrintGCDetails);
4305 preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
4306 }
4307 CMSTokenSync x(true); // is cms thread
4308 if (CMSPrecleaningEnabled) {
4309 sample_eden();
4310 _collectorState = AbortablePreclean;
4311 } else {
4312 _collectorState = FinalMarking;
4313 }
4314 verify_work_stacks_empty();
4315 verify_overflow_empty();
4316 }
4318 // Try and schedule the remark such that young gen
4319 // occupancy is CMSScheduleRemarkEdenPenetration %.
4320 void CMSCollector::abortable_preclean() {
4321 check_correct_thread_executing();
4322 assert(CMSPrecleaningEnabled, "Inconsistent control state");
4323 assert(_collectorState == AbortablePreclean, "Inconsistent control state");
4325 // If Eden's current occupancy is below this threshold,
4326 // immediately schedule the remark; else preclean
4327 // past the next scavenge in an effort to
4328 // schedule the pause as described above. By choosing
4329 // CMSScheduleRemarkEdenSizeThreshold >= max eden size
4330 // we will never do an actual abortable preclean cycle.
4331 if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
4332 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
4333 CMSPhaseAccounting pa(this, "abortable-preclean", !PrintGCDetails);
4334 // We need more smarts in the abortable preclean
4335 // loop below to deal with cases where allocation
4336 // in young gen is very very slow, and our precleaning
4337 // is running a losing race against a horde of
4338 // mutators intent on flooding us with CMS updates
4339 // (dirty cards).
4340 // One, admittedly dumb, strategy is to give up
4341 // after a certain number of abortable precleaning loops
4342 // or after a certain maximum time. We want to make
4343 // this smarter in the next iteration.
4344 // XXX FIX ME!!! YSR
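// In outline, the loop below runs until one of the following holds
// (a sketch of the existing control flow, not additional policy):
//   . should_abort_preclean() -- Eden has reached the scheduled remark
//     penetration (see sample_eden()), or
//   . the CMS thread has been asked to terminate, or
//   . loops >= CMSMaxAbortablePrecleanLoops (when that flag is non-zero), or
//   . the elapsed wall-clock time exceeds CMSMaxAbortablePrecleanTime.
// Iterations that accomplish little work sleep briefly on the CMS lock
// so that dirty cards can accumulate before the next pass.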
4345 size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0;
4346 while (!(should_abort_preclean() ||
4347 ConcurrentMarkSweepThread::should_terminate())) {
4348 workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2);
4349 cumworkdone += workdone;
4350 loops++;
4351 // Voluntarily terminate abortable preclean phase if we have
4352 // been at it for too long.
4353 if ((CMSMaxAbortablePrecleanLoops != 0) &&
4354 loops >= CMSMaxAbortablePrecleanLoops) {
4355 if (PrintGCDetails) {
4356 gclog_or_tty->print(" CMS: abort preclean due to loops ");
4357 }
4358 break;
4359 }
4360 if (pa.wallclock_millis() > CMSMaxAbortablePrecleanTime) {
4361 if (PrintGCDetails) {
4362 gclog_or_tty->print(" CMS: abort preclean due to time ");
4363 }
4364 break;
4365 }
4366 // If we are doing little work each iteration, we should
4367 // take a short break.
4368 if (workdone < CMSAbortablePrecleanMinWorkPerIteration) {
4369 // Sleep for some time, waiting for work to accumulate
4370 stopTimer();
4371 cmsThread()->wait_on_cms_lock(CMSAbortablePrecleanWaitMillis);
4372 startTimer();
4373 waited++;
4374 }
4375 }
4376 if (PrintCMSStatistics > 0) {
4377 gclog_or_tty->print(" [%d iterations, %d waits, %d cards] ",
4378 loops, waited, cumworkdone);
4379 }
4380 }
4381 CMSTokenSync x(true); // is cms thread
4382 if (_collectorState != Idling) {
4383 assert(_collectorState == AbortablePreclean,
4384 "Spontaneous state transition?");
4385 _collectorState = FinalMarking;
4386 } // Else, a foreground collection completed this CMS cycle.
4387 return;
4388 }
4390 // Respond to an Eden sampling opportunity
4391 void CMSCollector::sample_eden() {
4392 // Make sure a young gc cannot sneak in between our
4393 // reading and recording of a sample.
4394 assert(Thread::current()->is_ConcurrentGC_thread(),
4395 "Only the cms thread may collect Eden samples");
4396 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
4397 "Should collect samples while holding CMS token");
4398 if (!_start_sampling) {
4399 return;
4400 }
4401 if (_eden_chunk_array) {
4402 if (_eden_chunk_index < _eden_chunk_capacity) {
4403 _eden_chunk_array[_eden_chunk_index] = *_top_addr; // take sample
4404 assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
4405 "Unexpected state of Eden");
4406 // We'd like to check that what we just sampled is an oop-start address;
4407 // however, we cannot do that here since the object may not yet have been
4408 // initialized. So we'll instead do the check when we _use_ this sample
4409 // later.
4410 if (_eden_chunk_index == 0 ||
4411 (pointer_delta(_eden_chunk_array[_eden_chunk_index],
4412 _eden_chunk_array[_eden_chunk_index-1])
4413 >= CMSSamplingGrain)) {
4414 _eden_chunk_index++; // commit sample
4415 }
4416 }
4417 }
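// Illustrative example (hypothetical values): if the previously committed
// sample is S and CMSSamplingGrain is 0x400 words, a new top of S + 0x100
// words is recorded but not committed (the delta is below the grain),
// whereas a new top at S + 0x400 words or beyond is committed and advances
// _eden_chunk_index. The committed samples therefore form a monotonically
// increasing sequence of Eden boundaries, at least one grain apart, which
// later serve as the parallel rescan task boundaries for Eden
// (see do_young_space_rescan()).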
4418 if ((_collectorState == AbortablePreclean) && !_abort_preclean) {
4419 size_t used = get_eden_used();
4420 size_t capacity = get_eden_capacity();
4421 assert(used <= capacity, "Unexpected state of Eden");
4422 if (used > (capacity/100 * CMSScheduleRemarkEdenPenetration)) {
4423 _abort_preclean = true;
4424 }
4425 }
4426 }
4429 size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
4430 assert(_collectorState == Precleaning ||
4431 _collectorState == AbortablePreclean, "incorrect state");
4432 ResourceMark rm;
4433 HandleMark hm;
4434 // Do one pass of scrubbing the discovered reference lists
4435 // to remove any reference objects with strongly-reachable
4436 // referents.
4437 if (clean_refs) {
4438 ReferenceProcessor* rp = ref_processor();
4439 CMSPrecleanRefsYieldClosure yield_cl(this);
4440 assert(rp->span().equals(_span), "Spans should be equal");
4441 CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
4442 &_markStack, &_revisitStack,
4443 true /* preclean */);
4444 CMSDrainMarkingStackClosure complete_trace(this,
4445 _span, &_markBitMap, &_markStack,
4446 &keep_alive, true /* preclean */);
4448 // We don't want this step to interfere with a young
4449 // collection because we don't want to take CPU
4450 // or memory bandwidth away from the young GC threads
4451 // (which may be as many as there are CPUs).
4452 // Note that we don't need to protect ourselves from
4453 // interference with mutators because they can't
4454 // manipulate the discovered reference lists nor affect
4455 // the computed reachability of the referents, the
4456 // only properties manipulated by the precleaning
4457 // of these reference lists.
4458 stopTimer();
4459 CMSTokenSyncWithLocks x(true /* is cms thread */,
4460 bitMapLock());
4461 startTimer();
4462 sample_eden();
4464 // The following will yield to allow foreground
4465 // collection to proceed promptly. XXX YSR:
4466 // The code in this method may need further
4467 // tweaking for better performance and some restructuring
4468 // for cleaner interfaces.
4469 rp->preclean_discovered_references(
4470 rp->is_alive_non_header(), &keep_alive, &complete_trace,
4471 &yield_cl, should_unload_classes());
4472 }
4474 if (clean_survivor) { // preclean the active survivor space(s)
4475 assert(_young_gen->kind() == Generation::DefNew ||
4476 _young_gen->kind() == Generation::ParNew ||
4477 _young_gen->kind() == Generation::ASParNew,
4478 "incorrect type for cast");
4479 DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
4480 PushAndMarkClosure pam_cl(this, _span, ref_processor(),
4481 &_markBitMap, &_modUnionTable,
4482 &_markStack, &_revisitStack,
4483 true /* precleaning phase */);
4484 stopTimer();
4485 CMSTokenSyncWithLocks ts(true /* is cms thread */,
4486 bitMapLock());
4487 startTimer();
4488 unsigned int before_count =
4489 GenCollectedHeap::heap()->total_collections();
4490 SurvivorSpacePrecleanClosure
4491 sss_cl(this, _span, &_markBitMap, &_markStack,
4492 &pam_cl, before_count, CMSYield);
4493 DEBUG_ONLY(RememberKlassesChecker mx(should_unload_classes());)
4494 dng->from()->object_iterate_careful(&sss_cl);
4495 dng->to()->object_iterate_careful(&sss_cl);
4496 }
4497 MarkRefsIntoAndScanClosure
4498 mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
4499 &_markStack, &_revisitStack, this, CMSYield,
4500 true /* precleaning phase */);
4501 // CAUTION: The following closure has persistent state that may need to
4502 // be reset upon a decrease in the sequence of addresses it
4503 // processes.
4504 ScanMarkedObjectsAgainCarefullyClosure
4505 smoac_cl(this, _span,
4506 &_markBitMap, &_markStack, &_revisitStack, &mrias_cl, CMSYield);
4508 // Preclean dirty cards in ModUnionTable and CardTable using
4509 // appropriate convergence criterion;
4510 // repeat CMSPrecleanIter times unless we find that
4511 // we are losing.
4512 assert(CMSPrecleanIter < 10, "CMSPrecleanIter is too large");
4513 assert(CMSPrecleanNumerator < CMSPrecleanDenominator,
4514 "Bad convergence multiplier");
4515 assert(CMSPrecleanThreshold >= 100,
4516 "Unreasonably low CMSPrecleanThreshold");
4518 size_t numIter, cumNumCards, lastNumCards, curNumCards;
4519 for (numIter = 0, cumNumCards = lastNumCards = curNumCards = 0;
4520 numIter < CMSPrecleanIter;
4521 numIter++, lastNumCards = curNumCards, cumNumCards += curNumCards) {
4522 curNumCards = preclean_mod_union_table(_cmsGen, &smoac_cl);
4523 if (CMSPermGenPrecleaningEnabled) {
4524 curNumCards += preclean_mod_union_table(_permGen, &smoac_cl);
4525 }
4526 if (Verbose && PrintGCDetails) {
4527 gclog_or_tty->print(" (modUnionTable: %d cards)", curNumCards);
4528 }
4529 // Either there are very few dirty cards, so re-mark
4530 // pause will be small anyway, or our pre-cleaning isn't
4531 // that much faster than the rate at which cards are being
4532 // dirtied, so we might as well stop and re-mark since
4533 // precleaning won't improve our re-mark time by much.
4534 if (curNumCards <= CMSPrecleanThreshold ||
4535 (numIter > 0 &&
4536 (curNumCards * CMSPrecleanDenominator >
4537 lastNumCards * CMSPrecleanNumerator))) {
4538 numIter++;
4539 cumNumCards += curNumCards;
4540 break;
4541 }
4542 }
4543 curNumCards = preclean_card_table(_cmsGen, &smoac_cl);
4544 if (CMSPermGenPrecleaningEnabled) {
4545 curNumCards += preclean_card_table(_permGen, &smoac_cl);
4546 }
4547 cumNumCards += curNumCards;
4548 if (PrintGCDetails && PrintCMSStatistics != 0) {
4549 gclog_or_tty->print_cr(" (cardTable: %d cards, re-scanned %d cards, %d iterations)",
4550 curNumCards, cumNumCards, numIter);
4551 }
4552 return cumNumCards; // as a measure of useful work done
4553 }
4555 // PRECLEANING NOTES:
4556 // Precleaning involves:
4557 // . reading the bits of the modUnionTable and clearing the set bits.
4558 // . For the cards corresponding to the set bits, we scan the
4559 // objects on those cards. This means we need the free_list_lock
4560 // so that we can safely iterate over the CMS space when scanning
4561 // for oops.
4562 // . When we scan the objects, we'll be both reading and setting
4563 // marks in the marking bit map, so we'll need the marking bit map.
4564 // . For protecting _collector_state transitions, we take the CGC_lock.
4565 // Note that any races in the reading of card table entries by the
4566 // CMS thread on the one hand and the clearing of those entries by the
4567 // VM thread or the setting of those entries by the mutator threads on the
4568 // other are quite benign. However, for efficiency it makes sense to keep
4569 // the VM thread from racing with the CMS thread while the latter is
4570 // transferring dirty card info to the modUnionTable. We therefore also use the
4571 // CGC_lock to protect the reading of the card table and the mod union
4572 // table by the CMS thread.
4573 // . We run concurrently with mutator updates, so scanning
4574 // needs to be done carefully -- we should not try to scan
4575 // potentially uninitialized objects.
4576 //
4577 // Locking strategy: While holding the CGC_lock, we scan over and
4578 // reset a maximal dirty range of the mod union / card tables, then lock
4579 // the free_list_lock and bitmap lock to do a full marking, then
4580 // release these locks; and repeat the cycle. This allows for a
4581 // certain amount of fairness in the sharing of these locks between
4582 // the CMS collector on the one hand, and the VM thread and the
4583 // mutators on the other.
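// A sketch of that per-iteration lock choreography (pseudocode only; the
// real loops are in preclean_mod_union_table() and preclean_card_table()
// below):
//   loop {
//     { CMSTokenSync                      // CMS token / CGC_lock only
//       dirtyRegion = get_and_clear_next_dirty_range();
//     }
//     if (dirtyRegion.is_empty()) break;
//     { CMSTokenSyncWithLocks             // + free_list_lock + bitmap lock
//       scan_objects_on(dirtyRegion);     // may redirty a suffix and abort
//     }
//     // all locks released here; foreground GC and mutators may proceed
//   }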
4585 // NOTE: preclean_mod_union_table() and preclean_card_table()
4586 // further below are largely identical; if you need to modify
4587 // one of these methods, please check the other method too.
4589 size_t CMSCollector::preclean_mod_union_table(
4590 ConcurrentMarkSweepGeneration* gen,
4591 ScanMarkedObjectsAgainCarefullyClosure* cl) {
4592 verify_work_stacks_empty();
4593 verify_overflow_empty();
4595 // Turn off checking for this method but turn it back on
4596 // selectively. There are yield points in this method
4597 // but it is difficult to turn the checking off just around
4598 // the yield points. It is simpler to selectively turn
4599 // it on.
4600 DEBUG_ONLY(RememberKlassesChecker mux(false);)
4602 // strategy: starting with the first card, accumulate contiguous
4603 // ranges of dirty cards; clear these cards, then scan the region
4604 // covered by these cards.
4606 // Since all of the MUT is committed ahead, we can just use
4607 // that, in case the generations expand while we are precleaning.
4608 // It might also be fine to just use the committed part of the
4609 // generation, but we might potentially miss cards when the
4610 // generation is rapidly expanding while we are in the midst
4611 // of precleaning.
4612 HeapWord* startAddr = gen->reserved().start();
4613 HeapWord* endAddr = gen->reserved().end();
4615 cl->setFreelistLock(gen->freelistLock()); // needed for yielding
4617 size_t numDirtyCards, cumNumDirtyCards;
4618 HeapWord *nextAddr, *lastAddr;
4619 for (cumNumDirtyCards = numDirtyCards = 0,
4620 nextAddr = lastAddr = startAddr;
4621 nextAddr < endAddr;
4622 nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
4624 ResourceMark rm;
4625 HandleMark hm;
4627 MemRegion dirtyRegion;
4628 {
4629 stopTimer();
4630 // Potential yield point
4631 CMSTokenSync ts(true);
4632 startTimer();
4633 sample_eden();
4634 // Get dirty region starting at nextAddr (inclusive),
4635 // simultaneously clearing it.
4636 dirtyRegion =
4637 _modUnionTable.getAndClearMarkedRegion(nextAddr, endAddr);
4638 assert(dirtyRegion.start() >= nextAddr,
4639 "returned region inconsistent?");
4640 }
4641 // Remember where the next search should begin.
4642 // The returned region (if non-empty) is a right open interval,
4643 // so lastAddr is obtained from the right end of that
4644 // interval.
4645 lastAddr = dirtyRegion.end();
4646 // Should do something more transparent and less hacky XXX
4647 numDirtyCards =
4648 _modUnionTable.heapWordDiffToOffsetDiff(dirtyRegion.word_size());
4650 // We'll scan the cards in the dirty region (with periodic
4651 // yields for foreground GC as needed).
4652 if (!dirtyRegion.is_empty()) {
4653 assert(numDirtyCards > 0, "consistency check");
4654 HeapWord* stop_point = NULL;
4655 stopTimer();
4656 // Potential yield point
4657 CMSTokenSyncWithLocks ts(true, gen->freelistLock(),
4658 bitMapLock());
4659 startTimer();
4660 {
4661 verify_work_stacks_empty();
4662 verify_overflow_empty();
4663 sample_eden();
4664 DEBUG_ONLY(RememberKlassesChecker mx(should_unload_classes());)
4665 stop_point =
4666 gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4667 }
4668 if (stop_point != NULL) {
4669 // The careful iteration stopped early either because it found an
4670 // uninitialized object, or because we were in the midst of an
4671 // "abortable preclean", which should now be aborted. Redirty
4672 // the bits corresponding to the partially-scanned or unscanned
4673 // cards. We'll either restart at the next block boundary or
4674 // abort the preclean.
4675 assert((CMSPermGenPrecleaningEnabled && (gen == _permGen)) ||
4676 (_collectorState == AbortablePreclean && should_abort_preclean()),
4677 "Unparsable objects should only be in perm gen.");
4678 _modUnionTable.mark_range(MemRegion(stop_point, dirtyRegion.end()));
4679 if (should_abort_preclean()) {
4680 break; // out of preclean loop
4681 } else {
4682 // Compute the next address at which preclean should pick up;
4683 // might need bitMapLock in order to read P-bits.
4684 lastAddr = next_card_start_after_block(stop_point);
4685 }
4686 }
4687 } else {
4688 assert(lastAddr == endAddr, "consistency check");
4689 assert(numDirtyCards == 0, "consistency check");
4690 break;
4691 }
4692 }
4693 verify_work_stacks_empty();
4694 verify_overflow_empty();
4695 return cumNumDirtyCards;
4696 }
4698 // NOTE: preclean_mod_union_table() above and preclean_card_table()
4699 // below are largely identical; if you need to modify
4700 // one of these methods, please check the other method too.
4702 size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* gen,
4703 ScanMarkedObjectsAgainCarefullyClosure* cl) {
4704 // strategy: it's similar to preclean_mod_union_table above, in that
4705 // we accumulate contiguous ranges of dirty cards, mark these cards
4706 // precleaned, then scan the region covered by these cards.
4707 HeapWord* endAddr = (HeapWord*)(gen->_virtual_space.high());
4708 HeapWord* startAddr = (HeapWord*)(gen->_virtual_space.low());
4710 cl->setFreelistLock(gen->freelistLock()); // needed for yielding
4712 size_t numDirtyCards, cumNumDirtyCards;
4713 HeapWord *lastAddr, *nextAddr;
4715 for (cumNumDirtyCards = numDirtyCards = 0,
4716 nextAddr = lastAddr = startAddr;
4717 nextAddr < endAddr;
4718 nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
4720 ResourceMark rm;
4721 HandleMark hm;
4723 MemRegion dirtyRegion;
4724 {
4725 // See comments in "Precleaning notes" above on why we
4726 // do this locking. XXX Could the locking overheads be
4727 // too high when dirty cards are sparse? [I don't think so.]
4728 stopTimer();
4729 CMSTokenSync x(true); // is cms thread
4730 startTimer();
4731 sample_eden();
4732 // Get and clear dirty region from card table
4733 dirtyRegion = _ct->ct_bs()->dirty_card_range_after_reset(
4734 MemRegion(nextAddr, endAddr),
4735 true,
4736 CardTableModRefBS::precleaned_card_val());
4738 assert(dirtyRegion.start() >= nextAddr,
4739 "returned region inconsistent?");
4740 }
4741 lastAddr = dirtyRegion.end();
4742 numDirtyCards =
4743 dirtyRegion.word_size()/CardTableModRefBS::card_size_in_words;
4745 if (!dirtyRegion.is_empty()) {
4746 stopTimer();
4747 CMSTokenSyncWithLocks ts(true, gen->freelistLock(), bitMapLock());
4748 startTimer();
4749 sample_eden();
4750 verify_work_stacks_empty();
4751 verify_overflow_empty();
4752 DEBUG_ONLY(RememberKlassesChecker mx(should_unload_classes());)
4753 HeapWord* stop_point =
4754 gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4755 if (stop_point != NULL) {
4756 // The careful iteration stopped early because it found an
4757 // uninitialized object. Redirty the bits corresponding to the
4758 // partially-scanned or unscanned cards, and start again at the
4759 // next block boundary.
4760 assert(CMSPermGenPrecleaningEnabled ||
4761 (_collectorState == AbortablePreclean && should_abort_preclean()),
4762 "Unparsable objects should only be in perm gen.");
4763 _ct->ct_bs()->invalidate(MemRegion(stop_point, dirtyRegion.end()));
4764 if (should_abort_preclean()) {
4765 break; // out of preclean loop
4766 } else {
4767 // Compute the next address at which preclean should pick up.
4768 lastAddr = next_card_start_after_block(stop_point);
4769 }
4770 }
4771 } else {
4772 break;
4773 }
4774 }
4775 verify_work_stacks_empty();
4776 verify_overflow_empty();
4777 return cumNumDirtyCards;
4778 }
4780 void CMSCollector::checkpointRootsFinal(bool asynch,
4781 bool clear_all_soft_refs, bool init_mark_was_synchronous) {
4782 assert(_collectorState == FinalMarking, "incorrect state transition?");
4783 check_correct_thread_executing();
4784 // world is stopped at this checkpoint
4785 assert(SafepointSynchronize::is_at_safepoint(),
4786 "world should be stopped");
4787 verify_work_stacks_empty();
4788 verify_overflow_empty();
4790 SpecializationStats::clear();
4791 if (PrintGCDetails) {
4792 gclog_or_tty->print("[YG occupancy: "SIZE_FORMAT" K ("SIZE_FORMAT" K)]",
4793 _young_gen->used() / K,
4794 _young_gen->capacity() / K);
4795 }
4796 if (asynch) {
4797 if (CMSScavengeBeforeRemark) {
4798 GenCollectedHeap* gch = GenCollectedHeap::heap();
4799 // Temporarily set flag to false; GCH->do_collection will
4800 // expect it to be false and will set it to true
4801 FlagSetting fl(gch->_is_gc_active, false);
4802 NOT_PRODUCT(TraceTime t("Scavenge-Before-Remark",
4803 PrintGCDetails && Verbose, true, gclog_or_tty);)
4804 int level = _cmsGen->level() - 1;
4805 if (level >= 0) {
4806 gch->do_collection(true, // full (i.e. force, see below)
4807 false, // !clear_all_soft_refs
4808 0, // size
4809 false, // is_tlab
4810 level // max_level
4811 );
4812 }
4813 }
4814 FreelistLocker x(this);
4815 MutexLockerEx y(bitMapLock(),
4816 Mutex::_no_safepoint_check_flag);
4817 assert(!init_mark_was_synchronous, "but that's impossible!");
4818 checkpointRootsFinalWork(asynch, clear_all_soft_refs, false);
4819 } else {
4820 // already have all the locks
4821 checkpointRootsFinalWork(asynch, clear_all_soft_refs,
4822 init_mark_was_synchronous);
4823 }
4824 verify_work_stacks_empty();
4825 verify_overflow_empty();
4826 SpecializationStats::print();
4827 }
4829 void CMSCollector::checkpointRootsFinalWork(bool asynch,
4830 bool clear_all_soft_refs, bool init_mark_was_synchronous) {
4832 NOT_PRODUCT(TraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, gclog_or_tty);)
4834 assert(haveFreelistLocks(), "must have free list locks");
4835 assert_lock_strong(bitMapLock());
4837 if (UseAdaptiveSizePolicy) {
4838 size_policy()->checkpoint_roots_final_begin();
4839 }
4841 ResourceMark rm;
4842 HandleMark hm;
4844 GenCollectedHeap* gch = GenCollectedHeap::heap();
4846 if (should_unload_classes()) {
4847 CodeCache::gc_prologue();
4848 }
4849 assert(haveFreelistLocks(), "must have free list locks");
4850 assert_lock_strong(bitMapLock());
4852 DEBUG_ONLY(RememberKlassesChecker fmx(should_unload_classes());)
4853 if (!init_mark_was_synchronous) {
4854 // We might assume that we need not fill TLAB's when
4855 // CMSScavengeBeforeRemark is set, because we may have just done
4856 // a scavenge which would have filled all TLAB's -- and besides
4857 // Eden would be empty. This however may not always be the case --
4858 // for instance although we asked for a scavenge, it may not have
4859 // happened because of a JNI critical section. We probably need
4860 // a policy for deciding whether we can in that case wait until
4861 // the critical section releases and then do the remark following
4862 // the scavenge, and skip it here. In the absence of that policy,
4863 // or of an indication of whether the scavenge did indeed occur,
4864 // we cannot rely on TLAB's having been filled and must do
4865 // so here just in case a scavenge did not happen.
4866 gch->ensure_parsability(false); // fill TLAB's, but no need to retire them
4867 // Update the saved marks which may affect the root scans.
4868 gch->save_marks();
4870 {
4871 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
4873 // Note on the role of the mod union table:
4874 // Since the marker in "markFromRoots" marks concurrently with
4875 // mutators, it is possible for some reachable objects not to have been
4876 // scanned. For instance, the only reference to an object A was
4877 // placed in object B after the marker scanned B. Unless B is rescanned,
4878 // A would be collected. Such updates to references in marked objects
4879 // are detected via the mod union table which is the set of all cards
4880 // dirtied since the first checkpoint in this GC cycle and prior to
4881 // the most recent young generation GC, minus those cleaned up by the
4882 // concurrent precleaning.
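// Concretely (hypothetical objects): suppose B is scanned while it holds
// no reference to A, and a mutator then stores the only reference to A
// into B. The store dirties B's card; that dirty card is transferred into
// the mod union table (at a subsequent young collection, or by the rescan
// code below) unless concurrent precleaning has already rescanned B, and
// the remark therefore revisits B and marks A.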
4883 if (CMSParallelRemarkEnabled && ParallelGCThreads > 0) {
4884 TraceTime t("Rescan (parallel) ", PrintGCDetails, false, gclog_or_tty);
4885 do_remark_parallel();
4886 } else {
4887 TraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
4888 gclog_or_tty);
4889 do_remark_non_parallel();
4890 }
4891 }
4892 } else {
4893 assert(!asynch, "Can't have init_mark_was_synchronous in asynch mode");
4894 // The initial mark was stop-world, so there's no rescanning to
4895 // do; go straight on to the next step below.
4896 }
4897 verify_work_stacks_empty();
4898 verify_overflow_empty();
4900 {
4901 NOT_PRODUCT(TraceTime ts("refProcessingWork", PrintGCDetails, false, gclog_or_tty);)
4902 refProcessingWork(asynch, clear_all_soft_refs);
4903 }
4904 verify_work_stacks_empty();
4905 verify_overflow_empty();
4907 if (should_unload_classes()) {
4908 CodeCache::gc_epilogue();
4909 }
4911 // If we encountered any (marking stack / work queue) overflow
4912 // events during the current CMS cycle, take appropriate
4913 // remedial measures, where possible, so as to try and avoid
4914 // recurrence of that condition.
4915 assert(_markStack.isEmpty(), "No grey objects");
4916 size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
4917 _ser_kac_ovflw + _ser_kac_preclean_ovflw;
4918 if (ser_ovflw > 0) {
4919 if (PrintCMSStatistics != 0) {
4920 gclog_or_tty->print_cr("Marking stack overflow (benign) "
4921 "(pmc_pc="SIZE_FORMAT", pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT
4922 ", kac_preclean="SIZE_FORMAT")",
4923 _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw,
4924 _ser_kac_ovflw, _ser_kac_preclean_ovflw);
4925 }
4926 _markStack.expand();
4927 _ser_pmc_remark_ovflw = 0;
4928 _ser_pmc_preclean_ovflw = 0;
4929 _ser_kac_preclean_ovflw = 0;
4930 _ser_kac_ovflw = 0;
4931 }
4932 if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) {
4933 if (PrintCMSStatistics != 0) {
4934 gclog_or_tty->print_cr("Work queue overflow (benign) "
4935 "(pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT")",
4936 _par_pmc_remark_ovflw, _par_kac_ovflw);
4937 }
4938 _par_pmc_remark_ovflw = 0;
4939 _par_kac_ovflw = 0;
4940 }
4941 if (PrintCMSStatistics != 0) {
4942 if (_markStack._hit_limit > 0) {
4943 gclog_or_tty->print_cr(" (benign) Hit max stack size limit ("SIZE_FORMAT")",
4944 _markStack._hit_limit);
4945 }
4946 if (_markStack._failed_double > 0) {
4947 gclog_or_tty->print_cr(" (benign) Failed stack doubling ("SIZE_FORMAT"),"
4948 " current capacity "SIZE_FORMAT,
4949 _markStack._failed_double,
4950 _markStack.capacity());
4951 }
4952 }
4953 _markStack._hit_limit = 0;
4954 _markStack._failed_double = 0;
4956 // Check that all the klasses have been checked
4957 assert(_revisitStack.isEmpty(), "Not all klasses revisited");
4959 if ((VerifyAfterGC || VerifyDuringGC) &&
4960 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
4961 verify_after_remark();
4962 }
4964 // Change under the freelistLocks.
4965 _collectorState = Sweeping;
4966 // Call isAllClear() under bitMapLock
4967 assert(_modUnionTable.isAllClear(), "Should be clear by end of the"
4968 " final marking");
4969 if (UseAdaptiveSizePolicy) {
4970 size_policy()->checkpoint_roots_final_end(gch->gc_cause());
4971 }
4972 }
4974 // Parallel remark task
4975 class CMSParRemarkTask: public AbstractGangTask {
4976 CMSCollector* _collector;
4977 WorkGang* _workers;
4978 int _n_workers;
4979 CompactibleFreeListSpace* _cms_space;
4980 CompactibleFreeListSpace* _perm_space;
4982 // The per-thread work queues, available here for stealing.
4983 OopTaskQueueSet* _task_queues;
4984 ParallelTaskTerminator _term;
4986 public:
4987 CMSParRemarkTask(CMSCollector* collector,
4988 CompactibleFreeListSpace* cms_space,
4989 CompactibleFreeListSpace* perm_space,
4990 int n_workers, WorkGang* workers,
4991 OopTaskQueueSet* task_queues):
4992 AbstractGangTask("Rescan roots and grey objects in parallel"),
4993 _collector(collector),
4994 _cms_space(cms_space), _perm_space(perm_space),
4995 _n_workers(n_workers),
4996 _workers(workers),
4997 _task_queues(task_queues),
4998 _term(workers->total_workers(), task_queues) { }
5000 OopTaskQueueSet* task_queues() { return _task_queues; }
5002 OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
5004 ParallelTaskTerminator* terminator() { return &_term; }
5006 void work(int i);
5008 private:
5009 // Work method in support of parallel rescan ... of young gen spaces
5010 void do_young_space_rescan(int i, Par_MarkRefsIntoAndScanClosure* cl,
5011 ContiguousSpace* space,
5012 HeapWord** chunk_array, size_t chunk_top);
5014 // ... of dirty cards in old space
5015 void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i,
5016 Par_MarkRefsIntoAndScanClosure* cl);
5018 // ... work stealing for the above
5019 void do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl, int* seed);
5020 };
5022 void CMSParRemarkTask::work(int i) {
5023 elapsedTimer _timer;
5024 ResourceMark rm;
5025 HandleMark hm;
5027 // ---------- rescan from roots --------------
5028 _timer.start();
5029 GenCollectedHeap* gch = GenCollectedHeap::heap();
5030 Par_MarkRefsIntoAndScanClosure par_mrias_cl(_collector,
5031 _collector->_span, _collector->ref_processor(),
5032 &(_collector->_markBitMap),
5033 work_queue(i), &(_collector->_revisitStack));
5035 // Rescan young gen roots first since these are likely
5036 // coarsely partitioned and may, on that account, constitute
5037 // the critical path; thus, it's best to start off that
5038 // work first.
5039 // ---------- young gen roots --------------
5040 {
5041 DefNewGeneration* dng = _collector->_young_gen->as_DefNewGeneration();
5042 EdenSpace* eden_space = dng->eden();
5043 ContiguousSpace* from_space = dng->from();
5044 ContiguousSpace* to_space = dng->to();
5046 HeapWord** eca = _collector->_eden_chunk_array;
5047 size_t ect = _collector->_eden_chunk_index;
5048 HeapWord** sca = _collector->_survivor_chunk_array;
5049 size_t sct = _collector->_survivor_chunk_index;
5051 assert(ect <= _collector->_eden_chunk_capacity, "out of bounds");
5052 assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds");
5054 do_young_space_rescan(i, &par_mrias_cl, to_space, NULL, 0);
5055 do_young_space_rescan(i, &par_mrias_cl, from_space, sca, sct);
5056 do_young_space_rescan(i, &par_mrias_cl, eden_space, eca, ect);
5058 _timer.stop();
5059 if (PrintCMSStatistics != 0) {
5060 gclog_or_tty->print_cr(
5061 "Finished young gen rescan work in %dth thread: %3.3f sec",
5062 i, _timer.seconds());
5063 }
5064 }
5066 // ---------- remaining roots --------------
5067 _timer.reset();
5068 _timer.start();
5069 gch->gen_process_strong_roots(_collector->_cmsGen->level(),
5070 false, // yg was scanned above
5071 false, // this is parallel code
5072 true, // collecting perm gen
5073 SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
5074 &par_mrias_cl,
5075 true, // walk all of code cache if (so & SO_CodeCache)
5076 NULL);
5077 assert(_collector->should_unload_classes()
5078 || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_CodeCache),
5079 "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5080 _timer.stop();
5081 if (PrintCMSStatistics != 0) {
5082 gclog_or_tty->print_cr(
5083 "Finished remaining root rescan work in %dth thread: %3.3f sec",
5084 i, _timer.seconds());
5085 }
5087 // ---------- rescan dirty cards ------------
5088 _timer.reset();
5089 _timer.start();
5091 // Do the rescan tasks for each of the two spaces
5092 // (cms_space and perm_space) in turn.
5093 do_dirty_card_rescan_tasks(_cms_space, i, &par_mrias_cl);
5094 do_dirty_card_rescan_tasks(_perm_space, i, &par_mrias_cl);
5095 _timer.stop();
5096 if (PrintCMSStatistics != 0) {
5097 gclog_or_tty->print_cr(
5098 "Finished dirty card rescan work in %dth thread: %3.3f sec",
5099 i, _timer.seconds());
5100 }
5102 // ---------- steal work from other threads ...
5103 // ---------- ... and drain overflow list.
5104 _timer.reset();
5105 _timer.start();
5106 do_work_steal(i, &par_mrias_cl, _collector->hash_seed(i));
5107 _timer.stop();
5108 if (PrintCMSStatistics != 0) {
5109 gclog_or_tty->print_cr(
5110 "Finished work stealing in %dth thread: %3.3f sec",
5111 i, _timer.seconds());
5112 }
5113 }
5115 void
5116 CMSParRemarkTask::do_young_space_rescan(int i,
5117 Par_MarkRefsIntoAndScanClosure* cl, ContiguousSpace* space,
5118 HeapWord** chunk_array, size_t chunk_top) {
5119 // Until all tasks completed:
5120 // . claim an unclaimed task
5121 // . compute region boundaries corresponding to task claimed
5122 // using chunk_array
5123 // . par_oop_iterate(cl) over that region
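// For example (hypothetical values): with chunk_array == {c0, c1} and
// chunk_top == 2, the claimed tasks map onto the regions
//   task 0: [space->bottom(), c0)
//   task 1: [c0, c1)
//   task 2: [c1, space->top())
// which is why the caller sets n_tasks to chunk_top + 1 (or to 1 when no
// samples were taken and chunk_top == 0).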
5125 ResourceMark rm;
5126 HandleMark hm;
5128 SequentialSubTasksDone* pst = space->par_seq_tasks();
5129 assert(pst->valid(), "Uninitialized use?");
5131 int nth_task = 0;
5132 int n_tasks = pst->n_tasks();
5134 HeapWord *start, *end;
5135 while (!pst->is_task_claimed(/* reference */ nth_task)) {
5136 // We claimed task # nth_task; compute its boundaries.
5137 if (chunk_top == 0) { // no samples were taken
5138 assert(nth_task == 0 && n_tasks == 1, "Can have only 1 EdenSpace task");
5139 start = space->bottom();
5140 end = space->top();
5141 } else if (nth_task == 0) {
5142 start = space->bottom();
5143 end = chunk_array[nth_task];
5144 } else if (nth_task < (jint)chunk_top) {
5145 assert(nth_task >= 1, "Control point invariant");
5146 start = chunk_array[nth_task - 1];
5147 end = chunk_array[nth_task];
5148 } else {
5149 assert(nth_task == (jint)chunk_top, "Control point invariant");
5150 start = chunk_array[chunk_top - 1];
5151 end = space->top();
5152 }
5153 MemRegion mr(start, end);
5154 // Verify that mr is in space
5155 assert(mr.is_empty() || space->used_region().contains(mr),
5156 "Should be in space");
5157 // Verify that "start" is an object boundary
5158 assert(mr.is_empty() || oop(mr.start())->is_oop(),
5159 "Should be an oop");
5160 space->par_oop_iterate(mr, cl);
5161 }
5162 pst->all_tasks_completed();
5163 }
5165 void
5166 CMSParRemarkTask::do_dirty_card_rescan_tasks(
5167 CompactibleFreeListSpace* sp, int i,
5168 Par_MarkRefsIntoAndScanClosure* cl) {
5169 // Until all tasks completed:
5170 // . claim an unclaimed task
5171 // . compute region boundaries corresponding to task claimed
5172 // . transfer dirty bits ct->mut for that region
5173 // . apply rescanclosure to dirty mut bits for that region
5175 ResourceMark rm;
5176 HandleMark hm;
5178 OopTaskQueue* work_q = work_queue(i);
5179 ModUnionClosure modUnionClosure(&(_collector->_modUnionTable));
5180 // CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION!
5181 // CAUTION: This closure has state that persists across calls to
5182 // the work method dirty_range_iterate_clear() in that it has
5183 // embedded in it a (subtype of) UpwardsObjectClosure. The
5184 // use of that state in the embedded UpwardsObjectClosure instance
5185 // assumes that the cards are always iterated (even if in parallel
5186 // by several threads) in monotonically increasing order per each
5187 // thread. This is true of the implementation below which picks
5188 // card ranges (chunks) in monotonically increasing order globally
5189 // and, a-fortiori, in monotonically increasing order per thread
5190 // (the latter order being a subsequence of the former).
5191 // If the work code below is ever reorganized into a more chaotic
5192 // work-partitioning form than the current "sequential tasks"
5193 // paradigm, the use of that persistent state will have to be
5194 // revisited and modified appropriately. See also related
5195 // bug 4756801 work on which should examine this code to make
5196 // sure that the changes there do not run counter to the
5197 // assumptions made here and necessary for correctness and
5198 // efficiency. Note also that this code might yield inefficient
5199 // behaviour in the case of very large objects that span one or
5200 // more work chunks. Such objects would potentially be scanned
5201 // several times redundantly. Work on 4756801 should try and
5202 // address that performance anomaly if at all possible. XXX
5203 MemRegion full_span = _collector->_span;
5204 CMSBitMap* bm = &(_collector->_markBitMap); // shared
5205 CMSMarkStack* rs = &(_collector->_revisitStack); // shared
5206 MarkFromDirtyCardsClosure
5207 greyRescanClosure(_collector, full_span, // entire span of interest
5208 sp, bm, work_q, rs, cl);
5210 SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
5211 assert(pst->valid(), "Uninitialized use?");
5212 int nth_task = 0;
5213 const int alignment = CardTableModRefBS::card_size * BitsPerWord;
5214 MemRegion span = sp->used_region();
5215 HeapWord* start_addr = span.start();
5216 HeapWord* end_addr = (HeapWord*)round_to((intptr_t)span.end(),
5217 alignment);
5218 const size_t chunk_size = sp->rescan_task_size(); // in HeapWord units
5219 assert((HeapWord*)round_to((intptr_t)start_addr, alignment) ==
5220 start_addr, "Check alignment");
5221 assert((size_t)round_to((intptr_t)chunk_size, alignment) ==
5222 chunk_size, "Check alignment");
5224 while (!pst->is_task_claimed(/* reference */ nth_task)) {
5225 // Having claimed the nth_task, compute corresponding mem-region,
5226 // which is a-fortiori aligned correctly (i.e. at a MUT boundary).
5227 // The alignment restriction ensures that we do not need any
5228 // synchronization with other gang-workers while setting or
5229 // clearing bits in this chunk of the MUT.
5230 MemRegion this_span = MemRegion(start_addr + nth_task*chunk_size,
5231 start_addr + (nth_task+1)*chunk_size);
5232 // The last chunk's end might be way beyond end of the
5233 // used region. In that case pull back appropriately.
5234 if (this_span.end() > end_addr) {
5235 this_span.set_end(end_addr);
5236 assert(!this_span.is_empty(), "Program logic (calculation of n_tasks)");
5237 }
5238 // Iterate over the dirty cards covering this chunk, marking them
5239 // precleaned, and setting the corresponding bits in the mod union
5240 // table. Since we have been careful to partition at Card and MUT-word
5241 // boundaries no synchronization is needed between parallel threads.
5242 _collector->_ct->ct_bs()->dirty_card_iterate(this_span,
5243 &modUnionClosure);
5245 // Having transferred these marks into the modUnionTable,
5246 // rescan the marked objects on the dirty cards in the modUnionTable.
5247 // Even if this is at a synchronous collection, the initial marking
5248 // may have been done during an asynchronous collection so there
5249 // may be dirty bits in the mod-union table.
5250 _collector->_modUnionTable.dirty_range_iterate_clear(
5251 this_span, &greyRescanClosure);
5252 _collector->_modUnionTable.verifyNoOneBitsInRange(
5253 this_span.start(),
5254 this_span.end());
5255 }
5256 pst->all_tasks_completed(); // declare that i am done
5257 }
5259 // . see if we can share work_queues with ParNew? XXX
5260 void
5261 CMSParRemarkTask::do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl,
5262 int* seed) {
5263 OopTaskQueue* work_q = work_queue(i);
5264 NOT_PRODUCT(int num_steals = 0;)
5265 oop obj_to_scan;
5266 CMSBitMap* bm = &(_collector->_markBitMap);
5268 while (true) {
5269 // Completely finish any left over work from (an) earlier round(s)
5270 cl->trim_queue(0);
5271 size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
5272 (size_t)ParGCDesiredObjsFromOverflowList);
5273 // Now check if there's any work in the overflow list
5274 if (_collector->par_take_from_overflow_list(num_from_overflow_list,
5275 work_q)) {
5276 // found something in global overflow list;
5277 // not yet ready to go stealing work from others.
5278 // We'd like to assert(work_q->size() != 0, ...)
5279 // because we just took work from the overflow list,
5280 // but of course we can't since all of that could have
5281 // been already stolen from us.
5282 // "He giveth and He taketh away."
5283 continue;
5284 }
5285 // Verify that we have no work before we resort to stealing
5286 assert(work_q->size() == 0, "Have work, shouldn't steal");
5287 // Try to steal from other queues that have work
5288 if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
5289 NOT_PRODUCT(num_steals++;)
5290 assert(obj_to_scan->is_oop(), "Oops, not an oop!");
5291 assert(bm->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
5292 // Do scanning work
5293 obj_to_scan->oop_iterate(cl);
5294 // Loop around, finish this work, and try to steal some more
5295 } else if (terminator()->offer_termination()) {
5296 break; // nirvana from the infinite cycle
5297 }
5298 }
5299 NOT_PRODUCT(
5300 if (PrintCMSStatistics != 0) {
5301 gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
5302 }
5303 )
5304 assert(work_q->size() == 0 && _collector->overflow_list_is_empty(),
5305 "Else our work is not yet done");
5306 }
5308 // Return a thread-local PLAB recording array, as appropriate.
5309 void* CMSCollector::get_data_recorder(int thr_num) {
5310 if (_survivor_plab_array != NULL &&
5311 (CMSPLABRecordAlways ||
5312 (_collectorState > Marking && _collectorState < FinalMarking))) {
5313 assert(thr_num < (int)ParallelGCThreads, "thr_num is out of bounds");
5314 ChunkArray* ca = &_survivor_plab_array[thr_num];
5315 ca->reset(); // clear it so that fresh data is recorded
5316 return (void*) ca;
5317 } else {
5318 return NULL;
5319 }
5320 }
5322 // Reset all the thread-local PLAB recording arrays
5323 void CMSCollector::reset_survivor_plab_arrays() {
5324 for (uint i = 0; i < ParallelGCThreads; i++) {
5325 _survivor_plab_array[i].reset();
5326 }
5327 }
5329 // Merge the per-thread plab arrays into the global survivor chunk
5330 // array which will provide the partitioning of the survivor space
5331 // for CMS rescan.
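// For illustration (hypothetical addresses): if thread 0 recorded the PLAB
// boundaries {0x1000, 0x1400} and thread 1 recorded {0x1200}, the loop
// below performs a simple k-way merge of these already-sorted per-thread
// arrays, filling _survivor_chunk_array with {0x1000, 0x1200, 0x1400} and
// leaving _survivor_chunk_index == 3.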
5332 void CMSCollector::merge_survivor_plab_arrays(ContiguousSpace* surv) {
5333 assert(_survivor_plab_array != NULL, "Error");
5334 assert(_survivor_chunk_array != NULL, "Error");
5335 assert(_collectorState == FinalMarking, "Error");
5336 for (uint j = 0; j < ParallelGCThreads; j++) {
5337 _cursor[j] = 0;
5338 }
5339 HeapWord* top = surv->top();
5340 size_t i;
5341 for (i = 0; i < _survivor_chunk_capacity; i++) { // all sca entries
5342 HeapWord* min_val = top; // Higher than any PLAB address
5343 uint min_tid = 0; // position of min_val this round
5344 for (uint j = 0; j < ParallelGCThreads; j++) {
5345 ChunkArray* cur_sca = &_survivor_plab_array[j];
5346 if (_cursor[j] == cur_sca->end()) {
5347 continue;
5348 }
5349 assert(_cursor[j] < cur_sca->end(), "ctl pt invariant");
5350 HeapWord* cur_val = cur_sca->nth(_cursor[j]);
5351 assert(surv->used_region().contains(cur_val), "Out of bounds value");
5352 if (cur_val < min_val) {
5353 min_tid = j;
5354 min_val = cur_val;
5355 } else {
5356 assert(cur_val < top, "All recorded addresses should be less");
5357 }
5358 }
5359 // At this point min_val and min_tid are respectively
5360 // the least address in _survivor_plab_array[j]->nth(_cursor[j])
5361 // and the thread (j) that witnesses that address.
5362 // We record this address in the _survivor_chunk_array[i]
5363 // and increment _cursor[min_tid] prior to the next round i.
5364 if (min_val == top) {
5365 break;
5366 }
5367 _survivor_chunk_array[i] = min_val;
5368 _cursor[min_tid]++;
5369 }
5370 // We are all done; record the size of the _survivor_chunk_array
5371 _survivor_chunk_index = i; // exclusive: [0, i)
5372 if (PrintCMSStatistics > 0) {
5373 gclog_or_tty->print(" (Survivor:" SIZE_FORMAT " chunks) ", i);
5374 }
5375 // Verify that we used up all the recorded entries
5376 #ifdef ASSERT
5377 size_t total = 0;
5378 for (uint j = 0; j < ParallelGCThreads; j++) {
5379 assert(_cursor[j] == _survivor_plab_array[j].end(), "Ctl pt invariant");
5380 total += _cursor[j];
5381 }
5382 assert(total == _survivor_chunk_index, "Ctl Pt Invariant");
5383 // Check that the merged array is in sorted order
5384 if (total > 0) {
5385 for (size_t i = 0; i < total - 1; i++) {
5386 if (PrintCMSStatistics > 0) {
5387 gclog_or_tty->print(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ",
5388 i, _survivor_chunk_array[i]);
5389 }
5390 assert(_survivor_chunk_array[i] < _survivor_chunk_array[i+1],
5391 "Not sorted");
5392 }
5393 }
5394 #endif // ASSERT
5395 }
5397 // Set up the space's par_seq_tasks structure for work claiming
5398 // for parallel rescan of young gen.
5399 // See ParRescanTask where this is currently used.
5400 void
5401 CMSCollector::
5402 initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
5403 assert(n_threads > 0, "Unexpected n_threads argument");
5404 DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
5406 // Eden space
5407 {
5408 SequentialSubTasksDone* pst = dng->eden()->par_seq_tasks();
5409 assert(!pst->valid(), "Clobbering existing data?");
5410 // Each valid entry in [0, _eden_chunk_index) represents a task.
5411 size_t n_tasks = _eden_chunk_index + 1;
5412 assert(n_tasks == 1 || _eden_chunk_array != NULL, "Error");
5413 pst->set_par_threads(n_threads);
5414 pst->set_n_tasks((int)n_tasks);
5415 }
5417 // Merge the survivor plab arrays into _survivor_chunk_array
5418 if (_survivor_plab_array != NULL) {
5419 merge_survivor_plab_arrays(dng->from());
5420 } else {
5421 assert(_survivor_chunk_index == 0, "Error");
5422 }
5424 // To space
5425 {
5426 SequentialSubTasksDone* pst = dng->to()->par_seq_tasks();
5427 assert(!pst->valid(), "Clobbering existing data?");
5428 pst->set_par_threads(n_threads);
5429 pst->set_n_tasks(1);
5430 assert(pst->valid(), "Error");
5431 }
5433 // From space
5434 {
5435 SequentialSubTasksDone* pst = dng->from()->par_seq_tasks();
5436 assert(!pst->valid(), "Clobbering existing data?");
5437 size_t n_tasks = _survivor_chunk_index + 1;
5438 assert(n_tasks == 1 || _survivor_chunk_array != NULL, "Error");
5439 pst->set_par_threads(n_threads);
5440 pst->set_n_tasks((int)n_tasks);
5441 assert(pst->valid(), "Error");
5442 }
5443 }
5445 // Parallel version of remark
5446 void CMSCollector::do_remark_parallel() {
5447 GenCollectedHeap* gch = GenCollectedHeap::heap();
5448 WorkGang* workers = gch->workers();
5449 assert(workers != NULL, "Need parallel worker threads.");
5450 int n_workers = workers->total_workers();
5451 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
5452 CompactibleFreeListSpace* perm_space = _permGen->cmsSpace();
5454 CMSParRemarkTask tsk(this,
5455 cms_space, perm_space,
5456 n_workers, workers, task_queues());
5458 // Set up for parallel process_strong_roots work.
5459 gch->set_par_threads(n_workers);
5460 // We won't be iterating over the cards in the card table updating
5461 // the younger_gen cards, so we shouldn't call the following else
5462 // the verification code as well as subsequent younger_refs_iterate
5463 // code would get confused. XXX
5464 // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
5466 // The young gen rescan work will not be done as part of
5467 // process_strong_roots (which currently doesn't know how to
5468 // parallelize such a scan), but rather will be broken up into
5469 // a set of parallel tasks (via the sampling that the [abortable]
5470 // preclean phase did of EdenSpace, plus the [two] tasks of
5471 // scanning the [two] survivor spaces). Further fine-grain
5472 // parallelization of the scanning of the survivor spaces
5473 // themselves, and of precleaning of the younger gen itself
5474 // is deferred to the future.
5475 initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
5477 // The dirty card rescan work is broken up into a "sequence"
5478 // of parallel tasks (per constituent space) that are dynamically
5479 // claimed by the parallel threads.
5480 cms_space->initialize_sequential_subtasks_for_rescan(n_workers);
5481 perm_space->initialize_sequential_subtasks_for_rescan(n_workers);
5483 // It turns out that even when we're using 1 thread, doing the work in a
5484 // separate thread causes wide variance in run times. We can't help this
5485 // in the multi-threaded case, but we special-case n=1 here to get
5486 // repeatable measurements of the 1-thread overhead of the parallel code.
5487 if (n_workers > 1) {
5488 // Make refs discovery MT-safe
5489 ReferenceProcessorMTMutator mt(ref_processor(), true);
5490 GenCollectedHeap::StrongRootsScope srs(gch);
5491 workers->run_task(&tsk);
5492 } else {
5493 GenCollectedHeap::StrongRootsScope srs(gch);
5494 tsk.work(0);
5495 }
5496 gch->set_par_threads(0); // 0 ==> non-parallel.
5497 // restore, single-threaded for now, any preserved marks
5498 // as a result of work_q overflow
5499 restore_preserved_marks_if_any();
5500 }
5502 // Non-parallel version of remark
5503 void CMSCollector::do_remark_non_parallel() {
5504 ResourceMark rm;
5505 HandleMark hm;
5506 GenCollectedHeap* gch = GenCollectedHeap::heap();
5507 MarkRefsIntoAndScanClosure
5508 mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
5509 &_markStack, &_revisitStack, this,
5510 false /* should_yield */, false /* not precleaning */);
5511 MarkFromDirtyCardsClosure
5512 markFromDirtyCardsClosure(this, _span,
5513 NULL, // space is set further below
5514 &_markBitMap, &_markStack, &_revisitStack,
5515 &mrias_cl);
5516 {
5517 TraceTime t("grey object rescan", PrintGCDetails, false, gclog_or_tty);
5518 // Iterate over the dirty cards, setting the corresponding bits in the
5519 // mod union table.
5520 {
5521 ModUnionClosure modUnionClosure(&_modUnionTable);
5522 _ct->ct_bs()->dirty_card_iterate(
5523 _cmsGen->used_region(),
5524 &modUnionClosure);
5525 _ct->ct_bs()->dirty_card_iterate(
5526 _permGen->used_region(),
5527 &modUnionClosure);
5528 }
5529 // Having transferred these marks into the modUnionTable, we just need
5530 // to rescan the marked objects on the dirty cards in the modUnionTable.
5531 // The initial marking may have been done during an asynchronous
5532 // collection so there may be dirty bits in the mod-union table.
5533 const int alignment =
5534 CardTableModRefBS::card_size * BitsPerWord;
5535 {
5536 // ... First handle dirty cards in CMS gen
5537 markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace());
5538 MemRegion ur = _cmsGen->used_region();
5539 HeapWord* lb = ur.start();
5540 HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
5541 MemRegion cms_span(lb, ub);
5542 _modUnionTable.dirty_range_iterate_clear(cms_span,
5543 &markFromDirtyCardsClosure);
5544 verify_work_stacks_empty();
5545 if (PrintCMSStatistics != 0) {
5546 gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in cms gen) ",
5547 markFromDirtyCardsClosure.num_dirty_cards());
5548 }
5549 }
5550 {
5551 // .. and then repeat for dirty cards in perm gen
5552 markFromDirtyCardsClosure.set_space(_permGen->cmsSpace());
5553 MemRegion ur = _permGen->used_region();
5554 HeapWord* lb = ur.start();
5555 HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
5556 MemRegion perm_span(lb, ub);
5557 _modUnionTable.dirty_range_iterate_clear(perm_span,
5558 &markFromDirtyCardsClosure);
5559 verify_work_stacks_empty();
5560 if (PrintCMSStatistics != 0) {
5561 gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in perm gen) ",
5562 markFromDirtyCardsClosure.num_dirty_cards());
5563 }
5564 }
5565 }
5566 if (VerifyDuringGC &&
5567 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5568 HandleMark hm; // Discard invalid handles created during verification
5569 Universe::verify(true);
5570 }
5571 {
5572 TraceTime t("root rescan", PrintGCDetails, false, gclog_or_tty);
5574 verify_work_stacks_empty();
5576 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
5577 GenCollectedHeap::StrongRootsScope srs(gch);
5578 gch->gen_process_strong_roots(_cmsGen->level(),
5579 true, // younger gens as roots
5580 false, // use the local StrongRootsScope
5581 true, // collecting perm gen
5582 SharedHeap::ScanningOption(roots_scanning_options()),
5583 &mrias_cl,
5584 true, // walk code active on stacks
5585 NULL);
5586 assert(should_unload_classes()
5587 || (roots_scanning_options() & SharedHeap::SO_CodeCache),
5588 "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5589 }
5590 verify_work_stacks_empty();
5591 // Restore evacuated mark words, if any, used for overflow list links
5592 if (!CMSOverflowEarlyRestoration) {
5593 restore_preserved_marks_if_any();
5594 }
5595 verify_overflow_empty();
5596 }
5598 ////////////////////////////////////////////////////////
5599 // Parallel Reference Processing Task Proxy Class
5600 ////////////////////////////////////////////////////////
5601 class CMSRefProcTaskProxy: public AbstractGangTask {
5602 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
5603 CMSCollector* _collector;
5604 CMSBitMap* _mark_bit_map;
5605 const MemRegion _span;
5606 OopTaskQueueSet* _task_queues;
5607 ParallelTaskTerminator _term;
5608 ProcessTask& _task;
5610 public:
5611 CMSRefProcTaskProxy(ProcessTask& task,
5612 CMSCollector* collector,
5613 const MemRegion& span,
5614 CMSBitMap* mark_bit_map,
5615 int total_workers,
5616 OopTaskQueueSet* task_queues):
5617 AbstractGangTask("Process referents by policy in parallel"),
5618 _task(task),
5619 _collector(collector), _span(span), _mark_bit_map(mark_bit_map),
5620 _task_queues(task_queues),
5621 _term(total_workers, task_queues)
5622 {
5623 assert(_collector->_span.equals(_span) && !_span.is_empty(),
5624 "Inconsistency in _span");
5625 }
5627 OopTaskQueueSet* task_queues() { return _task_queues; }
5629 OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
5631 ParallelTaskTerminator* terminator() { return &_term; }
5633 void do_work_steal(int i,
5634 CMSParDrainMarkingStackClosure* drain,
5635 CMSParKeepAliveClosure* keep_alive,
5636 int* seed);
5638 virtual void work(int i);
5639 };
5641 void CMSRefProcTaskProxy::work(int i) {
5642 assert(_collector->_span.equals(_span), "Inconsistency in _span");
5643 CMSParKeepAliveClosure par_keep_alive(_collector, _span,
5644 _mark_bit_map,
5645 &_collector->_revisitStack,
5646 work_queue(i));
5647 CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span,
5648 _mark_bit_map,
5649 &_collector->_revisitStack,
5650 work_queue(i));
5651 CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map);
5652 _task.work(i, is_alive_closure, par_keep_alive, par_drain_stack);
5653 if (_task.marks_oops_alive()) {
5654 do_work_steal(i, &par_drain_stack, &par_keep_alive,
5655 _collector->hash_seed(i));
5656 }
5657 assert(work_queue(i)->size() == 0, "work_queue should be empty");
5658 assert(_collector->_overflow_list == NULL, "non-empty _overflow_list");
5659 }
5661 class CMSRefEnqueueTaskProxy: public AbstractGangTask {
5662 typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
5663 EnqueueTask& _task;
5665 public:
5666 CMSRefEnqueueTaskProxy(EnqueueTask& task)
5667 : AbstractGangTask("Enqueue reference objects in parallel"),
5668 _task(task)
5669 { }
5671 virtual void work(int i)
5672 {
5673 _task.work(i);
5674 }
5675 };
5677 CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector,
5678 MemRegion span, CMSBitMap* bit_map, CMSMarkStack* revisit_stack,
5679 OopTaskQueue* work_queue):
5680 Par_KlassRememberingOopClosure(collector, NULL, revisit_stack),
5681 _span(span),
5682 _bit_map(bit_map),
5683 _work_queue(work_queue),
5684 _mark_and_push(collector, span, bit_map, revisit_stack, work_queue),
5685 _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
5686 (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads)))
5687 { }
5689 // . see if we can share work_queues with ParNew? XXX
5690 void CMSRefProcTaskProxy::do_work_steal(int i,
5691 CMSParDrainMarkingStackClosure* drain,
5692 CMSParKeepAliveClosure* keep_alive,
5693 int* seed) {
5694 OopTaskQueue* work_q = work_queue(i);
5695 NOT_PRODUCT(int num_steals = 0;)
5696 oop obj_to_scan;
5698 while (true) {
5699 // Completely finish any left over work from (an) earlier round(s)
5700 drain->trim_queue(0);
5701 size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
5702 (size_t)ParGCDesiredObjsFromOverflowList);
5703 // Now check if there's any work in the overflow list
5704 if (_collector->par_take_from_overflow_list(num_from_overflow_list,
5705 work_q)) {
5706 // Found something in global overflow list;
5707 // not yet ready to go stealing work from others.
5708 // We'd like to assert(work_q->size() != 0, ...)
5709 // because we just took work from the overflow list,
5710 // but of course we can't, since all of that might have
5711 // been already stolen from us.
5712 continue;
5713 }
5714 // Verify that we have no work before we resort to stealing
5715 assert(work_q->size() == 0, "Have work, shouldn't steal");
5716 // Try to steal from other queues that have work
5717 if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
5718 NOT_PRODUCT(num_steals++;)
5719 assert(obj_to_scan->is_oop(), "Oops, not an oop!");
5720 assert(_mark_bit_map->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
5721 // Do scanning work
5722 obj_to_scan->oop_iterate(keep_alive);
5723 // Loop around, finish this work, and try to steal some more
5724 } else if (terminator()->offer_termination()) {
5725 break; // nirvana from the infinite cycle
5726 }
5727 }
5728 NOT_PRODUCT(
5729 if (PrintCMSStatistics != 0) {
5730 gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
5731 }
5732 )
5733 }
5735 void CMSRefProcTaskExecutor::execute(ProcessTask& task)
5736 {
5737 GenCollectedHeap* gch = GenCollectedHeap::heap();
5738 WorkGang* workers = gch->workers();
5739 assert(workers != NULL, "Need parallel worker threads.");
5740 int n_workers = workers->total_workers();
5741 CMSRefProcTaskProxy rp_task(task, &_collector,
5742 _collector.ref_processor()->span(),
5743 _collector.markBitMap(),
5744 n_workers, _collector.task_queues());
5745 workers->run_task(&rp_task);
5746 }
5748 void CMSRefProcTaskExecutor::execute(EnqueueTask& task)
5749 {
5751 GenCollectedHeap* gch = GenCollectedHeap::heap();
5752 WorkGang* workers = gch->workers();
5753 assert(workers != NULL, "Need parallel worker threads.");
5754 CMSRefEnqueueTaskProxy enq_task(task);
5755 workers->run_task(&enq_task);
5756 }
5758 void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
5760 ResourceMark rm;
5761 HandleMark hm;
5763 ReferenceProcessor* rp = ref_processor();
5764 assert(rp->span().equals(_span), "Spans should be equal");
5765 assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
5766 // Process weak references.
5767 rp->setup_policy(clear_all_soft_refs);
5768 verify_work_stacks_empty();
5770 CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
5771 &_markStack, &_revisitStack,
5772 false /* !preclean */);
5773 CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
5774 _span, &_markBitMap, &_markStack,
5775 &cmsKeepAliveClosure, false /* !preclean */);
5776 {
5777 TraceTime t("weak refs processing", PrintGCDetails, false, gclog_or_tty);
5778 if (rp->processing_is_mt()) {
5779 CMSRefProcTaskExecutor task_executor(*this);
5780 rp->process_discovered_references(&_is_alive_closure,
5781 &cmsKeepAliveClosure,
5782 &cmsDrainMarkingStackClosure,
5783 &task_executor);
5784 } else {
5785 rp->process_discovered_references(&_is_alive_closure,
5786 &cmsKeepAliveClosure,
5787 &cmsDrainMarkingStackClosure,
5788 NULL);
5789 }
5790 verify_work_stacks_empty();
5791 }
5793 if (should_unload_classes()) {
5794 {
5795 TraceTime t("class unloading", PrintGCDetails, false, gclog_or_tty);
5797 // Follow SystemDictionary roots and unload classes
5798 bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
5800 // Follow CodeCache roots and unload any methods marked for unloading
5801 CodeCache::do_unloading(&_is_alive_closure,
5802 &cmsKeepAliveClosure,
5803 purged_class);
5805 cmsDrainMarkingStackClosure.do_void();
5806 verify_work_stacks_empty();
5808 // Update subklass/sibling/implementor links in KlassKlass descendants
5809 assert(!_revisitStack.isEmpty(), "revisit stack should not be empty");
5810 oop k;
5811 while ((k = _revisitStack.pop()) != NULL) {
5812 ((Klass*)(oopDesc*)k)->follow_weak_klass_links(
5813 &_is_alive_closure,
5814 &cmsKeepAliveClosure);
5815 }
5816 assert(!ClassUnloading ||
5817 (_markStack.isEmpty() && overflow_list_is_empty()),
5818 "Should not have found new reachable objects");
5819 assert(_revisitStack.isEmpty(), "revisit stack should have been drained");
5820 cmsDrainMarkingStackClosure.do_void();
5821 verify_work_stacks_empty();
5822 }
5824 {
5825 TraceTime t("scrub symbol & string tables", PrintGCDetails, false, gclog_or_tty);
5826 // Now clean up stale oops in SymbolTable and StringTable
5827 SymbolTable::unlink(&_is_alive_closure);
5828 StringTable::unlink(&_is_alive_closure);
5829 }
5830 }
5832 verify_work_stacks_empty();
5833 // Restore any preserved marks as a result of mark stack or
5834 // work queue overflow
5835 restore_preserved_marks_if_any(); // done single-threaded for now
5837 rp->set_enqueuing_is_done(true);
5838 if (rp->processing_is_mt()) {
5839 CMSRefProcTaskExecutor task_executor(*this);
5840 rp->enqueue_discovered_references(&task_executor);
5841 } else {
5842 rp->enqueue_discovered_references(NULL);
5843 }
5844 rp->verify_no_references_recorded();
5845 assert(!rp->discovery_enabled(), "should have been disabled");
5847 // JVMTI object tagging is based on JNI weak refs. If any of these
5848 // refs were cleared then JVMTI needs to update its maps and
5849 // maybe post ObjectFrees to agents.
5850 JvmtiExport::cms_ref_processing_epilogue();
5851 }
5853 #ifndef PRODUCT
5854 void CMSCollector::check_correct_thread_executing() {
5855 Thread* t = Thread::current();
5856 // Only the VM thread or the CMS thread should be here.
5857 assert(t->is_ConcurrentGC_thread() || t->is_VM_thread(),
5858 "Unexpected thread type");
5859   // If this is the VM thread, the foreground collector
5860 // should not be waiting. Note that _foregroundGCIsActive is
5861 // true while the foreground collector is waiting.
5862 if (_foregroundGCShouldWait) {
5863 // We cannot be the VM thread
5864 assert(t->is_ConcurrentGC_thread(),
5865 "Should be CMS thread");
5866 } else {
5867 // We can be the CMS thread only if we are in a stop-world
5868 // phase of CMS collection.
5869 if (t->is_ConcurrentGC_thread()) {
5870 assert(_collectorState == InitialMarking ||
5871 _collectorState == FinalMarking,
5872 "Should be a stop-world phase");
5873 // The CMS thread should be holding the CMS_token.
5874 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5875 "Potential interference with concurrently "
5876 "executing VM thread");
5877 }
5878 }
5879 }
5880 #endif
5882 void CMSCollector::sweep(bool asynch) {
5883 assert(_collectorState == Sweeping, "just checking");
5884 check_correct_thread_executing();
5885 verify_work_stacks_empty();
5886 verify_overflow_empty();
5887 increment_sweep_count();
5888 _inter_sweep_timer.stop();
5889 _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
5890 size_policy()->avg_cms_free_at_sweep()->sample(_cmsGen->free());
5892 // PermGen verification support: If perm gen sweeping is disabled in
5893 // this cycle, we preserve the perm gen object "deadness" information
5894 // in the perm_gen_verify_bit_map. In order to do that we traverse
5895 // all blocks in perm gen and mark all dead objects.
5896 if (verifying() && !should_unload_classes()) {
5897 assert(perm_gen_verify_bit_map()->sizeInBits() != 0,
5898 "Should have already been allocated");
5899 MarkDeadObjectsClosure mdo(this, _permGen->cmsSpace(),
5900 markBitMap(), perm_gen_verify_bit_map());
5901 if (asynch) {
5902 CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
5903 bitMapLock());
5904 _permGen->cmsSpace()->blk_iterate(&mdo);
5905 } else {
5906 // In the case of synchronous sweep, we already have
5907 // the requisite locks/tokens.
5908 _permGen->cmsSpace()->blk_iterate(&mdo);
5909 }
5910 }
5912 assert(!_intra_sweep_timer.is_active(), "Should not be active");
5913 _intra_sweep_timer.reset();
5914 _intra_sweep_timer.start();
5915 if (asynch) {
5916 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
5917 CMSPhaseAccounting pa(this, "sweep", !PrintGCDetails);
5918 // First sweep the old gen then the perm gen
5919 {
5920 CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
5921 bitMapLock());
5922 sweepWork(_cmsGen, asynch);
5923 }
5925 // Now repeat for perm gen
5926 if (should_unload_classes()) {
5927 CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
5928 bitMapLock());
5929 sweepWork(_permGen, asynch);
5930 }
5932 // Update Universe::_heap_*_at_gc figures.
5933 // We need all the free list locks to make the abstract state
5934 // transition from Sweeping to Resetting. See detailed note
5935 // further below.
5936 {
5937 CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
5938 _permGen->freelistLock());
5939 // Update heap occupancy information which is used as
5940 // input to soft ref clearing policy at the next gc.
5941 Universe::update_heap_info_at_gc();
5942 _collectorState = Resizing;
5943 }
5944 } else {
5945 // already have needed locks
5946 sweepWork(_cmsGen, asynch);
5948 if (should_unload_classes()) {
5949 sweepWork(_permGen, asynch);
5950 }
5951 // Update heap occupancy information which is used as
5952 // input to soft ref clearing policy at the next gc.
5953 Universe::update_heap_info_at_gc();
5954 _collectorState = Resizing;
5955 }
5956 verify_work_stacks_empty();
5957 verify_overflow_empty();
5959 _intra_sweep_timer.stop();
5960 _intra_sweep_estimate.sample(_intra_sweep_timer.seconds());
5962 _inter_sweep_timer.reset();
5963 _inter_sweep_timer.start();
5965 update_time_of_last_gc(os::javaTimeMillis());
5967 // NOTE on abstract state transitions:
5968 // Mutators allocate-live and/or mark the mod-union table dirty
5969 // based on the state of the collection. The former is done in
5970 // the interval [Marking, Sweeping] and the latter in the interval
5971 // [Marking, Sweeping). Thus the transitions into the Marking state
5972 // and out of the Sweeping state must be synchronously visible
5973 // globally to the mutators.
5974 // The transition into the Marking state happens with the world
5975 // stopped so the mutators will globally see it. Sweeping is
5976 // done asynchronously by the background collector so the transition
5977 // from the Sweeping state to the Resizing state must be done
5978 // under the freelistLock (as is the check for whether to
5979 // allocate-live and whether to dirty the mod-union table).
5980 assert(_collectorState == Resizing, "Change of collector state to"
5981 " Resizing must be done under the freelistLocks (plural)");
5983 // Now that sweeping has been completed, if the GCH's
5984 // incremental_collection_will_fail flag is set, clear it,
5985 // thus inviting a younger gen collection to promote into
5986 // this generation. If such a promotion may still fail,
5987 // the flag will be set again when a young collection is
5988 // attempted.
5989 // I think the incremental_collection_will_fail flag's use
5990   // is specific to a 2-generation collection policy, so I'll
5991 // assert that that's the configuration we are operating within.
5992 // The use of the flag can and should be generalized appropriately
5993 // in the future to deal with a general n-generation system.
5995 GenCollectedHeap* gch = GenCollectedHeap::heap();
5996 assert(gch->collector_policy()->is_two_generation_policy(),
5997 "Resetting of incremental_collection_will_fail flag"
5998 " may be incorrect otherwise");
5999 gch->clear_incremental_collection_will_fail();
6000 gch->update_full_collections_completed(_collection_count_start);
6001 }
6003 // FIX ME!!! Looks like this belongs in CFLSpace, with
6004 // CMSGen merely delegating to it.
6005 void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
6006 double nearLargestPercent = FLSLargestBlockCoalesceProximity;
6007 HeapWord* minAddr = _cmsSpace->bottom();
6008 HeapWord* largestAddr =
6009 (HeapWord*) _cmsSpace->dictionary()->findLargestDict();
6010 if (largestAddr == NULL) {
6011 // The dictionary appears to be empty. In this case
6012 // try to coalesce at the end of the heap.
6013 largestAddr = _cmsSpace->end();
6014 }
6015 size_t largestOffset = pointer_delta(largestAddr, minAddr);
6016 size_t nearLargestOffset =
6017 (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
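  // Illustrative note (not in the original source): with a hypothetical
  // proximity of 0.90 and a largest block starting 1M words above bottom(),
  // the hint set below becomes roughly minAddr + 900K words - MinChunkSize.
  // The unsigned subtraction above assumes
  // largestOffset * nearLargestPercent >= MinChunkSize.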
6018 if (PrintFLSStatistics != 0) {
6019 gclog_or_tty->print_cr(
6020 "CMS: Large Block: " PTR_FORMAT ";"
6021 " Proximity: " PTR_FORMAT " -> " PTR_FORMAT,
6022 largestAddr,
6023 _cmsSpace->nearLargestChunk(), minAddr + nearLargestOffset);
6024 }
6025 _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
6026 }
6028 bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) {
6029 return addr >= _cmsSpace->nearLargestChunk();
6030 }
6032 FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
6033 return _cmsSpace->find_chunk_at_end();
6034 }
6036 void ConcurrentMarkSweepGeneration::update_gc_stats(int current_level,
6037 bool full) {
6038 // The next lower level has been collected. Gather any statistics
6039 // that are of interest at this point.
6040 if (!full && (current_level + 1) == level()) {
6041 // Gather statistics on the young generation collection.
6042 collector()->stats().record_gc0_end(used());
6043 }
6044 }
6046 CMSAdaptiveSizePolicy* ConcurrentMarkSweepGeneration::size_policy() {
6047 GenCollectedHeap* gch = GenCollectedHeap::heap();
6048 assert(gch->kind() == CollectedHeap::GenCollectedHeap,
6049 "Wrong type of heap");
6050 CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
6051 gch->gen_policy()->size_policy();
6052 assert(sp->is_gc_cms_adaptive_size_policy(),
6053 "Wrong type of size policy");
6054 return sp;
6055 }
6057 void ConcurrentMarkSweepGeneration::rotate_debug_collection_type() {
6058 if (PrintGCDetails && Verbose) {
6059 gclog_or_tty->print("Rotate from %d ", _debug_collection_type);
6060 }
6061 _debug_collection_type = (CollectionTypes) (_debug_collection_type + 1);
6062 _debug_collection_type =
6063 (CollectionTypes) (_debug_collection_type % Unknown_collection_type);
6064 if (PrintGCDetails && Verbose) {
6065 gclog_or_tty->print_cr("to %d ", _debug_collection_type);
6066 }
6067 }
6069 void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen,
6070 bool asynch) {
6071 // We iterate over the space(s) underlying this generation,
6072 // checking the mark bit map to see if the bits corresponding
6073 // to specific blocks are marked or not. Blocks that are
6074 // marked are live and are not swept up. All remaining blocks
6075 // are swept up, with coalescing on-the-fly as we sweep up
6076 // contiguous free and/or garbage blocks:
6077 // We need to ensure that the sweeper synchronizes with allocators
6078 // and stop-the-world collectors. In particular, the following
6079 // locks are used:
6080 // . CMS token: if this is held, a stop the world collection cannot occur
6081 // . freelistLock: if this is held no allocation can occur from this
6082 // generation by another thread
6083   //  . bitMapLock: if this is held, no other thread can access or update
6084   //                the marking bit map
6086 // Note that we need to hold the freelistLock if we use
6087 // block iterate below; else the iterator might go awry if
6088 // a mutator (or promotion) causes block contents to change
6089 // (for instance if the allocator divvies up a block).
6090 // If we hold the free list lock, for all practical purposes
6091 // young generation GC's can't occur (they'll usually need to
6092 // promote), so we might as well prevent all young generation
6093 // GC's while we do a sweeping step. For the same reason, we might
6094   // as well take the bit map lock for the entire duration of the sweep.
6096 // check that we hold the requisite locks
6097 assert(have_cms_token(), "Should hold cms token");
6098 assert( (asynch && ConcurrentMarkSweepThread::cms_thread_has_cms_token())
6099 || (!asynch && ConcurrentMarkSweepThread::vm_thread_has_cms_token()),
6100 "Should possess CMS token to sweep");
6101 assert_lock_strong(gen->freelistLock());
6102 assert_lock_strong(bitMapLock());
6104 assert(!_inter_sweep_timer.is_active(), "Was switched off in an outer context");
6105 assert(_intra_sweep_timer.is_active(), "Was switched on in an outer context");
6106 gen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
6107 _inter_sweep_estimate.padded_average(),
6108 _intra_sweep_estimate.padded_average());
6109 gen->setNearLargestChunk();
6111 {
6112 SweepClosure sweepClosure(this, gen, &_markBitMap,
6113 CMSYield && asynch);
6114 gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
6115 // We need to free-up/coalesce garbage/blocks from a
6116 // co-terminal free run. This is done in the SweepClosure
6117 // destructor; so, do not remove this scope, else the
6118 // end-of-sweep-census below will be off by a little bit.
6119 }
6120 gen->cmsSpace()->sweep_completed();
6121 gen->cmsSpace()->endSweepFLCensus(sweep_count());
6122 if (should_unload_classes()) { // unloaded classes this cycle,
6123 _concurrent_cycles_since_last_unload = 0; // ... reset count
6124 } else { // did not unload classes,
6125 _concurrent_cycles_since_last_unload++; // ... increment count
6126 }
6127 }
6129 // Reset CMS data structures (for now just the marking bit map)
6130 // preparatory for the next cycle.
6131 void CMSCollector::reset(bool asynch) {
6132 GenCollectedHeap* gch = GenCollectedHeap::heap();
6133 CMSAdaptiveSizePolicy* sp = size_policy();
6134 AdaptiveSizePolicyOutput(sp, gch->total_collections());
6135 if (asynch) {
6136 CMSTokenSyncWithLocks ts(true, bitMapLock());
6138 // If the state is not "Resetting", the foreground thread
6139 // has done a collection and the resetting.
6140 if (_collectorState != Resetting) {
6141 assert(_collectorState == Idling, "The state should only change"
6142 " because the foreground collector has finished the collection");
6143 return;
6144 }
6146 // Clear the mark bitmap (no grey objects to start with)
6147 // for the next cycle.
6148 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
6149 CMSPhaseAccounting cmspa(this, "reset", !PrintGCDetails);
6151 HeapWord* curAddr = _markBitMap.startWord();
6152 while (curAddr < _markBitMap.endWord()) {
6153 size_t remaining = pointer_delta(_markBitMap.endWord(), curAddr);
6154 MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
6155 _markBitMap.clear_large_range(chunk);
6156 if (ConcurrentMarkSweepThread::should_yield() &&
6157 !foregroundGCIsActive() &&
6158 CMSYield) {
6159 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6160 "CMS thread should hold CMS token");
6161 assert_lock_strong(bitMapLock());
6162 bitMapLock()->unlock();
6163 ConcurrentMarkSweepThread::desynchronize(true);
6164 ConcurrentMarkSweepThread::acknowledge_yield_request();
6165 stopTimer();
6166 if (PrintCMSStatistics != 0) {
6167 incrementYields();
6168 }
6169 icms_wait();
6171 // See the comment in coordinator_yield()
6172 for (unsigned i = 0; i < CMSYieldSleepCount &&
6173 ConcurrentMarkSweepThread::should_yield() &&
6174 !CMSCollector::foregroundGCIsActive(); ++i) {
6175 os::sleep(Thread::current(), 1, false);
6176 ConcurrentMarkSweepThread::acknowledge_yield_request();
6177 }
6179 ConcurrentMarkSweepThread::synchronize(true);
6180 bitMapLock()->lock_without_safepoint_check();
6181 startTimer();
6182 }
6183 curAddr = chunk.end();
6184 }
6185 _collectorState = Idling;
6186 } else {
6187 // already have the lock
6188 assert(_collectorState == Resetting, "just checking");
6189 assert_lock_strong(bitMapLock());
6190 _markBitMap.clear_all();
6191 _collectorState = Idling;
6192 }
6194 // Stop incremental mode after a cycle completes, so that any future cycles
6195 // are triggered by allocation.
6196 stop_icms();
6198 NOT_PRODUCT(
6199 if (RotateCMSCollectionTypes) {
6200 _cmsGen->rotate_debug_collection_type();
6201 }
6202 )
6203 }
6205 void CMSCollector::do_CMS_operation(CMS_op_type op) {
6206 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
6207 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
6208 TraceTime t("GC", PrintGC, !PrintGCDetails, gclog_or_tty);
6209 TraceCollectorStats tcs(counters());
6211 switch (op) {
6212 case CMS_op_checkpointRootsInitial: {
6213 checkpointRootsInitial(true); // asynch
6214 if (PrintGC) {
6215 _cmsGen->printOccupancy("initial-mark");
6216 }
6217 break;
6218 }
6219 case CMS_op_checkpointRootsFinal: {
6220 checkpointRootsFinal(true, // asynch
6221 false, // !clear_all_soft_refs
6222 false); // !init_mark_was_synchronous
6223 if (PrintGC) {
6224 _cmsGen->printOccupancy("remark");
6225 }
6226 break;
6227 }
6228 default:
6229 fatal("No such CMS_op");
6230 }
6231 }
6233 #ifndef PRODUCT
6234 size_t const CMSCollector::skip_header_HeapWords() {
6235 return FreeChunk::header_size();
6236 }
6238 // Try to collect here conditions that should hold when
6239 // CMS thread is exiting. The idea is that the foreground GC
6240 // thread should not be blocked if it wants to terminate
6241 // the CMS thread and yet continue to run the VM for a while
6242 // after that.
6243 void CMSCollector::verify_ok_to_terminate() const {
6244 assert(Thread::current()->is_ConcurrentGC_thread(),
6245 "should be called by CMS thread");
6246 assert(!_foregroundGCShouldWait, "should be false");
6247 // We could check here that all the various low-level locks
6248 // are not held by the CMS thread, but that is overkill; see
6249 // also CMSThread::verify_ok_to_terminate() where the CGC_lock
6250 // is checked.
6251 }
6252 #endif
6254 size_t CMSCollector::block_size_using_printezis_bits(HeapWord* addr) const {
6255 assert(_markBitMap.isMarked(addr) && _markBitMap.isMarked(addr + 1),
6256 "missing Printezis mark?");
6257 HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
6258 size_t size = pointer_delta(nextOneAddr + 1, addr);
6259 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6260 "alignment problem");
6261 assert(size >= 3, "Necessary for Printezis marks to work");
6262 return size;
6263 }
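// Illustrative example (not part of the original source): with Printezis
// marks at addr and addr+1, and the next marked word at or beyond addr+2
// found at addr+9, the block spans [addr, addr+10), i.e.
// size == pointer_delta(addr+9+1, addr) == 10 heap words.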
6265 // A variant of the above (block_size_using_printezis_bits()) except
6266 // that we return 0 if the P-bits are not yet set.
6267 size_t CMSCollector::block_size_if_printezis_bits(HeapWord* addr) const {
6268 if (_markBitMap.isMarked(addr)) {
6269 assert(_markBitMap.isMarked(addr + 1), "Missing Printezis bit?");
6270 HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
6271 size_t size = pointer_delta(nextOneAddr + 1, addr);
6272 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6273 "alignment problem");
6274 assert(size >= 3, "Necessary for Printezis marks to work");
6275 return size;
6276 } else {
6277 assert(!_markBitMap.isMarked(addr + 1), "Bit map inconsistency?");
6278 return 0;
6279 }
6280 }
6282 HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const {
6283 size_t sz = 0;
6284 oop p = (oop)addr;
6285 if (p->klass_or_null() != NULL && p->is_parsable()) {
6286 sz = CompactibleFreeListSpace::adjustObjectSize(p->size());
6287 } else {
6288 sz = block_size_using_printezis_bits(addr);
6289 }
6290 assert(sz > 0, "size must be nonzero");
6291 HeapWord* next_block = addr + sz;
6292 HeapWord* next_card = (HeapWord*)round_to((uintptr_t)next_block,
6293 CardTableModRefBS::card_size);
6294 assert(round_down((uintptr_t)addr, CardTableModRefBS::card_size) <
6295 round_down((uintptr_t)next_card, CardTableModRefBS::card_size),
6296 "must be different cards");
6297 return next_card;
6298 }
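// Illustrative note (not in the original source): assuming the usual
// 512-byte card size, a 10-word block starting at byte address 0x1000 on a
// 64-bit VM ends at 0x1050, so the next card start returned above is the
// card boundary at 0x1200.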
6301 // CMS Bit Map Wrapper /////////////////////////////////////////
6303 // Construct a CMS bit map infrastructure, but don't create the
6304 // bit vector itself. That is done by a separate call to CMSBitMap::allocate()
6305 // further below.
6306 CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name):
6307 _bm(),
6308 _shifter(shifter),
6309 _lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true) : NULL)
6310 {
6311 _bmStartWord = 0;
6312 _bmWordSize = 0;
6313 }
6315 bool CMSBitMap::allocate(MemRegion mr) {
6316 _bmStartWord = mr.start();
6317 _bmWordSize = mr.word_size();
6318 ReservedSpace brs(ReservedSpace::allocation_align_size_up(
6319 (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
6320 if (!brs.is_reserved()) {
6321 warning("CMS bit map allocation failure");
6322 return false;
6323 }
6324   // For now we'll just commit all of the bit map up front.
6325 // Later on we'll try to be more parsimonious with swap.
6326 if (!_virtual_space.initialize(brs, brs.size())) {
6327 warning("CMS bit map backing store failure");
6328 return false;
6329 }
6330 assert(_virtual_space.committed_size() == brs.size(),
6331 "didn't reserve backing store for all of CMS bit map?");
6332 _bm.set_map((BitMap::bm_word_t*)_virtual_space.low());
6333 assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
6334 _bmWordSize, "inconsistency in bit map sizing");
6335 _bm.set_size(_bmWordSize >> _shifter);
6337 // bm.clear(); // can we rely on getting zero'd memory? verify below
6338 assert(isAllClear(),
6339 "Expected zero'd memory from ReservedSpace constructor");
6340 assert(_bm.size() == heapWordDiffToOffsetDiff(sizeInWords()),
6341 "consistency check");
6342 return true;
6343 }
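// Sizing note (illustrative, not part of the original source): the map keeps
// one bit per (1 << _shifter) heap words, so the reservation above is
// (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1 bytes. With _shifter == 0
// that is roughly one byte of bit map per 8 heap words, e.g. about 16MB of
// bit map for a 1GB generation on a 64-bit VM.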
6345 void CMSBitMap::dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl) {
6346 HeapWord *next_addr, *end_addr, *last_addr;
6347 assert_locked();
6348 assert(covers(mr), "out-of-range error");
6349 // XXX assert that start and end are appropriately aligned
6350 for (next_addr = mr.start(), end_addr = mr.end();
6351 next_addr < end_addr; next_addr = last_addr) {
6352 MemRegion dirty_region = getAndClearMarkedRegion(next_addr, end_addr);
6353 last_addr = dirty_region.end();
6354 if (!dirty_region.is_empty()) {
6355 cl->do_MemRegion(dirty_region);
6356 } else {
6357 assert(last_addr == end_addr, "program logic");
6358 return;
6359 }
6360 }
6361 }
6363 #ifndef PRODUCT
6364 void CMSBitMap::assert_locked() const {
6365 CMSLockVerifier::assert_locked(lock());
6366 }
6368 bool CMSBitMap::covers(MemRegion mr) const {
6369 // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
6370 assert((size_t)_bm.size() == (_bmWordSize >> _shifter),
6371 "size inconsistency");
6372 return (mr.start() >= _bmStartWord) &&
6373 (mr.end() <= endWord());
6374 }
6376 bool CMSBitMap::covers(HeapWord* start, size_t size) const {
6377 return (start >= _bmStartWord && (start + size) <= endWord());
6378 }
6380 void CMSBitMap::verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) {
6381 // verify that there are no 1 bits in the interval [left, right)
6382 FalseBitMapClosure falseBitMapClosure;
6383 iterate(&falseBitMapClosure, left, right);
6384 }
6386 void CMSBitMap::region_invariant(MemRegion mr)
6387 {
6388 assert_locked();
6389 // mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
6390 assert(!mr.is_empty(), "unexpected empty region");
6391 assert(covers(mr), "mr should be covered by bit map");
6392 // convert address range into offset range
6393 size_t start_ofs = heapWordToOffset(mr.start());
6394 // Make sure that end() is appropriately aligned
6395 assert(mr.end() == (HeapWord*)round_to((intptr_t)mr.end(),
6396 (1 << (_shifter+LogHeapWordSize))),
6397 "Misaligned mr.end()");
6398 size_t end_ofs = heapWordToOffset(mr.end());
6399 assert(end_ofs > start_ofs, "Should mark at least one bit");
6400 }
6402 #endif
6404 bool CMSMarkStack::allocate(size_t size) {
6405 // allocate a stack of the requisite depth
6406 ReservedSpace rs(ReservedSpace::allocation_align_size_up(
6407 size * sizeof(oop)));
6408 if (!rs.is_reserved()) {
6409 warning("CMSMarkStack allocation failure");
6410 return false;
6411 }
6412 if (!_virtual_space.initialize(rs, rs.size())) {
6413 warning("CMSMarkStack backing store failure");
6414 return false;
6415 }
6416 assert(_virtual_space.committed_size() == rs.size(),
6417 "didn't reserve backing store for all of CMS stack?");
6418 _base = (oop*)(_virtual_space.low());
6419 _index = 0;
6420 _capacity = size;
6421 NOT_PRODUCT(_max_depth = 0);
6422 return true;
6423 }
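// Sizing note (illustrative, not part of the original source): the stack
// reserves size * sizeof(oop) bytes of backing store, so a hypothetical
// request for 4M entries on a 64-bit VM reserves roughly 32MB.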
6425 // XXX FIX ME !!! In the MT case we come in here holding a
6426 // leaf lock. For printing we need to take a further lock
6427 // leaf lock. For printing we need to take a further lock
6428 // which has lower rank. We need to recalibrate the two
6429 // messages below. (Or defer the printing to the caller.
6430 // For now we take the expedient path of just disabling the
6431 // messages for the problematic case.)
6432 void CMSMarkStack::expand() {
6433 assert(_capacity <= MarkStackSizeMax, "stack bigger than permitted");
6434 if (_capacity == MarkStackSizeMax) {
6435 if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
6436 // We print a warning message only once per CMS cycle.
6437 gclog_or_tty->print_cr(" (benign) Hit CMSMarkStack max size limit");
6438 }
6439 return;
6440 }
6441 // Double capacity if possible
6442 size_t new_capacity = MIN2(_capacity*2, MarkStackSizeMax);
6443 // Do not give up existing stack until we have managed to
6444 // get the double capacity that we desired.
6445 ReservedSpace rs(ReservedSpace::allocation_align_size_up(
6446 new_capacity * sizeof(oop)));
6447 if (rs.is_reserved()) {
6448 // Release the backing store associated with old stack
6449 _virtual_space.release();
6450 // Reinitialize virtual space for new stack
6451 if (!_virtual_space.initialize(rs, rs.size())) {
6452 fatal("Not enough swap for expanded marking stack");
6453 }
6454 _base = (oop*)(_virtual_space.low());
6455 _index = 0;
6456 _capacity = new_capacity;
6457 } else if (_failed_double++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
6458     // Failed to double the capacity; continue with the existing stack.
6459     // We print a detail message only once per CMS cycle.
6460 gclog_or_tty->print(" (benign) Failed to expand marking stack from "SIZE_FORMAT"K to "
6461 SIZE_FORMAT"K",
6462 _capacity / K, new_capacity / K);
6463 }
6464 }
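// Growth note (illustrative, not part of the original source): expansion
// doubles the capacity but never beyond MarkStackSizeMax; e.g. a 512K-entry
// stack (assuming a larger MarkStackSizeMax) next attempts a 1M-entry
// reservation, and if that reservation fails the original backing store is
// kept unchanged.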
6467 // Closures
6468 // XXX: there seems to be a lot of code duplication here;
6469 // should refactor and consolidate common code.
6471 // This closure is used to mark refs into the CMS generation in
6472 // the CMS bit map. Called at the first checkpoint. This closure
6473 // assumes that we do not need to re-mark dirty cards; if the CMS
6474 // generation on which this is used is not the oldest (modulo perm gen)
6475 // generation then this will lose younger_gen cards!
6477 MarkRefsIntoClosure::MarkRefsIntoClosure(
6478 MemRegion span, CMSBitMap* bitMap):
6479 _span(span),
6480 _bitMap(bitMap)
6481 {
6482 assert(_ref_processor == NULL, "deliberately left NULL");
6483 assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
6484 }
6486 void MarkRefsIntoClosure::do_oop(oop obj) {
6487 // if p points into _span, then mark corresponding bit in _markBitMap
6488 assert(obj->is_oop(), "expected an oop");
6489 HeapWord* addr = (HeapWord*)obj;
6490 if (_span.contains(addr)) {
6491 // this should be made more efficient
6492 _bitMap->mark(addr);
6493 }
6494 }
6496 void MarkRefsIntoClosure::do_oop(oop* p) { MarkRefsIntoClosure::do_oop_work(p); }
6497 void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
6499 // A variant of the above, used for CMS marking verification.
6500 MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
6501 MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm):
6502 _span(span),
6503 _verification_bm(verification_bm),
6504 _cms_bm(cms_bm)
6505 {
6506 assert(_ref_processor == NULL, "deliberately left NULL");
6507 assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
6508 }
6510 void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
6511 // if p points into _span, then mark corresponding bit in _markBitMap
6512 assert(obj->is_oop(), "expected an oop");
6513 HeapWord* addr = (HeapWord*)obj;
6514 if (_span.contains(addr)) {
6515 _verification_bm->mark(addr);
6516 if (!_cms_bm->isMarked(addr)) {
6517 oop(addr)->print();
6518 gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", addr);
6519 fatal("... aborting");
6520 }
6521 }
6522 }
6524 void MarkRefsIntoVerifyClosure::do_oop(oop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
6525 void MarkRefsIntoVerifyClosure::do_oop(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
6527 //////////////////////////////////////////////////
6528 // MarkRefsIntoAndScanClosure
6529 //////////////////////////////////////////////////
6531 MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
6532 ReferenceProcessor* rp,
6533 CMSBitMap* bit_map,
6534 CMSBitMap* mod_union_table,
6535 CMSMarkStack* mark_stack,
6536 CMSMarkStack* revisit_stack,
6537 CMSCollector* collector,
6538 bool should_yield,
6539 bool concurrent_precleaning):
6540 _collector(collector),
6541 _span(span),
6542 _bit_map(bit_map),
6543 _mark_stack(mark_stack),
6544 _pushAndMarkClosure(collector, span, rp, bit_map, mod_union_table,
6545 mark_stack, revisit_stack, concurrent_precleaning),
6546 _yield(should_yield),
6547 _concurrent_precleaning(concurrent_precleaning),
6548 _freelistLock(NULL)
6549 {
6550 _ref_processor = rp;
6551 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
6552 }
6554 // This closure is used to mark refs into the CMS generation at the
6555 // second (final) checkpoint, and to scan and transitively follow
6556 // the unmarked oops. It is also used during the concurrent precleaning
6557 // phase while scanning objects on dirty cards in the CMS generation.
6558 // The marks are made in the marking bit map and the marking stack is
6559 // used for keeping the (newly) grey objects during the scan.
6560 // The parallel version (Par_...) appears further below.
6561 void MarkRefsIntoAndScanClosure::do_oop(oop obj) {
6562 if (obj != NULL) {
6563 assert(obj->is_oop(), "expected an oop");
6564 HeapWord* addr = (HeapWord*)obj;
6565 assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
6566 assert(_collector->overflow_list_is_empty(),
6567 "overflow list should be empty");
6568 if (_span.contains(addr) &&
6569 !_bit_map->isMarked(addr)) {
6570 // mark bit map (object is now grey)
6571 _bit_map->mark(addr);
6572 // push on marking stack (stack should be empty), and drain the
6573 // stack by applying this closure to the oops in the oops popped
6574 // from the stack (i.e. blacken the grey objects)
6575 bool res = _mark_stack->push(obj);
6576 assert(res, "Should have space to push on empty stack");
6577 do {
6578 oop new_oop = _mark_stack->pop();
6579 assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
6580 assert(new_oop->is_parsable(), "Found unparsable oop");
6581 assert(_bit_map->isMarked((HeapWord*)new_oop),
6582 "only grey objects on this stack");
6583 // iterate over the oops in this oop, marking and pushing
6584 // the ones in CMS heap (i.e. in _span).
6585 new_oop->oop_iterate(&_pushAndMarkClosure);
6586 // check if it's time to yield
6587 do_yield_check();
6588 } while (!_mark_stack->isEmpty() ||
6589 (!_concurrent_precleaning && take_from_overflow_list()));
6590 // if marking stack is empty, and we are not doing this
6591 // during precleaning, then check the overflow list
6592 }
6593 assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
6594 assert(_collector->overflow_list_is_empty(),
6595 "overflow list was drained above");
6596 // We could restore evacuated mark words, if any, used for
6597 // overflow list links here because the overflow list is
6598 // provably empty here. That would reduce the maximum
6599 // size requirements for preserved_{oop,mark}_stack.
6600 // But we'll just postpone it until we are all done
6601 // so we can just stream through.
6602 if (!_concurrent_precleaning && CMSOverflowEarlyRestoration) {
6603 _collector->restore_preserved_marks_if_any();
6604 assert(_collector->no_preserved_marks(), "No preserved marks");
6605 }
6606 assert(!CMSOverflowEarlyRestoration || _collector->no_preserved_marks(),
6607 "All preserved marks should have been restored above");
6608 }
6609 }
6611 void MarkRefsIntoAndScanClosure::do_oop(oop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
6612 void MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
6614 void MarkRefsIntoAndScanClosure::do_yield_work() {
6615 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6616 "CMS thread should hold CMS token");
6617 assert_lock_strong(_freelistLock);
6618 assert_lock_strong(_bit_map->lock());
6619   // relinquish the free_list_lock and bitMapLock()
6620 DEBUG_ONLY(RememberKlassesChecker mux(false);)
6621 _bit_map->lock()->unlock();
6622 _freelistLock->unlock();
6623 ConcurrentMarkSweepThread::desynchronize(true);
6624 ConcurrentMarkSweepThread::acknowledge_yield_request();
6625 _collector->stopTimer();
6626 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
6627 if (PrintCMSStatistics != 0) {
6628 _collector->incrementYields();
6629 }
6630 _collector->icms_wait();
6632 // See the comment in coordinator_yield()
6633 for (unsigned i = 0;
6634 i < CMSYieldSleepCount &&
6635 ConcurrentMarkSweepThread::should_yield() &&
6636 !CMSCollector::foregroundGCIsActive();
6637 ++i) {
6638 os::sleep(Thread::current(), 1, false);
6639 ConcurrentMarkSweepThread::acknowledge_yield_request();
6640 }
6642 ConcurrentMarkSweepThread::synchronize(true);
6643 _freelistLock->lock_without_safepoint_check();
6644 _bit_map->lock()->lock_without_safepoint_check();
6645 _collector->startTimer();
6646 }
6648 ///////////////////////////////////////////////////////////
6649 // Par_MarkRefsIntoAndScanClosure: a parallel version of
6650 // MarkRefsIntoAndScanClosure
6651 ///////////////////////////////////////////////////////////
6652 Par_MarkRefsIntoAndScanClosure::Par_MarkRefsIntoAndScanClosure(
6653 CMSCollector* collector, MemRegion span, ReferenceProcessor* rp,
6654 CMSBitMap* bit_map, OopTaskQueue* work_queue, CMSMarkStack* revisit_stack):
6655 _span(span),
6656 _bit_map(bit_map),
6657 _work_queue(work_queue),
6658 _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
6659 (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads))),
6660 _par_pushAndMarkClosure(collector, span, rp, bit_map, work_queue,
6661 revisit_stack)
6662 {
6663 _ref_processor = rp;
6664 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
6665 }
6667 // This closure is used to mark refs into the CMS generation at the
6668 // second (final) checkpoint, and to scan and transitively follow
6669 // the unmarked oops. The marks are made in the marking bit map and
6670 // the work_queue is used for keeping the (newly) grey objects during
6671 // the scan phase whence they are also available for stealing by parallel
6672 // threads. Since the marking bit map is shared, updates are
6673 // synchronized (via CAS).
6674 void Par_MarkRefsIntoAndScanClosure::do_oop(oop obj) {
6675 if (obj != NULL) {
6676 // Ignore mark word because this could be an already marked oop
6677 // that may be chained at the end of the overflow list.
6678 assert(obj->is_oop(true), "expected an oop");
6679 HeapWord* addr = (HeapWord*)obj;
6680 if (_span.contains(addr) &&
6681 !_bit_map->isMarked(addr)) {
6682 // mark bit map (object will become grey):
6683 // It is possible for several threads to be
6684 // trying to "claim" this object concurrently;
6685 // the unique thread that succeeds in marking the
6686 // object first will do the subsequent push on
6687 // to the work queue (or overflow list).
6688 if (_bit_map->par_mark(addr)) {
6689 // push on work_queue (which may not be empty), and trim the
6690 // queue to an appropriate length by applying this closure to
6691 // the oops in the oops popped from the stack (i.e. blacken the
6692 // grey objects)
6693 bool res = _work_queue->push(obj);
6694 assert(res, "Low water mark should be less than capacity?");
6695 trim_queue(_low_water_mark);
6696 } // Else, another thread claimed the object
6697 }
6698 }
6699 }
6701 void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
6702 void Par_MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
6704 // This closure is used to rescan the marked objects on the dirty cards
6705 // in the mod union table and the card table proper.
6706 size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
6707 oop p, MemRegion mr) {
6709 size_t size = 0;
6710 HeapWord* addr = (HeapWord*)p;
6711 DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6712 assert(_span.contains(addr), "we are scanning the CMS generation");
6713 // check if it's time to yield
6714 if (do_yield_check()) {
6715 // We yielded for some foreground stop-world work,
6716 // and we have been asked to abort this ongoing preclean cycle.
6717 return 0;
6718 }
6719 if (_bitMap->isMarked(addr)) {
6720 // it's marked; is it potentially uninitialized?
6721 if (p->klass_or_null() != NULL) {
6722 // If is_conc_safe is false, the object may be undergoing
6723 // change by the VM outside a safepoint. Don't try to
6724 // scan it, but rather leave it for the remark phase.
6725 if (CMSPermGenPrecleaningEnabled &&
6726 (!p->is_conc_safe() || !p->is_parsable())) {
6727 // Signal precleaning to redirty the card since
6728 // the klass pointer is already installed.
6729 assert(size == 0, "Initial value");
6730 } else {
6731 assert(p->is_parsable(), "must be parsable.");
6732 // an initialized object; ignore mark word in verification below
6733 // since we are running concurrent with mutators
6734 assert(p->is_oop(true), "should be an oop");
6735 if (p->is_objArray()) {
6736 // objArrays are precisely marked; restrict scanning
6737 // to dirty cards only.
6738 size = CompactibleFreeListSpace::adjustObjectSize(
6739 p->oop_iterate(_scanningClosure, mr));
6740 } else {
6741 // A non-array may have been imprecisely marked; we need
6742 // to scan object in its entirety.
6743 size = CompactibleFreeListSpace::adjustObjectSize(
6744 p->oop_iterate(_scanningClosure));
6745 }
6746 #ifdef DEBUG
6747 size_t direct_size =
6748 CompactibleFreeListSpace::adjustObjectSize(p->size());
6749 assert(size == direct_size, "Inconsistency in size");
6750 assert(size >= 3, "Necessary for Printezis marks to work");
6751 if (!_bitMap->isMarked(addr+1)) {
6752 _bitMap->verifyNoOneBitsInRange(addr+2, addr+size);
6753 } else {
6754 _bitMap->verifyNoOneBitsInRange(addr+2, addr+size-1);
6755 assert(_bitMap->isMarked(addr+size-1),
6756 "inconsistent Printezis mark");
6757 }
6758 #endif // DEBUG
6759 }
6760 } else {
6761       // an uninitialized object
6762 assert(_bitMap->isMarked(addr+1), "missing Printezis mark?");
6763 HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
6764 size = pointer_delta(nextOneAddr + 1, addr);
6765 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6766 "alignment problem");
6767 // Note that pre-cleaning needn't redirty the card. OopDesc::set_klass()
6768 // will dirty the card when the klass pointer is installed in the
6769 // object (signalling the completion of initialization).
6770 }
6771 } else {
6772 // Either a not yet marked object or an uninitialized object
6773 if (p->klass_or_null() == NULL || !p->is_parsable()) {
6774 // An uninitialized object, skip to the next card, since
6775 // we may not be able to read its P-bits yet.
6776 assert(size == 0, "Initial value");
6777 } else {
6778 // An object not (yet) reached by marking: we merely need to
6779 // compute its size so as to go look at the next block.
6780 assert(p->is_oop(true), "should be an oop");
6781 size = CompactibleFreeListSpace::adjustObjectSize(p->size());
6782 }
6783 }
6784 DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6785 return size;
6786 }
6788 void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
6789 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6790 "CMS thread should hold CMS token");
6791 assert_lock_strong(_freelistLock);
6792 assert_lock_strong(_bitMap->lock());
6793 DEBUG_ONLY(RememberKlassesChecker mux(false);)
6794   // relinquish the free_list_lock and bitMapLock()
6795 _bitMap->lock()->unlock();
6796 _freelistLock->unlock();
6797 ConcurrentMarkSweepThread::desynchronize(true);
6798 ConcurrentMarkSweepThread::acknowledge_yield_request();
6799 _collector->stopTimer();
6800 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
6801 if (PrintCMSStatistics != 0) {
6802 _collector->incrementYields();
6803 }
6804 _collector->icms_wait();
6806 // See the comment in coordinator_yield()
6807 for (unsigned i = 0; i < CMSYieldSleepCount &&
6808 ConcurrentMarkSweepThread::should_yield() &&
6809 !CMSCollector::foregroundGCIsActive(); ++i) {
6810 os::sleep(Thread::current(), 1, false);
6811 ConcurrentMarkSweepThread::acknowledge_yield_request();
6812 }
6814 ConcurrentMarkSweepThread::synchronize(true);
6815 _freelistLock->lock_without_safepoint_check();
6816 _bitMap->lock()->lock_without_safepoint_check();
6817 _collector->startTimer();
6818 }
6821 //////////////////////////////////////////////////////////////////
6822 // SurvivorSpacePrecleanClosure
6823 //////////////////////////////////////////////////////////////////
6824 // This (single-threaded) closure is used to preclean the oops in
6825 // the survivor spaces.
6826 size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {
6828 HeapWord* addr = (HeapWord*)p;
6829 DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6830 assert(!_span.contains(addr), "we are scanning the survivor spaces");
6831   assert(p->klass_or_null() != NULL, "object should be initialized");
6832 assert(p->is_parsable(), "must be parsable.");
6833 // an initialized object; ignore mark word in verification below
6834 // since we are running concurrent with mutators
6835 assert(p->is_oop(true), "should be an oop");
6836 // Note that we do not yield while we iterate over
6837 // the interior oops of p, pushing the relevant ones
6838 // on our marking stack.
6839 size_t size = p->oop_iterate(_scanning_closure);
6840 do_yield_check();
6841 // Observe that below, we do not abandon the preclean
6842 // phase as soon as we should; rather we empty the
6843 // marking stack before returning. This is to satisfy
6844 // some existing assertions. In general, it may be a
6845 // good idea to abort immediately and complete the marking
6846 // from the grey objects at a later time.
6847 while (!_mark_stack->isEmpty()) {
6848 oop new_oop = _mark_stack->pop();
6849 assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
6850 assert(new_oop->is_parsable(), "Found unparsable oop");
6851 assert(_bit_map->isMarked((HeapWord*)new_oop),
6852 "only grey objects on this stack");
6853 // iterate over the oops in this oop, marking and pushing
6854 // the ones in CMS heap (i.e. in _span).
6855 new_oop->oop_iterate(_scanning_closure);
6856 // check if it's time to yield
6857 do_yield_check();
6858 }
6859 unsigned int after_count =
6860 GenCollectedHeap::heap()->total_collections();
6861 bool abort = (_before_count != after_count) ||
6862 _collector->should_abort_preclean();
6863 return abort ? 0 : size;
6864 }
6866 void SurvivorSpacePrecleanClosure::do_yield_work() {
6867 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6868 "CMS thread should hold CMS token");
6869 assert_lock_strong(_bit_map->lock());
6870 DEBUG_ONLY(RememberKlassesChecker smx(false);)
6871 // Relinquish the bit map lock
6872 _bit_map->lock()->unlock();
6873 ConcurrentMarkSweepThread::desynchronize(true);
6874 ConcurrentMarkSweepThread::acknowledge_yield_request();
6875 _collector->stopTimer();
6876 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
6877 if (PrintCMSStatistics != 0) {
6878 _collector->incrementYields();
6879 }
6880 _collector->icms_wait();
6882 // See the comment in coordinator_yield()
6883 for (unsigned i = 0; i < CMSYieldSleepCount &&
6884 ConcurrentMarkSweepThread::should_yield() &&
6885 !CMSCollector::foregroundGCIsActive(); ++i) {
6886 os::sleep(Thread::current(), 1, false);
6887 ConcurrentMarkSweepThread::acknowledge_yield_request();
6888 }
6890 ConcurrentMarkSweepThread::synchronize(true);
6891 _bit_map->lock()->lock_without_safepoint_check();
6892 _collector->startTimer();
6893 }
6895 // This closure is used to rescan the marked objects on the dirty cards
6896 // in the mod union table and the card table proper. In the parallel
6897 // case, although the bitMap is shared, we do a single read so the
6898 // isMarked() query is "safe".
6899 bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
6900 // Ignore mark word because we are running concurrent with mutators
6901 assert(p->is_oop_or_null(true), "expected an oop or null");
6902 HeapWord* addr = (HeapWord*)p;
6903 assert(_span.contains(addr), "we are scanning the CMS generation");
6904 bool is_obj_array = false;
6905 #ifdef DEBUG
6906 if (!_parallel) {
6907 assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
6908 assert(_collector->overflow_list_is_empty(),
6909 "overflow list should be empty");
6911 }
6912 #endif // DEBUG
6913 if (_bit_map->isMarked(addr)) {
6914 // Obj arrays are precisely marked, non-arrays are not;
6915 // so we scan objArrays precisely and non-arrays in their
6916 // entirety.
6917 if (p->is_objArray()) {
6918 is_obj_array = true;
6919 if (_parallel) {
6920 p->oop_iterate(_par_scan_closure, mr);
6921 } else {
6922 p->oop_iterate(_scan_closure, mr);
6923 }
6924 } else {
6925 if (_parallel) {
6926 p->oop_iterate(_par_scan_closure);
6927 } else {
6928 p->oop_iterate(_scan_closure);
6929 }
6930 }
6931 }
6932 #ifdef DEBUG
6933 if (!_parallel) {
6934 assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
6935 assert(_collector->overflow_list_is_empty(),
6936 "overflow list should be empty");
6938 }
6939 #endif // DEBUG
6940 return is_obj_array;
6941 }
6943 MarkFromRootsClosure::MarkFromRootsClosure(CMSCollector* collector,
6944 MemRegion span,
6945 CMSBitMap* bitMap, CMSMarkStack* markStack,
6946 CMSMarkStack* revisitStack,
6947 bool should_yield, bool verifying):
6948 _collector(collector),
6949 _span(span),
6950 _bitMap(bitMap),
6951 _mut(&collector->_modUnionTable),
6952 _markStack(markStack),
6953 _revisitStack(revisitStack),
6954 _yield(should_yield),
6955 _skipBits(0)
6956 {
6957 assert(_markStack->isEmpty(), "stack should be empty");
6958 _finger = _bitMap->startWord();
6959 _threshold = _finger;
6960 assert(_collector->_restart_addr == NULL, "Sanity check");
6961 assert(_span.contains(_finger), "Out of bounds _finger?");
6962 DEBUG_ONLY(_verifying = verifying;)
6963 }
6965 void MarkFromRootsClosure::reset(HeapWord* addr) {
6966 assert(_markStack->isEmpty(), "would cause duplicates on stack");
6967 assert(_span.contains(addr), "Out of bounds _finger?");
6968 _finger = addr;
6969 _threshold = (HeapWord*)round_to(
6970 (intptr_t)_finger, CardTableModRefBS::card_size);
6971 }
6973 // Should revisit to see if this should be restructured for
6974 // greater efficiency.
6975 bool MarkFromRootsClosure::do_bit(size_t offset) {
6976 if (_skipBits > 0) {
6977 _skipBits--;
6978 return true;
6979 }
6980 // convert offset into a HeapWord*
6981 HeapWord* addr = _bitMap->startWord() + offset;
6982 assert(_bitMap->endWord() && addr < _bitMap->endWord(),
6983 "address out of range");
6984 assert(_bitMap->isMarked(addr), "tautology");
6985 if (_bitMap->isMarked(addr+1)) {
6986 // this is an allocated but not yet initialized object
6987 assert(_skipBits == 0, "tautology");
6988 _skipBits = 2; // skip next two marked bits ("Printezis-marks")
6989 oop p = oop(addr);
6990 if (p->klass_or_null() == NULL || !p->is_parsable()) {
6991 DEBUG_ONLY(if (!_verifying) {)
6992 // We re-dirty the cards on which this object lies and increase
6993 // the _threshold so that we'll come back to scan this object
6994 // during the preclean or remark phase. (CMSCleanOnEnter)
6995 if (CMSCleanOnEnter) {
6996 size_t sz = _collector->block_size_using_printezis_bits(addr);
6997 HeapWord* end_card_addr = (HeapWord*)round_to(
6998 (intptr_t)(addr+sz), CardTableModRefBS::card_size);
6999 MemRegion redirty_range = MemRegion(addr, end_card_addr);
7000 assert(!redirty_range.is_empty(), "Arithmetical tautology");
7001 // Bump _threshold to end_card_addr; note that
7002 // _threshold cannot possibly exceed end_card_addr, anyhow.
7003 // This prevents future clearing of the card as the scan proceeds
7004 // to the right.
7005 assert(_threshold <= end_card_addr,
7006 "Because we are just scanning into this object");
7007 if (_threshold < end_card_addr) {
7008 _threshold = end_card_addr;
7009 }
7010 if (p->klass_or_null() != NULL) {
7011 // Redirty the range of cards...
7012 _mut->mark_range(redirty_range);
7013 } // ...else the setting of klass will dirty the card anyway.
7014 }
7015 DEBUG_ONLY(})
7016 return true;
7017 }
7018 }
7019 scanOopsInOop(addr);
7020 return true;
7021 }
7023 // We take a break if we've been at this for a while,
7024 // so as to avoid monopolizing the locks involved.
7025 void MarkFromRootsClosure::do_yield_work() {
7026 // First give up the locks, then yield, then re-lock
7027 // We should probably use a constructor/destructor idiom to
7028 // do this unlock/lock or modify the MutexUnlocker class to
7029 // serve our purpose. XXX
7030 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7031 "CMS thread should hold CMS token");
7032 assert_lock_strong(_bitMap->lock());
7033 DEBUG_ONLY(RememberKlassesChecker mux(false);)
7034 _bitMap->lock()->unlock();
7035 ConcurrentMarkSweepThread::desynchronize(true);
7036 ConcurrentMarkSweepThread::acknowledge_yield_request();
7037 _collector->stopTimer();
7038 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
7039 if (PrintCMSStatistics != 0) {
7040 _collector->incrementYields();
7041 }
7042 _collector->icms_wait();
7044 // See the comment in coordinator_yield()
7045 for (unsigned i = 0; i < CMSYieldSleepCount &&
7046 ConcurrentMarkSweepThread::should_yield() &&
7047 !CMSCollector::foregroundGCIsActive(); ++i) {
7048 os::sleep(Thread::current(), 1, false);
7049 ConcurrentMarkSweepThread::acknowledge_yield_request();
7050 }
7052 ConcurrentMarkSweepThread::synchronize(true);
7053 _bitMap->lock()->lock_without_safepoint_check();
7054 _collector->startTimer();
7055 }
7057 void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
7058 assert(_bitMap->isMarked(ptr), "expected bit to be set");
7059 assert(_markStack->isEmpty(),
7060 "should drain stack to limit stack usage");
7061 // convert ptr to an oop preparatory to scanning
7062 oop obj = oop(ptr);
7063 // Ignore mark word in verification below, since we
7064 // may be running concurrent with mutators.
7065 assert(obj->is_oop(true), "should be an oop");
7066 assert(_finger <= ptr, "_finger runneth ahead");
7067 // advance the finger to right end of this object
7068 _finger = ptr + obj->size();
7069 assert(_finger > ptr, "we just incremented it above");
7070 // On large heaps, it may take us some time to get through
7071 // the marking phase (especially if running iCMS). During
7072 // this time it's possible that a lot of mutations have
7073 // accumulated in the card table and the mod union table --
7074 // these mutation records are redundant until we have
7075 // actually traced into the corresponding card.
7076 // Here, we check whether advancing the finger would make
7077 // us cross into a new card, and if so clear corresponding
7078 // cards in the MUT (preclean them in the card-table in the
7079 // future).
7081 DEBUG_ONLY(if (!_verifying) {)
7082 // The clean-on-enter optimization is disabled by default,
7083 // until we fix 6178663.
7084 if (CMSCleanOnEnter && (_finger > _threshold)) {
7085 // [_threshold, _finger) represents the interval
7086 // of cards to be cleared in MUT (or precleaned in card table).
7087 // The set of cards to be cleared is all those that overlap
7088 // with the interval [_threshold, _finger); note that
7089 // _threshold is always kept card-aligned but _finger isn't
7090 // always card-aligned.
7091 HeapWord* old_threshold = _threshold;
7092 assert(old_threshold == (HeapWord*)round_to(
7093 (intptr_t)old_threshold, CardTableModRefBS::card_size),
7094 "_threshold should always be card-aligned");
7095 _threshold = (HeapWord*)round_to(
7096 (intptr_t)_finger, CardTableModRefBS::card_size);
7097 MemRegion mr(old_threshold, _threshold);
7098 assert(!mr.is_empty(), "Control point invariant");
7099 assert(_span.contains(mr), "Should clear within span");
7100 // XXX When _finger crosses from old gen into perm gen
7101 // we may be doing unnecessary cleaning; do better in the
7102 // future by detecting that condition and clearing fewer
7103 // MUT/CT entries.
7104 _mut->clear_range(mr);
7105 }
7106 DEBUG_ONLY(})
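  // Illustrative example (not part of the original source): if the finger
  // advances from a card-aligned _threshold of 0x8000 to 0x8450 and cards
  // are 512 bytes, the range cleared in the MUT above is [0x8000, 0x8600)
  // and _threshold becomes 0x8600.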
7107 // Note: the finger doesn't advance while we drain
7108 // the stack below.
7109 PushOrMarkClosure pushOrMarkClosure(_collector,
7110 _span, _bitMap, _markStack,
7111 _revisitStack,
7112 _finger, this);
7113 bool res = _markStack->push(obj);
7114 assert(res, "Empty non-zero size stack should have space for single push");
7115 while (!_markStack->isEmpty()) {
7116 oop new_oop = _markStack->pop();
7117 // Skip verifying header mark word below because we are
7118 // running concurrent with mutators.
7119 assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
7120 // now scan this oop's oops
7121 new_oop->oop_iterate(&pushOrMarkClosure);
7122 do_yield_check();
7123 }
7124 assert(_markStack->isEmpty(), "tautology, emphasizing post-condition");
7125 }
7127 Par_MarkFromRootsClosure::Par_MarkFromRootsClosure(CMSConcMarkingTask* task,
7128 CMSCollector* collector, MemRegion span,
7129 CMSBitMap* bit_map,
7130 OopTaskQueue* work_queue,
7131 CMSMarkStack* overflow_stack,
7132 CMSMarkStack* revisit_stack,
7133 bool should_yield):
7134 _collector(collector),
7135 _whole_span(collector->_span),
7136 _span(span),
7137 _bit_map(bit_map),
7138 _mut(&collector->_modUnionTable),
7139 _work_queue(work_queue),
7140 _overflow_stack(overflow_stack),
7141 _revisit_stack(revisit_stack),
7142 _yield(should_yield),
7143 _skip_bits(0),
7144 _task(task)
7145 {
7146 assert(_work_queue->size() == 0, "work_queue should be empty");
7147 _finger = span.start();
7148 _threshold = _finger; // XXX Defer clear-on-enter optimization for now
7149 assert(_span.contains(_finger), "Out of bounds _finger?");
7150 }
7152 // Should revisit to see if this should be restructured for
7153 // greater efficiency.
7154 bool Par_MarkFromRootsClosure::do_bit(size_t offset) {
7155 if (_skip_bits > 0) {
7156 _skip_bits--;
7157 return true;
7158 }
7159 // convert offset into a HeapWord*
7160 HeapWord* addr = _bit_map->startWord() + offset;
7161 assert(_bit_map->endWord() && addr < _bit_map->endWord(),
7162 "address out of range");
7163 assert(_bit_map->isMarked(addr), "tautology");
7164 if (_bit_map->isMarked(addr+1)) {
7165 // this is an allocated object that might not yet be initialized
7166 assert(_skip_bits == 0, "tautology");
7167 _skip_bits = 2; // skip next two marked bits ("Printezis-marks")
7168 oop p = oop(addr);
7169 if (p->klass_or_null() == NULL || !p->is_parsable()) {
7170 // in the case of Clean-on-Enter optimization, redirty card
7171 // and avoid clearing card by increasing the threshold.
7172 return true;
7173 }
7174 }
7175 scan_oops_in_oop(addr);
7176 return true;
7177 }
7179 void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
7180 assert(_bit_map->isMarked(ptr), "expected bit to be set");
7181 // Should we assert that our work queue is empty or
7182 // below some drain limit?
7183 assert(_work_queue->size() == 0,
7184 "should drain stack to limit stack usage");
7185 // convert ptr to an oop preparatory to scanning
7186 oop obj = oop(ptr);
7187 // Ignore mark word in verification below, since we
7188 // may be running concurrent with mutators.
7189 assert(obj->is_oop(true), "should be an oop");
7190 assert(_finger <= ptr, "_finger runneth ahead");
7191 // advance the finger to right end of this object
7192 _finger = ptr + obj->size();
7193 assert(_finger > ptr, "we just incremented it above");
7194 // On large heaps, it may take us some time to get through
7195 // the marking phase (especially if running iCMS). During
7196 // this time it's possible that a lot of mutations have
7197 // accumulated in the card table and the mod union table --
7198 // these mutation records are redundant until we have
7199 // actually traced into the corresponding card.
7200 // Here, we check whether advancing the finger would make
7201 // us cross into a new card, and if so clear corresponding
7202 // cards in the MUT (preclean them in the card-table in the
7203 // future).
7205 // The clean-on-enter optimization is disabled by default,
7206 // until we fix 6178663.
7207 if (CMSCleanOnEnter && (_finger > _threshold)) {
7208 // [_threshold, _finger) represents the interval
7209 // of cards to be cleared in MUT (or precleaned in card table).
7210 // The set of cards to be cleared is all those that overlap
7211 // with the interval [_threshold, _finger); note that
7212 // _threshold is always kept card-aligned but _finger isn't
7213 // always card-aligned.
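// [Editorial illustration, not part of the original source] A hypothetical
// example of the rounding below, assuming the default 512-byte cards
// (CardTableModRefBS::card_size == 512): if the old _threshold is the
// card-aligned address 0x...F800 and the new _finger is 0x...FA10, then
// _threshold advances to round_to(0x...FA10, 512) == 0x...FC00, and the
// two cards covering [0x...F800, 0x...FC00) are cleared in the MUT.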
7214 HeapWord* old_threshold = _threshold;
7215 assert(old_threshold == (HeapWord*)round_to(
7216 (intptr_t)old_threshold, CardTableModRefBS::card_size),
7217 "_threshold should always be card-aligned");
7218 _threshold = (HeapWord*)round_to(
7219 (intptr_t)_finger, CardTableModRefBS::card_size);
7220 MemRegion mr(old_threshold, _threshold);
7221 assert(!mr.is_empty(), "Control point invariant");
7222 assert(_span.contains(mr), "Should clear within span"); // _whole_span ??
7223 // XXX When _finger crosses from old gen into perm gen
7224 // we may be doing unnecessary cleaning; do better in the
7225 // future by detecting that condition and clearing fewer
7226 // MUT/CT entries.
7227 _mut->clear_range(mr);
7228 }
7230 // Note: the local finger doesn't advance while we drain
7231 // the stack below, but the global finger sure can and will.
7232 HeapWord** gfa = _task->global_finger_addr();
7233 Par_PushOrMarkClosure pushOrMarkClosure(_collector,
7234 _span, _bit_map,
7235 _work_queue,
7236 _overflow_stack,
7237 _revisit_stack,
7238 _finger,
7239 gfa, this);
7240 bool res = _work_queue->push(obj); // overflow could occur here
7241 assert(res, "Will hold once we use workqueues");
7242 while (true) {
7243 oop new_oop;
7244 if (!_work_queue->pop_local(new_oop)) {
7245 // We emptied our work_queue; check if there's stuff that can
7246 // be gotten from the overflow stack.
7247 if (CMSConcMarkingTask::get_work_from_overflow_stack(
7248 _overflow_stack, _work_queue)) {
7249 do_yield_check();
7250 continue;
7251 } else { // done
7252 break;
7253 }
7254 }
7255 // Skip verifying header mark word below because we are
7256 // running concurrent with mutators.
7257 assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
7258 // now scan this oop's oops
7259 new_oop->oop_iterate(&pushOrMarkClosure);
7260 do_yield_check();
7261 }
7262 assert(_work_queue->size() == 0, "tautology, emphasizing post-condition");
7263 }
7265 // Yield in response to a request from VM Thread or
7266 // from mutators.
7267 void Par_MarkFromRootsClosure::do_yield_work() {
7268 assert(_task != NULL, "sanity");
7269 _task->yield();
7270 }
7272 // A variant of the above used for verifying CMS marking work.
7273 MarkFromRootsVerifyClosure::MarkFromRootsVerifyClosure(CMSCollector* collector,
7274 MemRegion span,
7275 CMSBitMap* verification_bm, CMSBitMap* cms_bm,
7276 CMSMarkStack* mark_stack):
7277 _collector(collector),
7278 _span(span),
7279 _verification_bm(verification_bm),
7280 _cms_bm(cms_bm),
7281 _mark_stack(mark_stack),
7282 _pam_verify_closure(collector, span, verification_bm, cms_bm,
7283 mark_stack)
7284 {
7285 assert(_mark_stack->isEmpty(), "stack should be empty");
7286 _finger = _verification_bm->startWord();
7287 assert(_collector->_restart_addr == NULL, "Sanity check");
7288 assert(_span.contains(_finger), "Out of bounds _finger?");
7289 }
7291 void MarkFromRootsVerifyClosure::reset(HeapWord* addr) {
7292 assert(_mark_stack->isEmpty(), "would cause duplicates on stack");
7293 assert(_span.contains(addr), "Out of bounds _finger?");
7294 _finger = addr;
7295 }
7297 // Should revisit to see if this should be restructured for
7298 // greater efficiency.
7299 bool MarkFromRootsVerifyClosure::do_bit(size_t offset) {
7300 // convert offset into a HeapWord*
7301 HeapWord* addr = _verification_bm->startWord() + offset;
7302 assert(_verification_bm->endWord() && addr < _verification_bm->endWord(),
7303 "address out of range");
7304 assert(_verification_bm->isMarked(addr), "tautology");
7305 assert(_cms_bm->isMarked(addr), "tautology");
7307 assert(_mark_stack->isEmpty(),
7308 "should drain stack to limit stack usage");
7309 // convert addr to an oop preparatory to scanning
7310 oop obj = oop(addr);
7311 assert(obj->is_oop(), "should be an oop");
7312 assert(_finger <= addr, "_finger runneth ahead");
7313 // advance the finger to right end of this object
7314 _finger = addr + obj->size();
7315 assert(_finger > addr, "we just incremented it above");
7316 // Note: the finger doesn't advance while we drain
7317 // the stack below.
7318 bool res = _mark_stack->push(obj);
7319 assert(res, "Empty non-zero size stack should have space for single push");
7320 while (!_mark_stack->isEmpty()) {
7321 oop new_oop = _mark_stack->pop();
7322 assert(new_oop->is_oop(), "Oops! expected to pop an oop");
7323 // now scan this oop's oops
7324 new_oop->oop_iterate(&_pam_verify_closure);
7325 }
7326 assert(_mark_stack->isEmpty(), "tautology, emphasizing post-condition");
7327 return true;
7328 }
7330 PushAndMarkVerifyClosure::PushAndMarkVerifyClosure(
7331 CMSCollector* collector, MemRegion span,
7332 CMSBitMap* verification_bm, CMSBitMap* cms_bm,
7333 CMSMarkStack* mark_stack):
7334 OopClosure(collector->ref_processor()),
7335 _collector(collector),
7336 _span(span),
7337 _verification_bm(verification_bm),
7338 _cms_bm(cms_bm),
7339 _mark_stack(mark_stack)
7340 { }
7342 void PushAndMarkVerifyClosure::do_oop(oop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
7343 void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
7345 // Upon stack overflow, we discard (part of) the stack,
7346 // remembering the least address amongst those discarded
7347 // in CMSCollector's _restart_address.
7348 void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
7349 // Remember the least grey address discarded
7350 HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost);
7351 _collector->lower_restart_addr(ra);
7352 _mark_stack->reset(); // discard stack contents
7353 _mark_stack->expand(); // expand the stack if possible
7354 }
7356 void PushAndMarkVerifyClosure::do_oop(oop obj) {
7357 assert(obj->is_oop_or_null(), "expected an oop or NULL");
7358 HeapWord* addr = (HeapWord*)obj;
7359 if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
7360 // Oop lies in _span and isn't yet grey or black
7361 _verification_bm->mark(addr); // now grey
7362 if (!_cms_bm->isMarked(addr)) {
7363 oop(addr)->print();
7364 gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)",
7365 addr);
7366 fatal("... aborting");
7367 }
7369 if (!_mark_stack->push(obj)) { // stack overflow
7370 if (PrintCMSStatistics != 0) {
7371 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7372 SIZE_FORMAT, _mark_stack->capacity());
7373 }
7374 assert(_mark_stack->isFull(), "Else push should have succeeded");
7375 handle_stack_overflow(addr);
7376 }
7377 // anything including and to the right of _finger
7378 // will be scanned as we iterate over the remainder of the
7379 // bit map
7380 }
7381 }
7383 PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
7384 MemRegion span,
7385 CMSBitMap* bitMap, CMSMarkStack* markStack,
7386 CMSMarkStack* revisitStack,
7387 HeapWord* finger, MarkFromRootsClosure* parent) :
7388 KlassRememberingOopClosure(collector, collector->ref_processor(), revisitStack),
7389 _span(span),
7390 _bitMap(bitMap),
7391 _markStack(markStack),
7392 _finger(finger),
7393 _parent(parent)
7394 { }
7396 Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
7397 MemRegion span,
7398 CMSBitMap* bit_map,
7399 OopTaskQueue* work_queue,
7400 CMSMarkStack* overflow_stack,
7401 CMSMarkStack* revisit_stack,
7402 HeapWord* finger,
7403 HeapWord** global_finger_addr,
7404 Par_MarkFromRootsClosure* parent) :
7405 Par_KlassRememberingOopClosure(collector,
7406 collector->ref_processor(),
7407 revisit_stack),
7408 _whole_span(collector->_span),
7409 _span(span),
7410 _bit_map(bit_map),
7411 _work_queue(work_queue),
7412 _overflow_stack(overflow_stack),
7413 _finger(finger),
7414 _global_finger_addr(global_finger_addr),
7415 _parent(parent)
7416 { }
7418 // Assumes thread-safe access by callers, who are
7419 // responsible for mutual exclusion.
7420 void CMSCollector::lower_restart_addr(HeapWord* low) {
7421 assert(_span.contains(low), "Out of bounds addr");
7422 if (_restart_addr == NULL) {
7423 _restart_addr = low;
7424 } else {
7425 _restart_addr = MIN2(_restart_addr, low);
7426 }
7427 }
7429 // Upon stack overflow, we discard (part of) the stack,
7430 // remembering the least address amongst those discarded
7431 // in CMSCollector's _restart_address.
7432 void PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
7433 // Remember the least grey address discarded
7434 HeapWord* ra = (HeapWord*)_markStack->least_value(lost);
7435 _collector->lower_restart_addr(ra);
7436 _markStack->reset(); // discard stack contents
7437 _markStack->expand(); // expand the stack if possible
7438 }
7440 // Upon stack overflow, we discard (part of) the stack,
7441 // remembering the least address amongst those discarded
7442 // in CMSCollector's _restart_address.
7443 void Par_PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
7444 // We need to do this under a mutex to prevent other
7445 // workers from interfering with the work done below.
7446 MutexLockerEx ml(_overflow_stack->par_lock(),
7447 Mutex::_no_safepoint_check_flag);
7448 // Remember the least grey address discarded
7449 HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
7450 _collector->lower_restart_addr(ra);
7451 _overflow_stack->reset(); // discard stack contents
7452 _overflow_stack->expand(); // expand the stack if possible
7453 }
7455 void PushOrMarkClosure::do_oop(oop obj) {
7456 // Ignore mark word because we are running concurrent with mutators.
7457 assert(obj->is_oop_or_null(true), "expected an oop or NULL");
7458 HeapWord* addr = (HeapWord*)obj;
7459 if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
7460 // Oop lies in _span and isn't yet grey or black
7461 _bitMap->mark(addr); // now grey
7462 if (addr < _finger) {
7463 // the bit map iteration has already either passed, or
7464 // sampled, this bit in the bit map; we'll need to
7465 // use the marking stack to scan this oop's oops.
7466 bool simulate_overflow = false;
7467 NOT_PRODUCT(
7468 if (CMSMarkStackOverflowALot &&
7469 _collector->simulate_overflow()) {
7470 // simulate a stack overflow
7471 simulate_overflow = true;
7472 }
7473 )
7474 if (simulate_overflow || !_markStack->push(obj)) { // stack overflow
7475 if (PrintCMSStatistics != 0) {
7476 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7477 SIZE_FORMAT, _markStack->capacity());
7478 }
7479 assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded");
7480 handle_stack_overflow(addr);
7481 }
7482 }
7483 // anything including and to the right of _finger
7484 // will be scanned as we iterate over the remainder of the
7485 // bit map
7486 do_yield_check();
7487 }
7488 }
7490 void PushOrMarkClosure::do_oop(oop* p) { PushOrMarkClosure::do_oop_work(p); }
7491 void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
7493 void Par_PushOrMarkClosure::do_oop(oop obj) {
7494 // Ignore mark word because we are running concurrent with mutators.
7495 assert(obj->is_oop_or_null(true), "expected an oop or NULL");
7496 HeapWord* addr = (HeapWord*)obj;
7497 if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
7498 // Oop lies in _whole_span and isn't yet grey or black
7499 // We read the global_finger (volatile read) strictly after marking oop
7500 bool res = _bit_map->par_mark(addr); // now grey
7501 volatile HeapWord** gfa = (volatile HeapWord**)_global_finger_addr;
7502 // Should we push this marked oop on our stack?
7503 // -- if someone else marked it, nothing to do
7504 // -- if target oop is above global finger nothing to do
7505 // -- if target oop is in chunk and above local finger
7506 // then nothing to do
7507 // -- else push on work queue
7508 if ( !res // someone else marked it, they will deal with it
7509 || (addr >= *gfa) // will be scanned in a later task
7510 || (_span.contains(addr) && addr >= _finger)) { // later in this chunk
7511 return;
7512 }
7513 // the bit map iteration has already either passed, or
7514 // sampled, this bit in the bit map; we'll need to
7515 // use the marking stack to scan this oop's oops.
7516 bool simulate_overflow = false;
7517 NOT_PRODUCT(
7518 if (CMSMarkStackOverflowALot &&
7519 _collector->simulate_overflow()) {
7520 // simulate a stack overflow
7521 simulate_overflow = true;
7522 }
7523 )
7524 if (simulate_overflow ||
7525 !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
7526 // stack overflow
7527 if (PrintCMSStatistics != 0) {
7528 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7529 SIZE_FORMAT, _overflow_stack->capacity());
7530 }
7531 // We cannot assert that the overflow stack is full because
7532 // it may have been emptied since.
7533 assert(simulate_overflow ||
7534 _work_queue->size() == _work_queue->max_elems(),
7535 "Else push should have succeeded");
7536 handle_stack_overflow(addr);
7537 }
7538 do_yield_check();
7539 }
7540 }
7542 void Par_PushOrMarkClosure::do_oop(oop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
7543 void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
7545 KlassRememberingOopClosure::KlassRememberingOopClosure(CMSCollector* collector,
7546 ReferenceProcessor* rp,
7547 CMSMarkStack* revisit_stack) :
7548 OopClosure(rp),
7549 _collector(collector),
7550 _revisit_stack(revisit_stack),
7551 _should_remember_klasses(collector->should_unload_classes()) {}
7553 PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
7554 MemRegion span,
7555 ReferenceProcessor* rp,
7556 CMSBitMap* bit_map,
7557 CMSBitMap* mod_union_table,
7558 CMSMarkStack* mark_stack,
7559 CMSMarkStack* revisit_stack,
7560 bool concurrent_precleaning):
7561 KlassRememberingOopClosure(collector, rp, revisit_stack),
7562 _span(span),
7563 _bit_map(bit_map),
7564 _mod_union_table(mod_union_table),
7565 _mark_stack(mark_stack),
7566 _concurrent_precleaning(concurrent_precleaning)
7567 {
7568 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7569 }
7571 // Grey object rescan during pre-cleaning and second checkpoint phases --
7572 // the non-parallel version (the parallel version appears further below.)
7573 void PushAndMarkClosure::do_oop(oop obj) {
7574 // Ignore mark word verification. If during concurrent precleaning,
7575 // the object monitor may be locked. If during the checkpoint
7576 // phases, the object may already have been reached by a different
7577 // path and may be at the end of the global overflow list (so
7578 // the mark word may be NULL).
7579 assert(obj->is_oop_or_null(true /* ignore mark word */),
7580 "expected an oop or NULL");
7581 HeapWord* addr = (HeapWord*)obj;
7582 // Check if oop points into the CMS generation
7583 // and is not marked
7584 if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
7585 // a white object ...
7586 _bit_map->mark(addr); // ... now grey
7587 // push on the marking stack (grey set)
7588 bool simulate_overflow = false;
7589 NOT_PRODUCT(
7590 if (CMSMarkStackOverflowALot &&
7591 _collector->simulate_overflow()) {
7592 // simulate a stack overflow
7593 simulate_overflow = true;
7594 }
7595 )
7596 if (simulate_overflow || !_mark_stack->push(obj)) {
7597 if (_concurrent_precleaning) {
7598 // During precleaning we can just dirty the appropriate card(s)
7599 // in the mod union table, thus ensuring that the object remains
7600 // in the grey set and continue. In the case of object arrays
7601 // we need to dirty all of the cards that the object spans,
7602 // since the rescan of object arrays will be limited to the
7603 // dirty cards.
7604 // Note that no one can be intefering with us in this action
7605 // of dirtying the mod union table, so no locking or atomics
7606 // are required.
7607 if (obj->is_objArray()) {
7608 size_t sz = obj->size();
7609 HeapWord* end_card_addr = (HeapWord*)round_to(
7610 (intptr_t)(addr+sz), CardTableModRefBS::card_size);
7611 MemRegion redirty_range = MemRegion(addr, end_card_addr);
7612 assert(!redirty_range.is_empty(), "Arithmetical tautology");
7613 _mod_union_table->mark_range(redirty_range);
7614 } else {
7615 _mod_union_table->mark(addr);
7616 }
7617 _collector->_ser_pmc_preclean_ovflw++;
7618 } else {
7619 // During the remark phase, we need to remember this oop
7620 // in the overflow list.
7621 _collector->push_on_overflow_list(obj);
7622 _collector->_ser_pmc_remark_ovflw++;
7623 }
7624 }
7625 }
7626 }
7628 Par_PushAndMarkClosure::Par_PushAndMarkClosure(CMSCollector* collector,
7629 MemRegion span,
7630 ReferenceProcessor* rp,
7631 CMSBitMap* bit_map,
7632 OopTaskQueue* work_queue,
7633 CMSMarkStack* revisit_stack):
7634 Par_KlassRememberingOopClosure(collector, rp, revisit_stack),
7635 _span(span),
7636 _bit_map(bit_map),
7637 _work_queue(work_queue)
7638 {
7639 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7640 }
7642 void PushAndMarkClosure::do_oop(oop* p) { PushAndMarkClosure::do_oop_work(p); }
7643 void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
7645 // Grey object rescan during second checkpoint phase --
7646 // the parallel version.
7647 void Par_PushAndMarkClosure::do_oop(oop obj) {
7648 // In the assert below, we ignore the mark word because
7649 // this oop may point to an already visited object that is
7650 // on the overflow stack (in which case the mark word has
7651 // been hijacked for chaining into the overflow stack --
7652 // if this is the last object in the overflow stack then
7653 // its mark word will be NULL). Because this object may
7654 // have been subsequently popped off the global overflow
7655 // stack, and the mark word possibly restored to the prototypical
7656 // value, by the time we get to examine this failing assert in
7657 // the debugger, is_oop_or_null(false) may subsequently start
7658 // to hold.
7659 assert(obj->is_oop_or_null(true),
7660 "expected an oop or NULL");
7661 HeapWord* addr = (HeapWord*)obj;
7662 // Check if oop points into the CMS generation
7663 // and is not marked
7664 if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
7665 // a white object ...
7666 // If we manage to "claim" the object, by being the
7667 // first thread to mark it, then we push it on our
7668 // marking stack
7669 if (_bit_map->par_mark(addr)) { // ... now grey
7670 // push on work queue (grey set)
7671 bool simulate_overflow = false;
7672 NOT_PRODUCT(
7673 if (CMSMarkStackOverflowALot &&
7674 _collector->par_simulate_overflow()) {
7675 // simulate a stack overflow
7676 simulate_overflow = true;
7677 }
7678 )
7679 if (simulate_overflow || !_work_queue->push(obj)) {
7680 _collector->par_push_on_overflow_list(obj);
7681 _collector->_par_pmc_remark_ovflw++; // imprecise OK: no need to CAS
7682 }
7683 } // Else, some other thread got there first
7684 }
7685 }
7687 void Par_PushAndMarkClosure::do_oop(oop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
7688 void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
7690 void PushAndMarkClosure::remember_mdo(DataLayout* v) {
7691 // TBD
7692 }
7694 void Par_PushAndMarkClosure::remember_mdo(DataLayout* v) {
7695 // TBD
7696 }
7698 void CMSPrecleanRefsYieldClosure::do_yield_work() {
7699 DEBUG_ONLY(RememberKlassesChecker mux(false);)
7700 Mutex* bml = _collector->bitMapLock();
7701 assert_lock_strong(bml);
7702 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7703 "CMS thread should hold CMS token");
7705 bml->unlock();
7706 ConcurrentMarkSweepThread::desynchronize(true);
7708 ConcurrentMarkSweepThread::acknowledge_yield_request();
7710 _collector->stopTimer();
7711 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
7712 if (PrintCMSStatistics != 0) {
7713 _collector->incrementYields();
7714 }
7715 _collector->icms_wait();
7717 // See the comment in coordinator_yield()
7718 for (unsigned i = 0; i < CMSYieldSleepCount &&
7719 ConcurrentMarkSweepThread::should_yield() &&
7720 !CMSCollector::foregroundGCIsActive(); ++i) {
7721 os::sleep(Thread::current(), 1, false);
7722 ConcurrentMarkSweepThread::acknowledge_yield_request();
7723 }
7725 ConcurrentMarkSweepThread::synchronize(true);
7726 bml->lock();
7728 _collector->startTimer();
7729 }
7731 bool CMSPrecleanRefsYieldClosure::should_return() {
7732 if (ConcurrentMarkSweepThread::should_yield()) {
7733 do_yield_work();
7734 }
7735 return _collector->foregroundGCIsActive();
7736 }
7738 void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) {
7739 assert(((size_t)mr.start())%CardTableModRefBS::card_size_in_words == 0,
7740 "mr should be aligned to start at a card boundary");
7741 // We'd like to assert:
7742 // assert(mr.word_size()%CardTableModRefBS::card_size_in_words == 0,
7743 // "mr should be a range of cards");
7744 // However, that would be too strong in one case -- the last
7745 // partition ends at _unallocated_block which, in general, can be
7746 // an arbitrary boundary, not necessarily card aligned.
7747 if (PrintCMSStatistics != 0) {
7748 _num_dirty_cards +=
7749 mr.word_size()/CardTableModRefBS::card_size_in_words;
7750 }
7751 _space->object_iterate_mem(mr, &_scan_cl);
7752 }
7754 SweepClosure::SweepClosure(CMSCollector* collector,
7755 ConcurrentMarkSweepGeneration* g,
7756 CMSBitMap* bitMap, bool should_yield) :
7757 _collector(collector),
7758 _g(g),
7759 _sp(g->cmsSpace()),
7760 _limit(_sp->sweep_limit()),
7761 _freelistLock(_sp->freelistLock()),
7762 _bitMap(bitMap),
7763 _yield(should_yield),
7764 _inFreeRange(false), // No free range at beginning of sweep
7765 _freeRangeInFreeLists(false), // No free range at beginning of sweep
7766 _lastFreeRangeCoalesced(false),
7767 _freeFinger(g->used_region().start())
7768 {
7769 NOT_PRODUCT(
7770 _numObjectsFreed = 0;
7771 _numWordsFreed = 0;
7772 _numObjectsLive = 0;
7773 _numWordsLive = 0;
7774 _numObjectsAlreadyFree = 0;
7775 _numWordsAlreadyFree = 0;
7776 _last_fc = NULL;
7778 _sp->initializeIndexedFreeListArrayReturnedBytes();
7779 _sp->dictionary()->initializeDictReturnedBytes();
7780 )
7781 assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7782 "sweep _limit out of bounds");
7783 if (CMSTraceSweeper) {
7784 gclog_or_tty->print("\n====================\nStarting new sweep\n");
7785 }
7786 }
7788 // We need this destructor to reclaim any space at the end
7789 // of the space, which do_blk below may not have added back to
7790 // the free lists. [basically dealing with the "fringe effect"]
7791 SweepClosure::~SweepClosure() {
7792 assert_lock_strong(_freelistLock);
7793 // this should be treated as the end of a free run if any
7794 // The current free range should be returned to the free lists
7795 // as one coalesced chunk.
7796 if (inFreeRange()) {
7797 flushCurFreeChunk(freeFinger(),
7798 pointer_delta(_limit, freeFinger()));
7799 assert(freeFinger() < _limit, "the finger pointeth off base");
7800 if (CMSTraceSweeper) {
7801 gclog_or_tty->print("destructor:");
7802 gclog_or_tty->print("Sweep:put_free_blk 0x%x ("SIZE_FORMAT") "
7803 "[coalesced:"SIZE_FORMAT"]\n",
7804 freeFinger(), pointer_delta(_limit, freeFinger()),
7805 lastFreeRangeCoalesced());
7806 }
7807 }
7808 NOT_PRODUCT(
7809 if (Verbose && PrintGC) {
7810 gclog_or_tty->print("Collected "SIZE_FORMAT" objects, "
7811 SIZE_FORMAT " bytes",
7812 _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
7813 gclog_or_tty->print_cr("\nLive "SIZE_FORMAT" objects, "
7814 SIZE_FORMAT" bytes "
7815 "Already free "SIZE_FORMAT" objects, "SIZE_FORMAT" bytes",
7816 _numObjectsLive, _numWordsLive*sizeof(HeapWord),
7817 _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
7818 size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree) *
7819 sizeof(HeapWord);
7820 gclog_or_tty->print_cr("Total sweep: "SIZE_FORMAT" bytes", totalBytes);
7822 if (PrintCMSStatistics && CMSVerifyReturnedBytes) {
7823 size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
7824 size_t dictReturnedBytes = _sp->dictionary()->sumDictReturnedBytes();
7825 size_t returnedBytes = indexListReturnedBytes + dictReturnedBytes;
7826 gclog_or_tty->print("Returned "SIZE_FORMAT" bytes", returnedBytes);
7827 gclog_or_tty->print(" Indexed List Returned "SIZE_FORMAT" bytes",
7828 indexListReturnedBytes);
7829 gclog_or_tty->print_cr(" Dictionary Returned "SIZE_FORMAT" bytes",
7830 dictReturnedBytes);
7831 }
7832 }
7833 )
7834 // Now, in debug mode, just null out the sweep_limit
7835 NOT_PRODUCT(_sp->clear_sweep_limit();)
7836 if (CMSTraceSweeper) {
7837 gclog_or_tty->print("end of sweep\n================\n");
7838 }
7839 }
7841 void SweepClosure::initialize_free_range(HeapWord* freeFinger,
7842 bool freeRangeInFreeLists) {
7843 if (CMSTraceSweeper) {
7844 gclog_or_tty->print("---- Start free range 0x%x with free block [%d] (%d)\n",
7845 freeFinger, _sp->block_size(freeFinger),
7846 freeRangeInFreeLists);
7847 }
7848 assert(!inFreeRange(), "Trampling existing free range");
7849 set_inFreeRange(true);
7850 set_lastFreeRangeCoalesced(false);
7852 set_freeFinger(freeFinger);
7853 set_freeRangeInFreeLists(freeRangeInFreeLists);
7854 if (CMSTestInFreeList) {
7855 if (freeRangeInFreeLists) {
7856 FreeChunk* fc = (FreeChunk*) freeFinger;
7857 assert(fc->isFree(), "A chunk on the free list should be free.");
7858 assert(fc->size() > 0, "Free range should have a size");
7859 assert(_sp->verifyChunkInFreeLists(fc), "Chunk is not in free lists");
7860 }
7861 }
7862 }
7864 // Note that the sweeper runs concurrently with mutators. Thus,
7865 // it is possible for direct allocation in this generation to happen
7866 // in the middle of the sweep. Note that the sweeper also coalesces
7867 // contiguous free blocks. Thus, unless the sweeper and the allocator
7868 // synchronize appropriately, freshly allocated blocks may get swept up.
7869 // This is accomplished by the sweeper locking the free lists while
7870 // it is sweeping. Thus blocks that are determined to be free are
7871 // indeed free. There is however one additional complication:
7872 // blocks that have been allocated since the final checkpoint and
7873 // mark, will not have been marked and so would be treated as
7874 // unreachable and swept up. To prevent this, the allocator marks
7875 // the bit map when allocating during the sweep phase. This leads,
7876 // however, to a further complication -- objects may have been allocated
7877 // but not yet initialized -- in the sense that the header isn't yet
7878 // installed. The sweeper can not then determine the size of the block
7879 // in order to skip over it. To deal with this case, we use a technique
7880 // (due to Printezis) to encode such uninitialized block sizes in the
7881 // bit map. Since the bit map uses a bit per every HeapWord, but the
7882 // CMS generation has a minimum object size of 3 HeapWords, it follows
7883 // that "normal marks" won't be adjacent in the bit map (there will
7884 // always be at least two 0 bits between successive 1 bits). We make use
7885 // of these "unused" bits to represent uninitialized blocks -- the bit
7886 // corresponding to the start of the uninitialized object and the next
7887 // bit are both set. Finally, a 1 bit marks the end of the object that
7888 // started with the two consecutive 1 bits to indicate its potentially
7889 // uninitialized state.
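// [Editorial illustration, not part of the original source] A worked
// example of the encoding described above: for a 4-HeapWord block starting
// at heap word offset k whose header is not yet installed, bits k and k+1
// (the "Printezis marks") and bit k+3 (the block's last word) are set:
//
//     offset:  k   k+1  k+2  k+3
//     bit:     1    1    0    1
//
// On seeing the adjacent 1 bits at k and k+1, the sweeper finds the next
// marked bit at k+3 and computes the size as (k+3) + 1 - k = 4 words
// (cf. doLiveChunk() below), without reading the uninitialized header.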
7891 size_t SweepClosure::do_blk_careful(HeapWord* addr) {
7892 FreeChunk* fc = (FreeChunk*)addr;
7893 size_t res;
7895 // check if we are done sweeping
7896 if (addr == _limit) { // we have swept up to the limit, do nothing more
7897 assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7898 "sweep _limit out of bounds");
7899 // help the closure application finish
7900 return pointer_delta(_sp->end(), _limit);
7901 }
7902 assert(addr <= _limit, "sweep invariant");
7904 // check if we should yield
7905 do_yield_check(addr);
7906 if (fc->isFree()) {
7907 // Chunk that is already free
7908 res = fc->size();
7909 doAlreadyFreeChunk(fc);
7910 debug_only(_sp->verifyFreeLists());
7911 assert(res == fc->size(), "Don't expect the size to change");
7912 NOT_PRODUCT(
7913 _numObjectsAlreadyFree++;
7914 _numWordsAlreadyFree += res;
7915 )
7916 NOT_PRODUCT(_last_fc = fc;)
7917 } else if (!_bitMap->isMarked(addr)) {
7918 // Chunk is fresh garbage
7919 res = doGarbageChunk(fc);
7920 debug_only(_sp->verifyFreeLists());
7921 NOT_PRODUCT(
7922 _numObjectsFreed++;
7923 _numWordsFreed += res;
7924 )
7925 } else {
7926 // Chunk that is alive.
7927 res = doLiveChunk(fc);
7928 debug_only(_sp->verifyFreeLists());
7929 NOT_PRODUCT(
7930 _numObjectsLive++;
7931 _numWordsLive += res;
7932 )
7933 }
7934 return res;
7935 }
7937 // For the smart allocation, record following
7938 // split deaths - a free chunk is removed from its free list because
7939 // it is being split into two or more chunks.
7940 // split birth - a free chunk is being added to its free list because
7941 // a larger free chunk has been split and resulted in this free chunk.
7942 // coal death - a free chunk is being removed from its free list because
7943 // it is being coalesced into a large free chunk.
7944 // coal birth - a free chunk is being added to its free list because
7945 // it was created when two or more free chunks where coalesced into
7946 // this free chunk.
7947 //
7948 // These statistics are used to determine the desired number of free
7949 // chunks of a given size. The desired number is chosen to be relative
7950 // to the end of a CMS sweep. The desired number at the end of a sweep
7951 // is the
7952 // count-at-end-of-previous-sweep (an amount that was enough)
7953 // - count-at-beginning-of-current-sweep (the excess)
7954 // + split-births (gains in this size during interval)
7955 // - split-deaths (demands on this size during interval)
7956 // where the interval is from the end of one sweep to the end of the
7957 // next.
7958 //
7959 // When sweeping the sweeper maintains an accumulated chunk which is
7960 // the chunk that is made up of chunks that have been coalesced. That
7961 // will be termed the left-hand chunk. A new chunk of garbage that
7962 // is being considered for coalescing will be referred to as the
7963 // right-hand chunk.
7964 //
7965 // When making a decision on whether to coalesce a right-hand chunk with
7966 // the current left-hand chunk, the current count vs. the desired count
7967 // of the left-hand chunk is considered. Also if the right-hand chunk
7968 // is near the large chunk at the end of the heap (see
7969 // ConcurrentMarkSweepGeneration::isNearLargestChunk()), then the
7970 // left-hand chunk is coalesced.
7971 //
7972 // When making a decision about whether to split a chunk, the desired count
7973 // vs. the current count of the candidate to be split is also considered.
7974 // If the candidate is underpopulated (currently fewer chunks than desired)
7975 // a chunk of an overpopulated (currently more chunks than desired) size may
7976 // be chosen. The "hint" associated with a free list, if non-null, points
7977 // to a free list which may be overpopulated.
7978 //
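// [Editorial illustration, not part of the original source] Plugging
// hypothetical numbers into the desired-count formula above for one free
// list size: if the count at the end of the previous sweep was 100, the
// count at the beginning of the current sweep is 30, and the interval saw
// 25 split births and 40 split deaths, then the desired count at the end
// of this sweep is 100 - 30 + 25 - 40 = 55 chunks of that size.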
7980 void SweepClosure::doAlreadyFreeChunk(FreeChunk* fc) {
7981 size_t size = fc->size();
7982 // Chunks that cannot be coalesced are not in the
7983 // free lists.
7984 if (CMSTestInFreeList && !fc->cantCoalesce()) {
7985 assert(_sp->verifyChunkInFreeLists(fc),
7986 "free chunk should be in free lists");
7987 }
7988 // a chunk that is already free, should not have been
7989 // marked in the bit map
7990 HeapWord* addr = (HeapWord*) fc;
7991 assert(!_bitMap->isMarked(addr), "free chunk should be unmarked");
7992 // Verify that the bit map has no bits marked between
7993 // addr and purported end of this block.
7994 _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
7996 // Some chunks cannot be coalesced in under any circumstances.
7997 // See the definition of cantCoalesce().
7998 if (!fc->cantCoalesce()) {
7999 // This chunk can potentially be coalesced.
8000 if (_sp->adaptive_freelists()) {
8001 // All the work is done in
8002 doPostIsFreeOrGarbageChunk(fc, size);
8003 } else { // Not adaptive free lists
8004 // this is a free chunk that can potentially be coalesced by the sweeper;
8005 if (!inFreeRange()) {
8006 // if the next chunk is a free block that can't be coalesced
8007 // it doesn't make sense to remove this chunk from the free lists
8008 FreeChunk* nextChunk = (FreeChunk*)(addr + size);
8009 assert((HeapWord*)nextChunk <= _limit, "sweep invariant");
8010 if ((HeapWord*)nextChunk < _limit && // there's a next chunk...
8011 nextChunk->isFree() && // which is free...
8012 nextChunk->cantCoalesce()) { // ... but can't be coalesced
8013 // nothing to do
8014 } else {
8015 // Potentially the start of a new free range:
8016 // Don't eagerly remove it from the free lists.
8017 // No need to remove it if it will just be put
8018 // back again. (Also from a pragmatic point of view
8019 // if it is a free block in a region that is beyond
8020 // any allocated blocks, an assertion will fail)
8021 // Remember the start of a free run.
8022 initialize_free_range(addr, true);
8023 // end - can coalesce with next chunk
8024 }
8025 } else {
8026 // the midst of a free range, we are coalescing
8027 debug_only(record_free_block_coalesced(fc);)
8028 if (CMSTraceSweeper) {
8029 gclog_or_tty->print(" -- pick up free block 0x%x (%d)\n", fc, size);
8030 }
8031 // remove it from the free lists
8032 _sp->removeFreeChunkFromFreeLists(fc);
8033 set_lastFreeRangeCoalesced(true);
8034 // If the chunk is being coalesced and the current free range is
8035 // in the free lists, remove the current free range so that it
8036 // will be returned to the free lists in its entirety - all
8037 // the coalesced pieces included.
8038 if (freeRangeInFreeLists()) {
8039 FreeChunk* ffc = (FreeChunk*) freeFinger();
8040 assert(ffc->size() == pointer_delta(addr, freeFinger()),
8041 "Size of free range is inconsistent with chunk size.");
8042 if (CMSTestInFreeList) {
8043 assert(_sp->verifyChunkInFreeLists(ffc),
8044 "free range is not in free lists");
8045 }
8046 _sp->removeFreeChunkFromFreeLists(ffc);
8047 set_freeRangeInFreeLists(false);
8048 }
8049 }
8050 }
8051 } else {
8052 // Code path common to both original and adaptive free lists.
8054 // can't coalesce with previous block; this should be treated
8055 // as the end of a free run if any
8056 if (inFreeRange()) {
8057 // we kicked some butt; time to pick up the garbage
8058 assert(freeFinger() < addr, "the finger pointeth off base");
8059 flushCurFreeChunk(freeFinger(), pointer_delta(addr, freeFinger()));
8060 }
8061 // else, nothing to do, just continue
8062 }
8063 }
8065 size_t SweepClosure::doGarbageChunk(FreeChunk* fc) {
8066 // This is a chunk of garbage. It is not in any free list.
8067 // Add it to a free list or let it possibly be coalesced into
8068 // a larger chunk.
8069 HeapWord* addr = (HeapWord*) fc;
8070 size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
8072 if (_sp->adaptive_freelists()) {
8073 // Verify that the bit map has no bits marked between
8074 // addr and purported end of just dead object.
8075 _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
8077 doPostIsFreeOrGarbageChunk(fc, size);
8078 } else {
8079 if (!inFreeRange()) {
8080 // start of a new free range
8081 assert(size > 0, "A free range should have a size");
8082 initialize_free_range(addr, false);
8084 } else {
8085 // this will be swept up when we hit the end of the
8086 // free range
8087 if (CMSTraceSweeper) {
8088 gclog_or_tty->print(" -- pick up garbage 0x%x (%d) \n", fc, size);
8089 }
8090 // If the chunk is being coalesced and the current free range is
8091 // in the free lists, remove the current free range so that it
8092 // will be returned to the free lists in its entirety - all
8093 // the coalesced pieces included.
8094 if (freeRangeInFreeLists()) {
8095 FreeChunk* ffc = (FreeChunk*)freeFinger();
8096 assert(ffc->size() == pointer_delta(addr, freeFinger()),
8097 "Size of free range is inconsistent with chunk size.");
8098 if (CMSTestInFreeList) {
8099 assert(_sp->verifyChunkInFreeLists(ffc),
8100 "free range is not in free lists");
8101 }
8102 _sp->removeFreeChunkFromFreeLists(ffc);
8103 set_freeRangeInFreeLists(false);
8104 }
8105 set_lastFreeRangeCoalesced(true);
8106 }
8107 // this will be swept up when we hit the end of the free range
8109 // Verify that the bit map has no bits marked between
8110 // addr and purported end of just dead object.
8111 _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
8112 }
8113 return size;
8114 }
8116 size_t SweepClosure::doLiveChunk(FreeChunk* fc) {
8117 HeapWord* addr = (HeapWord*) fc;
8118 // The sweeper has just found a live object. Return any accumulated
8119 // left hand chunk to the free lists.
8120 if (inFreeRange()) {
8121 if (_sp->adaptive_freelists()) {
8122 flushCurFreeChunk(freeFinger(),
8123 pointer_delta(addr, freeFinger()));
8124 } else { // not adaptive freelists
8125 set_inFreeRange(false);
8126 // Add the free range back to the free list if it is not already
8127 // there.
8128 if (!freeRangeInFreeLists()) {
8129 assert(freeFinger() < addr, "the finger pointeth off base");
8130 if (CMSTraceSweeper) {
8131 gclog_or_tty->print("Sweep:put_free_blk 0x%x (%d) "
8132 "[coalesced:%d]\n",
8133 freeFinger(), pointer_delta(addr, freeFinger()),
8134 lastFreeRangeCoalesced());
8135 }
8136 _sp->addChunkAndRepairOffsetTable(freeFinger(),
8137 pointer_delta(addr, freeFinger()), lastFreeRangeCoalesced());
8138 }
8139 }
8140 }
8142 // Common code path for original and adaptive free lists.
8144 // this object is live: we'd normally expect this to be
8145 // an oop, and like to assert the following:
8146 // assert(oop(addr)->is_oop(), "live block should be an oop");
8147 // However, as we commented above, this may be an object whose
8148 // header hasn't yet been initialized.
8149 size_t size;
8150 assert(_bitMap->isMarked(addr), "Tautology for this control point");
8151 if (_bitMap->isMarked(addr + 1)) {
8152 // Determine the size from the bit map, rather than trying to
8153 // compute it from the object header.
8154 HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
8155 size = pointer_delta(nextOneAddr + 1, addr);
8156 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
8157 "alignment problem");
8159 #ifdef DEBUG
8160 if (oop(addr)->klass_or_null() != NULL &&
8161 ( !_collector->should_unload_classes()
8162 || (oop(addr)->is_parsable()) &&
8163 oop(addr)->is_conc_safe())) {
8164 // Ignore mark word because we are running concurrent with mutators
8165 assert(oop(addr)->is_oop(true), "live block should be an oop");
8166 // is_conc_safe is checked before performing this assertion
8167 // because an object that is not is_conc_safe may yet have
8168 // the return from size() correct.
8169 assert(size ==
8170 CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
8171 "P-mark and computed size do not agree");
8172 }
8173 #endif
8175 } else {
8176 // This should be an initialized object that's alive.
8177 assert(oop(addr)->klass_or_null() != NULL &&
8178 (!_collector->should_unload_classes()
8179 || oop(addr)->is_parsable()),
8180 "Should be an initialized object");
8181 // Note that there are objects used during class redefinition
8182 // (e.g., merge_cp in VM_RedefineClasses::merge_cp_and_rewrite()
8183 // which are discarded with their is_conc_safe state still
8184 // false. These objects may be floating garbage so may be
8185 // seen here. If they are floating garbage their size
8186 // should be attainable from their klass. Do not assume that
8187 // is_conc_safe() is true for oop(addr).
8188 // Ignore mark word because we are running concurrent with mutators
8189 assert(oop(addr)->is_oop(true), "live block should be an oop");
8190 // Verify that the bit map has no bits marked between
8191 // addr and purported end of this block.
8192 size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
8193 assert(size >= 3, "Necessary for Printezis marks to work");
8194 assert(!_bitMap->isMarked(addr+1), "Tautology for this control point");
8195 DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);)
8196 }
8197 return size;
8198 }
8200 void SweepClosure::doPostIsFreeOrGarbageChunk(FreeChunk* fc,
8201 size_t chunkSize) {
8202 // doPostIsFreeOrGarbageChunk() should only be called in the smart allocation
8203 // scheme.
8204 bool fcInFreeLists = fc->isFree();
8205 assert(_sp->adaptive_freelists(), "Should only be used in this case.");
8206 assert((HeapWord*)fc <= _limit, "sweep invariant");
8207 if (CMSTestInFreeList && fcInFreeLists) {
8208 assert(_sp->verifyChunkInFreeLists(fc),
8209 "free chunk is not in free lists");
8210 }
8213 if (CMSTraceSweeper) {
8214 gclog_or_tty->print_cr(" -- pick up another chunk at 0x%x (%d)", fc, chunkSize);
8215 }
8217 HeapWord* addr = (HeapWord*) fc;
8219 bool coalesce;
8220 size_t left = pointer_delta(addr, freeFinger());
8221 size_t right = chunkSize;
8222 switch (FLSCoalescePolicy) {
8223 // numeric value forms a coalescing aggressiveness metric
8224 case 0: { // never coalesce
8225 coalesce = false;
8226 break;
8227 }
8228 case 1: { // coalesce if left & right chunks on overpopulated lists
8229 coalesce = _sp->coalOverPopulated(left) &&
8230 _sp->coalOverPopulated(right);
8231 break;
8232 }
8233 case 2: { // coalesce if left chunk on overpopulated list (default)
8234 coalesce = _sp->coalOverPopulated(left);
8235 break;
8236 }
8237 case 3: { // coalesce if left OR right chunk on overpopulated list
8238 coalesce = _sp->coalOverPopulated(left) ||
8239 _sp->coalOverPopulated(right);
8240 break;
8241 }
8242 case 4: { // always coalesce
8243 coalesce = true;
8244 break;
8245 }
8246 default:
8247 ShouldNotReachHere();
8248 }
8250 // Should the current free range be coalesced?
8251 // If the chunk is in a free range and either we decided to coalesce above
8252 // or the chunk is near the large block at the end of the heap
8253 // (isNearLargestChunk() returns true), then coalesce this chunk.
8254 bool doCoalesce = inFreeRange() &&
8255 (coalesce || _g->isNearLargestChunk((HeapWord*)fc));
8256 if (doCoalesce) {
8257 // Coalesce the current free range on the left with the new
8258 // chunk on the right. If either is on a free list,
8259 // it must be removed from the list and stashed in the closure.
8260 if (freeRangeInFreeLists()) {
8261 FreeChunk* ffc = (FreeChunk*)freeFinger();
8262 assert(ffc->size() == pointer_delta(addr, freeFinger()),
8263 "Size of free range is inconsistent with chunk size.");
8264 if (CMSTestInFreeList) {
8265 assert(_sp->verifyChunkInFreeLists(ffc),
8266 "Chunk is not in free lists");
8267 }
8268 _sp->coalDeath(ffc->size());
8269 _sp->removeFreeChunkFromFreeLists(ffc);
8270 set_freeRangeInFreeLists(false);
8271 }
8272 if (fcInFreeLists) {
8273 _sp->coalDeath(chunkSize);
8274 assert(fc->size() == chunkSize,
8275 "The chunk has the wrong size or is not in the free lists");
8276 _sp->removeFreeChunkFromFreeLists(fc);
8277 }
8278 set_lastFreeRangeCoalesced(true);
8279 } else { // not in a free range and/or should not coalesce
8280 // Return the current free range and start a new one.
8281 if (inFreeRange()) {
8282 // In a free range but cannot coalesce with the right hand chunk.
8283 // Put the current free range into the free lists.
8284 flushCurFreeChunk(freeFinger(),
8285 pointer_delta(addr, freeFinger()));
8286 }
8287 // Set up for new free range. Pass along whether the right hand
8288 // chunk is in the free lists.
8289 initialize_free_range((HeapWord*)fc, fcInFreeLists);
8290 }
8291 }
8292 void SweepClosure::flushCurFreeChunk(HeapWord* chunk, size_t size) {
8293 assert(inFreeRange(), "Should only be called if currently in a free range.");
8294 assert(size > 0,
8295 "A zero sized chunk cannot be added to the free lists.");
8296 if (!freeRangeInFreeLists()) {
8297 if(CMSTestInFreeList) {
8298 FreeChunk* fc = (FreeChunk*) chunk;
8299 fc->setSize(size);
8300 assert(!_sp->verifyChunkInFreeLists(fc),
8301 "chunk should not be in free lists yet");
8302 }
8303 if (CMSTraceSweeper) {
8304 gclog_or_tty->print_cr(" -- add free block 0x%x (%d) to free lists",
8305 chunk, size);
8306 }
8307 // A new free range is going to be starting. The current
8308 // free range has not been added to the free lists yet or
8309 // was removed so add it back.
8310 // If the current free range was coalesced, then the death
8311 // of the free range was recorded. Record a birth now.
8312 if (lastFreeRangeCoalesced()) {
8313 _sp->coalBirth(size);
8314 }
8315 _sp->addChunkAndRepairOffsetTable(chunk, size,
8316 lastFreeRangeCoalesced());
8317 }
8318 set_inFreeRange(false);
8319 set_freeRangeInFreeLists(false);
8320 }
8322 // We take a break if we've been at this for a while,
8323 // so as to avoid monopolizing the locks involved.
8324 void SweepClosure::do_yield_work(HeapWord* addr) {
8325 // Return current free chunk being used for coalescing (if any)
8326 // to the appropriate freelist. After yielding, the next
8327 // free block encountered will start a coalescing range of
8328 // free blocks. If the next free block is adjacent to the
8329 // chunk just flushed, they will need to wait for the next
8330 // sweep to be coalesced.
8331 if (inFreeRange()) {
8332 flushCurFreeChunk(freeFinger(), pointer_delta(addr, freeFinger()));
8333 }
8335 // First give up the locks, then yield, then re-lock.
8336 // We should probably use a constructor/destructor idiom to
8337 // do this unlock/lock or modify the MutexUnlocker class to
8338 // serve our purpose. XXX
8339 assert_lock_strong(_bitMap->lock());
8340 assert_lock_strong(_freelistLock);
8341 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
8342 "CMS thread should hold CMS token");
8343 _bitMap->lock()->unlock();
8344 _freelistLock->unlock();
8345 ConcurrentMarkSweepThread::desynchronize(true);
8346 ConcurrentMarkSweepThread::acknowledge_yield_request();
8347 _collector->stopTimer();
8348 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
8349 if (PrintCMSStatistics != 0) {
8350 _collector->incrementYields();
8351 }
8352 _collector->icms_wait();
8354 // See the comment in coordinator_yield()
8355 for (unsigned i = 0; i < CMSYieldSleepCount &&
8356 ConcurrentMarkSweepThread::should_yield() &&
8357 !CMSCollector::foregroundGCIsActive(); ++i) {
8358 os::sleep(Thread::current(), 1, false);
8359 ConcurrentMarkSweepThread::acknowledge_yield_request();
8360 }
8362 ConcurrentMarkSweepThread::synchronize(true);
8363 _freelistLock->lock();
8364 _bitMap->lock()->lock_without_safepoint_check();
8365 _collector->startTimer();
8366 }
8368 #ifndef PRODUCT
8369 // This is actually very useful in a product build if it can
8370 // be called from the debugger. Compile it into the product
8371 // as needed.
8372 bool debug_verifyChunkInFreeLists(FreeChunk* fc) {
8373 return debug_cms_space->verifyChunkInFreeLists(fc);
8374 }
8376 void SweepClosure::record_free_block_coalesced(FreeChunk* fc) const {
8377 if (CMSTraceSweeper) {
8378 gclog_or_tty->print("Sweep:coal_free_blk 0x%x (%d)\n", fc, fc->size());
8379 }
8380 }
8381 #endif
8383 // CMSIsAliveClosure
8384 bool CMSIsAliveClosure::do_object_b(oop obj) {
8385 HeapWord* addr = (HeapWord*)obj;
8386 return addr != NULL &&
8387 (!_span.contains(addr) || _bit_map->isMarked(addr));
8388 }
8390 CMSKeepAliveClosure::CMSKeepAliveClosure( CMSCollector* collector,
8391 MemRegion span,
8392 CMSBitMap* bit_map, CMSMarkStack* mark_stack,
8393 CMSMarkStack* revisit_stack, bool cpc):
8394 KlassRememberingOopClosure(collector, NULL, revisit_stack),
8395 _span(span),
8396 _bit_map(bit_map),
8397 _mark_stack(mark_stack),
8398 _concurrent_precleaning(cpc) {
8399 assert(!_span.is_empty(), "Empty span could spell trouble");
8400 }
8403 // CMSKeepAliveClosure: the serial version
8404 void CMSKeepAliveClosure::do_oop(oop obj) {
8405 HeapWord* addr = (HeapWord*)obj;
8406 if (_span.contains(addr) &&
8407 !_bit_map->isMarked(addr)) {
8408 _bit_map->mark(addr);
8409 bool simulate_overflow = false;
8410 NOT_PRODUCT(
8411 if (CMSMarkStackOverflowALot &&
8412 _collector->simulate_overflow()) {
8413 // simulate a stack overflow
8414 simulate_overflow = true;
8415 }
8416 )
8417 if (simulate_overflow || !_mark_stack->push(obj)) {
8418 if (_concurrent_precleaning) {
8419 // We dirty the overflown object and let the remark
8420 // phase deal with it.
8421 assert(_collector->overflow_list_is_empty(), "Error");
8422 // In the case of object arrays, we need to dirty all of
8423 // the cards that the object spans. No locking or atomics
8424 // are needed since no one else can be mutating the mod union
8425 // table.
8426 if (obj->is_objArray()) {
8427 size_t sz = obj->size();
8428 HeapWord* end_card_addr =
8429 (HeapWord*)round_to((intptr_t)(addr+sz), CardTableModRefBS::card_size);
8430 MemRegion redirty_range = MemRegion(addr, end_card_addr);
8431 assert(!redirty_range.is_empty(), "Arithmetical tautology");
8432 _collector->_modUnionTable.mark_range(redirty_range);
8433 } else {
8434 _collector->_modUnionTable.mark(addr);
8435 }
8436 _collector->_ser_kac_preclean_ovflw++;
8437 } else {
8438 _collector->push_on_overflow_list(obj);
8439 _collector->_ser_kac_ovflw++;
8440 }
8441 }
8442 }
8443 }
8445 void CMSKeepAliveClosure::do_oop(oop* p) { CMSKeepAliveClosure::do_oop_work(p); }
8446 void CMSKeepAliveClosure::do_oop(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
8448 // CMSParKeepAliveClosure: a parallel version of the above.
8449 // The work queues are private to each closure (thread),
8450 // but (may be) available for stealing by other threads.
8451 void CMSParKeepAliveClosure::do_oop(oop obj) {
8452 HeapWord* addr = (HeapWord*)obj;
8453 if (_span.contains(addr) &&
8454 !_bit_map->isMarked(addr)) {
8455 // In general, during recursive tracing, several threads
8456 // may be concurrently getting here; the first one to
8457 // "tag" it, claims it.
8458 if (_bit_map->par_mark(addr)) {
8459 bool res = _work_queue->push(obj);
8460 assert(res, "Low water mark should be much less than capacity");
8461 // Do a recursive trim in the hope that this will keep
8462 // stack usage lower, but leave some oops for potential stealers
8463 trim_queue(_low_water_mark);
8464 } // Else, another thread got there first
8465 }
8466 }
8468 void CMSParKeepAliveClosure::do_oop(oop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
8469 void CMSParKeepAliveClosure::do_oop(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
8471 void CMSParKeepAliveClosure::trim_queue(uint max) {
8472 while (_work_queue->size() > max) {
8473 oop new_oop;
8474 if (_work_queue->pop_local(new_oop)) {
8475 assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
8476 assert(_bit_map->isMarked((HeapWord*)new_oop),
8477 "no white objects on this stack!");
8478 assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
8479 // iterate over the oops in this oop, marking and pushing
8480 // the ones in CMS heap (i.e. in _span).
8481 new_oop->oop_iterate(&_mark_and_push);
8482 }
8483 }
8484 }
8486 CMSInnerParMarkAndPushClosure::CMSInnerParMarkAndPushClosure(
8487 CMSCollector* collector,
8488 MemRegion span, CMSBitMap* bit_map,
8489 CMSMarkStack* revisit_stack,
8490 OopTaskQueue* work_queue):
8491 Par_KlassRememberingOopClosure(collector, NULL, revisit_stack),
8492 _span(span),
8493 _bit_map(bit_map),
8494 _work_queue(work_queue) { }
8496 void CMSInnerParMarkAndPushClosure::do_oop(oop obj) {
8497 HeapWord* addr = (HeapWord*)obj;
8498 if (_span.contains(addr) &&
8499 !_bit_map->isMarked(addr)) {
8500 if (_bit_map->par_mark(addr)) {
8501 bool simulate_overflow = false;
8502 NOT_PRODUCT(
8503 if (CMSMarkStackOverflowALot &&
8504 _collector->par_simulate_overflow()) {
8505 // simulate a stack overflow
8506 simulate_overflow = true;
8507 }
8508 )
8509 if (simulate_overflow || !_work_queue->push(obj)) {
8510 _collector->par_push_on_overflow_list(obj);
8511 _collector->_par_kac_ovflw++;
8512 }
8513 } // Else another thread got there already
8514 }
8515 }
8517 void CMSInnerParMarkAndPushClosure::do_oop(oop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
8518 void CMSInnerParMarkAndPushClosure::do_oop(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
8520 //////////////////////////////////////////////////////////////////
8521 // CMSExpansionCause /////////////////////////////
8522 //////////////////////////////////////////////////////////////////
8523 const char* CMSExpansionCause::to_string(CMSExpansionCause::Cause cause) {
8524 switch (cause) {
8525 case _no_expansion:
8526 return "No expansion";
8527 case _satisfy_free_ratio:
8528 return "Free ratio";
8529 case _satisfy_promotion:
8530 return "Satisfy promotion";
8531 case _satisfy_allocation:
8532 return "allocation";
8533 case _allocate_par_lab:
8534 return "Par LAB";
8535 case _allocate_par_spooling_space:
8536 return "Par Spooling Space";
8537 case _adaptive_size_policy:
8538 return "Ergonomics";
8539 default:
8540 return "unknown";
8541 }
8542 }
8544 void CMSDrainMarkingStackClosure::do_void() {
8545 // the max number to take from overflow list at a time
8546 const size_t num = _mark_stack->capacity()/4;
8547 assert(!_concurrent_precleaning || _collector->overflow_list_is_empty(),
8548 "Overflow list should be NULL during concurrent phases");
8549 while (!_mark_stack->isEmpty() ||
8550 // if stack is empty, check the overflow list
8551 _collector->take_from_overflow_list(num, _mark_stack)) {
8552 oop obj = _mark_stack->pop();
8553 HeapWord* addr = (HeapWord*)obj;
8554 assert(_span.contains(addr), "Should be within span");
8555 assert(_bit_map->isMarked(addr), "Should be marked");
8556 assert(obj->is_oop(), "Should be an oop");
8557 obj->oop_iterate(_keep_alive);
8558 }
8559 }
8561 void CMSParDrainMarkingStackClosure::do_void() {
8562 // drain queue
8563 trim_queue(0);
8564 }
8566 // Trim our work_queue so its length is below max at return
8567 void CMSParDrainMarkingStackClosure::trim_queue(uint max) {
8568 while (_work_queue->size() > max) {
8569 oop new_oop;
8570 if (_work_queue->pop_local(new_oop)) {
8571 assert(new_oop->is_oop(), "Expected an oop");
8572 assert(_bit_map->isMarked((HeapWord*)new_oop),
8573 "no white objects on this stack!");
8574 assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
8575 // iterate over the oops in this oop, marking and pushing
8576 // the ones in CMS heap (i.e. in _span).
8577 new_oop->oop_iterate(&_mark_and_push);
8578 }
8579 }
8580 }
8582 ////////////////////////////////////////////////////////////////////
8583 // Support for Marking Stack Overflow list handling and related code
8584 ////////////////////////////////////////////////////////////////////
8585 // Much of the following code is similar in shape and spirit to the
8586 // code used in ParNewGC. We should try and share that code
8587 // as much as possible in the future.
8589 #ifndef PRODUCT
8590 // Debugging support for CMSStackOverflowALot
8592 // It's OK to call this multi-threaded; the worst thing
8593 // that can happen is that we'll get a bunch of closely
8594 // spaced simulated overflows, but that's OK, in fact
8595 // probably good as it would exercise the overflow code
8596 // under contention.
8597 bool CMSCollector::simulate_overflow() {
8598 if (_overflow_counter-- <= 0) { // just being defensive
8599 _overflow_counter = CMSMarkStackOverflowInterval;
8600 return true;
8601 } else {
8602 return false;
8603 }
8604 }
8606 bool CMSCollector::par_simulate_overflow() {
8607 return simulate_overflow();
8608 }
8609 #endif
8611 // Single-threaded
8612 bool CMSCollector::take_from_overflow_list(size_t num, CMSMarkStack* stack) {
8613 assert(stack->isEmpty(), "Expected precondition");
8614 assert(stack->capacity() > num, "Shouldn't bite more than can chew");
8615 size_t i = num;
8616 oop cur = _overflow_list;
8617 const markOop proto = markOopDesc::prototype();
8618 NOT_PRODUCT(ssize_t n = 0;)
8619 for (oop next; i > 0 && cur != NULL; cur = next, i--) {
8620 next = oop(cur->mark());
8621 cur->set_mark(proto); // until proven otherwise
8622 assert(cur->is_oop(), "Should be an oop");
8623 bool res = stack->push(cur);
8624 assert(res, "Bit off more than can chew?");
8625 NOT_PRODUCT(n++;)
8626 }
8627 _overflow_list = cur;
8628 #ifndef PRODUCT
8629 assert(_num_par_pushes >= n, "Too many pops?");
8630 _num_par_pushes -= n;
8631 #endif
8632 return !stack->isEmpty();
8633 }
8635 #define BUSY (oop(0x1aff1aff))
8636 // (MT-safe) Get a prefix of at most "num" from the list.
8637 // The overflow list is chained through the mark word of
8638 // each object in the list. We fetch the entire list,
8639 // break off a prefix of the right size and return the
8640 // remainder. If other threads try to take objects from
8641 // the overflow list at that time, they will wait for
8642 // some time to see if data becomes available. If (and
8643 // only if) another thread places one or more object(s)
8644 // on the global list before we have returned the suffix
8645 // to the global list, we will walk down our local list
8646 // to find its end and append the global list to
8647 // our suffix before returning it. This suffix walk can
8648 // prove to be expensive (quadratic in the amount of traffic)
8649 // when there are many objects in the overflow list and
8650 // there is much producer-consumer contention on the list.
8651 // *NOTE*: The overflow list manipulation code here and
8652 // in ParNewGeneration:: are very similar in shape,
8653 // except that in the ParNew case we use the old (from/eden)
8654 // copy of the object to thread the list via its klass word.
8655 // Because of the common code, if you make any changes in
8656 // the code below, please check the ParNew version to see if
8657 // similar changes might be needed.
8658 // CR 6797058 has been filed to consolidate the common code.
8659 bool CMSCollector::par_take_from_overflow_list(size_t num,
8660 OopTaskQueue* work_q) {
8661 assert(work_q->size() == 0, "First empty local work queue");
8662 assert(num < work_q->max_elems(), "Can't bite more than we can chew");
8663 if (_overflow_list == NULL) {
8664 return false;
8665 }
8666 // Grab the entire list; we'll put back a suffix
8667 oop prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
8668 Thread* tid = Thread::current();
8669 size_t CMSOverflowSpinCount = (size_t)ParallelGCThreads;
8670 size_t sleep_time_millis = MAX2((size_t)1, num/100);
8671 // If the list is busy, we spin for a short while,
8672 // sleeping between attempts to get the list.
8673 for (size_t spin = 0; prefix == BUSY && spin < CMSOverflowSpinCount; spin++) {
8674 os::sleep(tid, sleep_time_millis, false);
8675 if (_overflow_list == NULL) {
8676 // Nothing left to take
8677 return false;
8678 } else if (_overflow_list != BUSY) {
8679 // Try and grab the prefix
8680 prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
8681 }
8682 }
8683 // If the list was found to be empty, or we spun long
8684 // enough, we give up and return empty-handed. If we leave
8685 // the list in the BUSY state below, it must be the case that
8686 // some other thread holds the overflow list and will set it
8687 // to a non-BUSY state in the future.
8688 if (prefix == NULL || prefix == BUSY) {
8689 // Nothing to take or waited long enough
8690 if (prefix == NULL) {
8691 // Write back the NULL in case we overwrote it with BUSY above
8692 // and it is still the same value.
8693 (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
8694 }
8695 return false;
8696 }
8697 assert(prefix != NULL && prefix != BUSY, "Error");
8698 size_t i = num;
8699 oop cur = prefix;
8700 // Walk down the first "num" objects, unless we reach the end.
8701 for (; i > 1 && cur->mark() != NULL; cur = oop(cur->mark()), i--);
8702 if (cur->mark() == NULL) {
8703 // We have "num" or fewer elements in the list, so there
8704 // is nothing to return to the global list.
8705 // Write back the NULL in lieu of the BUSY we wrote
8706 // above, if it is still the same value.
8707 if (_overflow_list == BUSY) {
8708 (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
8709 }
8710 } else {
8711 // Chop off the suffix and return it to the global list.
8712 assert(cur->mark() != BUSY, "Error");
8713 oop suffix_head = cur->mark(); // suffix will be put back on global list
8714 cur->set_mark(NULL); // break off suffix
8715 // It's possible that the list is still in the empty (BUSY) state
8716 // we left it in a short while ago; in that case we may be
8717 // able to place back the suffix without incurring the cost
8718 // of a walk down the list.
8719 oop observed_overflow_list = _overflow_list;
8720 oop cur_overflow_list = observed_overflow_list;
8721 bool attached = false;
8722 while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
8723 observed_overflow_list =
8724 (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
8725 if (cur_overflow_list == observed_overflow_list) {
8726 attached = true;
8727 break;
8728 } else cur_overflow_list = observed_overflow_list;
8729 }
8730 if (!attached) {
8731 // Too bad, someone else sneaked in (at least) an element; we'll need
8732 // to do a splice. Find tail of suffix so we can prepend suffix to global
8733 // list.
8734 for (cur = suffix_head; cur->mark() != NULL; cur = (oop)(cur->mark()));
8735 oop suffix_tail = cur;
8736 assert(suffix_tail != NULL && suffix_tail->mark() == NULL,
8737 "Tautology");
8738 observed_overflow_list = _overflow_list;
8739 do {
8740 cur_overflow_list = observed_overflow_list;
8741 if (cur_overflow_list != BUSY) {
8742 // Do the splice ...
8743 suffix_tail->set_mark(markOop(cur_overflow_list));
8744 } else { // cur_overflow_list == BUSY
8745 suffix_tail->set_mark(NULL);
8746 }
8747 // ... and try to place spliced list back on overflow_list ...
8748 observed_overflow_list =
8749 (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
8750 } while (cur_overflow_list != observed_overflow_list);
8751 // ... until we have succeeded in doing so.
8752 }
8753 }
8755 // Push the prefix elements on work_q
8756 assert(prefix != NULL, "control point invariant");
8757 const markOop proto = markOopDesc::prototype();
8758 oop next;
8759 NOT_PRODUCT(ssize_t n = 0;)
8760 for (cur = prefix; cur != NULL; cur = next) {
8761 next = oop(cur->mark());
8762 cur->set_mark(proto); // until proven otherwise
8763 assert(cur->is_oop(), "Should be an oop");
8764 bool res = work_q->push(cur);
8765 assert(res, "Bit off more than we can chew?");
8766 NOT_PRODUCT(n++;)
8767 }
8768 #ifndef PRODUCT
8769 assert(_num_par_pushes >= n, "Too many pops?");
8770 Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
8771 #endif
8772 return true;
8773 }
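// The protocol above is essentially a detach/re-attach on a lock-free list:
// the whole list is claimed with an unconditional exchange that leaves the
// BUSY sentinel behind, and any unwanted suffix is later spliced back in
// with a compare-and-exchange that retries if producers have pushed in the
// interim. The sketch below (illustrative only, not compiled) restates that
// pattern on a hypothetical "Node" list with an explicit next field, rather
// than the mark-word threading used by the real overflow list.
#if 0
struct Node { Node* next; };
static Node* volatile _demo_list = NULL;        // hypothetical global list
static Node* const    DEMO_BUSY  = (Node*)0x1;  // sentinel, analogous to BUSY

// Claim the entire list, leaving the sentinel in its place.
static Node* demo_take_all() {
  return (Node*)Atomic::xchg_ptr(DEMO_BUSY, &_demo_list);
}

// Return a suffix [head, tail] to the global list; if producers have pushed
// in the meantime, splice the suffix in front of whatever is there now.
static void demo_return_suffix(Node* head, Node* tail) {
  Node* observed = _demo_list;
  Node* cur;
  do {
    cur = observed;
    tail->next = (cur == DEMO_BUSY) ? NULL : cur;  // never point at the sentinel
    observed = (Node*)Atomic::cmpxchg_ptr(head, &_demo_list, cur);
  } while (observed != cur);
}
#endif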
8775 // Single-threaded
8776 void CMSCollector::push_on_overflow_list(oop p) {
8777 NOT_PRODUCT(_num_par_pushes++;)
8778 assert(p->is_oop(), "Not an oop");
8779 preserve_mark_if_necessary(p);
8780 p->set_mark((markOop)_overflow_list);
8781 _overflow_list = p;
8782 }
8784 // Multi-threaded; use CAS to prepend to overflow list
8785 void CMSCollector::par_push_on_overflow_list(oop p) {
8786 NOT_PRODUCT(Atomic::inc_ptr(&_num_par_pushes);)
8787 assert(p->is_oop(), "Not an oop");
8788 par_preserve_mark_if_necessary(p);
8789 oop observed_overflow_list = _overflow_list;
8790 oop cur_overflow_list;
8791 do {
8792 cur_overflow_list = observed_overflow_list;
8793 if (cur_overflow_list != BUSY) {
8794 p->set_mark(markOop(cur_overflow_list));
8795 } else {
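// The list is currently detached (BUSY sentinel); don't thread the
// sentinel into p's mark word. Terminate the list at p instead.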
8796 p->set_mark(NULL);
8797 }
8798 observed_overflow_list =
8799 (oop) Atomic::cmpxchg_ptr(p, &_overflow_list, cur_overflow_list);
8800 } while (cur_overflow_list != observed_overflow_list);
8801 }
8802 #undef BUSY
8804 // Single threaded
8805 // General Note on GrowableArray: pushes may silently fail
8806 // because we are (temporarily) out of C-heap for expanding
8807 // the stack. The problem is quite ubiquitous and affects
8808 // a lot of code in the JVM. The prudent thing for GrowableArray
8809 // to do (for now) is to exit with an error. However, that may
8810 // be too draconian in some cases because the caller may be
8811 // able to recover without much harm. For such cases, we
8812 // should probably introduce a "soft_push" method which returns
8813 // an indication of success or failure with the assumption that
8814 // the caller may be able to recover from a failure; code in
8815 // the VM can then be changed, incrementally, to deal with such
8816 // failures where possible, thus, incrementally hardening the VM
8817 // in such low resource situations.
8818 void CMSCollector::preserve_mark_work(oop p, markOop m) {
8819 if (_preserved_oop_stack == NULL) {
8820 assert(_preserved_mark_stack == NULL,
8821 "bijection with preserved_oop_stack");
8822 // Allocate the stacks
8823 _preserved_oop_stack = new (ResourceObj::C_HEAP)
8824 GrowableArray<oop>(PreserveMarkStackSize, true);
8825 _preserved_mark_stack = new (ResourceObj::C_HEAP)
8826 GrowableArray<markOop>(PreserveMarkStackSize, true);
8827 if (_preserved_oop_stack == NULL || _preserved_mark_stack == NULL) {
8828 vm_exit_out_of_memory(2 * PreserveMarkStackSize * sizeof(oop) /* punt */,
8829 "Preserved Mark/Oop Stack for CMS (C-heap)");
8830 }
8831 }
8832 _preserved_oop_stack->push(p);
8833 _preserved_mark_stack->push(m);
8834 assert(m == p->mark(), "Mark word changed");
8835 assert(_preserved_oop_stack->length() == _preserved_mark_stack->length(),
8836 "bijection");
8837 }
8839 // Single threaded
8840 void CMSCollector::preserve_mark_if_necessary(oop p) {
8841 markOop m = p->mark();
8842 if (m->must_be_preserved(p)) {
8843 preserve_mark_work(p, m);
8844 }
8845 }
8847 void CMSCollector::par_preserve_mark_if_necessary(oop p) {
8848 markOop m = p->mark();
8849 if (m->must_be_preserved(p)) {
8850 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
8851 // Even though we read the mark word without holding
8852 // the lock, we are assured that it will not change
8853 // because we "own" this oop, so no other thread can
8854 // be trying to push it on the overflow list; see
8855 // the assertion in preserve_mark_work() that checks
8856 // that m == p->mark().
8857 preserve_mark_work(p, m);
8858 }
8859 }
8861 // We should be able to do this multi-threaded,
8862 // a chunk of stack being a task (this is
8863 // correct because each oop only ever appears
8864 // once in the overflow list). However, it's
8865 // not very easy to completely overlap this with
8866 // other operations, so it will generally not be done
8867 // until all work's been completed. Because we
8868 // expect the preserved oop stack (set) to be small,
8869 // it's probably fine to do this single-threaded.
8870 // We can explore cleverer concurrent/overlapped/parallel
8871 // processing of preserved marks if we feel the
8872 // need for this in the future. Stack overflow should
8873 // be so rare in practice and, when it happens, its
8874 // effect on performance so great that this will
8875 // likely just be in the noise anyway.
8876 void CMSCollector::restore_preserved_marks_if_any() {
8877 if (_preserved_oop_stack == NULL) {
8878 assert(_preserved_mark_stack == NULL,
8879 "bijection with preserved_oop_stack");
8880 return;
8881 }
8883 assert(SafepointSynchronize::is_at_safepoint(),
8884 "world should be stopped");
8885 assert(Thread::current()->is_ConcurrentGC_thread() ||
8886 Thread::current()->is_VM_thread(),
8887 "should be single-threaded");
8889 int length = _preserved_oop_stack->length();
8890 assert(_preserved_mark_stack->length() == length, "bijection");
8891 for (int i = 0; i < length; i++) {
8892 oop p = _preserved_oop_stack->at(i);
8893 assert(p->is_oop(), "Should be an oop");
8894 assert(_span.contains(p), "oop should be in _span");
8895 assert(p->mark() == markOopDesc::prototype(),
8896 "Set when taken from overflow list");
8897 markOop m = _preserved_mark_stack->at(i);
8898 p->set_mark(m);
8899 }
8900 _preserved_mark_stack->clear();
8901 _preserved_oop_stack->clear();
8902 assert(_preserved_mark_stack->is_empty() &&
8903 _preserved_oop_stack->is_empty(),
8904 "stacks were cleared above");
8905 }
8907 #ifndef PRODUCT
8908 bool CMSCollector::no_preserved_marks() const {
8909 return ( ( _preserved_mark_stack == NULL
8910 && _preserved_oop_stack == NULL)
8911 || ( _preserved_mark_stack->is_empty()
8912 && _preserved_oop_stack->is_empty()));
8913 }
8914 #endif
8916 CMSAdaptiveSizePolicy* ASConcurrentMarkSweepGeneration::cms_size_policy() const
8917 {
8918 GenCollectedHeap* gch = (GenCollectedHeap*) GenCollectedHeap::heap();
8919 CMSAdaptiveSizePolicy* size_policy =
8920 (CMSAdaptiveSizePolicy*) gch->gen_policy()->size_policy();
8921 assert(size_policy->is_gc_cms_adaptive_size_policy(),
8922 "Wrong type for size policy");
8923 return size_policy;
8924 }
8926 void ASConcurrentMarkSweepGeneration::resize(size_t cur_promo_size,
8927 size_t desired_promo_size) {
8928 if (cur_promo_size < desired_promo_size) {
8929 size_t expand_bytes = desired_promo_size - cur_promo_size;
8930 if (PrintAdaptiveSizePolicy && Verbose) {
8931 gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
8932 "Expanding tenured generation by " SIZE_FORMAT " (bytes)",
8933 expand_bytes);
8934 }
8935 expand(expand_bytes,
8936 MinHeapDeltaBytes,
8937 CMSExpansionCause::_adaptive_size_policy);
8938 } else if (desired_promo_size < cur_promo_size) {
8939 size_t shrink_bytes = cur_promo_size - desired_promo_size;
8940 if (PrintAdaptiveSizePolicy && Verbose) {
8941 gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
8942 "Shrinking tenured generation by " SIZE_FORMAT " (bytes)",
8943 shrink_bytes);
8944 }
8945 shrink(shrink_bytes);
8946 }
8947 }
8949 CMSGCAdaptivePolicyCounters* ASConcurrentMarkSweepGeneration::gc_adaptive_policy_counters() {
8950 GenCollectedHeap* gch = GenCollectedHeap::heap();
8951 CMSGCAdaptivePolicyCounters* counters =
8952 (CMSGCAdaptivePolicyCounters*) gch->collector_policy()->counters();
8953 assert(counters->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
8954 "Wrong kind of counters");
8955 return counters;
8956 }
8959 void ASConcurrentMarkSweepGeneration::update_counters() {
8960 if (UsePerfData) {
8961 _space_counters->update_all();
8962 _gen_counters->update_all();
8963 CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
8964 GenCollectedHeap* gch = GenCollectedHeap::heap();
8965 CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
8966 assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
8967 "Wrong gc statistics type");
8968 counters->update_counters(gc_stats_l);
8969 }
8970 }
8972 void ASConcurrentMarkSweepGeneration::update_counters(size_t used) {
8973 if (UsePerfData) {
8974 _space_counters->update_used(used);
8975 _space_counters->update_capacity();
8976 _gen_counters->update_all();
8978 CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
8979 GenCollectedHeap* gch = GenCollectedHeap::heap();
8980 CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
8981 assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
8982 "Wrong gc statistics type");
8983 counters->update_counters(gc_stats_l);
8984 }
8985 }
8987 // The desired expansion delta is computed so that:
8988 // . desired free percentage or greater is used
8989 void ASConcurrentMarkSweepGeneration::compute_new_size() {
8990 assert_locked_or_safepoint(Heap_lock);
8992 GenCollectedHeap* gch = (GenCollectedHeap*) GenCollectedHeap::heap();
8994 // If incremental collection failed, we just want to expand
8995 // to the limit.
8996 if (incremental_collection_failed()) {
8997 clear_incremental_collection_failed();
8998 grow_to_reserved();
8999 return;
9000 }
9002 assert(UseAdaptiveSizePolicy, "Should be using adaptive sizing");
9004 assert(gch->kind() == CollectedHeap::GenCollectedHeap,
9005 "Wrong type of heap");
9006 int prev_level = level() - 1;
9007 assert(prev_level >= 0, "The cms generation should not be the lowest generation");
9008 Generation* prev_gen = gch->get_gen(prev_level);
9009 assert(prev_gen->kind() == Generation::ASParNew,
9010 "Wrong type of young generation");
9011 ParNewGeneration* younger_gen = (ParNewGeneration*) prev_gen;
9012 size_t cur_eden = younger_gen->eden()->capacity();
9013 CMSAdaptiveSizePolicy* size_policy = cms_size_policy();
9014 size_t cur_promo = free();
9015 size_policy->compute_tenured_generation_free_space(cur_promo,
9016 max_available(),
9017 cur_eden);
9018 resize(cur_promo, size_policy->promo_size());
9020 // Record the new size of the space in the cms generation
9021 // that is available for promotions. This is temporary.
9022 // It should be the desired promo size.
9023 size_policy->avg_cms_promo()->sample(free());
9024 size_policy->avg_old_live()->sample(used());
9026 if (UsePerfData) {
9027 CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
9028 counters->update_cms_capacity_counter(capacity());
9029 }
9030 }
9032 void ASConcurrentMarkSweepGeneration::shrink_by(size_t desired_bytes) {
9033 assert_locked_or_safepoint(Heap_lock);
9034 assert_lock_strong(freelistLock());
9035 HeapWord* old_end = _cmsSpace->end();
9036 HeapWord* unallocated_start = _cmsSpace->unallocated_block();
9037 assert(old_end >= unallocated_start, "Miscalculation of unallocated_start");
9038 FreeChunk* chunk_at_end = find_chunk_at_end();
9039 if (chunk_at_end == NULL) {
9040 // No room to shrink
9041 if (PrintGCDetails && Verbose) {
9042 gclog_or_tty->print_cr("No room to shrink: old_end "
9043 PTR_FORMAT " unallocated_start " PTR_FORMAT
9044 " chunk_at_end " PTR_FORMAT,
9045 old_end, unallocated_start, chunk_at_end);
9046 }
9047 return;
9048 } else {
9050 // Find the chunk at the end of the space and determine
9051 // how much it can be shrunk.
9052 size_t shrinkable_size_in_bytes = chunk_at_end->size();
9053 size_t aligned_shrinkable_size_in_bytes =
9054 align_size_down(shrinkable_size_in_bytes, os::vm_page_size());
9055 assert(unallocated_start <= chunk_at_end->end(),
9056 "Inconsistent chunk at end of space");
9057 size_t bytes = MIN2(desired_bytes, aligned_shrinkable_size_in_bytes);
9058 size_t word_size_before = heap_word_size(_virtual_space.committed_size());
9060 // Shrink the underlying space
9061 _virtual_space.shrink_by(bytes);
9062 if (PrintGCDetails && Verbose) {
9063 gclog_or_tty->print_cr("ASConcurrentMarkSweepGeneration::shrink_by:"
9064 " desired_bytes " SIZE_FORMAT
9065 " shrinkable_size_in_bytes " SIZE_FORMAT
9066 " aligned_shrinkable_size_in_bytes " SIZE_FORMAT
9067 " bytes " SIZE_FORMAT,
9068 desired_bytes, shrinkable_size_in_bytes,
9069 aligned_shrinkable_size_in_bytes, bytes);
9070 gclog_or_tty->print_cr(" old_end " PTR_FORMAT
9071 " unallocated_start " PTR_FORMAT,
9072 old_end, unallocated_start);
9073 }
9075 // If the space did shrink (shrinking is not guaranteed),
9076 // shrink the chunk at the end by the appropriate amount.
9077 if (((HeapWord*)_virtual_space.high()) < old_end) {
9078 size_t new_word_size =
9079 heap_word_size(_virtual_space.committed_size());
9081 // Have to remove the chunk from the dictionary because it is changing
9082 // size and might be someplace elsewhere in the dictionary.
9084 // Get the chunk at end, shrink it, and put it
9085 // back.
9086 _cmsSpace->removeChunkFromDictionary(chunk_at_end);
9087 size_t word_size_change = word_size_before - new_word_size;
9088 size_t chunk_at_end_old_size = chunk_at_end->size();
9089 assert(chunk_at_end_old_size >= word_size_change,
9090 "Shrink is too large");
9091 chunk_at_end->setSize(chunk_at_end_old_size -
9092 word_size_change);
9093 _cmsSpace->freed((HeapWord*) chunk_at_end->end(),
9094 word_size_change);
9096 _cmsSpace->returnChunkToDictionary(chunk_at_end);
9098 MemRegion mr(_cmsSpace->bottom(), new_word_size);
9099 _bts->resize(new_word_size); // resize the block offset shared array
9100 Universe::heap()->barrier_set()->resize_covered_region(mr);
9101 _cmsSpace->assert_locked();
9102 _cmsSpace->set_end((HeapWord*)_virtual_space.high());
9104 NOT_PRODUCT(_cmsSpace->dictionary()->verify());
9106 // update the space and generation capacity counters
9107 if (UsePerfData) {
9108 _space_counters->update_capacity();
9109 _gen_counters->update_all();
9110 }
9112 if (Verbose && PrintGCDetails) {
9113 size_t new_mem_size = _virtual_space.committed_size();
9114 size_t old_mem_size = new_mem_size + bytes;
9115 gclog_or_tty->print_cr("Shrinking %s from %ldK by %ldK to %ldK",
9116 name(), old_mem_size/K, bytes/K, new_mem_size/K);
9117 }
9118 }
9120 assert(_cmsSpace->unallocated_block() <= _cmsSpace->end(),
9121 "Inconsistency at end of space");
9122 assert(chunk_at_end->end() == _cmsSpace->end(),
9123 "Shrinking is inconsistent");
9124 return;
9125 }
9126 }
9128 // Transfer some number of overflown objects to usual marking
9129 // stack. Return true if some objects were transferred.
9130 bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
9131 size_t num = MIN2((size_t)(_mark_stack->capacity() - _mark_stack->length())/4,
9132 (size_t)ParGCDesiredObjsFromOverflowList);
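// For example, with a 32768-entry mark stack holding 768 entries and
// ParGCDesiredObjsFromOverflowList at (what we assume here to be) its
// default of 20, this yields num = MIN2((32768 - 768)/4, 20) = 20.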
9134 bool res = _collector->take_from_overflow_list(num, _mark_stack);
9135 assert(_collector->overflow_list_is_empty() || res,
9136 "If list is not empty, we should have taken something");
9137 assert(!res || !_mark_stack->isEmpty(),
9138 "If we took something, it should now be on our stack");
9139 return res;
9140 }
9142 size_t MarkDeadObjectsClosure::do_blk(HeapWord* addr) {
9143 size_t res = _sp->block_size_no_stall(addr, _collector);
9144 assert(res != 0, "Should always be able to compute a size");
9145 if (_sp->block_is_obj(addr)) {
9146 if (_live_bit_map->isMarked(addr)) {
9147 // It can't have been dead in a previous cycle
9148 guarantee(!_dead_bit_map->isMarked(addr), "No resurrection!");
9149 } else {
9150 _dead_bit_map->mark(addr); // mark the dead object
9151 }
9152 }
9153 return res;
9154 }