Tue, 26 Aug 2008 14:54:48 -0700
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
Summary: Fixed CMSConcMarkingTask::reset() to store the restart address upon a marking stack overflow and to use it as the base, suitably aligned, for restarting the scan in CMSConcMarkingTask::do_scan_and_mark().
Reviewed-by: jcoomes, tonyp

/*
 * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_concurrentMarkSweepGeneration.cpp.incl"

// statics
CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
bool CMSCollector::_full_gc_requested = false;

//////////////////////////////////////////////////////////////////
// In support of CMS/VM thread synchronization
//////////////////////////////////////////////////////////////////
// We split use of the CGC_lock into 2 "levels".
// The low-level locking is of the usual CGC_lock monitor. We introduce
// a higher level "token" (hereafter "CMS token") built on top of the
// low level monitor (hereafter "CGC lock").
// The token-passing protocol gives priority to the VM thread. The
// CMS-lock doesn't provide any fairness guarantees, but clients
// should ensure that it is only held for very short, bounded
// durations.
//
// When either the CMS thread or the VM thread is involved in
// collection operations during which it does not want the other
// thread to interfere, it obtains the CMS token.
//
// If either thread tries to get the token while the other has
// it, that thread waits. However, if the VM thread and CMS thread
// both want the token, then the VM thread gets priority while the
// CMS thread waits. This ensures, for instance, that the "concurrent"
// phases of the CMS thread's work do not block out the VM thread
// for long periods of time as the CMS thread continues to hog
// the token. (See bug 4616232).
//
// The baton-passing functions are, however, controlled by the
// flags _foregroundGCShouldWait and _foregroundGCIsActive,
// and here the low-level CMS lock, not the high level token,
// ensures mutual exclusion.
//
// Two important conditions that we have to satisfy:
// 1. if a thread does a low-level wait on the CMS lock, then it
//    relinquishes the CMS token if it were holding that token
//    when it acquired the low-level CMS lock.
// 2. any low-level notifications on the low-level lock
//    should only be sent when a thread has relinquished the token.
//
// In the absence of either property, we'd have potential deadlock.
//
// We protect each of the CMS (concurrent and sequential) phases
// with the CMS _token_, not the CMS _lock_.
//
// The only code protected by CMS lock is the token acquisition code
// itself, see ConcurrentMarkSweepThread::[de]synchronize(), and the
// baton-passing code.
//
// Unfortunately, I couldn't come up with a good abstraction to factor and
// hide the naked CGC_lock manipulation in the baton-passing code
// further below. That's something we should try to do. Also, the proof
// of correctness of this 2-level locking scheme is far from obvious,
// and potentially quite slippery. We have an uneasy suspicion, for instance,
// that there may be a theoretical possibility of delay/starvation in the
// low-level lock/wait/notify scheme used for the baton-passing because of
// potential interference with the priority scheme embodied in the
// CMS-token-passing protocol. See related comments at a CGC_lock->wait()
// invocation further below and marked with "XXX 20011219YSR".
// Indeed, as we note elsewhere, this may become yet more slippery
// in the presence of multiple CMS and/or multiple VM threads. XXX

class CMSTokenSync: public StackObj {
 private:
  bool _is_cms_thread;
 public:
  CMSTokenSync(bool is_cms_thread):
    _is_cms_thread(is_cms_thread) {
    assert(is_cms_thread == Thread::current()->is_ConcurrentGC_thread(),
           "Incorrect argument to constructor");
    ConcurrentMarkSweepThread::synchronize(_is_cms_thread);
  }

  ~CMSTokenSync() {
    assert(_is_cms_thread ?
             ConcurrentMarkSweepThread::cms_thread_has_cms_token() :
             ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
           "Incorrect state");
    ConcurrentMarkSweepThread::desynchronize(_is_cms_thread);
  }
};

// Convenience class that does a CMSTokenSync, and then acquires
// up to three locks.
class CMSTokenSyncWithLocks: public CMSTokenSync {
 private:
  // Note: locks are acquired in textual declaration order
  // and released in the opposite order
  MutexLockerEx _locker1, _locker2, _locker3;
 public:
  CMSTokenSyncWithLocks(bool is_cms_thread, Mutex* mutex1,
                        Mutex* mutex2 = NULL, Mutex* mutex3 = NULL):
    CMSTokenSync(is_cms_thread),
    _locker1(mutex1, Mutex::_no_safepoint_check_flag),
    _locker2(mutex2, Mutex::_no_safepoint_check_flag),
    _locker3(mutex3, Mutex::_no_safepoint_check_flag)
  { }
};

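// An illustrative usage sketch (the call site and lock below are
// hypothetical; only the pattern is prescribed by the classes above):
// a CMS-thread-side phase that must exclude the VM thread and also
// needs a subsidiary lock might bracket its work as
//
//   {
//     CMSTokenSyncWithLocks ts(true /* is_cms_thread */, some_lock);
//     ... work that must not interleave with the VM thread ...
//   }  // some_lock released first, then the CMS token relinquished
//
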
// Wrapper class to temporarily disable icms during a foreground cms collection.
class ICMSDisabler: public StackObj {
 public:
  // The ctor disables icms and wakes up the thread so it notices the change;
  // the dtor re-enables icms. Note that the CMSCollector methods will check
  // CMSIncrementalMode.
  ICMSDisabler()  { CMSCollector::disable_icms(); CMSCollector::start_icms(); }
  ~ICMSDisabler() { CMSCollector::enable_icms(); }
};

//////////////////////////////////////////////////////////////////
// Concurrent Mark-Sweep Generation /////////////////////////////
//////////////////////////////////////////////////////////////////

NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;)

// This struct contains per-thread things necessary to support parallel
// young-gen collection.
class CMSParGCThreadState: public CHeapObj {
 public:
  CFLS_LAB lab;
  PromotionInfo promo;

  // Constructor.
  CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
    promo.setSpace(cfls);
  }
};

ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
     ReservedSpace rs, size_t initial_byte_size, int level,
     CardTableRS* ct, bool use_adaptive_freelists,
     FreeBlockDictionary::DictionaryChoice dictionaryChoice) :
  CardGeneration(rs, initial_byte_size, level, ct),
  _dilatation_factor(((double)MinChunkSize)/((double)(oopDesc::header_size()))),
  _debug_collection_type(Concurrent_collection_type)
{
  HeapWord* bottom = (HeapWord*) _virtual_space.low();
  HeapWord* end    = (HeapWord*) _virtual_space.high();

  _direct_allocated_words = 0;
  NOT_PRODUCT(
    _numObjectsPromoted = 0;
    _numWordsPromoted = 0;
    _numObjectsAllocated = 0;
    _numWordsAllocated = 0;
  )

  _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end),
                                           use_adaptive_freelists,
                                           dictionaryChoice);
  NOT_PRODUCT(debug_cms_space = _cmsSpace;)
  if (_cmsSpace == NULL) {
    vm_exit_during_initialization(
      "CompactibleFreeListSpace allocation failure");
  }
  _cmsSpace->_gen = this;

  _gc_stats = new CMSGCStats();

  // Verify the assumption that FreeChunk::_prev and OopDesc::_klass
  // offsets match. The ability to tell free chunks from objects
  // depends on this property.
  debug_only(
    FreeChunk* junk = NULL;
    assert(UseCompressedOops ||
           junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
           "Offset of FreeChunk::_prev within FreeChunk must match"
           " that of OopDesc::_klass within OopDesc");
  )
  if (ParallelGCThreads > 0) {
    typedef CMSParGCThreadState* CMSParGCThreadStatePtr;
    _par_gc_thread_states =
      NEW_C_HEAP_ARRAY(CMSParGCThreadStatePtr, ParallelGCThreads);
    if (_par_gc_thread_states == NULL) {
      vm_exit_during_initialization("Could not allocate par gc structs");
    }
    for (uint i = 0; i < ParallelGCThreads; i++) {
      _par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace());
      if (_par_gc_thread_states[i] == NULL) {
        vm_exit_during_initialization("Could not allocate par gc structs");
      }
    }
  } else {
    _par_gc_thread_states = NULL;
  }
  _incremental_collection_failed = false;
  // The "dilatation_factor" is the expansion that can occur on
  // account of the fact that the minimum object size in the CMS
  // generation may be larger than that in, say, a contiguous young
  // generation.
  // Ideally, in the calculation below, we'd compute the dilatation
  // factor as: MinChunkSize/(promoting_gen's min object size)
  // Since we do not have such a general query interface for the
  // promoting generation, we'll instead just use the minimum
  // object size (which today is a header's worth of space);
  // note that all arithmetic is in units of HeapWords.
  assert(MinChunkSize >= oopDesc::header_size(), "just checking");
  assert(_dilatation_factor >= 1.0, "from previous assert");
}

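// Illustrative arithmetic for the dilatation factor (example values, not
// the defaults of any particular platform): if oopDesc::header_size() is
// 2 HeapWords and MinChunkSize is 4 HeapWords, then _dilatation_factor is
// 4/2 = 2.0, i.e., in the worst case a young gen full of minimum-size
// objects could need twice as much space when promoted here.
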
// The field "_initiating_occupancy" represents the occupancy percentage
// at which we trigger a new collection cycle. Unless explicitly specified
// via CMSInitiating[Perm]OccupancyFraction (argument "io" below), it
// is calculated by:
//
//   Let "f" be MinHeapFreeRatio in
//
//   _initiating_occupancy = 100-f +
//                           f * (CMSTrigger[Perm]Ratio/100)
//   where CMSTrigger[Perm]Ratio is the argument "tr" below.
//
// That is, if we assume the heap is at its desired maximum occupancy at the
// end of a collection, we let CMSTrigger[Perm]Ratio of the (purported) free
// space be allocated before initiating a new collection cycle.
//
void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, intx tr) {
  assert(io <= 100 && tr >= 0 && tr <= 100, "Check the arguments");
  if (io >= 0) {
    _initiating_occupancy = (double)io / 100.0;
  } else {
    _initiating_occupancy = ((100 - MinHeapFreeRatio) +
                             (double)(tr * MinHeapFreeRatio) / 100.0)
                            / 100.0;
  }
}

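// Worked example for the formula above, assuming the common defaults
// MinHeapFreeRatio = 40 and CMSTriggerRatio = 80, with no explicit
// CMSInitiatingOccupancyFraction (i.e. io < 0):
//   _initiating_occupancy = ((100 - 40) + (80 * 40) / 100) / 100
//                         = (60 + 32) / 100
//                         = 0.92
// so a new cycle would be initiated once occupancy reaches 92%.
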
void ConcurrentMarkSweepGeneration::ref_processor_init() {
  assert(collector() != NULL, "no collector");
  collector()->ref_processor_init();
}

void CMSCollector::ref_processor_init() {
  if (_ref_processor == NULL) {
    // Allocate and initialize a reference processor
    _ref_processor = ReferenceProcessor::create_ref_processor(
        _span,                               // span
        _cmsGen->refs_discovery_is_atomic(), // atomic_discovery
        _cmsGen->refs_discovery_is_mt(),     // mt_discovery
        &_is_alive_closure,
        ParallelGCThreads,
        ParallelRefProcEnabled);
    // Initialize the _ref_processor field of CMSGen
    _cmsGen->set_ref_processor(_ref_processor);

    // Allocate a dummy ref processor for perm gen.
    ReferenceProcessor* rp2 = new ReferenceProcessor();
    if (rp2 == NULL) {
      vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
    }
    _permGen->set_ref_processor(rp2);
  }
}

CMSAdaptiveSizePolicy* CMSCollector::size_policy() {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
         "Wrong type of heap");
  CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
    gch->gen_policy()->size_policy();
  assert(sp->is_gc_cms_adaptive_size_policy(),
         "Wrong type of size policy");
  return sp;
}

CMSGCAdaptivePolicyCounters* CMSCollector::gc_adaptive_policy_counters() {
  CMSGCAdaptivePolicyCounters* results =
    (CMSGCAdaptivePolicyCounters*) collector_policy()->counters();
  assert(
    results->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
    "Wrong gc policy counter kind");
  return results;
}

void ConcurrentMarkSweepGeneration::initialize_performance_counters() {

  const char* gen_name = "old";

  // Generation Counters - generation 1, 1 subspace
  _gen_counters = new GenerationCounters(gen_name, 1, 1, &_virtual_space);

  _space_counters = new GSpaceCounters(gen_name, 0,
                                       _virtual_space.reserved_size(),
                                       this, _gen_counters);
}

CMSStats::CMSStats(ConcurrentMarkSweepGeneration* cms_gen, unsigned int alpha):
  _cms_gen(cms_gen)
{
  assert(alpha <= 100, "bad value");
  _saved_alpha = alpha;

  // Initialize the alphas to the bootstrap value of 100.
  _gc0_alpha = _cms_alpha = 100;

  _cms_begin_time.update();
  _cms_end_time.update();

  _gc0_duration = 0.0;
  _gc0_period = 0.0;
  _gc0_promoted = 0;

  _cms_duration = 0.0;
  _cms_period = 0.0;
  _cms_allocated = 0;

  _cms_used_at_gc0_begin = 0;
  _cms_used_at_gc0_end = 0;
  _allow_duty_cycle_reduction = false;
  _valid_bits = 0;
  _icms_duty_cycle = CMSIncrementalDutyCycle;
}

// If promotion failure handling is on, use
// the padded average size of the promotion for each
// young generation collection.
double CMSStats::time_until_cms_gen_full() const {
  size_t cms_free = _cms_gen->cmsSpace()->free();
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  size_t expected_promotion = gch->get_gen(0)->capacity();
  if (HandlePromotionFailure) {
    expected_promotion = MIN2(
        (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average(),
        expected_promotion);
  }
  if (cms_free > expected_promotion) {
    // Start a cms collection if there isn't enough space to promote
    // for the next minor collection. Use the padded average as
    // a safety factor.
    cms_free -= expected_promotion;

    // Adjust by the safety factor.
    double cms_free_dbl = (double)cms_free;
    cms_free_dbl = cms_free_dbl * (100.0 - CMSIncrementalSafetyFactor) / 100.0;

    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr("CMSStats::time_until_cms_gen_full: cms_free "
        SIZE_FORMAT " expected_promotion " SIZE_FORMAT,
        cms_free, expected_promotion);
      gclog_or_tty->print_cr("  cms_free_dbl %f cms_consumption_rate %f",
        cms_free_dbl, cms_consumption_rate() + 1.0);
    }
    // Add 1 in case the consumption rate goes to zero.
    return cms_free_dbl / (cms_consumption_rate() + 1.0);
  }
  return 0.0;
}

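// Illustrative arithmetic for time_until_cms_gen_full() (example values):
// with cms_free = 200 MB, expected_promotion = 50 MB, and assuming
// CMSIncrementalSafetyFactor is 10, the adjusted free space is
// (200 - 50) MB * 0.9 = 135 MB; with cms_consumption_rate() = 26 MB/s
// the estimate returned is 135 / (26 + 1) = 5 seconds.
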
// Compare the duration of the cms collection to the
// time remaining before the cms generation is empty.
// Note that the time from the start of the cms collection
// to the start of the cms sweep (less than the total
// duration of the cms collection) can be used. This
// has been tried and some applications experienced
// promotion failures early in execution. This was
// possibly because the averages were not accurate
// enough at the beginning.
double CMSStats::time_until_cms_start() const {
  // We add "gc0_period" to the "work" calculation
  // below because this query is done (mostly) at the
  // end of a scavenge, so we need to conservatively
  // account for that much possible delay
  // in the query so as to avoid concurrent mode failures
  // due to starting the collection just a wee bit too
  // late.
  double work = cms_duration() + gc0_period();
  double deadline = time_until_cms_gen_full();
  if (work > deadline) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(
        " CMSCollector: collect because of anticipated promotion "
        "before full %3.7f + %3.7f > %3.7f ", cms_duration(),
        gc0_period(), time_until_cms_gen_full());
    }
    return 0.0;
  }
  // Time remaining before we should start; non-negative here since
  // work <= deadline.
  return deadline - work;
}

// Return a duty cycle based on old_duty_cycle and new_duty_cycle, limiting the
// amount of change to prevent wild oscillation.
unsigned int CMSStats::icms_damped_duty_cycle(unsigned int old_duty_cycle,
                                              unsigned int new_duty_cycle) {
  assert(old_duty_cycle <= 100, "bad input value");
  assert(new_duty_cycle <= 100, "bad input value");

  // Note: use subtraction with caution since it may underflow (values are
  // unsigned). Addition is safe since we're in the range 0-100.
  unsigned int damped_duty_cycle = new_duty_cycle;
  if (new_duty_cycle < old_duty_cycle) {
    const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 5U);
    if (new_duty_cycle + largest_delta < old_duty_cycle) {
      damped_duty_cycle = old_duty_cycle - largest_delta;
    }
  } else if (new_duty_cycle > old_duty_cycle) {
    const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 15U);
    if (new_duty_cycle > old_duty_cycle + largest_delta) {
      damped_duty_cycle = MIN2(old_duty_cycle + largest_delta, 100U);
    }
  }
  assert(damped_duty_cycle <= 100, "invalid duty cycle computed");

  if (CMSTraceIncrementalPacing) {
    gclog_or_tty->print(" [icms_damped_duty_cycle(%d,%d) = %d] ",
                        old_duty_cycle, new_duty_cycle, damped_duty_cycle);
  }
  return damped_duty_cycle;
}

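// Worked examples of the damping above (illustrative values):
//   shrinking: old = 60, new = 20  =>  largest_delta = MAX2(60/4, 5) = 15;
//              20 + 15 < 60, so the result is clamped to 60 - 15 = 45.
//   growing:   old = 20, new = 80  =>  largest_delta = MAX2(20/4, 15) = 15;
//              80 > 20 + 15, so the result is clamped to 20 + 15 = 35.
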
unsigned int CMSStats::icms_update_duty_cycle_impl() {
  assert(CMSIncrementalPacing && valid(),
         "should be handled in icms_update_duty_cycle()");

  double cms_time_so_far = cms_timer().seconds();
  double scaled_duration = cms_duration_per_mb() * _cms_used_at_gc0_end / M;
  double scaled_duration_remaining = fabsd(scaled_duration - cms_time_so_far);

  // Avoid division by 0.
  double time_until_full = MAX2(time_until_cms_gen_full(), 0.01);
  double duty_cycle_dbl = 100.0 * scaled_duration_remaining / time_until_full;

  unsigned int new_duty_cycle = MIN2((unsigned int)duty_cycle_dbl, 100U);
  if (new_duty_cycle > _icms_duty_cycle) {
    // Avoid very small duty cycles (1 or 2); 0 is allowed.
    if (new_duty_cycle > 2) {
      _icms_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle,
                                                new_duty_cycle);
    }
  } else if (_allow_duty_cycle_reduction) {
    // The duty cycle is reduced only once per cms cycle (see record_cms_end()).
    new_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle, new_duty_cycle);
    // Respect the minimum duty cycle.
    unsigned int min_duty_cycle = (unsigned int)CMSIncrementalDutyCycleMin;
    _icms_duty_cycle = MAX2(new_duty_cycle, min_duty_cycle);
  }

  if (PrintGCDetails || CMSTraceIncrementalPacing) {
    gclog_or_tty->print(" icms_dc=%d ", _icms_duty_cycle);
  }

  _allow_duty_cycle_reduction = false;
  return _icms_duty_cycle;
}

#ifndef PRODUCT
void CMSStats::print_on(outputStream *st) const {
  st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
  st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
            gc0_duration(), gc0_period(), gc0_promoted());
  st->print(",cms_dur=%g,cms_dur_per_mb=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
            cms_duration(), cms_duration_per_mb(),
            cms_period(), cms_allocated());
  st->print(",cms_since_beg=%g,cms_since_end=%g",
            cms_time_since_begin(), cms_time_since_end());
  st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
            _cms_used_at_gc0_begin, _cms_used_at_gc0_end);
  if (CMSIncrementalMode) {
    st->print(",dc=%d", icms_duty_cycle());
  }

  if (valid()) {
    st->print(",promo_rate=%g,cms_alloc_rate=%g",
              promotion_rate(), cms_allocation_rate());
    st->print(",cms_consumption_rate=%g,time_until_full=%g",
              cms_consumption_rate(), time_until_cms_gen_full());
  }
  st->print(" ");
}
#endif // #ifndef PRODUCT

CMSCollector::CollectorState CMSCollector::_collectorState =
                             CMSCollector::Idling;
bool CMSCollector::_foregroundGCIsActive = false;
bool CMSCollector::_foregroundGCShouldWait = false;

CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
                           ConcurrentMarkSweepGeneration* permGen,
                           CardTableRS*                   ct,
                           ConcurrentMarkSweepPolicy*     cp):
  _cmsGen(cmsGen),
  _permGen(permGen),
  _ct(ct),
  _ref_processor(NULL),    // will be set later
  _conc_workers(NULL),     // may be set later
  _abort_preclean(false),
  _start_sampling(false),
  _between_prologue_and_epilogue(false),
  _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
  _perm_gen_verify_bit_map(0, -1 /* no mutex */, "No_lock"),
  _modUnionTable((CardTableModRefBS::card_shift - LogHeapWordSize),
                 -1 /* lock-free */, "No_lock" /* dummy */),
  _modUnionClosure(&_modUnionTable),
  _modUnionClosurePar(&_modUnionTable),
  // Adjust my span to cover old (cms) gen and perm gen
  _span(cmsGen->reserved()._union(permGen->reserved())),
  // Construct the is_alive_closure with _span & markBitMap
  _is_alive_closure(_span, &_markBitMap),
  _restart_addr(NULL),
  _overflow_list(NULL),
  _preserved_oop_stack(NULL),
  _preserved_mark_stack(NULL),
  _stats(cmsGen),
  _eden_chunk_array(NULL),     // may be set in ctor body
  _eden_chunk_capacity(0),     // -- ditto --
  _eden_chunk_index(0),        // -- ditto --
  _survivor_plab_array(NULL),  // -- ditto --
  _survivor_chunk_array(NULL), // -- ditto --
  _survivor_chunk_capacity(0), // -- ditto --
  _survivor_chunk_index(0),    // -- ditto --
  _ser_pmc_preclean_ovflw(0),
  _ser_pmc_remark_ovflw(0),
  _par_pmc_remark_ovflw(0),
  _ser_kac_ovflw(0),
  _par_kac_ovflw(0),
#ifndef PRODUCT
  _num_par_pushes(0),
#endif
  _collection_count_start(0),
  _verifying(false),
  _icms_start_limit(NULL),
  _icms_stop_limit(NULL),
  _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
  _completed_initialization(false),
  _collector_policy(cp),
  _should_unload_classes(false),
  _concurrent_cycles_since_last_unload(0),
  _sweep_estimate(CMS_SweepWeight, CMS_SweepPadding)
{
  if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
    ExplicitGCInvokesConcurrent = true;
  }
  // Now expand the span and allocate the collection support structures
  // (MUT, marking bit map etc.) to cover both generations subject to
  // collection.

  // First check that _permGen is adjacent to _cmsGen and above it.
  assert(   _cmsGen->reserved().word_size() > 0
         && _permGen->reserved().word_size() > 0,
         "generations should not be of zero size");
  assert(_cmsGen->reserved().intersection(_permGen->reserved()).is_empty(),
         "_cmsGen and _permGen should not overlap");
  assert(_cmsGen->reserved().end() == _permGen->reserved().start(),
         "_cmsGen->end() different from _permGen->start()");

  // For use by dirty card to oop closures.
  _cmsGen->cmsSpace()->set_collector(this);
  _permGen->cmsSpace()->set_collector(this);

  // Allocate MUT and marking bit map
  {
    MutexLockerEx x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag);
    if (!_markBitMap.allocate(_span)) {
      warning("Failed to allocate CMS Bit Map");
      return;
    }
    assert(_markBitMap.covers(_span), "_markBitMap inconsistency?");
  }
  {
    _modUnionTable.allocate(_span);
    assert(_modUnionTable.covers(_span), "_modUnionTable inconsistency?");
  }

  if (!_markStack.allocate(CMSMarkStackSize)) {
    warning("Failed to allocate CMS Marking Stack");
    return;
  }
  if (!_revisitStack.allocate(CMSRevisitStackSize)) {
    warning("Failed to allocate CMS Revisit Stack");
    return;
  }

  // Support for multi-threaded concurrent phases
  if (ParallelGCThreads > 0 && CMSConcurrentMTEnabled) {
    if (FLAG_IS_DEFAULT(ParallelCMSThreads)) {
      // just for now
      FLAG_SET_DEFAULT(ParallelCMSThreads, (ParallelGCThreads + 3)/4);
    }
    if (ParallelCMSThreads > 1) {
      _conc_workers = new YieldingFlexibleWorkGang("Parallel CMS Threads",
                                                   ParallelCMSThreads, true);
      if (_conc_workers == NULL) {
        warning("GC/CMS: _conc_workers allocation failure: "
                "forcing -CMSConcurrentMTEnabled");
        CMSConcurrentMTEnabled = false;
      }
    } else {
      CMSConcurrentMTEnabled = false;
    }
  }
  if (!CMSConcurrentMTEnabled) {
    ParallelCMSThreads = 0;
  } else {
    // Turn off CMSCleanOnEnter optimization temporarily for
    // the MT case where it's not fixed yet; see 6178663.
    CMSCleanOnEnter = false;
  }
  assert((_conc_workers != NULL) == (ParallelCMSThreads > 1),
         "Inconsistency");

  // Parallel task queues; these are shared for the
  // concurrent and stop-world phases of CMS, but
  // are not shared with parallel scavenge (ParNew).
  {
    uint i;
    uint num_queues = (uint) MAX2(ParallelGCThreads, ParallelCMSThreads);

    if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled
         || ParallelRefProcEnabled)
        && num_queues > 0) {
      _task_queues = new OopTaskQueueSet(num_queues);
      if (_task_queues == NULL) {
        warning("task_queues allocation failure.");
        return;
      }
      _hash_seed = NEW_C_HEAP_ARRAY(int, num_queues);
      if (_hash_seed == NULL) {
        warning("_hash_seed array allocation failure");
        return;
      }

      // XXX use a global constant instead of 64!
      typedef struct OopTaskQueuePadded {
        OopTaskQueue work_queue;
        char pad[64 - sizeof(OopTaskQueue)];  // prevent false sharing
      } OopTaskQueuePadded;

      for (i = 0; i < num_queues; i++) {
        OopTaskQueuePadded *q_padded = new OopTaskQueuePadded();
        if (q_padded == NULL) {
          warning("work_queue allocation failure.");
          return;
        }
        _task_queues->register_queue(i, &q_padded->work_queue);
      }
      for (i = 0; i < num_queues; i++) {
        _task_queues->queue(i)->initialize();
        _hash_seed[i] = 17;  // copied from ParNew
      }
    }
  }

  _cmsGen ->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);
  _permGen->init_initiating_occupancy(CMSInitiatingPermOccupancyFraction, CMSTriggerPermRatio);

  // Clip CMSBootstrapOccupancy between 0 and 100.
  _bootstrap_occupancy = ((double)MIN2((uintx)100, MAX2((uintx)0, CMSBootstrapOccupancy)))
                         /(double)100;

  _full_gcs_since_conc_gc = 0;

  // Now tell CMS generations the identity of their collector
  ConcurrentMarkSweepGeneration::set_collector(this);

  // Create & start a CMS thread for this CMS collector
  _cmsThread = ConcurrentMarkSweepThread::start(this);
  assert(cmsThread() != NULL, "CMS Thread should have been created");
  assert(cmsThread()->collector() == this,
         "CMS Thread should refer to this gen");
  assert(CGC_lock != NULL, "Where's the CGC_lock?");

  // Support for parallelizing young gen rescan
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  _young_gen = gch->prev_gen(_cmsGen);
  if (gch->supports_inline_contig_alloc()) {
    _top_addr = gch->top_addr();
    _end_addr = gch->end_addr();
    assert(_young_gen != NULL, "no _young_gen");
    _eden_chunk_index = 0;
    _eden_chunk_capacity = (_young_gen->max_capacity()+CMSSamplingGrain)/CMSSamplingGrain;
    _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity);
    if (_eden_chunk_array == NULL) {
      _eden_chunk_capacity = 0;
      warning("GC/CMS: _eden_chunk_array allocation failure");
    }
  }
  assert(_eden_chunk_array != NULL || _eden_chunk_capacity == 0, "Error");

  // Support for parallelizing survivor space rescan
  if (CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) {
    size_t max_plab_samples = MaxNewSize/((SurvivorRatio+2)*MinTLABSize);
    _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads);
    _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, 2*max_plab_samples);
    _cursor               = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads);
    if (_survivor_plab_array == NULL || _survivor_chunk_array == NULL
        || _cursor == NULL) {
      warning("Failed to allocate survivor plab/chunk array");
      if (_survivor_plab_array != NULL) {
        FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array);
        _survivor_plab_array = NULL;
      }
      if (_survivor_chunk_array != NULL) {
        FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array);
        _survivor_chunk_array = NULL;
      }
      if (_cursor != NULL) {
        FREE_C_HEAP_ARRAY(size_t, _cursor);
        _cursor = NULL;
      }
    } else {
      _survivor_chunk_capacity = 2*max_plab_samples;
      for (uint i = 0; i < ParallelGCThreads; i++) {
        HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples);
        if (vec == NULL) {
          warning("Failed to allocate survivor plab array");
          for (int j = i; j > 0; j--) {
            FREE_C_HEAP_ARRAY(HeapWord*, _survivor_plab_array[j-1].array());
          }
          FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array);
          FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array);
          _survivor_plab_array = NULL;
          _survivor_chunk_array = NULL;
          _survivor_chunk_capacity = 0;
          break;
        } else {
          ChunkArray* cur =
            ::new (&_survivor_plab_array[i]) ChunkArray(vec,
                                                        max_plab_samples);
          assert(cur->end() == 0, "Should be 0");
          assert(cur->array() == vec, "Should be vec");
          assert(cur->capacity() == max_plab_samples, "Error");
        }
      }
    }
  }
  assert(   (   _survivor_plab_array  != NULL
             && _survivor_chunk_array != NULL)
         || (   _survivor_chunk_capacity == 0
             && _survivor_chunk_index == 0),
         "Error");

  // Choose what strong roots should be scanned depending on verification options
  // and perm gen collection mode.
  if (!CMSClassUnloadingEnabled) {
    // If class unloading is disabled we want to include all classes into the root set.
    add_root_scanning_option(SharedHeap::SO_AllClasses);
  } else {
    add_root_scanning_option(SharedHeap::SO_SystemClasses);
  }

  NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
  _gc_counters = new CollectorCounters("CMS", 1);
  _completed_initialization = true;
  _sweep_timer.start();  // start of time
}

const char* ConcurrentMarkSweepGeneration::name() const {
  return "concurrent mark-sweep generation";
}
void ConcurrentMarkSweepGeneration::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}

// this is an optimized version of update_counters(). it takes the
// used value as a parameter rather than computing it.
//
void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
  if (UsePerfData) {
    _space_counters->update_used(used);
    _space_counters->update_capacity();
    _gen_counters->update_all();
  }
}

void ConcurrentMarkSweepGeneration::print() const {
  Generation::print();
  cmsSpace()->print();
}

#ifndef PRODUCT
void ConcurrentMarkSweepGeneration::print_statistics() {
  cmsSpace()->printFLCensus(0);
}
#endif

void ConcurrentMarkSweepGeneration::printOccupancy(const char *s) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (PrintGCDetails) {
    if (Verbose) {
      gclog_or_tty->print(" [%d %s-%s: "SIZE_FORMAT"("SIZE_FORMAT")]",
        level(), short_name(), s, used(), capacity());
    } else {
      gclog_or_tty->print(" [%d %s-%s: "SIZE_FORMAT"K("SIZE_FORMAT"K)]",
        level(), short_name(), s, used() / K, capacity() / K);
    }
  }
  if (Verbose) {
    gclog_or_tty->print(" "SIZE_FORMAT"("SIZE_FORMAT")",
              gch->used(), gch->capacity());
  } else {
    gclog_or_tty->print(" "SIZE_FORMAT"K("SIZE_FORMAT"K)",
              gch->used() / K, gch->capacity() / K);
  }
}

size_t
ConcurrentMarkSweepGeneration::contiguous_available() const {
  // dld proposes an improvement in precision here. If the committed
  // part of the space ends in a free block we should add that to
  // uncommitted size in the calculation below. Will make this
  // change later, staying with the approximation below for the
  // time being. -- ysr.
  return MAX2(_virtual_space.uncommitted_size(), unsafe_max_alloc_nogc());
}

size_t
ConcurrentMarkSweepGeneration::unsafe_max_alloc_nogc() const {
  return _cmsSpace->max_alloc_in_words() * HeapWordSize;
}

size_t ConcurrentMarkSweepGeneration::max_available() const {
  return free() + _virtual_space.uncommitted_size();
}

bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(
    size_t max_promotion_in_bytes,
    bool younger_handles_promotion_failure) const {

  // This is the most conservative test. Full promotion is
  // guaranteed if this is used. The multiplicative factor is to
  // account for the worst case "dilatation".
  double adjusted_max_promo_bytes = _dilatation_factor * max_promotion_in_bytes;
  if (adjusted_max_promo_bytes > (double)max_uintx) { // larger than size_t
    adjusted_max_promo_bytes = (double)max_uintx;
  }
  bool result = (max_contiguous_available() >= (size_t)adjusted_max_promo_bytes);

  if (younger_handles_promotion_failure && !result) {
    // Full promotion is not guaranteed because fragmentation
    // of the cms generation can prevent the full promotion.
    result = (max_available() >= (size_t)adjusted_max_promo_bytes);

    if (!result) {
      // With promotion failure handling the test for the ability
      // to support the promotion does not have to be guaranteed.
      // Use an average of the amount promoted.
      result = max_available() >= (size_t)
        gc_stats()->avg_promoted()->padded_average();
      if (PrintGC && Verbose && result) {
        gclog_or_tty->print_cr(
          "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe"
          " max_available: " SIZE_FORMAT
          " avg_promoted: " SIZE_FORMAT,
          max_available(), (size_t)
          gc_stats()->avg_promoted()->padded_average());
      }
    } else {
      if (PrintGC && Verbose) {
        gclog_or_tty->print_cr(
          "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe"
          " max_available: " SIZE_FORMAT
          " adj_max_promo_bytes: " SIZE_FORMAT,
          max_available(), (size_t)adjusted_max_promo_bytes);
      }
    }
  } else {
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr(
        "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe"
        " contiguous_available: " SIZE_FORMAT
        " adj_max_promo_bytes: " SIZE_FORMAT,
        max_contiguous_available(), (size_t)adjusted_max_promo_bytes);
    }
  }
  return result;
}

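// The decision ladder above, illustrated with example values: with
// _dilatation_factor = 1.25 and max_promotion_in_bytes = 80 MB, the
// adjusted requirement is 100 MB. If max_contiguous_available() is only
// 90 MB, the conservative test fails; with promotion failure handling,
// a max_available() of 120 MB >= 100 MB still reports the attempt safe.
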
CompactibleSpace*
ConcurrentMarkSweepGeneration::first_compaction_space() const {
  return _cmsSpace;
}

void ConcurrentMarkSweepGeneration::reset_after_compaction() {
  // Clear the promotion information. These pointers can be adjusted
  // along with all the other pointers into the heap but
  // compaction is expected to be a rare event with
  // a heap using cms so don't do it without seeing the need.
  if (ParallelGCThreads > 0) {
    for (uint i = 0; i < ParallelGCThreads; i++) {
      _par_gc_thread_states[i]->promo.reset();
    }
  }
}

void ConcurrentMarkSweepGeneration::space_iterate(SpaceClosure* blk, bool usedOnly) {
  blk->do_space(_cmsSpace);
}

void ConcurrentMarkSweepGeneration::compute_new_size() {
  assert_locked_or_safepoint(Heap_lock);

  // If incremental collection failed, we just want to expand
  // to the limit.
  if (incremental_collection_failed()) {
    clear_incremental_collection_failed();
    grow_to_reserved();
    return;
  }

  size_t expand_bytes = 0;
  double free_percentage = ((double) free()) / capacity();
  double desired_free_percentage = (double) MinHeapFreeRatio / 100;
  double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;

  // compute expansion delta needed for reaching desired free percentage
  if (free_percentage < desired_free_percentage) {
    size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
    assert(desired_capacity >= capacity(), "invalid expansion size");
    expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
  }
  if (expand_bytes > 0) {
    if (PrintGCDetails && Verbose) {
      size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
      gclog_or_tty->print_cr("\nFrom compute_new_size: ");
      gclog_or_tty->print_cr("  Free fraction %f", free_percentage);
      gclog_or_tty->print_cr("  Desired free fraction %f",
        desired_free_percentage);
      gclog_or_tty->print_cr("  Maximum free fraction %f",
        maximum_free_percentage);
      gclog_or_tty->print_cr("  Capacity "SIZE_FORMAT, capacity()/1000);
      gclog_or_tty->print_cr("  Desired capacity "SIZE_FORMAT,
        desired_capacity/1000);
      int prev_level = level() - 1;
      if (prev_level >= 0) {
        size_t prev_size = 0;
        GenCollectedHeap* gch = GenCollectedHeap::heap();
        Generation* prev_gen = gch->_gens[prev_level];
        prev_size = prev_gen->capacity();
        gclog_or_tty->print_cr("  Younger gen size "SIZE_FORMAT,
          prev_size/1000);
      }
      gclog_or_tty->print_cr("  unsafe_max_alloc_nogc "SIZE_FORMAT,
        unsafe_max_alloc_nogc()/1000);
      gclog_or_tty->print_cr("  contiguous available "SIZE_FORMAT,
        contiguous_available()/1000);
      gclog_or_tty->print_cr("  Expand by "SIZE_FORMAT" (bytes)",
        expand_bytes);
    }
    // safe if expansion fails
    expand(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr("  Expanded free fraction %f",
        ((double) free()) / capacity());
    }
  }
}

Mutex* ConcurrentMarkSweepGeneration::freelistLock() const {
  return cmsSpace()->freelistLock();
}

HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size,
                                                  bool tlab) {
  CMSSynchronousYieldRequest yr;
  MutexLockerEx x(freelistLock(),
                  Mutex::_no_safepoint_check_flag);
  return have_lock_and_allocate(size, tlab);
}

HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
                                                                bool tlab) {
  assert_lock_strong(freelistLock());
  size_t adjustedSize = CompactibleFreeListSpace::adjustObjectSize(size);
  HeapWord* res = cmsSpace()->allocate(adjustedSize);
  // Allocate the object live (grey) if the background collector has
  // started marking. This is necessary because the marker may
  // have passed this address and consequently this object will
  // not otherwise be greyed and would be incorrectly swept up.
  // Note that if this object contains references, the writing
  // of those references will dirty the card containing this object
  // allowing the object to be blackened (and its references scanned)
  // either during a preclean phase or at the final checkpoint.
  if (res != NULL) {
    collector()->direct_allocated(res, adjustedSize);
    _direct_allocated_words += adjustedSize;
    // allocation counters
    NOT_PRODUCT(
      _numObjectsAllocated++;
      _numWordsAllocated += (int)adjustedSize;
    )
  }
  return res;
}

// In the case of direct allocation by mutators in a generation that
// is being concurrently collected, the object must be allocated
// live (grey) if the background collector has started marking.
// This is necessary because the marker may
// have passed this address and consequently this object will
// not otherwise be greyed and would be incorrectly swept up.
// Note that if this object contains references, the writing
// of those references will dirty the card containing this object
// allowing the object to be blackened (and its references scanned)
// either during a preclean phase or at the final checkpoint.
void CMSCollector::direct_allocated(HeapWord* start, size_t size) {
  assert(_markBitMap.covers(start, size), "Out of bounds");
  if (_collectorState >= Marking) {
    MutexLockerEx y(_markBitMap.lock(),
                    Mutex::_no_safepoint_check_flag);
    // [see comments preceding SweepClosure::do_blk() below for details]
    // 1. need to mark the object as live so it isn't collected
    // 2. need to mark the 2nd bit to indicate the object may be uninitialized
    // 3. need to mark the end of the object so sweeper can skip over it
    //    if it's uninitialized when the sweeper reaches it.
    _markBitMap.mark(start);          // object is live
    _markBitMap.mark(start + 1);      // object is potentially uninitialized?
    _markBitMap.mark(start + size - 1);
                                      // mark end of object
  }
  // check that oop looks uninitialized
  assert(oop(start)->klass_or_null() == NULL, "_klass should be NULL");
}

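// For example (illustrative): a direct allocation of 5 HeapWords at
// address A while marking is in progress leaves the mark bits
//   A: 1   A+1: 1   A+2: 0   A+3: 0   A+4: 1
// i.e. "live", "possibly uninitialized", and "end of block", which is
// exactly the pattern the sweeper tests for.
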
void CMSCollector::promoted(bool par, HeapWord* start,
                            bool is_obj_array, size_t obj_size) {
  assert(_markBitMap.covers(start), "Out of bounds");
  // See comment in direct_allocated() about when objects should
  // be allocated live.
  if (_collectorState >= Marking) {
    // we already hold the marking bit map lock, taken in
    // the prologue
    if (par) {
      _markBitMap.par_mark(start);
    } else {
      _markBitMap.mark(start);
    }
    // We don't need to mark the object as uninitialized (as
    // in direct_allocated above) because this is being done with the
    // world stopped and the object will be initialized by the
    // time the sweeper gets to look at it.
    assert(SafepointSynchronize::is_at_safepoint(),
           "expect promotion only at safepoints");

    if (_collectorState < Sweeping) {
      // Mark the appropriate cards in the modUnionTable, so that
      // this object gets scanned before the sweep. If this is
      // not done, CMS generation references in the object might
      // not get marked.
      // For the case of arrays, which are otherwise precisely
      // marked, we need to dirty the entire array, not just its head.
      if (is_obj_array) {
        // The [par_]mark_range() method expects mr.end() below to
        // be aligned to the granularity of a bit's representation
        // in the heap. In the case of the MUT below, that's a
        // card size.
        MemRegion mr(start,
                     (HeapWord*)round_to((intptr_t)(start + obj_size),
                      CardTableModRefBS::card_size /* bytes */));
        if (par) {
          _modUnionTable.par_mark_range(mr);
        } else {
          _modUnionTable.mark_range(mr);
        }
      } else {  // not an obj array; we can just mark the head
        if (par) {
          _modUnionTable.par_mark(start);
        } else {
          _modUnionTable.mark(start);
        }
      }
    }
  }
}

static inline size_t percent_of_space(Space* space, HeapWord* addr)
{
  size_t delta = pointer_delta(addr, space->bottom());
  return (size_t)(delta * 100.0 / (space->capacity() / HeapWordSize));
}

void CMSCollector::icms_update_allocation_limits()
{
  Generation* gen0 = GenCollectedHeap::heap()->get_gen(0);
  EdenSpace* eden = gen0->as_DefNewGeneration()->eden();

  const unsigned int duty_cycle = stats().icms_update_duty_cycle();
  if (CMSTraceIncrementalPacing) {
    stats().print();
  }

  assert(duty_cycle <= 100, "invalid duty cycle");
  if (duty_cycle != 0) {
    // The duty_cycle is a percentage between 0 and 100; convert to words and
    // then compute the offset from the endpoints of the space.
    size_t free_words = eden->free() / HeapWordSize;
    double free_words_dbl = (double)free_words;
    size_t duty_cycle_words = (size_t)(free_words_dbl * duty_cycle / 100.0);
    size_t offset_words = (free_words - duty_cycle_words) / 2;

    _icms_start_limit = eden->top() + offset_words;
    _icms_stop_limit = eden->end() - offset_words;

    // The limits may be adjusted (shifted to the right) by
    // CMSIncrementalOffset, to allow the application more mutator time after a
    // young gen gc (when all mutators were stopped) and before CMS starts and
    // takes away one or more cpus.
    if (CMSIncrementalOffset != 0) {
      double adjustment_dbl = free_words_dbl * CMSIncrementalOffset / 100.0;
      size_t adjustment = (size_t)adjustment_dbl;
      HeapWord* tmp_stop = _icms_stop_limit + adjustment;
      if (tmp_stop > _icms_stop_limit && tmp_stop < eden->end()) {
        _icms_start_limit += adjustment;
        _icms_stop_limit = tmp_stop;
      }
    }
  }
  if (duty_cycle == 0 || (_icms_start_limit == _icms_stop_limit)) {
    _icms_start_limit = _icms_stop_limit = eden->end();
  }

  // Install the new start limit.
  eden->set_soft_end(_icms_start_limit);

  if (CMSTraceIncrementalMode) {
    gclog_or_tty->print(" icms alloc limits:  "
                        PTR_FORMAT "," PTR_FORMAT
                        " (" SIZE_FORMAT "%%," SIZE_FORMAT "%%) ",
                        _icms_start_limit, _icms_stop_limit,
                        percent_of_space(eden, _icms_start_limit),
                        percent_of_space(eden, _icms_stop_limit));
    if (Verbose) {
      gclog_or_tty->print("eden:  ");
      eden->print_on(gclog_or_tty);
    }
  }
}

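// Illustrative arithmetic for the limits above (example values): if eden
// free() corresponds to 1000 HeapWords and the duty cycle is 40, then
// duty_cycle_words = 400 and offset_words = (1000 - 400) / 2 = 300, so
// _icms_start_limit = top() + 300 and _icms_stop_limit = end() - 300:
// incremental CMS work runs while the middle 40% of the remaining eden
// is being allocated (before any CMSIncrementalOffset adjustment).
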
// Any changes here should try to maintain the invariant
// that if this method is called with _icms_start_limit
// and _icms_stop_limit both NULL, then it should return NULL
// and not notify the icms thread.
HeapWord*
CMSCollector::allocation_limit_reached(Space* space, HeapWord* top,
                                       size_t word_size)
{
  // A start_limit equal to end() means the duty cycle is 0, so treat that as a
  // nop.
  if (CMSIncrementalMode && _icms_start_limit != space->end()) {
    if (top <= _icms_start_limit) {
      if (CMSTraceIncrementalMode) {
        space->print_on(gclog_or_tty);
        gclog_or_tty->stamp();
        gclog_or_tty->print_cr(" start limit top=" PTR_FORMAT
                               ", new limit=" PTR_FORMAT
                               " (" SIZE_FORMAT "%%)",
                               top, _icms_stop_limit,
                               percent_of_space(space, _icms_stop_limit));
      }
      ConcurrentMarkSweepThread::start_icms();
      assert(top < _icms_stop_limit, "Tautology");
      if (word_size < pointer_delta(_icms_stop_limit, top)) {
        return _icms_stop_limit;
      }

      // The allocation will cross both the _start and _stop limits, so do the
      // stop notification also and return end().
      if (CMSTraceIncrementalMode) {
        space->print_on(gclog_or_tty);
        gclog_or_tty->stamp();
        gclog_or_tty->print_cr(" +stop limit top=" PTR_FORMAT
                               ", new limit=" PTR_FORMAT
                               " (" SIZE_FORMAT "%%)",
                               top, space->end(),
                               percent_of_space(space, space->end()));
      }
      ConcurrentMarkSweepThread::stop_icms();
      return space->end();
    }

    if (top <= _icms_stop_limit) {
      if (CMSTraceIncrementalMode) {
        space->print_on(gclog_or_tty);
        gclog_or_tty->stamp();
        gclog_or_tty->print_cr(" stop limit top=" PTR_FORMAT
                               ", new limit=" PTR_FORMAT
                               " (" SIZE_FORMAT "%%)",
                               top, space->end(),
                               percent_of_space(space, space->end()));
      }
      ConcurrentMarkSweepThread::stop_icms();
      return space->end();
    }

    if (CMSTraceIncrementalMode) {
      space->print_on(gclog_or_tty);
      gclog_or_tty->stamp();
      gclog_or_tty->print_cr(" end limit top=" PTR_FORMAT
                             ", new limit=" PTR_FORMAT,
                             top, NULL);
    }
  }

  return NULL;
}

oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
  // allocate, copy and if necessary update promoinfo --
  // delegate to underlying space.
  assert_lock_strong(freelistLock());

#ifndef PRODUCT
  if (Universe::heap()->promotion_should_fail()) {
    return NULL;
  }
#endif  // #ifndef PRODUCT

  oop res = _cmsSpace->promote(obj, obj_size);
  if (res == NULL) {
    // expand and retry
    size_t s = _cmsSpace->expansionSpaceRequired(obj_size);  // HeapWords
    expand(s*HeapWordSize, MinHeapDeltaBytes,
      CMSExpansionCause::_satisfy_promotion);
    // Since there's currently no next generation, we don't try to promote
    // into a more senior generation.
    assert(next_gen() == NULL, "assumption, based upon which no attempt "
                               "is made to pass on a possibly failing "
                               "promotion to next generation");
    res = _cmsSpace->promote(obj, obj_size);
  }
  if (res != NULL) {
    // See comment in allocate() about when objects should
    // be allocated live.
    assert(obj->is_oop(), "Will dereference klass pointer below");
    collector()->promoted(false,           // Not parallel
                          (HeapWord*)res, obj->is_objArray(), obj_size);
    // promotion counters
    NOT_PRODUCT(
      _numObjectsPromoted++;
      _numWordsPromoted +=
        (int)(CompactibleFreeListSpace::adjustObjectSize(obj->size()));
    )
  }
  return res;
}


HeapWord*
ConcurrentMarkSweepGeneration::allocation_limit_reached(Space* space,
                                                        HeapWord* top,
                                                        size_t word_sz)
{
  return collector()->allocation_limit_reached(space, top, word_sz);
}

// Things to support parallel young-gen collection.
oop
ConcurrentMarkSweepGeneration::par_promote(int thread_num,
                                           oop old, markOop m,
                                           size_t word_sz) {
#ifndef PRODUCT
  if (Universe::heap()->promotion_should_fail()) {
    return NULL;
  }
#endif  // #ifndef PRODUCT

  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
  PromotionInfo* promoInfo = &ps->promo;
  // if we are tracking promotions, then first ensure space for
  // promotion (including spooling space for saving header if necessary).
  // then allocate and copy, then track promoted info if needed.
  // When tracking (see PromotionInfo::track()), the mark word may
  // be displaced and in this case restoration of the mark word
  // occurs in the (oop_since_save_marks_)iterate phase.
  if (promoInfo->tracking() && !promoInfo->ensure_spooling_space()) {
    // Out of space for allocating spooling buffers;
    // try expanding and allocating spooling buffers.
    if (!expand_and_ensure_spooling_space(promoInfo)) {
      return NULL;
    }
  }
  assert(promoInfo->has_spooling_space(), "Control point invariant");
  HeapWord* obj_ptr = ps->lab.alloc(word_sz);
  if (obj_ptr == NULL) {
    obj_ptr = expand_and_par_lab_allocate(ps, word_sz);
    if (obj_ptr == NULL) {
      return NULL;
    }
  }
  oop obj = oop(obj_ptr);
  assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
  // Otherwise, copy the object. Here we must be careful to insert the
  // klass pointer last, since this marks the block as an allocated object.
  // Except with compressed oops it's the mark word.
  HeapWord* old_ptr = (HeapWord*)old;
  if (word_sz > (size_t)oopDesc::header_size()) {
    Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(),
                                 obj_ptr + oopDesc::header_size(),
                                 word_sz - oopDesc::header_size());
  }

  if (UseCompressedOops) {
    // Copy gap missed by (aligned) header size calculation above
    obj->set_klass_gap(old->klass_gap());
  }

  // Restore the mark word copied above.
  obj->set_mark(m);

  // Now we can track the promoted object, if necessary. We take care
  // to delay the transition from uninitialized to full object
  // (i.e., insertion of klass pointer) until after, so that it
  // atomically becomes a promoted object.
  if (promoInfo->tracking()) {
    promoInfo->track((PromotedObject*)obj, old->klass());
  }

  // Finally, install the klass pointer (this should be volatile).
  obj->set_klass(old->klass());

  assert(old->is_oop(), "Will dereference klass ptr below");
  collector()->promoted(true,          // parallel
                        obj_ptr, old->is_objArray(), word_sz);

  NOT_PRODUCT(
    Atomic::inc(&_numObjectsPromoted);
    Atomic::add((jint)CompactibleFreeListSpace::adjustObjectSize(obj->size()),
                &_numWordsPromoted);
  )

  return obj;
}

void
ConcurrentMarkSweepGeneration::
par_promote_alloc_undo(int thread_num,
                       HeapWord* obj, size_t word_sz) {
  // CMS does not support promotion undo.
  ShouldNotReachHere();
}

void
ConcurrentMarkSweepGeneration::
par_promote_alloc_done(int thread_num) {
  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
  ps->lab.retire();
#if CFLS_LAB_REFILL_STATS
  if (thread_num == 0) {
    _cmsSpace->print_par_alloc_stats();
  }
#endif
}

void
ConcurrentMarkSweepGeneration::
par_oop_since_save_marks_iterate_done(int thread_num) {
  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
  ParScanWithoutBarrierClosure* dummy_cl = NULL;
  ps->promo.promoted_oops_iterate_nv(dummy_cl);
}

// XXXPERM
bool ConcurrentMarkSweepGeneration::should_collect(bool   full,
                                                   size_t size,
                                                   bool   tlab)
{
  // We allow a STW collection only if a full
  // collection was requested.
  return full || should_allocate(size, tlab); // FIX ME !!!
  // This and promotion failure handling are connected at the
  // hip and should be fixed by untying them.
}

bool CMSCollector::shouldConcurrentCollect() {
  if (_full_gc_requested) {
    assert(ExplicitGCInvokesConcurrent, "Unexpected state");
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print_cr("CMSCollector: collect because of explicit "
                             "gc request");
1402 }
1403 return true;
1404 }
1406 // For debugging purposes, change the type of collection.
1407 // If the rotation is not on the concurrent collection
1408 // type, don't start a concurrent collection.
1409 NOT_PRODUCT(
1410 if (RotateCMSCollectionTypes &&
1411 (_cmsGen->debug_collection_type() !=
1412 ConcurrentMarkSweepGeneration::Concurrent_collection_type)) {
1413 assert(_cmsGen->debug_collection_type() !=
1414 ConcurrentMarkSweepGeneration::Unknown_collection_type,
1415 "Bad cms collection type");
1416 return false;
1417 }
1418 )
1420 FreelistLocker x(this);
1421 // ------------------------------------------------------------------
1422 // Print out lots of information which affects the initiation of
1423 // a collection.
1424 if (PrintCMSInitiationStatistics && stats().valid()) {
1425 gclog_or_tty->print("CMSCollector shouldConcurrentCollect: ");
1426 gclog_or_tty->stamp();
1427 gclog_or_tty->print_cr("");
1428 stats().print_on(gclog_or_tty);
1429 gclog_or_tty->print_cr("time_until_cms_gen_full %3.7f",
1430 stats().time_until_cms_gen_full());
1431 gclog_or_tty->print_cr("free="SIZE_FORMAT, _cmsGen->free());
1432 gclog_or_tty->print_cr("contiguous_available="SIZE_FORMAT,
1433 _cmsGen->contiguous_available());
1434 gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate());
1435 gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate());
1436 gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy());
1437 gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
1438 gclog_or_tty->print_cr("initiatingPermOccupancy=%3.7f", _permGen->initiating_occupancy());
1439 }
1440 // ------------------------------------------------------------------
1442 // If the estimated time to complete a cms collection (cms_duration())
1443 // is less than the estimated time remaining until the cms generation
1444 // is full, start a collection.
1445 if (!UseCMSInitiatingOccupancyOnly) {
1446 if (stats().valid()) {
1447 if (stats().time_until_cms_start() == 0.0) {
1448 return true;
1449 }
1450 } else {
1451 // We want to conservatively collect somewhat early in order
1452 // to try and "bootstrap" our CMS/promotion statistics;
1453 // this branch will not fire after the first successful CMS
1454 // collection because the stats should then be valid.
1455 if (_cmsGen->occupancy() >= _bootstrap_occupancy) {
1456 if (Verbose && PrintGCDetails) {
1457 gclog_or_tty->print_cr(
1458 " CMSCollector: collect for bootstrapping statistics:"
1459 " occupancy = %f, boot occupancy = %f", _cmsGen->occupancy(),
1460 _bootstrap_occupancy);
1461 }
1462 return true;
1463 }
1464 }
1465 }
1467 // Otherwise, we start a collection cycle if either the perm gen or
1468 // old gen want a collection cycle started. Each may use
1469 // an appropriate criterion for making this decision.
1470 // XXX We need to make sure that the gen expansion
1471 // criterion dovetails well with this. XXX NEED TO FIX THIS
1472 if (_cmsGen->should_concurrent_collect()) {
1473 if (Verbose && PrintGCDetails) {
1474 gclog_or_tty->print_cr("CMS old gen initiated");
1475 }
1476 return true;
1477 }
1479 // We start a collection if we believe an incremental collection may fail;
1480 // this is not likely to be productive in practice because it's probably too
1481 // late anyway.
1482 GenCollectedHeap* gch = GenCollectedHeap::heap();
1483 assert(gch->collector_policy()->is_two_generation_policy(),
1484 "You may want to check the correctness of the following");
1485 if (gch->incremental_collection_will_fail()) {
1486 if (PrintGCDetails && Verbose) {
1487 gclog_or_tty->print("CMSCollector: collect because incremental collection will fail ");
1488 }
1489 return true;
1490 }
1492 if (CMSClassUnloadingEnabled && _permGen->should_concurrent_collect()) {
1493 bool res = update_should_unload_classes();
1494 if (res) {
1495 if (Verbose && PrintGCDetails) {
1496 gclog_or_tty->print_cr("CMS perm gen initiated");
1497 }
1498 return true;
1499 }
1500 }
1501 return false;
1502 }
1504 // Clear _expansion_cause fields of constituent generations
1505 void CMSCollector::clear_expansion_cause() {
1506 _cmsGen->clear_expansion_cause();
1507 _permGen->clear_expansion_cause();
1508 }
1510 // We should be conservative in starting a collection cycle. To
1511 // start too eagerly runs the risk of collecting too often in the
1512 // extreme. To collect too rarely falls back on full collections,
1513 // which works, even if not optimal in terms of concurrent work.
1514 // As a workaround for collecting too eagerly, use the flag
1515 // UseCMSInitiatingOccupancyOnly. This also has the advantage of
1516 // giving the user an easily understandable way of controlling the
1517 // collections.
1518 // We want to start a new collection cycle if any of the following
1519 // conditions hold:
1520 // . our current occupancy exceeds the configured initiating occupancy
1521 // for this generation, or
1522 // . we recently needed to expand this space and have not, since that
1523 // expansion, done a collection of this generation, or
1524 // . the underlying space believes that it may be a good idea to initiate
1525 // a concurrent collection (this may be based on criteria such as the
1526 // following: the space uses linear allocation and linear allocation is
1527 // going to fail, or there is believed to be excessive fragmentation in
1528 // the generation, etc... or ...
1529 // [.(currently done by CMSCollector::shouldConcurrentCollect() only for
1530 // the case of the old generation, not the perm generation; see CR 6543076):
1531 // we may be approaching a point at which allocation requests may fail because
1532 // we will be out of sufficient free space given allocation rate estimates.]
1533 bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const {
1535 assert_lock_strong(freelistLock());
1536 if (occupancy() > initiating_occupancy()) {
1537 if (PrintGCDetails && Verbose) {
1538 gclog_or_tty->print(" %s: collect because of occupancy %f / %f ",
1539 short_name(), occupancy(), initiating_occupancy());
1540 }
1541 return true;
1542 }
1543 if (UseCMSInitiatingOccupancyOnly) {
1544 return false;
1545 }
1546 if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
1547 if (PrintGCDetails && Verbose) {
1548 gclog_or_tty->print(" %s: collect because expanded for allocation ",
1549 short_name());
1550 }
1551 return true;
1552 }
1553 if (_cmsSpace->should_concurrent_collect()) {
1554 if (PrintGCDetails && Verbose) {
1555 gclog_or_tty->print(" %s: collect because cmsSpace says so ",
1556 short_name());
1557 }
1558 return true;
1559 }
1560 return false;
1561 }
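// Illustrative tuning note (a sketch of intended usage, assuming the
// standard HotSpot flags): running with
//   -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=70
// makes the occupancy test above the sole initiation criterion for this
// generation, with initiating_occupancy() derived from the given fraction
// (i.e., a cycle starts once occupancy() exceeds 0.70).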
1563 void ConcurrentMarkSweepGeneration::collect(bool full,
1564 bool clear_all_soft_refs,
1565 size_t size,
1566 bool tlab)
1567 {
1568 collector()->collect(full, clear_all_soft_refs, size, tlab);
1569 }
1571 void CMSCollector::collect(bool full,
1572 bool clear_all_soft_refs,
1573 size_t size,
1574 bool tlab)
1575 {
1576 if (!UseCMSCollectionPassing && _collectorState > Idling) {
1577 // For debugging purposes skip the collection if the state
1578 // is not currently idle
1579 if (TraceCMSState) {
1580 gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " skipped full:%d CMS state %d",
1581 Thread::current(), full, _collectorState);
1582 }
1583 return;
1584 }
1586 // The following "if" branch is present for defensive reasons.
1587 // In the current uses of this interface, it can be replaced with:
1588 // assert(!GC_locker::is_active(), "Can't be called otherwise");
1589 // But I am not placing that assert here to allow future
1590 // generality in invoking this interface.
1591 if (GC_locker::is_active()) {
1592 // A consistency test for GC_locker
1593 assert(GC_locker::needs_gc(), "Should have been set already");
1594 // Skip this foreground collection, instead
1595 // expanding the heap if necessary.
1596 // Need the free list locks for the call to free() in compute_new_size()
1597 compute_new_size();
1598 return;
1599 }
1600 acquire_control_and_collect(full, clear_all_soft_refs);
1601 _full_gcs_since_conc_gc++;
1603 }
1605 void CMSCollector::request_full_gc(unsigned int full_gc_count) {
1606 GenCollectedHeap* gch = GenCollectedHeap::heap();
1607 unsigned int gc_count = gch->total_full_collections();
1608 if (gc_count == full_gc_count) {
1609 MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
1610 _full_gc_requested = true;
1611 CGC_lock->notify(); // nudge CMS thread
1612 }
1613 }
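// Illustrative caller pattern (a sketch; the actual call sites live in
// GenCollectedHeap): capture the current full-collection count and pass
// it in, so that the request is registered only if no full collection
// has intervened since the count was read:
//   unsigned int count = GenCollectedHeap::heap()->total_full_collections();
//   collector->request_full_gc(count);   // collector: a CMSCollector*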
1616 // The foreground and background collectors need to coordinate in order
1617 // to make sure that they do not mutually interfere with CMS collections.
1618 // When a background collection is active,
1619 // the foreground collector may need to take over (preempt) and
1620 // synchronously complete an ongoing collection. Depending on the
1621 // frequency of the background collections and the heap usage
1622 // of the application, this preemption can be seldom or frequent.
1623 // There are only certain
1624 // points in the background collection at which the "collection-baton"
1625 // can be passed to the foreground collector.
1626 //
1627 // The foreground collector will wait for the baton before
1628 // starting any part of the collection. The foreground collector
1629 // will only wait at one location.
1630 //
1631 // The background collector will yield the baton before starting a new
1632 // phase of the collection (e.g., before initial marking, marking from roots,
1633 // precleaning, final re-mark, sweep etc.) This is normally done at the head
1634 // of the loop which switches the phases. The background collector does some
1635 // of the phases (initial mark, final re-mark) with the world stopped.
1636 // Because of locking involved in stopping the world,
1637 // the foreground collector should not block waiting for the background
1638 // collector when it is doing a stop-the-world phase. The background
1639 // collector will yield the baton at an additional point just before
1640 // it enters a stop-the-world phase. Once the world is stopped, the
1641 // background collector checks the phase of the collection. If the
1642 // phase has not changed, it proceeds with the collection. If the
1643 // phase has changed, it skips that phase of the collection. See
1644 // the comments on the use of the Heap_lock in collect_in_background().
1645 //
1646 // Variables used in baton passing.
1647 // _foregroundGCIsActive - Set to true by the foreground collector when
1648 // it wants the baton. The foreground clears it when it has finished
1649 // the collection.
1650 // _foregroundGCShouldWait - Set to true by the background collector
1651 // when it is running. The foreground collector waits while
1652 // _foregroundGCShouldWait is true.
1653 // CGC_lock - monitor used to protect access to the above variables
1654 // and to notify the foreground and background collectors.
1655 // _collectorState - current state of the CMS collection.
1656 //
1657 // The foreground collector
1658 // acquires the CGC_lock
1659 // sets _foregroundGCIsActive
1660 // waits on the CGC_lock for _foregroundGCShouldWait to be false
1661 // various locks acquired in preparation for the collection
1662 // are released so as not to block the background collector
1663 // that is in the midst of a collection
1664 // proceeds with the collection
1665 // clears _foregroundGCIsActive
1666 // returns
1667 //
1668 // The background collector in a loop iterating on the phases of the
1669 // collection
1670 // acquires the CGC_lock
1671 // sets _foregroundGCShouldWait
1672 // if _foregroundGCIsActive is set
1673 // clears _foregroundGCShouldWait, notifies CGC_lock
1674 // waits on CGC_lock for _foregroundGCIsActive to become false
1675 // and exits the loop.
1676 // otherwise
1677 // proceed with that phase of the collection
1678 // if the phase is a stop-the-world phase,
1679 // yield the baton once more just before enqueueing
1680 // the stop-world CMS operation (executed by the VM thread).
1681 // returns after all phases of the collection are done
1682 //
1684 void CMSCollector::acquire_control_and_collect(bool full,
1685 bool clear_all_soft_refs) {
1686 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
1687 assert(!Thread::current()->is_ConcurrentGC_thread(),
1688 "shouldn't try to acquire control from self!");
1690 // Start the protocol for acquiring control of the
1691 // collection from the background collector (aka CMS thread).
1692 assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1693 "VM thread should have CMS token");
1694 // Remember the possibly interrupted state of an ongoing
1695 // concurrent collection
1696 CollectorState first_state = _collectorState;
1698 // Signal to a possibly ongoing concurrent collection that
1699 // we want to do a foreground collection.
1700 _foregroundGCIsActive = true;
1702 // Disable incremental mode during a foreground collection.
1703 ICMSDisabler icms_disabler;
1705 // Release locks and wait for a notify from the background collector;
1706 // releasing the locks is only necessary for phases which
1707 // yield, to improve the granularity of the collection.
1708 assert_lock_strong(bitMapLock());
1709 // We need to lock the Free list lock for the space that we are
1710 // currently collecting.
1711 assert(haveFreelistLocks(), "Must be holding free list locks");
1712 bitMapLock()->unlock();
1713 releaseFreelistLocks();
1714 {
1715 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1716 if (_foregroundGCShouldWait) {
1717 // We are going to be waiting for action from the CMS thread;
1718 // it had better not be gone (for instance at shutdown)!
1719 assert(ConcurrentMarkSweepThread::cmst() != NULL,
1720 "CMS thread must be running");
1721 // Wait here until the background collector gives us the go-ahead
1722 ConcurrentMarkSweepThread::clear_CMS_flag(
1723 ConcurrentMarkSweepThread::CMS_vm_has_token); // release token
1724 // Get a possibly blocked CMS thread going:
1725 // Note that we set _foregroundGCIsActive true above,
1726 // without protection of the CGC_lock.
1727 CGC_lock->notify();
1728 assert(!ConcurrentMarkSweepThread::vm_thread_wants_cms_token(),
1729 "Possible deadlock");
1730 while (_foregroundGCShouldWait) {
1731 // wait for notification
1732 CGC_lock->wait(Mutex::_no_safepoint_check_flag);
1733 // Possibility of delay/starvation here, since the CMS token does
1734 // not know to give priority to the VM thread? Actually, I think
1735 // there wouldn't be any delay/starvation, but the proof of
1736 // that "fact" (?) appears non-trivial. XXX 20011219YSR
1737 }
1738 ConcurrentMarkSweepThread::set_CMS_flag(
1739 ConcurrentMarkSweepThread::CMS_vm_has_token);
1740 }
1741 }
1742 // The CMS_token is already held. Get back the other locks.
1743 assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1744 "VM thread should have CMS token");
1745 getFreelistLocks();
1746 bitMapLock()->lock_without_safepoint_check();
1747 if (TraceCMSState) {
1748 gclog_or_tty->print_cr("CMS foreground collector has asked for control "
1749 INTPTR_FORMAT " with first state %d", Thread::current(), first_state);
1750 gclog_or_tty->print_cr(" gets control with state %d", _collectorState);
1751 }
1753 // Check if we need to do a compaction, or if not, whether
1754 // we need to start the mark-sweep from scratch.
1755 bool should_compact = false;
1756 bool should_start_over = false;
1757 decide_foreground_collection_type(clear_all_soft_refs,
1758 &should_compact, &should_start_over);
1760 NOT_PRODUCT(
1761 if (RotateCMSCollectionTypes) {
1762 if (_cmsGen->debug_collection_type() ==
1763 ConcurrentMarkSweepGeneration::MSC_foreground_collection_type) {
1764 should_compact = true;
1765 } else if (_cmsGen->debug_collection_type() ==
1766 ConcurrentMarkSweepGeneration::MS_foreground_collection_type) {
1767 should_compact = false;
1768 }
1769 }
1770 )
1772 if (PrintGCDetails && first_state > Idling) {
1773 GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause();
1774 if (GCCause::is_user_requested_gc(cause) ||
1775 GCCause::is_serviceability_requested_gc(cause)) {
1776 gclog_or_tty->print(" (concurrent mode interrupted)");
1777 } else {
1778 gclog_or_tty->print(" (concurrent mode failure)");
1779 }
1780 }
1782 if (should_compact) {
1783 // If the collection is being acquired from the background
1784 // collector, there may be references on the discovered
1785 // references lists that have NULL referents (being those
1786 // that were concurrently cleared by a mutator) or
1787 // that are no longer active (having been enqueued concurrently
1788 // by the mutator).
1789 // Scrub the list of those references because Mark-Sweep-Compact
1790 // code assumes referents are not NULL and that all discovered
1791 // Reference objects are active.
1792 ref_processor()->clean_up_discovered_references();
1794 do_compaction_work(clear_all_soft_refs);
1796 // Has the GC time limit been exceeded?
1797 check_gc_time_limit();
1799 } else {
1800 do_mark_sweep_work(clear_all_soft_refs, first_state,
1801 should_start_over);
1802 }
1803 // Reset the expansion cause, now that we just completed
1804 // a collection cycle.
1805 clear_expansion_cause();
1806 _foregroundGCIsActive = false;
1807 return;
1808 }
1810 void CMSCollector::check_gc_time_limit() {
1812 // Ignore explicit GC's. Exiting here does not set the flag and
1813 // does not reset the count. Updating of the averages for system
1814 // GC's is still controlled by UseAdaptiveSizePolicyWithSystemGC.
1815 GCCause::Cause gc_cause = GenCollectedHeap::heap()->gc_cause();
1816 if (GCCause::is_user_requested_gc(gc_cause) ||
1817 GCCause::is_serviceability_requested_gc(gc_cause)) {
1818 return;
1819 }
1821 // Calculate the fraction of the CMS generation that was freed during
1822 // the last collection.
1823 // Only consider the STW compacting cost for now.
1824 //
1825 // Note that the gc time limit test only works for the collections
1826 // of the young gen + tenured gen and not for collections of the
1827 // permanent gen. That is because the calculation of the space
1828 // freed by the collection is the free space in the young gen +
1829 // tenured gen.
1831 double fraction_free =
1832 ((double)_cmsGen->free())/((double)_cmsGen->max_capacity());
1833 if ((100.0 * size_policy()->compacting_gc_cost()) >
1834 ((double) GCTimeLimit) &&
1835 ((fraction_free * 100) < GCHeapFreeLimit)) {
1836 size_policy()->inc_gc_time_limit_count();
1837 if (UseGCOverheadLimit &&
1838 (size_policy()->gc_time_limit_count() >
1839 AdaptiveSizePolicyGCTimeLimitThreshold)) {
1840 size_policy()->set_gc_time_limit_exceeded(true);
1841 // Avoid consecutive OOM due to the gc time limit by resetting
1842 // the counter.
1843 size_policy()->reset_gc_time_limit_count();
1844 if (PrintGCDetails) {
1845 gclog_or_tty->print_cr(" GC is exceeding overhead limit "
1846 "of %d%%", GCTimeLimit);
1847 }
1848 } else {
1849 if (PrintGCDetails) {
1850 gclog_or_tty->print_cr(" GC would exceed overhead limit "
1851 "of %d%%", GCTimeLimit);
1852 }
1853 }
1854 } else {
1855 size_policy()->reset_gc_time_limit_count();
1856 }
1857 }
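// Worked example (assuming the HotSpot defaults GCTimeLimit=98 and
// GCHeapFreeLimit=2): if compacting_gc_cost() is 0.99 -- i.e. 99% of
// recent time was spent collecting -- and only 1% of the CMS generation
// is free, then 100.0 * 0.99 = 99 > 98 and 0.01 * 100 = 1 < 2, so the
// limit count is incremented; once the count exceeds
// AdaptiveSizePolicyGCTimeLimitThreshold (with UseGCOverheadLimit on),
// gc_time_limit_exceeded is set and the collection is deemed to have
// exceeded the GC overhead limit.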
1859 // Resize the perm generation and the tenured generation
1860 // after obtaining the free list locks for the
1861 // two generations.
1862 void CMSCollector::compute_new_size() {
1863 assert_locked_or_safepoint(Heap_lock);
1864 FreelistLocker z(this);
1865 _permGen->compute_new_size();
1866 _cmsGen->compute_new_size();
1867 }
1869 // A work method used by foreground collection to determine
1870 // what type of collection (compacting or not, continuing or fresh)
1871 // it should do.
1872 // NOTE: the intent is to make UseCMSCompactAtFullCollection
1873 // and CMSCompactWhenClearAllSoftRefs the default in the future
1874 // and do away with the flags after a suitable period.
1875 void CMSCollector::decide_foreground_collection_type(
1876 bool clear_all_soft_refs, bool* should_compact,
1877 bool* should_start_over) {
1878 // Normally, we'll compact only if the UseCMSCompactAtFullCollection
1879 // flag is set, and we have either requested a System.gc() or
1880 // the number of full gc's since the last concurrent cycle
1881 // has exceeded the threshold set by CMSFullGCsBeforeCompaction,
1882 // or if an incremental collection has failed
1883 GenCollectedHeap* gch = GenCollectedHeap::heap();
1884 assert(gch->collector_policy()->is_two_generation_policy(),
1885 "You may want to check the correctness of the following");
1886 // Inform cms gen if this was due to partial collection failing.
1887 // The CMS gen may use this fact to determine its expansion policy.
1888 if (gch->incremental_collection_will_fail()) {
1889 assert(!_cmsGen->incremental_collection_failed(),
1890 "Should have been noticed, reacted to and cleared");
1891 _cmsGen->set_incremental_collection_failed();
1892 }
1893 *should_compact =
1894 UseCMSCompactAtFullCollection &&
1895 ((_full_gcs_since_conc_gc >= CMSFullGCsBeforeCompaction) ||
1896 GCCause::is_user_requested_gc(gch->gc_cause()) ||
1897 gch->incremental_collection_will_fail());
1898 *should_start_over = false;
1899 if (clear_all_soft_refs && !*should_compact) {
1900 // We are about to do a last-ditch collection attempt
1901 // so it would normally make sense to do a compaction
1902 // to reclaim as much space as possible.
1903 if (CMSCompactWhenClearAllSoftRefs) {
1904 // Default: The rationale is that in this case either
1905 // we are past the final marking phase, in which case
1906 // we'd have to start over, or so little has been done
1907 // that there's little point in saving that work. Compaction
1908 // appears to be the sensible choice in either case.
1909 *should_compact = true;
1910 } else {
1911 // We have been asked to clear all soft refs, but not to
1912 // compact. Make sure that we aren't past the final checkpoint
1913 // phase, for that is where we process soft refs. If we are already
1914 // past that phase, we'll need to redo the refs discovery phase and
1915 // if necessary clear soft refs that weren't previously
1916 // cleared. We do so by remembering the phase in which
1917 // we came in, and if we are past the refs processing
1918 // phase, we'll choose to just redo the mark-sweep
1919 // collection from scratch.
1920 if (_collectorState > FinalMarking) {
1921 // We are past the refs processing phase;
1922 // start over and do a fresh synchronous CMS cycle
1923 _collectorState = Resetting; // skip to reset to start new cycle
1924 reset(false /* == !asynch */);
1925 *should_start_over = true;
1926 } // else we can continue a possibly ongoing current cycle
1927 }
1928 }
1929 }
1931 // A work method used by the foreground collector to do
1932 // a mark-sweep-compact.
1933 void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
1934 GenCollectedHeap* gch = GenCollectedHeap::heap();
1935 TraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, gclog_or_tty);
1936 if (PrintGC && Verbose && !(GCCause::is_user_requested_gc(gch->gc_cause()))) {
1937 gclog_or_tty->print_cr("Compact ConcurrentMarkSweepGeneration after %d "
1938 "collections passed to foreground collector", _full_gcs_since_conc_gc);
1939 }
1941 // Sample collection interval time and reset for collection pause.
1942 if (UseAdaptiveSizePolicy) {
1943 size_policy()->msc_collection_begin();
1944 }
1946 // Temporarily widen the span of the weak reference processing to
1947 // the entire heap.
1948 MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
1949 ReferenceProcessorSpanMutator x(ref_processor(), new_span);
1951 // Temporarily, clear the "is_alive_non_header" field of the
1952 // reference processor.
1953 ReferenceProcessorIsAliveMutator y(ref_processor(), NULL);
1955 // Temporarily make reference _processing_ single threaded (non-MT).
1956 ReferenceProcessorMTProcMutator z(ref_processor(), false);
1958 // Temporarily make refs discovery atomic
1959 ReferenceProcessorAtomicMutator w(ref_processor(), true);
1961 ref_processor()->set_enqueuing_is_done(false);
1962 ref_processor()->enable_discovery();
1963 // If an asynchronous collection finishes, the _modUnionTable is
1964 // all clear. If we are taking over the collection from an asynchronous
1965 // collection, clear the _modUnionTable.
1966 assert(_collectorState != Idling || _modUnionTable.isAllClear(),
1967 "_modUnionTable should be clear if the baton was not passed");
1968 _modUnionTable.clear_all();
1970 // We must adjust the allocation statistics being maintained
1971 // in the free list space. We do so by reading and clearing
1972 // the sweep timer and updating the block flux rate estimates below.
1973 assert(_sweep_timer.is_active(), "We should never see the timer inactive");
1974 _sweep_timer.stop();
1975 // Note that we do not use this sample to update the _sweep_estimate.
1976 _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_sweep_timer.seconds()),
1977 _sweep_estimate.padded_average());
1979 GenMarkSweep::invoke_at_safepoint(_cmsGen->level(),
1980 ref_processor(), clear_all_soft_refs);
1981 #ifdef ASSERT
1982 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
1983 size_t free_size = cms_space->free();
1984 assert(free_size ==
1985 pointer_delta(cms_space->end(), cms_space->compaction_top())
1986 * HeapWordSize,
1987 "All the free space should be compacted into one chunk at top");
1988 assert(cms_space->dictionary()->totalChunkSize(
1989 debug_only(cms_space->freelistLock())) == 0 ||
1990 cms_space->totalSizeInIndexedFreeLists() == 0,
1991 "All the free space should be in a single chunk");
1992 size_t num = cms_space->totalCount();
1993 assert((free_size == 0 && num == 0) ||
1994 (free_size > 0 && (num == 1 || num == 2)),
1995 "There should be at most 2 free chunks after compaction");
1996 #endif // ASSERT
1997 _collectorState = Resetting;
1998 assert(_restart_addr == NULL,
1999 "Should have been NULL'd before baton was passed");
2000 reset(false /* == !asynch */);
2001 _cmsGen->reset_after_compaction();
2002 _concurrent_cycles_since_last_unload = 0;
2004 if (verifying() && !should_unload_classes()) {
2005 perm_gen_verify_bit_map()->clear_all();
2006 }
2008 // Clear any data recorded in the PLAB chunk arrays.
2009 if (_survivor_plab_array != NULL) {
2010 reset_survivor_plab_arrays();
2011 }
2013 // Adjust the per-size allocation stats for the next epoch.
2014 _cmsGen->cmsSpace()->endSweepFLCensus(sweepCount() /* fake */);
2015 // Restart the "sweep timer" for next epoch.
2016 _sweep_timer.reset();
2017 _sweep_timer.start();
2019 // Sample collection pause time and reset for collection interval.
2020 if (UseAdaptiveSizePolicy) {
2021 size_policy()->msc_collection_end(gch->gc_cause());
2022 }
2024 // For a mark-sweep-compact, compute_new_size() will be called
2025 // in the heap's do_collection() method.
2026 }
2028 // A work method used by the foreground collector to do
2029 // a mark-sweep, after taking over from a possibly ongoing
2030 // concurrent mark-sweep collection.
2031 void CMSCollector::do_mark_sweep_work(bool clear_all_soft_refs,
2032 CollectorState first_state, bool should_start_over) {
2033 if (PrintGC && Verbose) {
2034 gclog_or_tty->print_cr("Pass concurrent collection to foreground "
2035 "collector with count %d",
2036 _full_gcs_since_conc_gc);
2037 }
2038 switch (_collectorState) {
2039 case Idling:
2040 if (first_state == Idling || should_start_over) {
2041 // The background GC was not active, or should be
2042 // restarted from scratch; start the cycle.
2043 _collectorState = InitialMarking;
2044 }
2045 // If first_state was not Idling, then a background GC
2046 // was in progress and has now finished. No need to do it
2047 // again. Leave the state as Idling.
2048 break;
2049 case Precleaning:
2050 // In the foreground case don't do the precleaning since
2051 // it is not done concurrently and there is extra work
2052 // required.
2053 _collectorState = FinalMarking;
2054 }
2055 if (PrintGCDetails &&
2056 (_collectorState > Idling ||
2057 !GCCause::is_user_requested_gc(GenCollectedHeap::heap()->gc_cause()))) {
2058 gclog_or_tty->print(" (concurrent mode failure)");
2059 }
2060 collect_in_foreground(clear_all_soft_refs);
2062 // For a mark-sweep, compute_new_size() will be called
2063 // in the heap's do_collection() method.
2064 }
2067 void CMSCollector::getFreelistLocks() const {
2068 // Get locks for all free lists in all generations that this
2069 // collector is responsible for
2070 _cmsGen->freelistLock()->lock_without_safepoint_check();
2071 _permGen->freelistLock()->lock_without_safepoint_check();
2072 }
2074 void CMSCollector::releaseFreelistLocks() const {
2075 // Release locks for all free lists in all generations that this
2076 // collector is responsible for
2077 _cmsGen->freelistLock()->unlock();
2078 _permGen->freelistLock()->unlock();
2079 }
2081 bool CMSCollector::haveFreelistLocks() const {
2082 // Check locks for all free lists in all generations that this
2083 // collector is responsible for
2084 assert_lock_strong(_cmsGen->freelistLock());
2085 assert_lock_strong(_permGen->freelistLock());
2086 PRODUCT_ONLY(ShouldNotReachHere());
2087 return true;
2088 }
2090 // A utility class that is used by the CMS collector to
2091 // temporarily "release" the foreground collector from its
2092 // usual obligation to wait for the background collector to
2093 // complete an ongoing phase before proceeding.
2094 class ReleaseForegroundGC: public StackObj {
2095 private:
2096 CMSCollector* _c;
2097 public:
2098 ReleaseForegroundGC(CMSCollector* c) : _c(c) {
2099 assert(_c->_foregroundGCShouldWait, "Else should not need to call");
2100 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2101 // allow a potentially blocked foreground collector to proceed
2102 _c->_foregroundGCShouldWait = false;
2103 if (_c->_foregroundGCIsActive) {
2104 CGC_lock->notify();
2105 }
2106 assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2107 "Possible deadlock");
2108 }
2110 ~ReleaseForegroundGC() {
2111 assert(!_c->_foregroundGCShouldWait, "Usage protocol violation?");
2112 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2113 _c->_foregroundGCShouldWait = true;
2114 }
2115 };
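// Typical use (see collect_in_background below): scope a ReleaseForegroundGC
// around a stop-the-world VM operation so that a blocked foreground
// collector may proceed while the operation is enqueued and executed:
//   {
//     ReleaseForegroundGC x(this);     // clears _foregroundGCShouldWait
//     VM_CMS_Initial_Mark op(this);
//     VMThread::execute(&op);
//   }                                  // destructor re-asserts the flag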
2117 // There are separate collect_in_background and collect_in_foreground because of
2118 // the different locking requirements of the background collector and the
2119 // foreground collector. There was originally an attempt to share
2120 // one "collect" method between the background collector and the foreground
2121 // collector, but the if-then-else logic required made it cleaner to have
2122 // separate methods.
2123 void CMSCollector::collect_in_background(bool clear_all_soft_refs) {
2124 assert(Thread::current()->is_ConcurrentGC_thread(),
2125 "A CMS asynchronous collection is only allowed on a CMS thread.");
2127 GenCollectedHeap* gch = GenCollectedHeap::heap();
2128 {
2129 bool safepoint_check = Mutex::_no_safepoint_check_flag;
2130 MutexLockerEx hl(Heap_lock, safepoint_check);
2131 FreelistLocker fll(this);
2132 MutexLockerEx x(CGC_lock, safepoint_check);
2133 if (_foregroundGCIsActive || !UseAsyncConcMarkSweepGC) {
2134 // The foreground collector is active or we're
2135 // not using asynchronous collections. Skip this
2136 // background collection.
2137 assert(!_foregroundGCShouldWait, "Should be clear");
2138 return;
2139 } else {
2140 assert(_collectorState == Idling, "Should be idling before start.");
2141 _collectorState = InitialMarking;
2142 // Reset the expansion cause, now that we are about to begin
2143 // a new cycle.
2144 clear_expansion_cause();
2145 }
2146 // Decide if we want to enable class unloading as part of the
2147 // ensuing concurrent GC cycle.
2148 update_should_unload_classes();
2149 _full_gc_requested = false; // acks all outstanding full gc requests
2150 // Signal that we are about to start a collection
2151 gch->increment_total_full_collections(); // ... starting a collection cycle
2152 _collection_count_start = gch->total_full_collections();
2153 }
2155 // Used for PrintGC
2156 size_t prev_used;
2157 if (PrintGC && Verbose) {
2158 prev_used = _cmsGen->used(); // XXXPERM
2159 }
2161 // The change of the collection state is normally done at this level;
2162 // the exceptions are phases that are executed while the world is
2163 // stopped. For those phases the change of state is done while the
2164 // world is stopped. For baton passing purposes this allows the
2165 // background collector to finish the phase and change state atomically.
2166 // The foreground collector cannot wait on a phase that is done
2167 // while the world is stopped because the foreground collector already
2168 // has the world stopped and would deadlock.
2169 while (_collectorState != Idling) {
2170 if (TraceCMSState) {
2171 gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
2172 Thread::current(), _collectorState);
2173 }
2174 // The foreground collector
2175 // holds the Heap_lock throughout its collection.
2176 // holds the CMS token (but not the lock)
2177 // except while it is waiting for the background collector to yield.
2178 //
2179 // The foreground collector should be blocked (not for long)
2180 // if the background collector is about to start a phase
2181 // executed with world stopped. If the background
2182 // collector has already started such a phase, the
2183 // foreground collector is blocked waiting for the
2184 // Heap_lock. The stop-world phases (InitialMarking and FinalMarking)
2185 // are executed in the VM thread.
2186 //
2187 // The locking order is
2188 // PendingListLock (PLL) -- if applicable (FinalMarking)
2189 // Heap_lock (both this & PLL locked in VM_CMS_Operation::prologue())
2190 // CMS token (claimed in
2191 // stop_world_and_do() -->
2192 // safepoint_synchronize() -->
2193 // CMSThread::synchronize())
2195 {
2196 // Check if the FG collector wants us to yield.
2197 CMSTokenSync x(true); // is cms thread
2198 if (waitForForegroundGC()) {
2199 // We yielded to a foreground GC, nothing more to be
2200 // done this round.
2201 assert(_foregroundGCShouldWait == false, "We set it to false in "
2202 "waitForForegroundGC()");
2203 if (TraceCMSState) {
2204 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2205 " exiting collection CMS state %d",
2206 Thread::current(), _collectorState);
2207 }
2208 return;
2209 } else {
2210 // The background collector can run but check to see if the
2211 // foreground collector has done a collection while the
2212 // background collector was waiting to get the CGC_lock
2213 // above. If yes, break so that _foregroundGCShouldWait
2214 // is cleared before returning.
2215 if (_collectorState == Idling) {
2216 break;
2217 }
2218 }
2219 }
2221 assert(_foregroundGCShouldWait, "Foreground collector, if active, "
2222 "should be waiting");
2224 switch (_collectorState) {
2225 case InitialMarking:
2226 {
2227 ReleaseForegroundGC x(this);
2228 stats().record_cms_begin();
2230 VM_CMS_Initial_Mark initial_mark_op(this);
2231 VMThread::execute(&initial_mark_op);
2232 }
2233 // The collector state may be any legal state at this point
2234 // since the background collector may have yielded to the
2235 // foreground collector.
2236 break;
2237 case Marking:
2238 // initial marking in checkpointRootsInitialWork has been completed
2239 if (markFromRoots(true)) { // we were successful
2240 assert(_collectorState == Precleaning, "Collector state should "
2241 "have changed");
2242 } else {
2243 assert(_foregroundGCIsActive, "Internal state inconsistency");
2244 }
2245 break;
2246 case Precleaning:
2247 if (UseAdaptiveSizePolicy) {
2248 size_policy()->concurrent_precleaning_begin();
2249 }
2250 // marking from roots in markFromRoots has been completed
2251 preclean();
2252 if (UseAdaptiveSizePolicy) {
2253 size_policy()->concurrent_precleaning_end();
2254 }
2255 assert(_collectorState == AbortablePreclean ||
2256 _collectorState == FinalMarking,
2257 "Collector state should have changed");
2258 break;
2259 case AbortablePreclean:
2260 if (UseAdaptiveSizePolicy) {
2261 size_policy()->concurrent_phases_resume();
2262 }
2263 abortable_preclean();
2264 if (UseAdaptiveSizePolicy) {
2265 size_policy()->concurrent_precleaning_end();
2266 }
2267 assert(_collectorState == FinalMarking, "Collector state should "
2268 "have changed");
2269 break;
2270 case FinalMarking:
2271 {
2272 ReleaseForegroundGC x(this);
2274 VM_CMS_Final_Remark final_remark_op(this);
2275 VMThread::execute(&final_remark_op);
2276 }
2277 assert(_foregroundGCShouldWait, "block post-condition");
2278 break;
2279 case Sweeping:
2280 if (UseAdaptiveSizePolicy) {
2281 size_policy()->concurrent_sweeping_begin();
2282 }
2283 // final marking in checkpointRootsFinal has been completed
2284 sweep(true);
2285 assert(_collectorState == Resizing, "Collector state change "
2286 "to Resizing must be done under the free_list_lock");
2287 _full_gcs_since_conc_gc = 0;
2289 // Stop the timers for adaptive size policy for the concurrent phases
2290 if (UseAdaptiveSizePolicy) {
2291 size_policy()->concurrent_sweeping_end();
2292 size_policy()->concurrent_phases_end(gch->gc_cause(),
2293 gch->prev_gen(_cmsGen)->capacity(),
2294 _cmsGen->free());
2295 }
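// (No break above: after sweeping, control intentionally falls through
// into the Resizing case that follows.)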
2297 case Resizing: {
2298 // Sweeping has been completed...
2299 // At this point the background collection has completed.
2300 // Don't move the call to compute_new_size() down
2301 // into code that might be executed if the background
2302 // collection was preempted.
2303 {
2304 ReleaseForegroundGC x(this); // unblock FG collection
2305 MutexLockerEx y(Heap_lock, Mutex::_no_safepoint_check_flag);
2306 CMSTokenSync z(true); // not strictly needed.
2307 if (_collectorState == Resizing) {
2308 compute_new_size();
2309 _collectorState = Resetting;
2310 } else {
2311 assert(_collectorState == Idling, "The state should only change"
2312 " because the foreground collector has finished the collection");
2313 }
2314 }
2315 break;
2316 }
2317 case Resetting:
2318 // CMS heap resizing has been completed
2319 reset(true);
2320 assert(_collectorState == Idling, "Collector state should "
2321 "have changed");
2322 stats().record_cms_end();
2323 // Don't move the concurrent_phases_end() and compute_new_size()
2324 // calls to here because a preempted background collection
2325 // has its state set to "Resetting".
2326 break;
2327 case Idling:
2328 default:
2329 ShouldNotReachHere();
2330 break;
2331 }
2332 if (TraceCMSState) {
2333 gclog_or_tty->print_cr(" Thread " INTPTR_FORMAT " done - next CMS state %d",
2334 Thread::current(), _collectorState);
2335 }
2336 assert(_foregroundGCShouldWait, "block post-condition");
2337 }
2339 // Should this be in gc_epilogue?
2340 collector_policy()->counters()->update_counters();
2342 {
2343 // Clear _foregroundGCShouldWait and, in the event that the
2344 // foreground collector is waiting, notify it, before
2345 // returning.
2346 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2347 _foregroundGCShouldWait = false;
2348 if (_foregroundGCIsActive) {
2349 CGC_lock->notify();
2350 }
2351 assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2352 "Possible deadlock");
2353 }
2354 if (TraceCMSState) {
2355 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2356 " exiting collection CMS state %d",
2357 Thread::current(), _collectorState);
2358 }
2359 if (PrintGC && Verbose) {
2360 _cmsGen->print_heap_change(prev_used);
2361 }
2362 }
2364 void CMSCollector::collect_in_foreground(bool clear_all_soft_refs) {
2365 assert(_foregroundGCIsActive && !_foregroundGCShouldWait,
2366 "Foreground collector should be waiting, not executing");
2367 assert(Thread::current()->is_VM_thread(), "A foreground collection "
2368 "may only be done by the VM Thread with the world stopped");
2369 assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
2370 "VM thread should have CMS token");
2372 NOT_PRODUCT(TraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose,
2373 true, gclog_or_tty);)
2374 if (UseAdaptiveSizePolicy) {
2375 size_policy()->ms_collection_begin();
2376 }
2377 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
2379 HandleMark hm; // Discard invalid handles created during verification
2381 if (VerifyBeforeGC &&
2382 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2383 Universe::verify(true);
2384 }
2386 bool init_mark_was_synchronous = false; // until proven otherwise
2387 while (_collectorState != Idling) {
2388 if (TraceCMSState) {
2389 gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
2390 Thread::current(), _collectorState);
2391 }
2392 switch (_collectorState) {
2393 case InitialMarking:
2394 init_mark_was_synchronous = true; // fact to be exploited in re-mark
2395 checkpointRootsInitial(false);
2396 assert(_collectorState == Marking, "Collector state should have changed"
2397 " within checkpointRootsInitial()");
2398 break;
2399 case Marking:
2400 // initial marking in checkpointRootsInitialWork has been completed
2401 if (VerifyDuringGC &&
2402 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2403 gclog_or_tty->print("Verify before initial mark: ");
2404 Universe::verify(true);
2405 }
2406 {
2407 bool res = markFromRoots(false);
2408 assert(res && _collectorState == FinalMarking, "Collector state should "
2409 "have changed");
2410 break;
2411 }
2412 case FinalMarking:
2413 if (VerifyDuringGC &&
2414 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2415 gclog_or_tty->print("Verify before re-mark: ");
2416 Universe::verify(true);
2417 }
2418 checkpointRootsFinal(false, clear_all_soft_refs,
2419 init_mark_was_synchronous);
2420 assert(_collectorState == Sweeping, "Collector state should not "
2421 "have changed within checkpointRootsFinal()");
2422 break;
2423 case Sweeping:
2424 // final marking in checkpointRootsFinal has been completed
2425 if (VerifyDuringGC &&
2426 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2427 gclog_or_tty->print("Verify before sweep: ");
2428 Universe::verify(true);
2429 }
2430 sweep(false);
2431 assert(_collectorState == Resizing, "Incorrect state");
2432 break;
2433 case Resizing: {
2434 // Sweeping has been completed; the actual resize in this case
2435 // is done separately; nothing to be done in this state.
2436 _collectorState = Resetting;
2437 break;
2438 }
2439 case Resetting:
2440 // The heap has been resized.
2441 if (VerifyDuringGC &&
2442 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2443 gclog_or_tty->print("Verify before reset: ");
2444 Universe::verify(true);
2445 }
2446 reset(false);
2447 assert(_collectorState == Idling, "Collector state should "
2448 "have changed");
2449 break;
2450 case Precleaning:
2451 case AbortablePreclean:
2452 // Elide the preclean phase
2453 _collectorState = FinalMarking;
2454 break;
2455 default:
2456 ShouldNotReachHere();
2457 }
2458 if (TraceCMSState) {
2459 gclog_or_tty->print_cr(" Thread " INTPTR_FORMAT " done - next CMS state %d",
2460 Thread::current(), _collectorState);
2461 }
2462 }
2464 if (UseAdaptiveSizePolicy) {
2465 GenCollectedHeap* gch = GenCollectedHeap::heap();
2466 size_policy()->ms_collection_end(gch->gc_cause());
2467 }
2469 if (VerifyAfterGC &&
2470 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2471 Universe::verify(true);
2472 }
2473 if (TraceCMSState) {
2474 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2475 " exiting collection CMS state %d",
2476 Thread::current(), _collectorState);
2477 }
2478 }
2480 bool CMSCollector::waitForForegroundGC() {
2481 bool res = false;
2482 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2483 "CMS thread should have CMS token");
2484 // Block the foreground collector until the
2485 // background collector decides whether to
2486 // yield.
2487 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2488 _foregroundGCShouldWait = true;
2489 if (_foregroundGCIsActive) {
2490 // The background collector yields to the
2491 // foreground collector and returns a value
2492 // indicating that it has yielded. The foreground
2493 // collector can proceed.
2494 res = true;
2495 _foregroundGCShouldWait = false;
2496 ConcurrentMarkSweepThread::clear_CMS_flag(
2497 ConcurrentMarkSweepThread::CMS_cms_has_token);
2498 ConcurrentMarkSweepThread::set_CMS_flag(
2499 ConcurrentMarkSweepThread::CMS_cms_wants_token);
2500 // Get a possibly blocked foreground thread going
2501 CGC_lock->notify();
2502 if (TraceCMSState) {
2503 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " waiting at CMS state %d",
2504 Thread::current(), _collectorState);
2505 }
2506 while (_foregroundGCIsActive) {
2507 CGC_lock->wait(Mutex::_no_safepoint_check_flag);
2508 }
2509 ConcurrentMarkSweepThread::set_CMS_flag(
2510 ConcurrentMarkSweepThread::CMS_cms_has_token);
2511 ConcurrentMarkSweepThread::clear_CMS_flag(
2512 ConcurrentMarkSweepThread::CMS_cms_wants_token);
2513 }
2514 if (TraceCMSState) {
2515 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " continuing at CMS state %d",
2516 Thread::current(), _collectorState);
2517 }
2518 return res;
2519 }
2521 // Because of the need to lock the free lists and other structures in
2522 // the collector, common to all the generations that the collector is
2523 // collecting, we need the gc_prologues of individual CMS generations
2524 // to delegate to their collector. It may have been simpler had the
2525 // current infrastructure allowed one to call a prologue on a
2526 // collector. In the absence of that we have the generation's
2527 // prologue delegate to the collector, which delegates back
2528 // some "local" work to a worker method in the individual generations
2529 // that it's responsible for collecting, while itself doing any
2530 // work common to all generations it's responsible for. A similar
2531 // comment applies to the gc_epilogue()'s.
2532 // The role of the variable _between_prologue_and_epilogue is to
2533 // enforce the invocation protocol.
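// Simplified delegation sketch (illustrative, assuming the usual
// GenCollectedHeap invocation path):
//   GenCollectedHeap::gc_prologue()
//     -> ConcurrentMarkSweepGeneration::gc_prologue(full)   // per CMS gen
//       -> CMSCollector::gc_prologue(full)                  // common work, once
//         -> _cmsGen->gc_prologue_work(...)
//         -> _permGen->gc_prologue_work(...)
// Subsequent delegations within the same safepoint return immediately
// because _between_prologue_and_epilogue is already set.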
2534 void CMSCollector::gc_prologue(bool full) {
2535 // Call gc_prologue_work() for each CMSGen and PermGen that
2536 // we are responsible for.
2538 // The following locking discipline assumes that we are only called
2539 // when the world is stopped.
2540 assert(SafepointSynchronize::is_at_safepoint(), "world is stopped assumption");
2542 // The CMSCollector prologue must call the gc_prologues for the
2543 // "generations" (including PermGen if any) that it's responsible
2544 // for.
2546 assert( Thread::current()->is_VM_thread()
2547 || ( CMSScavengeBeforeRemark
2548 && Thread::current()->is_ConcurrentGC_thread()),
2549 "Incorrect thread type for prologue execution");
2551 if (_between_prologue_and_epilogue) {
2552 // We have already been invoked; this is a gc_prologue delegation
2553 // from yet another CMS generation that we are responsible for, just
2554 // ignore it since all relevant work has already been done.
2555 return;
2556 }
2558 // set a bit saying prologue has been called; cleared in epilogue
2559 _between_prologue_and_epilogue = true;
2560 // Claim locks for common data structures, then call gc_prologue_work()
2561 // for each CMSGen and PermGen that we are responsible for.
2563 getFreelistLocks(); // gets free list locks on constituent spaces
2564 bitMapLock()->lock_without_safepoint_check();
2566 // Should call gc_prologue_work() for all cms gens we are responsible for
2567 bool registerClosure = _collectorState >= Marking
2568 && _collectorState < Sweeping;
2569 ModUnionClosure* muc = ParallelGCThreads > 0 ? &_modUnionClosurePar
2570 : &_modUnionClosure;
2571 _cmsGen->gc_prologue_work(full, registerClosure, muc);
2572 _permGen->gc_prologue_work(full, registerClosure, muc);
2574 if (!full) {
2575 stats().record_gc0_begin();
2576 }
2577 }
2579 void ConcurrentMarkSweepGeneration::gc_prologue(bool full) {
2580 // Delegate to CMScollector which knows how to coordinate between
2581 // this and any other CMS generations that it is responsible for
2582 // collecting.
2583 collector()->gc_prologue(full);
2584 }
2586 // This is a "private" interface for use by this generation's CMSCollector.
2587 // Not to be called directly by any other entity (for instance,
2588 // GenCollectedHeap, which calls the "public" gc_prologue method above).
2589 void ConcurrentMarkSweepGeneration::gc_prologue_work(bool full,
2590 bool registerClosure, ModUnionClosure* modUnionClosure) {
2591 assert(!incremental_collection_failed(), "Shouldn't be set yet");
2592 assert(cmsSpace()->preconsumptionDirtyCardClosure() == NULL,
2593 "Should be NULL");
2594 if (registerClosure) {
2595 cmsSpace()->setPreconsumptionDirtyCardClosure(modUnionClosure);
2596 }
2597 cmsSpace()->gc_prologue();
2598 // Clear stat counters
2599 NOT_PRODUCT(
2600 assert(_numObjectsPromoted == 0, "check");
2601 assert(_numWordsPromoted == 0, "check");
2602 if (Verbose && PrintGC) {
2603 gclog_or_tty->print("Allocated "SIZE_FORMAT" objects, "
2604 SIZE_FORMAT" bytes concurrently",
2605 _numObjectsAllocated, _numWordsAllocated*sizeof(HeapWord));
2606 }
2607 _numObjectsAllocated = 0;
2608 _numWordsAllocated = 0;
2609 )
2610 }
2612 void CMSCollector::gc_epilogue(bool full) {
2613 // The following locking discipline assumes that we are only called
2614 // when the world is stopped.
2615 assert(SafepointSynchronize::is_at_safepoint(),
2616 "world is stopped assumption");
2618 // Currently the CMS epilogue (see CompactibleFreeListSpace) merely checks
2619 // if linear allocation blocks need to be appropriately marked to allow
2620 // the blocks to be parsable. We also check here whether we need to nudge the
2621 // CMS collector thread to start a new cycle (if it's not already active).
2622 assert( Thread::current()->is_VM_thread()
2623 || ( CMSScavengeBeforeRemark
2624 && Thread::current()->is_ConcurrentGC_thread()),
2625 "Incorrect thread type for epilogue execution");
2627 if (!_between_prologue_and_epilogue) {
2628 // We have already been invoked; this is a gc_epilogue delegation
2629 // from yet another CMS generation that we are responsible for, just
2630 // ignore it since all relevant work has already been done.
2631 return;
2632 }
2633 assert(haveFreelistLocks(), "must have freelist locks");
2634 assert_lock_strong(bitMapLock());
2636 _cmsGen->gc_epilogue_work(full);
2637 _permGen->gc_epilogue_work(full);
2639 if (_collectorState == AbortablePreclean || _collectorState == Precleaning) {
2640 // in case sampling was not already enabled, enable it
2641 _start_sampling = true;
2642 }
2643 // reset _eden_chunk_array so sampling starts afresh
2644 _eden_chunk_index = 0;
2646 size_t cms_used = _cmsGen->cmsSpace()->used();
2647 size_t perm_used = _permGen->cmsSpace()->used();
2649 // update performance counters - this uses a special version of
2650 // update_counters() that allows the utilization to be passed as a
2651 // parameter, avoiding multiple calls to used().
2652 //
2653 _cmsGen->update_counters(cms_used);
2654 _permGen->update_counters(perm_used);
2656 if (CMSIncrementalMode) {
2657 icms_update_allocation_limits();
2658 }
2660 bitMapLock()->unlock();
2661 releaseFreelistLocks();
2663 _between_prologue_and_epilogue = false; // ready for next cycle
2664 }
2666 void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) {
2667 collector()->gc_epilogue(full);
2669 // Also reset promotion tracking in par gc thread states.
2670 if (ParallelGCThreads > 0) {
2671 for (uint i = 0; i < ParallelGCThreads; i++) {
2672 _par_gc_thread_states[i]->promo.stopTrackingPromotions();
2673 }
2674 }
2675 }
2677 void ConcurrentMarkSweepGeneration::gc_epilogue_work(bool full) {
2678 assert(!incremental_collection_failed(), "Should have been cleared");
2679 cmsSpace()->setPreconsumptionDirtyCardClosure(NULL);
2680 cmsSpace()->gc_epilogue();
2681 // Print stat counters
2682 NOT_PRODUCT(
2683 assert(_numObjectsAllocated == 0, "check");
2684 assert(_numWordsAllocated == 0, "check");
2685 if (Verbose && PrintGC) {
2686 gclog_or_tty->print("Promoted "SIZE_FORMAT" objects, "
2687 SIZE_FORMAT" bytes",
2688 _numObjectsPromoted, _numWordsPromoted*sizeof(HeapWord));
2689 }
2690 _numObjectsPromoted = 0;
2691 _numWordsPromoted = 0;
2692 )
2694 if (PrintGC && Verbose) {
2695 // The call down the chain in contiguous_available() needs the freelistLock,
2696 // so print this out before releasing the freelistLock.
2697 gclog_or_tty->print(" Contiguous available "SIZE_FORMAT" bytes ",
2698 contiguous_available());
2699 }
2700 }
2702 #ifndef PRODUCT
2703 bool CMSCollector::have_cms_token() {
2704 Thread* thr = Thread::current();
2705 if (thr->is_VM_thread()) {
2706 return ConcurrentMarkSweepThread::vm_thread_has_cms_token();
2707 } else if (thr->is_ConcurrentGC_thread()) {
2708 return ConcurrentMarkSweepThread::cms_thread_has_cms_token();
2709 } else if (thr->is_GC_task_thread()) {
2710 return ConcurrentMarkSweepThread::vm_thread_has_cms_token() &&
2711 ParGCRareEvent_lock->owned_by_self();
2712 }
2713 return false;
2714 }
2715 #endif
2717 // Check reachability of the given heap address in CMS generation,
2718 // treating all other generations as roots.
2719 bool CMSCollector::is_cms_reachable(HeapWord* addr) {
2720 // We could "guarantee" below, rather than assert, but I'll
2721 // leave these as "asserts" so that an adventurous debugger
2722 // could try this in the product build provided some subset of
2723 // the conditions were met, provided they were interested in the
2724 // results and knew that the computation below wouldn't interfere
2725 // with other concurrent computations mutating the structures
2726 // being read or written.
2727 assert(SafepointSynchronize::is_at_safepoint(),
2728 "Else mutations in object graph will make answer suspect");
2729 assert(have_cms_token(), "Should hold cms token");
2730 assert(haveFreelistLocks(), "must hold free list locks");
2731 assert_lock_strong(bitMapLock());
2733 // Clear the marking bit map array before starting, but, just
2734 // for kicks, first report if the given address is already marked
2735 gclog_or_tty->print_cr("Start: Address 0x%x is%s marked", addr,
2736 _markBitMap.isMarked(addr) ? "" : " not");
2738 if (verify_after_remark()) {
2739 MutexLockerEx x(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2740 bool result = verification_mark_bm()->isMarked(addr);
2741 gclog_or_tty->print_cr("TransitiveMark: Address 0x%x %s marked", addr,
2742 result ? "IS" : "is NOT");
2743 return result;
2744 } else {
2745 gclog_or_tty->print_cr("Could not compute result");
2746 return false;
2747 }
2748 }
2750 ////////////////////////////////////////////////////////
2751 // CMS Verification Support
2752 ////////////////////////////////////////////////////////
2753 // Following the remark phase, the following invariant
2754 // should hold -- each object in the CMS heap which is
2755 // marked in the verification_mark_bm() should also be marked in markBitMap().
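// Equivalently, as a predicate (an illustrative restatement): for every
// address a in the span,
//   verification_mark_bm()->isMarked(a) implies markBitMap()->isMarked(a)
// -- the CMS bit map may legitimately contain additional bits for
// floating garbage.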
2757 class VerifyMarkedClosure: public BitMapClosure {
2758 CMSBitMap* _marks;
2759 bool _failed;
2761 public:
2762 VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {}
2764 void do_bit(size_t offset) {
2765 HeapWord* addr = _marks->offsetToHeapWord(offset);
2766 if (!_marks->isMarked(addr)) {
2767 oop(addr)->print();
2768 gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr);
2769 _failed = true;
2770 }
2771 }
2773 bool failed() { return _failed; }
2774 };
2776 bool CMSCollector::verify_after_remark() {
2777 gclog_or_tty->print(" [Verifying CMS Marking... ");
2778 MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2779 static bool init = false;
2781 assert(SafepointSynchronize::is_at_safepoint(),
2782 "Else mutations in object graph will make answer suspect");
2783 assert(have_cms_token(),
2784 "Else there may be mutual interference in use of "
2785 " verification data structures");
2786 assert(_collectorState > Marking && _collectorState <= Sweeping,
2787 "Else marking info checked here may be obsolete");
2788 assert(haveFreelistLocks(), "must hold free list locks");
2789 assert_lock_strong(bitMapLock());
2792 // Allocate marking bit map if not already allocated
2793 if (!init) { // first time
2794 if (!verification_mark_bm()->allocate(_span)) {
2795 return false;
2796 }
2797 init = true;
2798 }
2800 assert(verification_mark_stack()->isEmpty(), "Should be empty");
2802 // Turn off refs discovery -- so we will be tracing through refs.
2803 // This is as intended, because by this time
2804 // GC must already have cleared any refs that need to be cleared,
2805 // and traced those that need to be marked; moreover,
2806 // the marking done here is not going to interfere in any
2807 // way with the marking information used by GC.
2808 NoRefDiscovery no_discovery(ref_processor());
2810 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
2812 // Clear any marks from a previous round
2813 verification_mark_bm()->clear_all();
2814 assert(verification_mark_stack()->isEmpty(), "markStack should be empty");
2815 assert(overflow_list_is_empty(), "overflow list should be empty");
2817 GenCollectedHeap* gch = GenCollectedHeap::heap();
2818 gch->ensure_parsability(false); // fill TLABs, but no need to retire them
2819 // Update the saved marks which may affect the root scans.
2820 gch->save_marks();
2822 if (CMSRemarkVerifyVariant == 1) {
2823 // In this first variant of verification, we complete
2824 // all marking, then check if the new marks-vector is
2825 // a subset of the CMS marks-vector.
2826 verify_after_remark_work_1();
2827 } else if (CMSRemarkVerifyVariant == 2) {
2828 // In this second variant of verification, we flag an error
2829 // (i.e. an object reachable in the new marks-vector not reachable
2830 // in the CMS marks-vector) immediately, also indicating the
2831 // identity of an object (A) that references the unmarked object (B) --
2832 // presumably, a mutation to A failed to be picked up by preclean/remark?
2833 verify_after_remark_work_2();
2834 } else {
2835 warning("Unrecognized value %d for CMSRemarkVerifyVariant",
2836 CMSRemarkVerifyVariant);
2837 }
2838 gclog_or_tty->print(" done] ");
2839 return true;
2840 }
2842 void CMSCollector::verify_after_remark_work_1() {
2843 ResourceMark rm;
2844 HandleMark hm;
2845 GenCollectedHeap* gch = GenCollectedHeap::heap();
2847 // Mark from roots one level into CMS
2848 MarkRefsIntoClosure notOlder(_span, verification_mark_bm(), true /* nmethods */);
2849 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2851 gch->gen_process_strong_roots(_cmsGen->level(),
2852 true, // younger gens are roots
2853 true, // collecting perm gen
2854 SharedHeap::ScanningOption(roots_scanning_options()),
2855 NULL, ¬Older);
2857 // Now mark from the roots
2858 assert(_revisitStack.isEmpty(), "Should be empty");
2859 MarkFromRootsClosure markFromRootsClosure(this, _span,
2860 verification_mark_bm(), verification_mark_stack(), &_revisitStack,
2861 false /* don't yield */, true /* verifying */);
2862 assert(_restart_addr == NULL, "Expected pre-condition");
2863 verification_mark_bm()->iterate(&markFromRootsClosure);
2864 while (_restart_addr != NULL) {
2865 // Deal with stack overflow by restarting at the indicated
2866 // address.
2867 HeapWord* ra = _restart_addr;
2868 markFromRootsClosure.reset(ra);
2869 _restart_addr = NULL;
2870 verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2871 }
2872 assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2873 verify_work_stacks_empty();
2874 // Should reset the revisit stack above, since no class tree
2875 // surgery is forthcoming.
2876 _revisitStack.reset(); // throwing away all contents
2878 // Marking completed -- now verify that each bit marked in
2879 // verification_mark_bm() is also marked in markBitMap(); flag all
2880 // errors by printing corresponding objects.
2881 VerifyMarkedClosure vcl(markBitMap());
2882 verification_mark_bm()->iterate(&vcl);
2883 if (vcl.failed()) {
2884 gclog_or_tty->print("Verification failed");
2885 Universe::heap()->print();
2886 fatal(" ... aborting");
2887 }
2888 }
2890 void CMSCollector::verify_after_remark_work_2() {
2891 ResourceMark rm;
2892 HandleMark hm;
2893 GenCollectedHeap* gch = GenCollectedHeap::heap();
2895 // Mark from roots one level into CMS
2896 MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
2897 markBitMap(), true /* nmethods */);
2898 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2899 gch->gen_process_strong_roots(_cmsGen->level(),
2900 true, // younger gens are roots
2901 true, // collecting perm gen
2902 SharedHeap::ScanningOption(roots_scanning_options()),
2903                                 NULL, &notOlder);
2905 // Now mark from the roots
2906 assert(_revisitStack.isEmpty(), "Should be empty");
2907 MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
2908 verification_mark_bm(), markBitMap(), verification_mark_stack());
2909 assert(_restart_addr == NULL, "Expected pre-condition");
2910 verification_mark_bm()->iterate(&markFromRootsClosure);
2911 while (_restart_addr != NULL) {
2912     // Deal with stack overflow by restarting at the
2913     // indicated address.
2914 HeapWord* ra = _restart_addr;
2915 markFromRootsClosure.reset(ra);
2916 _restart_addr = NULL;
2917 verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2918 }
2919 assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2920 verify_work_stacks_empty();
2921   // Reset the revisit stack, since no class tree
2922   // surgery is forthcoming.
2923 _revisitStack.reset(); // throwing away all contents
2925 // Marking completed -- now verify that each bit marked in
2926 // verification_mark_bm() is also marked in markBitMap(); flag all
2927 // errors by printing corresponding objects.
2928 VerifyMarkedClosure vcl(markBitMap());
2929 verification_mark_bm()->iterate(&vcl);
2930 assert(!vcl.failed(), "Else verification above should not have succeeded");
2931 }
2933 void ConcurrentMarkSweepGeneration::save_marks() {
2934 // delegate to CMS space
2935 cmsSpace()->save_marks();
2936 for (uint i = 0; i < ParallelGCThreads; i++) {
2937 _par_gc_thread_states[i]->promo.startTrackingPromotions();
2938 }
2939 }
2941 bool ConcurrentMarkSweepGeneration::no_allocs_since_save_marks() {
2942 return cmsSpace()->no_allocs_since_save_marks();
2943 }
2945 #define CMS_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
2946 \
2947 void ConcurrentMarkSweepGeneration:: \
2948 oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
2949 cl->set_generation(this); \
2950 cmsSpace()->oop_since_save_marks_iterate##nv_suffix(cl); \
2951 cl->reset_generation(); \
2952 save_marks(); \
2953 }
2955 ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DEFN)
2957 void
2958 ConcurrentMarkSweepGeneration::object_iterate_since_last_GC(ObjectClosure* blk)
2959 {
2960 // Not currently implemented; need to do the following. -- ysr.
2961 // dld -- I think that is used for some sort of allocation profiler. So it
2962 // really means the objects allocated by the mutator since the last
2963 // GC. We could potentially implement this cheaply by recording only
2964 // the direct allocations in a side data structure.
2965 //
2966 // I think we probably ought not to be required to support these
2967 // iterations at any arbitrary point; I think there ought to be some
2968 // call to enable/disable allocation profiling in a generation/space,
2969 // and the iterator ought to return the objects allocated in the
2970 // gen/space since the enable call, or the last iterator call (which
2971 // will probably be at a GC.) That way, for gens like CM&S that would
2972 // require some extra data structure to support this, we only pay the
2973 // cost when it's in use...
2974 cmsSpace()->object_iterate_since_last_GC(blk);
2975 }
2977 void
2978 ConcurrentMarkSweepGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
2979 cl->set_generation(this);
2980 younger_refs_in_space_iterate(_cmsSpace, cl);
2981 cl->reset_generation();
2982 }
2984 void
2985 ConcurrentMarkSweepGeneration::oop_iterate(MemRegion mr, OopClosure* cl) {
2986 if (freelistLock()->owned_by_self()) {
2987 Generation::oop_iterate(mr, cl);
2988 } else {
2989 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2990 Generation::oop_iterate(mr, cl);
2991 }
2992 }
2994 void
2995 ConcurrentMarkSweepGeneration::oop_iterate(OopClosure* cl) {
2996 if (freelistLock()->owned_by_self()) {
2997 Generation::oop_iterate(cl);
2998 } else {
2999 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3000 Generation::oop_iterate(cl);
3001 }
3002 }
3004 void
3005 ConcurrentMarkSweepGeneration::object_iterate(ObjectClosure* cl) {
3006 if (freelistLock()->owned_by_self()) {
3007 Generation::object_iterate(cl);
3008 } else {
3009 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3010 Generation::object_iterate(cl);
3011 }
3012 }
3014 void
3015 ConcurrentMarkSweepGeneration::pre_adjust_pointers() {
3016 }
3018 void
3019 ConcurrentMarkSweepGeneration::post_compact() {
3020 }
3022 void
3023 ConcurrentMarkSweepGeneration::prepare_for_verify() {
3024 // Fix the linear allocation blocks to look like free blocks.
3026 // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
3027 // are not called when the heap is verified during universe initialization and
3028 // at vm shutdown.
3029 if (freelistLock()->owned_by_self()) {
3030 cmsSpace()->prepare_for_verify();
3031 } else {
3032 MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
3033 cmsSpace()->prepare_for_verify();
3034 }
3035 }
3037 void
3038 ConcurrentMarkSweepGeneration::verify(bool allow_dirty /* ignored */) {
3039 // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
3040 // are not called when the heap is verified during universe initialization and
3041 // at vm shutdown.
3042 if (freelistLock()->owned_by_self()) {
3043 cmsSpace()->verify(false /* ignored */);
3044 } else {
3045 MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
3046 cmsSpace()->verify(false /* ignored */);
3047 }
3048 }
3050 void CMSCollector::verify(bool allow_dirty /* ignored */) {
3051 _cmsGen->verify(allow_dirty);
3052 _permGen->verify(allow_dirty);
3053 }
3055 #ifndef PRODUCT
3056 bool CMSCollector::overflow_list_is_empty() const {
3057 assert(_num_par_pushes >= 0, "Inconsistency");
3058 if (_overflow_list == NULL) {
3059 assert(_num_par_pushes == 0, "Inconsistency");
3060 }
3061 return _overflow_list == NULL;
3062 }
3064 // The methods verify_work_stacks_empty() and verify_overflow_empty()
3065 // merely consolidate assertion checks that appear to occur together frequently.
3066 void CMSCollector::verify_work_stacks_empty() const {
3067 assert(_markStack.isEmpty(), "Marking stack should be empty");
3068 assert(overflow_list_is_empty(), "Overflow list should be empty");
3069 }
3071 void CMSCollector::verify_overflow_empty() const {
3072 assert(overflow_list_is_empty(), "Overflow list should be empty");
3073 assert(no_preserved_marks(), "No preserved marks");
3074 }
3075 #endif // PRODUCT
3077 // Decide if we want to enable class unloading as part of the
3078 // ensuing concurrent GC cycle. We will collect the perm gen and
3079 // unload classes if it's the case that:
3080 // (1) an explicit gc request has been made and the flag
3081 // ExplicitGCInvokesConcurrentAndUnloadsClasses is set, OR
3082 // (2) (a) class unloading is enabled at the command line, and
3083 // (b) (i) perm gen threshold has been crossed, or
3084 // (ii) old gen is getting really full, or
3085 // (iii) the previous N CMS collections did not collect the
3086 // perm gen
3087 // NOTE: Provided there is no change in the state of the heap between
3088 // calls to this method, it should have idempotent results. Moreover,
3089 // its results should be monotonically increasing (i.e. going from 0 to 1,
3090 // but not 1 to 0) between successive calls between which the heap was
3091 // not collected. For the implementation below, it must thus rely on
3092 // the property that concurrent_cycles_since_last_unload()
3093 // will not decrease unless a collection cycle happened and that
3094 // _permGen->should_concurrent_collect() and _cmsGen->is_too_full() are
3095 // themselves also monotonic in that sense. See check_monotonicity()
3096 // below.
3097 bool CMSCollector::update_should_unload_classes() {
3098 _should_unload_classes = false;
3099 // Condition 1 above
3100 if (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) {
3101 _should_unload_classes = true;
3102 } else if (CMSClassUnloadingEnabled) { // Condition 2.a above
3103 // Disjuncts 2.b.(i,ii,iii) above
3104 _should_unload_classes = (concurrent_cycles_since_last_unload() >=
3105 CMSClassUnloadingMaxInterval)
3106 || _permGen->should_concurrent_collect()
3107 || _cmsGen->is_too_full();
3108 }
3109 return _should_unload_classes;
3110 }
3112 bool ConcurrentMarkSweepGeneration::is_too_full() const {
3113 bool res = should_concurrent_collect();
3114 res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0);
3115 return res;
3116 }
3118 void CMSCollector::setup_cms_unloading_and_verification_state() {
3119 const bool should_verify = VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
3120 || VerifyBeforeExit;
3121 const int rso = SharedHeap::SO_Symbols | SharedHeap::SO_Strings
3122 | SharedHeap::SO_CodeCache;
3124 if (should_unload_classes()) { // Should unload classes this cycle
3125 remove_root_scanning_option(rso); // Shrink the root set appropriately
3126 set_verifying(should_verify); // Set verification state for this cycle
3127 return; // Nothing else needs to be done at this time
3128 }
3130 // Not unloading classes this cycle
3131   assert(!should_unload_classes(), "Inconsistency!");
3132 if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
3133 // We were not verifying, or we _were_ unloading classes in the last cycle,
3134 // AND some verification options are enabled this cycle; in this case,
3135 // we must make sure that the deadness map is allocated if not already so,
3136 // and cleared (if already allocated previously --
3137 // CMSBitMap::sizeInBits() is used to determine if it's allocated).
3138 if (perm_gen_verify_bit_map()->sizeInBits() == 0) {
3139 if (!perm_gen_verify_bit_map()->allocate(_permGen->reserved())) {
3140 warning("Failed to allocate permanent generation verification CMS Bit Map;\n"
3141 "permanent generation verification disabled");
3142 return; // Note that we leave verification disabled, so we'll retry this
3143 // allocation next cycle. We _could_ remember this failure
3144 // and skip further attempts and permanently disable verification
3145 // attempts if that is considered more desirable.
3146 }
3147 assert(perm_gen_verify_bit_map()->covers(_permGen->reserved()),
3148 "_perm_gen_ver_bit_map inconsistency?");
3149 } else {
3150 perm_gen_verify_bit_map()->clear_all();
3151 }
3152 // Include symbols, strings and code cache elements to prevent their resurrection.
3153 add_root_scanning_option(rso);
3154 set_verifying(true);
3155 } else if (verifying() && !should_verify) {
3156 // We were verifying, but some verification flags got disabled.
3157 set_verifying(false);
3158 // Exclude symbols, strings and code cache elements from root scanning to
3159     // reduce initial mark (IM) and remark (RM) pauses.
3160 remove_root_scanning_option(rso);
3161 }
3162 }
3165 #ifndef PRODUCT
3166 HeapWord* CMSCollector::block_start(const void* p) const {
3167 const HeapWord* addr = (HeapWord*)p;
3168 if (_span.contains(p)) {
3169 if (_cmsGen->cmsSpace()->is_in_reserved(addr)) {
3170 return _cmsGen->cmsSpace()->block_start(p);
3171 } else {
3172 assert(_permGen->cmsSpace()->is_in_reserved(addr),
3173 "Inconsistent _span?");
3174 return _permGen->cmsSpace()->block_start(p);
3175 }
3176 }
3177 return NULL;
3178 }
3179 #endif
3181 HeapWord*
3182 ConcurrentMarkSweepGeneration::expand_and_allocate(size_t word_size,
3183 bool tlab,
3184 bool parallel) {
3185 assert(!tlab, "Can't deal with TLAB allocation");
3186 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3187 expand(word_size*HeapWordSize, MinHeapDeltaBytes,
3188 CMSExpansionCause::_satisfy_allocation);
3189 if (GCExpandToAllocateDelayMillis > 0) {
3190 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3191 }
3192 return have_lock_and_allocate(word_size, tlab);
3193 }
3195 // YSR: All of this generation expansion/shrinking stuff is an exact copy of
3196 // OneContigSpaceCardGeneration, which makes me wonder if we should move this
3197 // to CardGeneration and share it...
3198 bool ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes) {
3199 return CardGeneration::expand(bytes, expand_bytes);
3200 }
3202 void ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes,
3203 CMSExpansionCause::Cause cause)
3204 {
3206 bool success = expand(bytes, expand_bytes);
3208 // remember why we expanded; this information is used
3209 // by shouldConcurrentCollect() when making decisions on whether to start
3210 // a new CMS cycle.
3211 if (success) {
3212 set_expansion_cause(cause);
3213 if (PrintGCDetails && Verbose) {
3214 gclog_or_tty->print_cr("Expanded CMS gen for %s",
3215 CMSExpansionCause::to_string(cause));
3216 }
3217 }
3218 }
3220 HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) {
3221 HeapWord* res = NULL;
3222 MutexLocker x(ParGCRareEvent_lock);
3223 while (true) {
3224 // Expansion by some other thread might make alloc OK now:
3225 res = ps->lab.alloc(word_sz);
3226 if (res != NULL) return res;
3227 // If there's not enough expansion space available, give up.
3228 if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) {
3229 return NULL;
3230 }
3231 // Otherwise, we try expansion.
3232 expand(word_sz*HeapWordSize, MinHeapDeltaBytes,
3233 CMSExpansionCause::_allocate_par_lab);
3234 // Now go around the loop and try alloc again;
3235 // A competing par_promote might beat us to the expansion space,
3236     // so we may go around the loop again if allocation fails again.
3237 if (GCExpandToAllocateDelayMillis > 0) {
3238 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3239 }
3240 }
3241 }
3244 bool ConcurrentMarkSweepGeneration::expand_and_ensure_spooling_space(
3245 PromotionInfo* promo) {
3246 MutexLocker x(ParGCRareEvent_lock);
3247 size_t refill_size_bytes = promo->refillSize() * HeapWordSize;
3248 while (true) {
3249 // Expansion by some other thread might make alloc OK now:
3250 if (promo->ensure_spooling_space()) {
3251 assert(promo->has_spooling_space(),
3252 "Post-condition of successful ensure_spooling_space()");
3253 return true;
3254 }
3255 // If there's not enough expansion space available, give up.
3256 if (_virtual_space.uncommitted_size() < refill_size_bytes) {
3257 return false;
3258 }
3259 // Otherwise, we try expansion.
3260 expand(refill_size_bytes, MinHeapDeltaBytes,
3261 CMSExpansionCause::_allocate_par_spooling_space);
3262 // Now go around the loop and try alloc again;
3263 // A competing allocation might beat us to the expansion space,
3264 // so we may go around the loop again if allocation fails again.
3265 if (GCExpandToAllocateDelayMillis > 0) {
3266 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3267 }
3268 }
3269 }
3273 void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
3274 assert_locked_or_safepoint(Heap_lock);
3275 size_t size = ReservedSpace::page_align_size_down(bytes);
3276 if (size > 0) {
3277 shrink_by(size);
3278 }
3279 }
3281 bool ConcurrentMarkSweepGeneration::grow_by(size_t bytes) {
3282 assert_locked_or_safepoint(Heap_lock);
3283 bool result = _virtual_space.expand_by(bytes);
3284 if (result) {
3285 HeapWord* old_end = _cmsSpace->end();
3286 size_t new_word_size =
3287 heap_word_size(_virtual_space.committed_size());
3288 MemRegion mr(_cmsSpace->bottom(), new_word_size);
3289 _bts->resize(new_word_size); // resize the block offset shared array
3290 Universe::heap()->barrier_set()->resize_covered_region(mr);
3291 // Hmmmm... why doesn't CFLS::set_end verify locking?
3292 // This is quite ugly; FIX ME XXX
3293 _cmsSpace->assert_locked();
3294 _cmsSpace->set_end((HeapWord*)_virtual_space.high());
3296 // update the space and generation capacity counters
3297 if (UsePerfData) {
3298 _space_counters->update_capacity();
3299 _gen_counters->update_all();
3300 }
3302 if (Verbose && PrintGC) {
3303 size_t new_mem_size = _virtual_space.committed_size();
3304 size_t old_mem_size = new_mem_size - bytes;
3305 gclog_or_tty->print_cr("Expanding %s from %ldK by %ldK to %ldK",
3306 name(), old_mem_size/K, bytes/K, new_mem_size/K);
3307 }
3308 }
3309 return result;
3310 }
3312 bool ConcurrentMarkSweepGeneration::grow_to_reserved() {
3313 assert_locked_or_safepoint(Heap_lock);
3314 bool success = true;
3315 const size_t remaining_bytes = _virtual_space.uncommitted_size();
3316 if (remaining_bytes > 0) {
3317 success = grow_by(remaining_bytes);
3318 DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
3319 }
3320 return success;
3321 }
3323 void ConcurrentMarkSweepGeneration::shrink_by(size_t bytes) {
3324 assert_locked_or_safepoint(Heap_lock);
3325 assert_lock_strong(freelistLock());
3326 // XXX Fix when compaction is implemented.
3327 warning("Shrinking of CMS not yet implemented");
3328 return;
3329 }
3332 // Simple ctor/dtor wrapper for accounting & timer chores around concurrent
3333 // phases.
3334 class CMSPhaseAccounting: public StackObj {
3335 public:
3336 CMSPhaseAccounting(CMSCollector *collector,
3337 const char *phase,
3338 bool print_cr = true);
3339 ~CMSPhaseAccounting();
3341 private:
3342 CMSCollector *_collector;
3343 const char *_phase;
3344 elapsedTimer _wallclock;
3345 bool _print_cr;
3347 public:
3348 // Not MT-safe; so do not pass around these StackObj's
3349 // where they may be accessed by other threads.
3350 jlong wallclock_millis() {
3351 assert(_wallclock.is_active(), "Wall clock should not stop");
3352 _wallclock.stop(); // to record time
3353 jlong ret = _wallclock.milliseconds();
3354 _wallclock.start(); // restart
3355 return ret;
3356 }
3357 };
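
// A typical scoped use, as in markFromRoots() and preclean() below:
// the constructor prints the "-start" banner and starts the CMS and
// wallclock timers, and the destructor prints the accumulated
// CPU/wallclock times when the phase's scope exits:
//
//   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
//   CMSPhaseAccounting pa(this, "mark", !PrintGCDetails);
//   ... do the concurrent work for the phase ...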
3359 CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
3360 const char *phase,
3361 bool print_cr) :
3362 _collector(collector), _phase(phase), _print_cr(print_cr) {
3364 if (PrintCMSStatistics != 0) {
3365 _collector->resetYields();
3366 }
3367 if (PrintGCDetails && PrintGCTimeStamps) {
3368 gclog_or_tty->date_stamp(PrintGCDateStamps);
3369 gclog_or_tty->stamp();
3370 gclog_or_tty->print_cr(": [%s-concurrent-%s-start]",
3371 _collector->cmsGen()->short_name(), _phase);
3372 }
3373 _collector->resetTimer();
3374 _wallclock.start();
3375 _collector->startTimer();
3376 }
3378 CMSPhaseAccounting::~CMSPhaseAccounting() {
3379 assert(_wallclock.is_active(), "Wall clock should not have stopped");
3380 _collector->stopTimer();
3381 _wallclock.stop();
3382 if (PrintGCDetails) {
3383 gclog_or_tty->date_stamp(PrintGCDateStamps);
3384 if (PrintGCTimeStamps) {
3385 gclog_or_tty->stamp();
3386 gclog_or_tty->print(": ");
3387 }
3388 gclog_or_tty->print("[%s-concurrent-%s: %3.3f/%3.3f secs]",
3389 _collector->cmsGen()->short_name(),
3390 _phase, _collector->timerValue(), _wallclock.seconds());
3391 if (_print_cr) {
3392 gclog_or_tty->print_cr("");
3393 }
3394 if (PrintCMSStatistics != 0) {
3395 gclog_or_tty->print_cr(" (CMS-concurrent-%s yielded %d times)", _phase,
3396 _collector->yields());
3397 }
3398 }
3399 }
3401 // CMS work
3403 // Checkpoint the roots into this generation from outside
3404 // this generation. [Note this initial checkpoint need only
3405 // be approximate -- we'll do a catch up phase subsequently.]
3406 void CMSCollector::checkpointRootsInitial(bool asynch) {
3407 assert(_collectorState == InitialMarking, "Wrong collector state");
3408 check_correct_thread_executing();
3409 ReferenceProcessor* rp = ref_processor();
3410 SpecializationStats::clear();
3411 assert(_restart_addr == NULL, "Control point invariant");
3412 if (asynch) {
3413 // acquire locks for subsequent manipulations
3414 MutexLockerEx x(bitMapLock(),
3415 Mutex::_no_safepoint_check_flag);
3416 checkpointRootsInitialWork(asynch);
3417 rp->verify_no_references_recorded();
3418 rp->enable_discovery(); // enable ("weak") refs discovery
3419 _collectorState = Marking;
3420 } else {
3421 // (Weak) Refs discovery: this is controlled from genCollectedHeap::do_collection
3422 // which recognizes if we are a CMS generation, and doesn't try to turn on
3423 // discovery; verify that they aren't meddling.
3424 assert(!rp->discovery_is_atomic(),
3425 "incorrect setting of discovery predicate");
3426 assert(!rp->discovery_enabled(), "genCollectedHeap shouldn't control "
3427 "ref discovery for this generation kind");
3428 // already have locks
3429 checkpointRootsInitialWork(asynch);
3430 rp->enable_discovery(); // now enable ("weak") refs discovery
3431 _collectorState = Marking;
3432 }
3433 SpecializationStats::print();
3434 }
3436 void CMSCollector::checkpointRootsInitialWork(bool asynch) {
3437 assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
3438 assert(_collectorState == InitialMarking, "just checking");
3440 // If there has not been a GC[n-1] since last GC[n] cycle completed,
3441 // precede our marking with a collection of all
3442 // younger generations to keep floating garbage to a minimum.
3443 // XXX: we won't do this for now -- it's an optimization to be done later.
3445 // already have locks
3446 assert_lock_strong(bitMapLock());
3447 assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");
3449 // Setup the verification and class unloading state for this
3450 // CMS collection cycle.
3451 setup_cms_unloading_and_verification_state();
3453 NOT_PRODUCT(TraceTime t("\ncheckpointRootsInitialWork",
3454 PrintGCDetails && Verbose, true, gclog_or_tty);)
3455 if (UseAdaptiveSizePolicy) {
3456 size_policy()->checkpoint_roots_initial_begin();
3457 }
3459 // Reset all the PLAB chunk arrays if necessary.
3460 if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
3461 reset_survivor_plab_arrays();
3462 }
3464 ResourceMark rm;
3465 HandleMark hm;
3467 FalseClosure falseClosure;
3468 // In the case of a synchronous collection, we will elide the
3469 // remark step, so it's important to catch all the nmethod oops
3470   // in this step; hence the last argument to the constructor below.
3471 MarkRefsIntoClosure notOlder(_span, &_markBitMap, !asynch /* nmethods */);
3472 GenCollectedHeap* gch = GenCollectedHeap::heap();
3474 verify_work_stacks_empty();
3475 verify_overflow_empty();
3477 gch->ensure_parsability(false); // fill TLABs, but no need to retire them
3478 // Update the saved marks which may affect the root scans.
3479 gch->save_marks();
3481 // weak reference processing has not started yet.
3482 ref_processor()->set_enqueuing_is_done(false);
3484 {
3485 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
3486 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3487 gch->gen_process_strong_roots(_cmsGen->level(),
3488 true, // younger gens are roots
3489 true, // collecting perm gen
3490 SharedHeap::ScanningOption(roots_scanning_options()),
3491                                   NULL, &notOlder);
3492 }
3494 // Clear mod-union table; it will be dirtied in the prologue of
3495 // CMS generation per each younger generation collection.
3497 assert(_modUnionTable.isAllClear(),
3498 "Was cleared in most recent final checkpoint phase"
3499 " or no bits are set in the gc_prologue before the start of the next "
3500 "subsequent marking phase.");
3502 // Temporarily disabled, since pre/post-consumption closures don't
3503 // care about precleaned cards
3504 #if 0
3505 {
3506 MemRegion mr = MemRegion((HeapWord*)_virtual_space.low(),
3507 (HeapWord*)_virtual_space.high());
3508 _ct->ct_bs()->preclean_dirty_cards(mr);
3509 }
3510 #endif
3512 // Save the end of the used_region of the constituent generations
3513 // to be used to limit the extent of sweep in each generation.
3514 save_sweep_limits();
3515 if (UseAdaptiveSizePolicy) {
3516 size_policy()->checkpoint_roots_initial_end(gch->gc_cause());
3517 }
3518 verify_overflow_empty();
3519 }
3521 bool CMSCollector::markFromRoots(bool asynch) {
3522 // we might be tempted to assert that:
3523 // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
3524 // "inconsistent argument?");
3525 // However that wouldn't be right, because it's possible that
3526 // a safepoint is indeed in progress as a younger generation
3527 // stop-the-world GC happens even as we mark in this generation.
3528 assert(_collectorState == Marking, "inconsistent state?");
3529 check_correct_thread_executing();
3530 verify_overflow_empty();
3532 bool res;
3533 if (asynch) {
3535 // Start the timers for adaptive size policy for the concurrent phases
3536 // Do it here so that the foreground MS can use the concurrent
3537     // timer since a foreground MS might have the sweep done concurrently
3538 // or STW.
3539 if (UseAdaptiveSizePolicy) {
3540 size_policy()->concurrent_marking_begin();
3541 }
3543 // Weak ref discovery note: We may be discovering weak
3544     // refs in this generation concurrently (but interleaved) with
3545 // weak ref discovery by a younger generation collector.
3547 CMSTokenSyncWithLocks ts(true, bitMapLock());
3548 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
3549 CMSPhaseAccounting pa(this, "mark", !PrintGCDetails);
3550 res = markFromRootsWork(asynch);
3551 if (res) {
3552 _collectorState = Precleaning;
3553 } else { // We failed and a foreground collection wants to take over
3554 assert(_foregroundGCIsActive, "internal state inconsistency");
3555 assert(_restart_addr == NULL, "foreground will restart from scratch");
3556 if (PrintGCDetails) {
3557 gclog_or_tty->print_cr("bailing out to foreground collection");
3558 }
3559 }
3560 if (UseAdaptiveSizePolicy) {
3561 size_policy()->concurrent_marking_end();
3562 }
3563 } else {
3564 assert(SafepointSynchronize::is_at_safepoint(),
3565 "inconsistent with asynch == false");
3566 if (UseAdaptiveSizePolicy) {
3567 size_policy()->ms_collection_marking_begin();
3568 }
3569 // already have locks
3570 res = markFromRootsWork(asynch);
3571 _collectorState = FinalMarking;
3572 if (UseAdaptiveSizePolicy) {
3573 GenCollectedHeap* gch = GenCollectedHeap::heap();
3574 size_policy()->ms_collection_marking_end(gch->gc_cause());
3575 }
3576 }
3577 verify_overflow_empty();
3578 return res;
3579 }
3581 bool CMSCollector::markFromRootsWork(bool asynch) {
3582 // iterate over marked bits in bit map, doing a full scan and mark
3583 // from these roots using the following algorithm:
3584 // . if oop is to the right of the current scan pointer,
3585 // mark corresponding bit (we'll process it later)
3586 // . else (oop is to left of current scan pointer)
3587 // push oop on marking stack
3588 // . drain the marking stack
3590 // Note that when we do a marking step we need to hold the
3591 // bit map lock -- recall that direct allocation (by mutators)
3592 // and promotion (by younger generation collectors) is also
3593 // marking the bit map. [the so-called allocate live policy.]
3594 // Because the implementation of bit map marking is not
3595 // robust wrt simultaneous marking of bits in the same word,
3596 // we need to make sure that there is no such interference
3597 // between concurrent such updates.
3599 // already have locks
3600 assert_lock_strong(bitMapLock());
3602 // Clear the revisit stack, just in case there are any
3603 // obsolete contents from a short-circuited previous CMS cycle.
3604 _revisitStack.reset();
3605 verify_work_stacks_empty();
3606 verify_overflow_empty();
3607 assert(_revisitStack.isEmpty(), "tabula rasa");
3609 bool result = false;
3610 if (CMSConcurrentMTEnabled && ParallelCMSThreads > 0) {
3611 result = do_marking_mt(asynch);
3612 } else {
3613 result = do_marking_st(asynch);
3614 }
3615 return result;
3616 }
3618 // Forward decl
3619 class CMSConcMarkingTask;
3621 class CMSConcMarkingTerminator: public ParallelTaskTerminator {
3622 CMSCollector* _collector;
3623 CMSConcMarkingTask* _task;
3624 bool _yield;
3625 protected:
3626 virtual void yield();
3627 public:
3628 // "n_threads" is the number of threads to be terminated.
3629 // "queue_set" is a set of work queues of other threads.
3630 // "collector" is the CMS collector associated with this task terminator.
3631 // "yield" indicates whether we need the gang as a whole to yield.
3632 CMSConcMarkingTerminator(int n_threads, TaskQueueSetSuper* queue_set,
3633 CMSCollector* collector, bool yield) :
3634 ParallelTaskTerminator(n_threads, queue_set),
3635     _collector(collector), _task(NULL),
3636 _yield(yield) { }
3638 void set_task(CMSConcMarkingTask* task) {
3639 _task = task;
3640 }
3641 };
3643 // MT Concurrent Marking Task
3644 class CMSConcMarkingTask: public YieldingFlexibleGangTask {
3645 CMSCollector* _collector;
3646 YieldingFlexibleWorkGang* _workers; // the whole gang
3647 int _n_workers; // requested/desired # workers
3648 bool _asynch;
3649 bool _result;
3650 CompactibleFreeListSpace* _cms_space;
3651 CompactibleFreeListSpace* _perm_space;
3652 HeapWord* _global_finger;
3653 HeapWord* _restart_addr;
3655 // Exposed here for yielding support
3656 Mutex* const _bit_map_lock;
3658 // The per thread work queues, available here for stealing
3659 OopTaskQueueSet* _task_queues;
3660 CMSConcMarkingTerminator _term;
3662 public:
3663 CMSConcMarkingTask(CMSCollector* collector,
3664 CompactibleFreeListSpace* cms_space,
3665 CompactibleFreeListSpace* perm_space,
3666 bool asynch, int n_workers,
3667 YieldingFlexibleWorkGang* workers,
3668 OopTaskQueueSet* task_queues):
3669 YieldingFlexibleGangTask("Concurrent marking done multi-threaded"),
3670 _collector(collector),
3671 _cms_space(cms_space),
3672 _perm_space(perm_space),
3673 _asynch(asynch), _n_workers(n_workers), _result(true),
3674 _workers(workers), _task_queues(task_queues),
3675 _term(n_workers, task_queues, _collector, asynch),
3676 _bit_map_lock(collector->bitMapLock())
3677 {
3678 assert(n_workers <= workers->total_workers(),
3679 "Else termination won't work correctly today"); // XXX FIX ME!
3680 _requested_size = n_workers;
3681 _term.set_task(this);
3682 assert(_cms_space->bottom() < _perm_space->bottom(),
3683 "Finger incorrectly initialized below");
3684 _restart_addr = _global_finger = _cms_space->bottom();
3685 }
3688 OopTaskQueueSet* task_queues() { return _task_queues; }
3690 OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
3692 HeapWord** global_finger_addr() { return &_global_finger; }
3694 CMSConcMarkingTerminator* terminator() { return &_term; }
3696 void work(int i);
3698 virtual void coordinator_yield(); // stuff done by coordinator
3699 bool result() { return _result; }
3701 void reset(HeapWord* ra) {
3702 assert(_global_finger >= _cms_space->end(), "Postcondition of ::work(i)");
3703 assert(_global_finger >= _perm_space->end(), "Postcondition of ::work(i)");
3704 assert(ra < _perm_space->end(), "ra too large");
3705 _restart_addr = _global_finger = ra;
3706 _term.reset_for_reuse();
3707 }
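
  // Restart protocol sketch (based on the code below): upon a marking
  // stack overflow, a worker records the least discarded address via
  // CMSCollector::lower_restart_addr() (see
  // Par_ConcMarkingClosure::handle_stack_overflow()); the coordinator
  // in do_marking_mt() then calls reset() with that address, and the
  // next round of do_scan_and_mark() aligns it down to a card boundary
  // to use as the base of the 0th task's scan.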
3709 static bool get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3710 OopTaskQueue* work_q);
3712 private:
3713 void do_scan_and_mark(int i, CompactibleFreeListSpace* sp);
3714 void do_work_steal(int i);
3715 void bump_global_finger(HeapWord* f);
3716 };
3718 void CMSConcMarkingTerminator::yield() {
3719 if (ConcurrentMarkSweepThread::should_yield() &&
3720 !_collector->foregroundGCIsActive() &&
3721 _yield) {
3722 _task->yield();
3723 } else {
3724 ParallelTaskTerminator::yield();
3725 }
3726 }
3728 ////////////////////////////////////////////////////////////////
3729 // Concurrent Marking Algorithm Sketch
3730 ////////////////////////////////////////////////////////////////
3731 // Until all tasks exhausted (both spaces):
3732 // -- claim next available chunk
3733 // -- bump global finger via CAS
3734 // -- find first object that starts in this chunk
3735 // and start scanning bitmap from that position
3736 // -- scan marked objects for oops
3737 // -- CAS-mark target, and if successful:
3738 // . if target oop is above global finger (volatile read)
3739 // nothing to do
3740 // . if target oop is in chunk and above local finger
3741 // then nothing to do
3742 // . else push on work-queue
3743 // -- Deal with possible overflow issues:
3744 // . local work-queue overflow causes stuff to be pushed on
3745 // global (common) overflow queue
3746 // . always first empty local work queue
3747 // . then get a batch of oops from global work queue if any
3748 // . then do work stealing
3749 // -- When all tasks claimed (both spaces)
3750 // and local work queue empty,
3751 // then in a loop do:
3752 // . check global overflow stack; steal a batch of oops and trace
3753 //    . try to steal from other threads if the GOS is empty
3754 // . if neither is available, offer termination
3755 // -- Terminate and return result
3756 //
3757 void CMSConcMarkingTask::work(int i) {
3758 elapsedTimer _timer;
3759 ResourceMark rm;
3760 HandleMark hm;
3762 DEBUG_ONLY(_collector->verify_overflow_empty();)
3764 // Before we begin work, our work queue should be empty
3765 assert(work_queue(i)->size() == 0, "Expected to be empty");
3766 // Scan the bitmap covering _cms_space, tracing through grey objects.
3767 _timer.start();
3768 do_scan_and_mark(i, _cms_space);
3769 _timer.stop();
3770 if (PrintCMSStatistics != 0) {
3771 gclog_or_tty->print_cr("Finished cms space scanning in %dth thread: %3.3f sec",
3772 i, _timer.seconds()); // XXX: need xxx/xxx type of notation, two timers
3773 }
3775 // ... do the same for the _perm_space
3776 _timer.reset();
3777 _timer.start();
3778 do_scan_and_mark(i, _perm_space);
3779 _timer.stop();
3780 if (PrintCMSStatistics != 0) {
3781 gclog_or_tty->print_cr("Finished perm space scanning in %dth thread: %3.3f sec",
3782 i, _timer.seconds()); // XXX: need xxx/xxx type of notation, two timers
3783 }
3785 // ... do work stealing
3786 _timer.reset();
3787 _timer.start();
3788 do_work_steal(i);
3789 _timer.stop();
3790 if (PrintCMSStatistics != 0) {
3791 gclog_or_tty->print_cr("Finished work stealing in %dth thread: %3.3f sec",
3792 i, _timer.seconds()); // XXX: need xxx/xxx type of notation, two timers
3793 }
3794 assert(_collector->_markStack.isEmpty(), "Should have been emptied");
3795 assert(work_queue(i)->size() == 0, "Should have been emptied");
3796 // Note that under the current task protocol, the
3797   // following assertion is true even if the spaces
3798   // have expanded since the completion of the concurrent
3799   // marking. XXX This will likely change under strict
3800 // ABORT semantics.
3801 assert(_global_finger > _cms_space->end() &&
3802 _global_finger >= _perm_space->end(),
3803 "All tasks have been completed");
3804 DEBUG_ONLY(_collector->verify_overflow_empty();)
3805 }
3807 void CMSConcMarkingTask::bump_global_finger(HeapWord* f) {
3808 HeapWord* read = _global_finger;
3809 HeapWord* cur = read;
3810 while (f > read) {
3811 cur = read;
3812 read = (HeapWord*) Atomic::cmpxchg_ptr(f, &_global_finger, cur);
3813 if (cur == read) {
3814 // our cas succeeded
3815 assert(_global_finger >= f, "protocol consistency");
3816 break;
3817 }
3818 }
3819 }
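
// A sketch of the lock-free "monotonic max" above: if _global_finger
// is 0x1000 and two workers race with f == 0x2000 and f == 0x1800,
// whichever CAS lands first installs its value; the loser re-reads
// the finger and either retries (its f is still larger) or falls out
// of the loop (f <= read), so the finger only ever moves forward.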
3821 // This is really inefficient, and should be redone by
3822 // using (not yet available) block-read and -write interfaces to the
3823 // stack and the work_queue. XXX FIX ME !!!
3824 bool CMSConcMarkingTask::get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3825 OopTaskQueue* work_q) {
3826 // Fast lock-free check
3827 if (ovflw_stk->length() == 0) {
3828 return false;
3829 }
3830 assert(work_q->size() == 0, "Shouldn't steal");
3831 MutexLockerEx ml(ovflw_stk->par_lock(),
3832 Mutex::_no_safepoint_check_flag);
3833 // Grab up to 1/4 the size of the work queue
3834 size_t num = MIN2((size_t)work_q->max_elems()/4,
3835 (size_t)ParGCDesiredObjsFromOverflowList);
3836 num = MIN2(num, ovflw_stk->length());
3837 for (int i = (int) num; i > 0; i--) {
3838 oop cur = ovflw_stk->pop();
3839 assert(cur != NULL, "Counted wrong?");
3840 work_q->push(cur);
3841 }
3842 return num > 0;
3843 }
3845 void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
3846 SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
3847 int n_tasks = pst->n_tasks();
3848 // We allow that there may be no tasks to do here because
3849 // we are restarting after a stack overflow.
3850 assert(pst->valid() || n_tasks == 0, "Uninitialized use?");
3851 int nth_task = 0;
3853 HeapWord* aligned_start = sp->bottom();
3854 if (sp->used_region().contains(_restart_addr)) {
3855 // Align down to a card boundary for the start of 0th task
3856 // for this space.
3857 aligned_start =
3858 (HeapWord*)align_size_down((uintptr_t)_restart_addr,
3859 CardTableModRefBS::card_size);
3860 }
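  // For example, assuming the usual 512-byte cards, align_size_down()
  // clears the low 9 bits of the restart address:
  //   0x1234567 & ~0x1ff == 0x1234400
  // so the 0th task's chunk begins on a card boundary at or below
  // _restart_addr.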
3862 size_t chunk_size = sp->marking_task_size();
3863 while (!pst->is_task_claimed(/* reference */ nth_task)) {
3864 // Having claimed the nth task in this space,
3865 // compute the chunk that it corresponds to:
3866 MemRegion span = MemRegion(aligned_start + nth_task*chunk_size,
3867 aligned_start + (nth_task+1)*chunk_size);
3868 // Try and bump the global finger via a CAS;
3869 // note that we need to do the global finger bump
3870 // _before_ taking the intersection below, because
3871 // the task corresponding to that region will be
3872 // deemed done even if the used_region() expands
3873 // because of allocation -- as it almost certainly will
3874 // during start-up while the threads yield in the
3875 // closure below.
3876 HeapWord* finger = span.end();
3877 bump_global_finger(finger); // atomically
3878 // There are null tasks here corresponding to chunks
3879 // beyond the "top" address of the space.
3880 span = span.intersection(sp->used_region());
3881 if (!span.is_empty()) { // Non-null task
3882 HeapWord* prev_obj;
3883 assert(!span.contains(_restart_addr) || nth_task == 0,
3884 "Inconsistency");
3885 if (nth_task == 0) {
3886 // For the 0th task, we'll not need to compute a block_start.
3887 if (span.contains(_restart_addr)) {
3888 // In the case of a restart because of stack overflow,
3889 // we might additionally skip a chunk prefix.
3890 prev_obj = _restart_addr;
3891 } else {
3892 prev_obj = span.start();
3893 }
3894 } else {
3895 // We want to skip the first object because
3896 // the protocol is to scan any object in its entirety
3897 // that _starts_ in this span; a fortiori, any
3898 // object starting in an earlier span is scanned
3899 // as part of an earlier claimed task.
3900 // Below we use the "careful" version of block_start
3901 // so we do not try to navigate uninitialized objects.
3902 prev_obj = sp->block_start_careful(span.start());
3903 // Below we use a variant of block_size that uses the
3904 // Printezis bits to avoid waiting for allocated
3905 // objects to become initialized/parsable.
3906 while (prev_obj < span.start()) {
3907 size_t sz = sp->block_size_no_stall(prev_obj, _collector);
3908 if (sz > 0) {
3909 prev_obj += sz;
3910 } else {
3911 // In this case we may end up doing a bit of redundant
3912 // scanning, but that appears unavoidable, short of
3913 // locking the free list locks; see bug 6324141.
3914 break;
3915 }
3916 }
3917 }
3918 if (prev_obj < span.end()) {
3919 MemRegion my_span = MemRegion(prev_obj, span.end());
3920 // Do the marking work within a non-empty span --
3921 // the last argument to the constructor indicates whether the
3922 // iteration should be incremental with periodic yields.
3923 Par_MarkFromRootsClosure cl(this, _collector, my_span,
3924 &_collector->_markBitMap,
3925 work_queue(i),
3926 &_collector->_markStack,
3927 &_collector->_revisitStack,
3928 _asynch);
3929 _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end());
3930 } // else nothing to do for this task
3931 } // else nothing to do for this task
3932 }
3933 // We'd be tempted to assert here that since there are no
3934 // more tasks left to claim in this space, the global_finger
3935 // must exceed space->top() and a fortiori space->end(). However,
3936 // that would not quite be correct because the bumping of
3937 // global_finger occurs strictly after the claiming of a task,
3938 // so by the time we reach here the global finger may not yet
3939 // have been bumped up by the thread that claimed the last
3940 // task.
3941 pst->all_tasks_completed();
3942 }
3944 class Par_ConcMarkingClosure: public OopClosure {
3945 private:
3946 CMSCollector* _collector;
3947 MemRegion _span;
3948 CMSBitMap* _bit_map;
3949 CMSMarkStack* _overflow_stack;
3950 CMSMarkStack* _revisit_stack; // XXXXXX Check proper use
3951 OopTaskQueue* _work_queue;
3952 protected:
3953 DO_OOP_WORK_DEFN
3954 public:
3955 Par_ConcMarkingClosure(CMSCollector* collector, OopTaskQueue* work_queue,
3956 CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
3957 _collector(collector),
3958 _span(_collector->_span),
3959 _work_queue(work_queue),
3960 _bit_map(bit_map),
3961 _overflow_stack(overflow_stack) { } // need to initialize revisit stack etc.
3962 virtual void do_oop(oop* p);
3963 virtual void do_oop(narrowOop* p);
3964 void trim_queue(size_t max);
3965 void handle_stack_overflow(HeapWord* lost);
3966 };
3968 // Grey object scanning during work stealing phase --
3969 // the salient assumption here is that any references
3970 // that are in these stolen objects being scanned must
3971 // already have been initialized (else they would not have
3972 // been published), so we do not need to check for
3973 // uninitialized objects before pushing here.
3974 void Par_ConcMarkingClosure::do_oop(oop obj) {
3975 assert(obj->is_oop_or_null(true), "expected an oop or NULL");
3976 HeapWord* addr = (HeapWord*)obj;
3977 // Check if oop points into the CMS generation
3978 // and is not marked
3979 if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
3980 // a white object ...
3981 // If we manage to "claim" the object, by being the
3982 // first thread to mark it, then we push it on our
3983 // marking stack
3984 if (_bit_map->par_mark(addr)) { // ... now grey
3985 // push on work queue (grey set)
3986 bool simulate_overflow = false;
3987 NOT_PRODUCT(
3988 if (CMSMarkStackOverflowALot &&
3989 _collector->simulate_overflow()) {
3990 // simulate a stack overflow
3991 simulate_overflow = true;
3992 }
3993 )
3994 if (simulate_overflow ||
3995 !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
3996 // stack overflow
3997 if (PrintCMSStatistics != 0) {
3998 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
3999 SIZE_FORMAT, _overflow_stack->capacity());
4000 }
4001 // We cannot assert that the overflow stack is full because
4002 // it may have been emptied since.
4003 assert(simulate_overflow ||
4004 _work_queue->size() == _work_queue->max_elems(),
4005 "Else push should have succeeded");
4006 handle_stack_overflow(addr);
4007 }
4008 } // Else, some other thread got there first
4009 }
4010 }
4012 void Par_ConcMarkingClosure::do_oop(oop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
4013 void Par_ConcMarkingClosure::do_oop(narrowOop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
4015 void Par_ConcMarkingClosure::trim_queue(size_t max) {
4016 while (_work_queue->size() > max) {
4017 oop new_oop;
4018 if (_work_queue->pop_local(new_oop)) {
4019 assert(new_oop->is_oop(), "Should be an oop");
4020 assert(_bit_map->isMarked((HeapWord*)new_oop), "Grey object");
4021 assert(_span.contains((HeapWord*)new_oop), "Not in span");
4022 assert(new_oop->is_parsable(), "Should be parsable");
4023 new_oop->oop_iterate(this); // do_oop() above
4024 }
4025 }
4026 }
4028 // Upon stack overflow, we discard (part of) the stack,
4029 // remembering the least address amongst those discarded
4030 // in CMSCollector's _restart_addr.
4031 void Par_ConcMarkingClosure::handle_stack_overflow(HeapWord* lost) {
4032 // We need to do this under a mutex to prevent other
4033 // workers from interfering with the work done below.
4034 MutexLockerEx ml(_overflow_stack->par_lock(),
4035 Mutex::_no_safepoint_check_flag);
4036 // Remember the least grey address discarded
4037 HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
4038 _collector->lower_restart_addr(ra);
4039 _overflow_stack->reset(); // discard stack contents
4040 _overflow_stack->expand(); // expand the stack if possible
4041 }
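
// Note that the discarded entries are not lost: each was already
// marked (grey) in the bit map before being pushed, so the restart
// loops in do_marking_mt()/do_marking_st() below, which re-iterate
// the bit map from the recorded least address, will re-trace them.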
4044 void CMSConcMarkingTask::do_work_steal(int i) {
4045 OopTaskQueue* work_q = work_queue(i);
4046 oop obj_to_scan;
4047 CMSBitMap* bm = &(_collector->_markBitMap);
4048 CMSMarkStack* ovflw = &(_collector->_markStack);
4049 int* seed = _collector->hash_seed(i);
4050 Par_ConcMarkingClosure cl(_collector, work_q, bm, ovflw);
4051 while (true) {
4052 cl.trim_queue(0);
4053 assert(work_q->size() == 0, "Should have been emptied above");
4054 if (get_work_from_overflow_stack(ovflw, work_q)) {
4055 // Can't assert below because the work obtained from the
4056 // overflow stack may already have been stolen from us.
4057 // assert(work_q->size() > 0, "Work from overflow stack");
4058 continue;
4059 } else if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
4060 assert(obj_to_scan->is_oop(), "Should be an oop");
4061 assert(bm->isMarked((HeapWord*)obj_to_scan), "Grey object");
4062 obj_to_scan->oop_iterate(&cl);
4063 } else if (terminator()->offer_termination()) {
4064 assert(work_q->size() == 0, "Impossible!");
4065 break;
4066 }
4067 }
4068 }
4070 // This is run by the CMS (coordinator) thread.
4071 void CMSConcMarkingTask::coordinator_yield() {
4072 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
4073 "CMS thread should hold CMS token");
4075 // First give up the locks, then yield, then re-lock
4076 // We should probably use a constructor/destructor idiom to
4077 // do this unlock/lock or modify the MutexUnlocker class to
4078 // serve our purpose. XXX
4079 assert_lock_strong(_bit_map_lock);
4080 _bit_map_lock->unlock();
4081 ConcurrentMarkSweepThread::desynchronize(true);
4082 ConcurrentMarkSweepThread::acknowledge_yield_request();
4083 _collector->stopTimer();
4084 if (PrintCMSStatistics != 0) {
4085 _collector->incrementYields();
4086 }
4087 _collector->icms_wait();
4089 // It is possible for whichever thread initiated the yield request
4090 // not to get a chance to wake up and take the bitmap lock between
4091 // this thread releasing it and reacquiring it. So, while the
4092 // should_yield() flag is on, let's sleep for a bit to give the
4093 // other thread a chance to wake up. The limit imposed on the number
4094   // of iterations is defensive, to avoid any unforeseen circumstances
4095 // putting us into an infinite loop. Since it's always been this
4096 // (coordinator_yield()) method that was observed to cause the
4097 // problem, we are using a parameter (CMSCoordinatorYieldSleepCount)
4098 // which is by default non-zero. For the other seven methods that
4099   // also perform the yield operation, we are using a different
4100 // parameter (CMSYieldSleepCount) which is by default zero. This way we
4101 // can enable the sleeping for those methods too, if necessary.
4102 // See 6442774.
4103 //
4104 // We really need to reconsider the synchronization between the GC
4105 // thread and the yield-requesting threads in the future and we
4106 // should really use wait/notify, which is the recommended
4107 // way of doing this type of interaction. Additionally, we should
4108   // consolidate the eight methods that do the yield operation, which
4109   // are almost identical, into one for better maintainability and
4110 // readability. See 6445193.
4111 //
4112 // Tony 2006.06.29
4113 for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
4114 ConcurrentMarkSweepThread::should_yield() &&
4115 !CMSCollector::foregroundGCIsActive(); ++i) {
4116 os::sleep(Thread::current(), 1, false);
4117 ConcurrentMarkSweepThread::acknowledge_yield_request();
4118 }
4120 ConcurrentMarkSweepThread::synchronize(true);
4121 _bit_map_lock->lock_without_safepoint_check();
4122 _collector->startTimer();
4123 }
4125 bool CMSCollector::do_marking_mt(bool asynch) {
4126 assert(ParallelCMSThreads > 0 && conc_workers() != NULL, "precondition");
4127 // In the future this would be determined ergonomically, based
4128 // on #cpu's, # active mutator threads (and load), and mutation rate.
4129 int num_workers = ParallelCMSThreads;
4131 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
4132 CompactibleFreeListSpace* perm_space = _permGen->cmsSpace();
4134 CMSConcMarkingTask tsk(this, cms_space, perm_space,
4135 asynch, num_workers /* number requested XXX */,
4136 conc_workers(), task_queues());
4138 // Since the actual number of workers we get may be different
4139 // from the number we requested above, do we need to do anything different
4140   // below? In particular, maybe we need to subclass the SequentialSubTasksDone
4141 // class?? XXX
4142 cms_space ->initialize_sequential_subtasks_for_marking(num_workers);
4143 perm_space->initialize_sequential_subtasks_for_marking(num_workers);
4145 // Refs discovery is already non-atomic.
4146 assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic");
4147 // Mutate the Refs discovery so it is MT during the
4148 // multi-threaded marking phase.
4149 ReferenceProcessorMTMutator mt(ref_processor(), num_workers > 1);
4151 conc_workers()->start_task(&tsk);
4152 while (tsk.yielded()) {
4153 tsk.coordinator_yield();
4154 conc_workers()->continue_task(&tsk);
4155 }
4156 // If the task was aborted, _restart_addr will be non-NULL
4157 assert(tsk.completed() || _restart_addr != NULL, "Inconsistency");
4158 while (_restart_addr != NULL) {
4159 // XXX For now we do not make use of ABORTED state and have not
4160 // yet implemented the right abort semantics (even in the original
4161 // single-threaded CMS case). That needs some more investigation
4162 // and is deferred for now; see CR# TBF. 07252005YSR. XXX
4163 assert(!CMSAbortSemantics || tsk.aborted(), "Inconsistency");
4164 // If _restart_addr is non-NULL, a marking stack overflow
4165     // occurred; we need to do a fresh marking iteration from the
4166 // indicated restart address.
4167 if (_foregroundGCIsActive && asynch) {
4168 // We may be running into repeated stack overflows, having
4169 // reached the limit of the stack size, while making very
4170 // slow forward progress. It may be best to bail out and
4171 // let the foreground collector do its job.
4172 // Clear _restart_addr, so that foreground GC
4173 // works from scratch. This avoids the headache of
4174 // a "rescan" which would otherwise be needed because
4175 // of the dirty mod union table & card table.
4176 _restart_addr = NULL;
4177 return false;
4178 }
4179 // Adjust the task to restart from _restart_addr
4180 tsk.reset(_restart_addr);
4181 cms_space ->initialize_sequential_subtasks_for_marking(num_workers,
4182 _restart_addr);
4183 perm_space->initialize_sequential_subtasks_for_marking(num_workers,
4184 _restart_addr);
4185 _restart_addr = NULL;
4186 // Get the workers going again
4187 conc_workers()->start_task(&tsk);
4188 while (tsk.yielded()) {
4189 tsk.coordinator_yield();
4190 conc_workers()->continue_task(&tsk);
4191 }
4192 }
4193 assert(tsk.completed(), "Inconsistency");
4194 assert(tsk.result() == true, "Inconsistency");
4195 return true;
4196 }
4198 bool CMSCollector::do_marking_st(bool asynch) {
4199 ResourceMark rm;
4200 HandleMark hm;
4202 MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap,
4203 &_markStack, &_revisitStack, CMSYield && asynch);
4204 // the last argument to iterate indicates whether the iteration
4205 // should be incremental with periodic yields.
4206 _markBitMap.iterate(&markFromRootsClosure);
4207 // If _restart_addr is non-NULL, a marking stack overflow
4208   // occurred; we need to do a fresh iteration from the
4209 // indicated restart address.
4210 while (_restart_addr != NULL) {
4211 if (_foregroundGCIsActive && asynch) {
4212 // We may be running into repeated stack overflows, having
4213 // reached the limit of the stack size, while making very
4214 // slow forward progress. It may be best to bail out and
4215 // let the foreground collector do its job.
4216 // Clear _restart_addr, so that foreground GC
4217 // works from scratch. This avoids the headache of
4218 // a "rescan" which would otherwise be needed because
4219 // of the dirty mod union table & card table.
4220 _restart_addr = NULL;
4221 return false; // indicating failure to complete marking
4222 }
4223 // Deal with stack overflow:
4224 // we restart marking from _restart_addr
4225 HeapWord* ra = _restart_addr;
4226 markFromRootsClosure.reset(ra);
4227 _restart_addr = NULL;
4228 _markBitMap.iterate(&markFromRootsClosure, ra, _span.end());
4229 }
4230 return true;
4231 }
4233 void CMSCollector::preclean() {
4234 check_correct_thread_executing();
4235 assert(Thread::current()->is_ConcurrentGC_thread(), "Wrong thread");
4236 verify_work_stacks_empty();
4237 verify_overflow_empty();
4238 _abort_preclean = false;
4239 if (CMSPrecleaningEnabled) {
4240 _eden_chunk_index = 0;
4241 size_t used = get_eden_used();
4242 size_t capacity = get_eden_capacity();
4243 // Don't start sampling unless we will get sufficiently
4244 // many samples.
4245 if (used < (capacity/(CMSScheduleRemarkSamplingRatio * 100)
4246 * CMSScheduleRemarkEdenPenetration)) {
4247 _start_sampling = true;
4248 } else {
4249 _start_sampling = false;
4250 }
4251 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
4252 CMSPhaseAccounting pa(this, "preclean", !PrintGCDetails);
4253 preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
4254 }
4255 CMSTokenSync x(true); // is cms thread
4256 if (CMSPrecleaningEnabled) {
4257 sample_eden();
4258 _collectorState = AbortablePreclean;
4259 } else {
4260 _collectorState = FinalMarking;
4261 }
4262 verify_work_stacks_empty();
4263 verify_overflow_empty();
4264 }
4266 // Try and schedule the remark such that young gen
4267 // occupancy is CMSScheduleRemarkEdenPenetration %.
4268 void CMSCollector::abortable_preclean() {
4269 check_correct_thread_executing();
4270 assert(CMSPrecleaningEnabled, "Inconsistent control state");
4271 assert(_collectorState == AbortablePreclean, "Inconsistent control state");
4273 // If Eden's current occupancy is below this threshold,
4274 // immediately schedule the remark; else preclean
4275 // past the next scavenge in an effort to
4276   // schedule the pause as described above. By choosing
4277 // CMSScheduleRemarkEdenSizeThreshold >= max eden size
4278 // we will never do an actual abortable preclean cycle.
4279 if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
4280 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
4281 CMSPhaseAccounting pa(this, "abortable-preclean", !PrintGCDetails);
4282 // We need more smarts in the abortable preclean
4283 // loop below to deal with cases where allocation
4284 // in young gen is very very slow, and our precleaning
4285 // is running a losing race against a horde of
4286 // mutators intent on flooding us with CMS updates
4287 // (dirty cards).
4288 // One, admittedly dumb, strategy is to give up
4289 // after a certain number of abortable precleaning loops
4290 // or after a certain maximum time. We want to make
4291 // this smarter in the next iteration.
4292 // XXX FIX ME!!! YSR
4293 size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0;
4294 while (!(should_abort_preclean() ||
4295 ConcurrentMarkSweepThread::should_terminate())) {
4296 workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2);
4297 cumworkdone += workdone;
4298 loops++;
4299 // Voluntarily terminate abortable preclean phase if we have
4300 // been at it for too long.
4301 if ((CMSMaxAbortablePrecleanLoops != 0) &&
4302 loops >= CMSMaxAbortablePrecleanLoops) {
4303 if (PrintGCDetails) {
4304 gclog_or_tty->print(" CMS: abort preclean due to loops ");
4305 }
4306 break;
4307 }
4308 if (pa.wallclock_millis() > CMSMaxAbortablePrecleanTime) {
4309 if (PrintGCDetails) {
4310 gclog_or_tty->print(" CMS: abort preclean due to time ");
4311 }
4312 break;
4313 }
4314 // If we are doing little work each iteration, we should
4315 // take a short break.
4316 if (workdone < CMSAbortablePrecleanMinWorkPerIteration) {
4317 // Sleep for some time, waiting for work to accumulate
4318 stopTimer();
4319 cmsThread()->wait_on_cms_lock(CMSAbortablePrecleanWaitMillis);
4320 startTimer();
4321 waited++;
4322 }
4323 }
4324 if (PrintCMSStatistics > 0) {
4325 gclog_or_tty->print(" [%d iterations, %d waits, %d cards)] ",
4326 loops, waited, cumworkdone);
4327 }
4328 }
4329 CMSTokenSync x(true); // is cms thread
4330 if (_collectorState != Idling) {
4331 assert(_collectorState == AbortablePreclean,
4332 "Spontaneous state transition?");
4333 _collectorState = FinalMarking;
4334 } // Else, a foreground collection completed this CMS cycle.
4335 return;
4336 }
4338 // Respond to an Eden sampling opportunity
4339 void CMSCollector::sample_eden() {
4340 // Make sure a young gc cannot sneak in between our
4341 // reading and recording of a sample.
4342 assert(Thread::current()->is_ConcurrentGC_thread(),
4343 "Only the cms thread may collect Eden samples");
4344 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
4345 "Should collect samples while holding CMS token");
4346 if (!_start_sampling) {
4347 return;
4348 }
4349 if (_eden_chunk_array) {
4350 if (_eden_chunk_index < _eden_chunk_capacity) {
4351 _eden_chunk_array[_eden_chunk_index] = *_top_addr; // take sample
4352 assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
4353 "Unexpected state of Eden");
4354 // We'd like to check that what we just sampled is an oop-start address;
4355 // however, we cannot do that here since the object may not yet have been
4356 // initialized. So we'll instead do the check when we _use_ this sample
4357 // later.
4358 if (_eden_chunk_index == 0 ||
4359 (pointer_delta(_eden_chunk_array[_eden_chunk_index],
4360 _eden_chunk_array[_eden_chunk_index-1])
4361 >= CMSSamplingGrain)) {
4362 _eden_chunk_index++; // commit sample
4363 }
4364 }
4365 }
4366 if ((_collectorState == AbortablePreclean) && !_abort_preclean) {
4367 size_t used = get_eden_used();
4368 size_t capacity = get_eden_capacity();
4369 assert(used <= capacity, "Unexpected state of Eden");
4370 if (used > (capacity/100 * CMSScheduleRemarkEdenPenetration)) {
4371 _abort_preclean = true;
4372 }
4373 }
4374 }
4377 size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
4378 assert(_collectorState == Precleaning ||
4379 _collectorState == AbortablePreclean, "incorrect state");
4380 ResourceMark rm;
4381 HandleMark hm;
4382 // Do one pass of scrubbing the discovered reference lists
4383 // to remove any reference objects with strongly-reachable
4384 // referents.
4385 if (clean_refs) {
4386 ReferenceProcessor* rp = ref_processor();
4387 CMSPrecleanRefsYieldClosure yield_cl(this);
4388 assert(rp->span().equals(_span), "Spans should be equal");
4389 CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
4390 &_markStack);
4391 CMSDrainMarkingStackClosure complete_trace(this,
4392 _span, &_markBitMap, &_markStack,
4393 &keep_alive);
4395 // We don't want this step to interfere with a young
4396 // collection because we don't want to take CPU
4397 // or memory bandwidth away from the young GC threads
4398 // (which may be as many as there are CPUs).
4399 // Note that we don't need to protect ourselves from
4400 // interference with mutators because they can't
4401 // manipulate the discovered reference lists nor affect
4402 // the computed reachability of the referents, the
4403 // only properties manipulated by the precleaning
4404 // of these reference lists.
4405 stopTimer();
4406 CMSTokenSyncWithLocks x(true /* is cms thread */,
4407 bitMapLock());
4408 startTimer();
4409 sample_eden();
4410 // The following will yield to allow foreground
4411 // collection to proceed promptly. XXX YSR:
4412 // The code in this method may need further
4413 // tweaking for better performance and some restructuring
4414 // for cleaner interfaces.
4415 rp->preclean_discovered_references(
4416 rp->is_alive_non_header(), &keep_alive, &complete_trace,
4417 &yield_cl);
4418 }
4420 if (clean_survivor) { // preclean the active survivor space(s)
4421 assert(_young_gen->kind() == Generation::DefNew ||
4422 _young_gen->kind() == Generation::ParNew ||
4423 _young_gen->kind() == Generation::ASParNew,
4424 "incorrect type for cast");
4425 DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
4426 PushAndMarkClosure pam_cl(this, _span, ref_processor(),
4427 &_markBitMap, &_modUnionTable,
4428 &_markStack, &_revisitStack,
4429 true /* precleaning phase */);
4430 stopTimer();
4431 CMSTokenSyncWithLocks ts(true /* is cms thread */,
4432 bitMapLock());
4433 startTimer();
4434 unsigned int before_count =
4435 GenCollectedHeap::heap()->total_collections();
4436 SurvivorSpacePrecleanClosure
4437 sss_cl(this, _span, &_markBitMap, &_markStack,
4438 &pam_cl, before_count, CMSYield);
4439 dng->from()->object_iterate_careful(&sss_cl);
4440 dng->to()->object_iterate_careful(&sss_cl);
4441 }
4442 MarkRefsIntoAndScanClosure
4443 mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
4444 &_markStack, &_revisitStack, this, CMSYield,
4445 true /* precleaning phase */);
4446 // CAUTION: The following closure has persistent state that may need to
4447 // be reset upon a decrease in the sequence of addresses it
4448 // processes.
4449 ScanMarkedObjectsAgainCarefullyClosure
4450 smoac_cl(this, _span,
4451 &_markBitMap, &_markStack, &_revisitStack, &mrias_cl, CMSYield);
4453 // Preclean dirty cards in ModUnionTable and CardTable using
4454 // an appropriate convergence criterion;
4455 // repeat CMSPrecleanIter times unless we find that
4456 // we are losing.
4457 assert(CMSPrecleanIter < 10, "CMSPrecleanIter is too large");
4458 assert(CMSPrecleanNumerator < CMSPrecleanDenominator,
4459 "Bad convergence multiplier");
4460 assert(CMSPrecleanThreshold >= 100,
4461 "Unreasonably low CMSPrecleanThreshold");
4463 size_t numIter, cumNumCards, lastNumCards, curNumCards;
4464 for (numIter = 0, cumNumCards = lastNumCards = curNumCards = 0;
4465 numIter < CMSPrecleanIter;
4466 numIter++, lastNumCards = curNumCards, cumNumCards += curNumCards) {
4467 curNumCards = preclean_mod_union_table(_cmsGen, &smoac_cl);
4468 if (CMSPermGenPrecleaningEnabled) {
4469 curNumCards += preclean_mod_union_table(_permGen, &smoac_cl);
4470 }
4471 if (Verbose && PrintGCDetails) {
4472 gclog_or_tty->print(" (modUnionTable: "SIZE_FORMAT" cards)", curNumCards);
4473 }
4474 // Either there are very few dirty cards, so re-mark
4475 // pause will be small anyway, or our pre-cleaning isn't
4476 // that much faster than the rate at which cards are being
4477 // dirtied, so we might as well stop and re-mark since
4478 // precleaning won't improve our re-mark time by much.
4479 if (curNumCards <= CMSPrecleanThreshold ||
4480 (numIter > 0 &&
4481 (curNumCards * CMSPrecleanDenominator >
4482 lastNumCards * CMSPrecleanNumerator))) {
4483 numIter++;
4484 cumNumCards += curNumCards;
4485 break;
4486 }
4487 }
4488 curNumCards = preclean_card_table(_cmsGen, &smoac_cl);
4489 if (CMSPermGenPrecleaningEnabled) {
4490 curNumCards += preclean_card_table(_permGen, &smoac_cl);
4491 }
4492 cumNumCards += curNumCards;
4493 if (PrintGCDetails && PrintCMSStatistics != 0) {
4494 gclog_or_tty->print_cr(" (cardTable: "SIZE_FORMAT" cards, re-scanned "SIZE_FORMAT" cards, "SIZE_FORMAT" iterations)",
4495 curNumCards, cumNumCards, numIter);
4496 }
4497 return cumNumCards; // as a measure of useful work done
4498 }
4500 // PRECLEANING NOTES:
4501 // Precleaning involves:
4502 // . reading the bits of the modUnionTable and clearing the set bits.
4503 // . For the cards corresponding to the set bits, we scan the
4504 // objects on those cards. This means we need the free_list_lock
4505 // so that we can safely iterate over the CMS space when scanning
4506 // for oops.
4507 // . When we scan the objects, we'll be both reading and setting
4508 // marks in the marking bit map, so we'll need the marking bit map.
4509 // . For protecting _collector_state transitions, we take the CGC_lock.
4510 // Note that any races in the reading of card table entries by the
4511 // CMS thread on the one hand and the clearing of those entries by the
4512 // VM thread or the setting of those entries by the mutator threads on the
4513 // other are quite benign. However, for efficiency it makes sense to keep
4514 // the VM thread from racing with the CMS thread while the latter is
4515 // transferring dirty card info to the modUnionTable. We therefore also use the
4516 // CGC_lock to protect the reading of the card table and the mod union
4517 // table by the CMS thread.
4518 // . We run concurrently with mutator updates, so scanning
4519 // needs to be done carefully -- we should not try to scan
4520 // potentially uninitialized objects.
4521 //
4522 // Locking strategy: While holding the CGC_lock, we scan over and
4523 // reset a maximal dirty range of the mod union / card tables, then lock
4524 // the free_list_lock and bitmap lock to do a full marking, then
4525 // release these locks; and repeat the cycle. This allows for a
4526 // certain amount of fairness in the sharing of these locks between
4527 // the CMS collector on the one hand, and the VM thread and the
4528 // mutators on the other.
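// Schematic of the cycle described above (a sketch, not the exact
// code that follows):
//
//   while (dirty cards remain && !should_abort_preclean()) {
//     { CMSTokenSync ts(true);            // CGC_lock only
//       region = get_and_clear_dirty_range(next, end); }
//     { CMSTokenSyncWithLocks ts(true,    // + free_list_lock, bitMapLock
//                                freelistLock(), bitMapLock());
//       scan_and_mark_objects_in(region); }
//   }                                     // all locks dropped between rounds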
4530 // NOTE: preclean_mod_union_table() and preclean_card_table()
4531 // further below are largely identical; if you need to modify
4532 // one of these methods, please check the other method too.
4534 size_t CMSCollector::preclean_mod_union_table(
4535 ConcurrentMarkSweepGeneration* gen,
4536 ScanMarkedObjectsAgainCarefullyClosure* cl) {
4537 verify_work_stacks_empty();
4538 verify_overflow_empty();
4540 // strategy: starting with the first card, accumulate contiguous
4541 // ranges of dirty cards; clear these cards, then scan the region
4542 // covered by these cards.
4544 // Since all of the MUT is committed ahead, we can just use
4545 // that, in case the generations expand while we are precleaning.
4546 // It might also be fine to just use the committed part of the
4547 // generation, but we might potentially miss cards when the
4548 // generation is rapidly expanding while we are in the midst
4549 // of precleaning.
4550 HeapWord* startAddr = gen->reserved().start();
4551 HeapWord* endAddr = gen->reserved().end();
4553 cl->setFreelistLock(gen->freelistLock()); // needed for yielding
4555 size_t numDirtyCards, cumNumDirtyCards;
4556 HeapWord *nextAddr, *lastAddr;
4557 for (cumNumDirtyCards = numDirtyCards = 0,
4558 nextAddr = lastAddr = startAddr;
4559 nextAddr < endAddr;
4560 nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
4562 ResourceMark rm;
4563 HandleMark hm;
4565 MemRegion dirtyRegion;
4566 {
4567 stopTimer();
4568 CMSTokenSync ts(true);
4569 startTimer();
4570 sample_eden();
4571 // Get dirty region starting at nextOffset (inclusive),
4572 // simultaneously clearing it.
4573 dirtyRegion =
4574 _modUnionTable.getAndClearMarkedRegion(nextAddr, endAddr);
4575 assert(dirtyRegion.start() >= nextAddr,
4576 "returned region inconsistent?");
4577 }
4578 // Remember where the next search should begin.
4579 // The returned region (if non-empty) is a right open interval,
4580 // so lastOffset is obtained from the right end of that
4581 // interval.
4582 lastAddr = dirtyRegion.end();
4583 // Should do something more transparent and less hacky XXX
4584 numDirtyCards =
4585 _modUnionTable.heapWordDiffToOffsetDiff(dirtyRegion.word_size());
4587 // We'll scan the cards in the dirty region (with periodic
4588 // yields for foreground GC as needed).
4589 if (!dirtyRegion.is_empty()) {
4590 assert(numDirtyCards > 0, "consistency check");
4591 HeapWord* stop_point = NULL;
4592 {
4593 stopTimer();
4594 CMSTokenSyncWithLocks ts(true, gen->freelistLock(),
4595 bitMapLock());
4596 startTimer();
4597 verify_work_stacks_empty();
4598 verify_overflow_empty();
4599 sample_eden();
4600 stop_point =
4601 gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4602 }
4603 if (stop_point != NULL) {
4604 // The careful iteration stopped early either because it found an
4605 // uninitialized object, or because we were in the midst of an
4606 // "abortable preclean", which should now be aborted. Redirty
4607 // the bits corresponding to the partially-scanned or unscanned
4608 // cards. We'll either restart at the next block boundary or
4609 // abort the preclean.
4610 assert((CMSPermGenPrecleaningEnabled && (gen == _permGen)) ||
4611 (_collectorState == AbortablePreclean && should_abort_preclean()),
4612 "Unparsable objects should only be in perm gen.");
4614 stopTimer();
4615 CMSTokenSyncWithLocks ts(true, bitMapLock());
4616 startTimer();
4617 _modUnionTable.mark_range(MemRegion(stop_point, dirtyRegion.end()));
4618 if (should_abort_preclean()) {
4619 break; // out of preclean loop
4620 } else {
4621 // Compute the next address at which preclean should pick up;
4622 // might need bitMapLock in order to read P-bits.
4623 lastAddr = next_card_start_after_block(stop_point);
4624 }
4625 }
4626 } else {
4627 assert(lastAddr == endAddr, "consistency check");
4628 assert(numDirtyCards == 0, "consistency check");
4629 break;
4630 }
4631 }
4632 verify_work_stacks_empty();
4633 verify_overflow_empty();
4634 return cumNumDirtyCards;
4635 }
4637 // NOTE: preclean_mod_union_table() above and preclean_card_table()
4638 // below are largely identical; if you need to modify
4639 // one of these methods, please check the other method too.
4641 size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* gen,
4642 ScanMarkedObjectsAgainCarefullyClosure* cl) {
4643 // strategy: it's similar to preclean_mod_union_table above, in that
4644 // we accumulate contiguous ranges of dirty cards, mark these cards
4645 // precleaned, then scan the region covered by these cards.
4646 HeapWord* endAddr = (HeapWord*)(gen->_virtual_space.high());
4647 HeapWord* startAddr = (HeapWord*)(gen->_virtual_space.low());
4649 cl->setFreelistLock(gen->freelistLock()); // needed for yielding
4651 size_t numDirtyCards, cumNumDirtyCards;
4652 HeapWord *lastAddr, *nextAddr;
4654 for (cumNumDirtyCards = numDirtyCards = 0,
4655 nextAddr = lastAddr = startAddr;
4656 nextAddr < endAddr;
4657 nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
4659 ResourceMark rm;
4660 HandleMark hm;
4662 MemRegion dirtyRegion;
4663 {
4664 // See comments in "Precleaning notes" above on why we
4665 // do this locking. XXX Could the locking overheads be
4666 // too high when dirty cards are sparse? [I don't think so.]
4667 stopTimer();
4668 CMSTokenSync x(true); // is cms thread
4669 startTimer();
4670 sample_eden();
4671 // Get and clear dirty region from card table
4672 dirtyRegion = _ct->ct_bs()->dirty_card_range_after_preclean(
4673 MemRegion(nextAddr, endAddr));
4674 assert(dirtyRegion.start() >= nextAddr,
4675 "returned region inconsistent?");
4676 }
4677 lastAddr = dirtyRegion.end();
4678 numDirtyCards =
4679 dirtyRegion.word_size()/CardTableModRefBS::card_size_in_words;
4681 if (!dirtyRegion.is_empty()) {
4682 stopTimer();
4683 CMSTokenSyncWithLocks ts(true, gen->freelistLock(), bitMapLock());
4684 startTimer();
4685 sample_eden();
4686 verify_work_stacks_empty();
4687 verify_overflow_empty();
4688 HeapWord* stop_point =
4689 gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4690 if (stop_point != NULL) {
4691 // The careful iteration stopped early because it found an
4692 // uninitialized object. Redirty the bits corresponding to the
4693 // partially-scanned or unscanned cards, and start again at the
4694 // next block boundary.
4695 assert(CMSPermGenPrecleaningEnabled ||
4696 (_collectorState == AbortablePreclean && should_abort_preclean()),
4697 "Unparsable objects should only be in perm gen.");
4698 _ct->ct_bs()->invalidate(MemRegion(stop_point, dirtyRegion.end()));
4699 if (should_abort_preclean()) {
4700 break; // out of preclean loop
4701 } else {
4702 // Compute the next address at which preclean should pick up.
4703 lastAddr = next_card_start_after_block(stop_point);
4704 }
4705 }
4706 } else {
4707 break;
4708 }
4709 }
4710 verify_work_stacks_empty();
4711 verify_overflow_empty();
4712 return cumNumDirtyCards;
4713 }
4715 void CMSCollector::checkpointRootsFinal(bool asynch,
4716 bool clear_all_soft_refs, bool init_mark_was_synchronous) {
4717 assert(_collectorState == FinalMarking, "incorrect state transition?");
4718 check_correct_thread_executing();
4719 // world is stopped at this checkpoint
4720 assert(SafepointSynchronize::is_at_safepoint(),
4721 "world should be stopped");
4722 verify_work_stacks_empty();
4723 verify_overflow_empty();
4725 SpecializationStats::clear();
4726 if (PrintGCDetails) {
4727 gclog_or_tty->print("[YG occupancy: "SIZE_FORMAT" K ("SIZE_FORMAT" K)]",
4728 _young_gen->used() / K,
4729 _young_gen->capacity() / K);
4730 }
4731 if (asynch) {
4732 if (CMSScavengeBeforeRemark) {
4733 GenCollectedHeap* gch = GenCollectedHeap::heap();
4734 // Temporarily set flag to false, GCH->do_collection will
4735 // expect it to be false and set to true
4736 FlagSetting fl(gch->_is_gc_active, false);
4737 NOT_PRODUCT(TraceTime t("Scavenge-Before-Remark",
4738 PrintGCDetails && Verbose, true, gclog_or_tty);)
4739 int level = _cmsGen->level() - 1;
4740 if (level >= 0) {
4741 gch->do_collection(true, // full (i.e. force, see below)
4742 false, // !clear_all_soft_refs
4743 0, // size
4744 false, // is_tlab
4745 level // max_level
4746 );
4747 }
4748 }
4749 FreelistLocker x(this);
4750 MutexLockerEx y(bitMapLock(),
4751 Mutex::_no_safepoint_check_flag);
4752 assert(!init_mark_was_synchronous, "but that's impossible!");
4753 checkpointRootsFinalWork(asynch, clear_all_soft_refs, false);
4754 } else {
4755 // already have all the locks
4756 checkpointRootsFinalWork(asynch, clear_all_soft_refs,
4757 init_mark_was_synchronous);
4758 }
4759 verify_work_stacks_empty();
4760 verify_overflow_empty();
4761 SpecializationStats::print();
4762 }
4764 void CMSCollector::checkpointRootsFinalWork(bool asynch,
4765 bool clear_all_soft_refs, bool init_mark_was_synchronous) {
4767 NOT_PRODUCT(TraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, gclog_or_tty);)
4769 assert(haveFreelistLocks(), "must have free list locks");
4770 assert_lock_strong(bitMapLock());
4772 if (UseAdaptiveSizePolicy) {
4773 size_policy()->checkpoint_roots_final_begin();
4774 }
4776 ResourceMark rm;
4777 HandleMark hm;
4779 GenCollectedHeap* gch = GenCollectedHeap::heap();
4781 if (should_unload_classes()) {
4782 CodeCache::gc_prologue();
4783 }
4784 assert(haveFreelistLocks(), "must have free list locks");
4785 assert_lock_strong(bitMapLock());
4787 if (!init_mark_was_synchronous) {
4788 // We might assume that we need not fill TLAB's when
4789 // CMSScavengeBeforeRemark is set, because we may have just done
4790 // a scavenge which would have filled all TLAB's -- and besides
4791 // Eden would be empty. This however may not always be the case --
4792 // for instance although we asked for a scavenge, it may not have
4793 // happened because of a JNI critical section. We probably need
4794 // a policy for deciding whether we can in that case wait until
4795 // the critical section releases and then do the remark following
4796 // the scavenge, and skip it here. In the absence of that policy,
4797 // or of an indication of whether the scavenge did indeed occur,
4798 // we cannot rely on TLAB's having been filled and must do
4799 // so here just in case a scavenge did not happen.
4800 gch->ensure_parsability(false); // fill TLAB's, but no need to retire them
4801 // Update the saved marks which may affect the root scans.
4802 gch->save_marks();
4804 {
4805 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
4807 // Note on the role of the mod union table:
4808 // Since the marker in "markFromRoots" marks concurrently with
4809 // mutators, it is possible for some reachable objects not to have been
4810 // scanned. For instance, an only reference to an object A was
4811 // placed in object B after the marker scanned B. Unless B is rescanned,
4812 // A would be collected. Such updates to references in marked objects
4813 // are detected via the mod union table which is the set of all cards
4814 // dirtied since the first checkpoint in this GC cycle and prior to
4815 // the most recent young generation GC, minus those cleaned up by the
4816 // concurrent precleaning.
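// A concrete (hypothetical) instance of the race described above:
//   t1: the marker scans B; no field of B yet refers to A
//   t2: a mutator stores the only reference to A into a field of B;
//       the card covering that field is dirtied
//   t3: the dirty card flows into the mod union table, so the rescan
//       below revisits B and marks A live
// Without the rescan, A would be wrongly collected.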
4817 if (CMSParallelRemarkEnabled && ParallelGCThreads > 0) {
4818 TraceTime t("Rescan (parallel) ", PrintGCDetails, false, gclog_or_tty);
4819 do_remark_parallel();
4820 } else {
4821 TraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
4822 gclog_or_tty);
4823 do_remark_non_parallel();
4824 }
4825 }
4826 } else {
4827 assert(!asynch, "Can't have init_mark_was_synchronous in asynch mode");
4828 // The initial mark was stop-world, so there's no rescanning to
4829 // do; go straight on to the next step below.
4830 }
4831 verify_work_stacks_empty();
4832 verify_overflow_empty();
4834 {
4835 NOT_PRODUCT(TraceTime ts("refProcessingWork", PrintGCDetails, false, gclog_or_tty);)
4836 refProcessingWork(asynch, clear_all_soft_refs);
4837 }
4838 verify_work_stacks_empty();
4839 verify_overflow_empty();
4841 if (should_unload_classes()) {
4842 CodeCache::gc_epilogue();
4843 }
4845 // If we encountered any (marking stack / work queue) overflow
4846 // events during the current CMS cycle, take appropriate
4847 // remedial measures, where possible, so as to try and avoid
4848 // recurrence of that condition.
4849 assert(_markStack.isEmpty(), "No grey objects");
4850 size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
4851 _ser_kac_ovflw;
4852 if (ser_ovflw > 0) {
4853 if (PrintCMSStatistics != 0) {
4854 gclog_or_tty->print_cr("Marking stack overflow (benign) "
4855 "(pmc_pc="SIZE_FORMAT", pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT")",
4856 _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw,
4857 _ser_kac_ovflw);
4858 }
4859 _markStack.expand();
4860 _ser_pmc_remark_ovflw = 0;
4861 _ser_pmc_preclean_ovflw = 0;
4862 _ser_kac_ovflw = 0;
4863 }
4864 if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) {
4865 if (PrintCMSStatistics != 0) {
4866 gclog_or_tty->print_cr("Work queue overflow (benign) "
4867 "(pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT")",
4868 _par_pmc_remark_ovflw, _par_kac_ovflw);
4869 }
4870 _par_pmc_remark_ovflw = 0;
4871 _par_kac_ovflw = 0;
4872 }
4873 if (PrintCMSStatistics != 0) {
4874 if (_markStack._hit_limit > 0) {
4875 gclog_or_tty->print_cr(" (benign) Hit max stack size limit ("SIZE_FORMAT")",
4876 _markStack._hit_limit);
4877 }
4878 if (_markStack._failed_double > 0) {
4879 gclog_or_tty->print_cr(" (benign) Failed stack doubling ("SIZE_FORMAT"),"
4880 " current capacity "SIZE_FORMAT,
4881 _markStack._failed_double,
4882 _markStack.capacity());
4883 }
4884 }
4885 _markStack._hit_limit = 0;
4886 _markStack._failed_double = 0;
4888 if ((VerifyAfterGC || VerifyDuringGC) &&
4889 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
4890 verify_after_remark();
4891 }
4893 // Change under the freelistLocks.
4894 _collectorState = Sweeping;
4895 // Call isAllClear() under bitMapLock
4896 assert(_modUnionTable.isAllClear(), "Should be clear by end of the"
4897 " final marking");
4898 if (UseAdaptiveSizePolicy) {
4899 size_policy()->checkpoint_roots_final_end(gch->gc_cause());
4900 }
4901 }
4903 // Parallel remark task
4904 class CMSParRemarkTask: public AbstractGangTask {
4905 CMSCollector* _collector;
4906 WorkGang* _workers;
4907 int _n_workers;
4908 CompactibleFreeListSpace* _cms_space;
4909 CompactibleFreeListSpace* _perm_space;
4911 // The per-thread work queues, available here for stealing.
4912 OopTaskQueueSet* _task_queues;
4913 ParallelTaskTerminator _term;
4915 public:
4916 CMSParRemarkTask(CMSCollector* collector,
4917 CompactibleFreeListSpace* cms_space,
4918 CompactibleFreeListSpace* perm_space,
4919 int n_workers, WorkGang* workers,
4920 OopTaskQueueSet* task_queues):
4921 AbstractGangTask("Rescan roots and grey objects in parallel"),
4922 _collector(collector),
4923 _cms_space(cms_space), _perm_space(perm_space),
4924 _n_workers(n_workers),
4925 _workers(workers),
4926 _task_queues(task_queues),
4927 _term(workers->total_workers(), task_queues) { }
4929 OopTaskQueueSet* task_queues() { return _task_queues; }
4931 OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
4933 ParallelTaskTerminator* terminator() { return &_term; }
4935 void work(int i);
4937 private:
4938 // Work method in support of parallel rescan ... of young gen spaces
4939 void do_young_space_rescan(int i, Par_MarkRefsIntoAndScanClosure* cl,
4940 ContiguousSpace* space,
4941 HeapWord** chunk_array, size_t chunk_top);
4943 // ... of dirty cards in old space
4944 void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i,
4945 Par_MarkRefsIntoAndScanClosure* cl);
4947 // ... work stealing for the above
4948 void do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl, int* seed);
4949 };
4951 void CMSParRemarkTask::work(int i) {
4952 elapsedTimer _timer;
4953 ResourceMark rm;
4954 HandleMark hm;
4956 // ---------- rescan from roots --------------
4957 _timer.start();
4958 GenCollectedHeap* gch = GenCollectedHeap::heap();
4959 Par_MarkRefsIntoAndScanClosure par_mrias_cl(_collector,
4960 _collector->_span, _collector->ref_processor(),
4961 &(_collector->_markBitMap),
4962 work_queue(i), &(_collector->_revisitStack));
4964 // Rescan young gen roots first since these are likely
4965 // coarsely partitioned and may, on that account, constitute
4966 // the critical path; thus, it's best to start off that
4967 // work first.
4968 // ---------- young gen roots --------------
4969 {
4970 DefNewGeneration* dng = _collector->_young_gen->as_DefNewGeneration();
4971 EdenSpace* eden_space = dng->eden();
4972 ContiguousSpace* from_space = dng->from();
4973 ContiguousSpace* to_space = dng->to();
4975 HeapWord** eca = _collector->_eden_chunk_array;
4976 size_t ect = _collector->_eden_chunk_index;
4977 HeapWord** sca = _collector->_survivor_chunk_array;
4978 size_t sct = _collector->_survivor_chunk_index;
4980 assert(ect <= _collector->_eden_chunk_capacity, "out of bounds");
4981 assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds");
4983 do_young_space_rescan(i, &par_mrias_cl, to_space, NULL, 0);
4984 do_young_space_rescan(i, &par_mrias_cl, from_space, sca, sct);
4985 do_young_space_rescan(i, &par_mrias_cl, eden_space, eca, ect);
4987 _timer.stop();
4988 if (PrintCMSStatistics != 0) {
4989 gclog_or_tty->print_cr(
4990 "Finished young gen rescan work in %dth thread: %3.3f sec",
4991 i, _timer.seconds());
4992 }
4993 }
4995 // ---------- remaining roots --------------
4996 _timer.reset();
4997 _timer.start();
4998 gch->gen_process_strong_roots(_collector->_cmsGen->level(),
4999 false, // yg was scanned above
5000 true, // collecting perm gen
5001 SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
5002 NULL, &par_mrias_cl);
5003 _timer.stop();
5004 if (PrintCMSStatistics != 0) {
5005 gclog_or_tty->print_cr(
5006 "Finished remaining root rescan work in %dth thread: %3.3f sec",
5007 i, _timer.seconds());
5008 }
5010 // ---------- rescan dirty cards ------------
5011 _timer.reset();
5012 _timer.start();
5014 // Do the rescan tasks for each of the two spaces
5015 // (cms_space and perm_space) in turn.
5016 do_dirty_card_rescan_tasks(_cms_space, i, &par_mrias_cl);
5017 do_dirty_card_rescan_tasks(_perm_space, i, &par_mrias_cl);
5018 _timer.stop();
5019 if (PrintCMSStatistics != 0) {
5020 gclog_or_tty->print_cr(
5021 "Finished dirty card rescan work in %dth thread: %3.3f sec",
5022 i, _timer.seconds());
5023 }
5025 // ---------- steal work from other threads ...
5026 // ---------- ... and drain overflow list.
5027 _timer.reset();
5028 _timer.start();
5029 do_work_steal(i, &par_mrias_cl, _collector->hash_seed(i));
5030 _timer.stop();
5031 if (PrintCMSStatistics != 0) {
5032 gclog_or_tty->print_cr(
5033 "Finished work stealing in %dth thread: %3.3f sec",
5034 i, _timer.seconds());
5035 }
5036 }
5038 void
5039 CMSParRemarkTask::do_young_space_rescan(int i,
5040 Par_MarkRefsIntoAndScanClosure* cl, ContiguousSpace* space,
5041 HeapWord** chunk_array, size_t chunk_top) {
5042 // Until all tasks completed:
5043 // . claim an unclaimed task
5044 // . compute region boundaries corresponding to task claimed
5045 // using chunk_array
5046 // . par_oop_iterate(cl) over that region
5048 ResourceMark rm;
5049 HandleMark hm;
5051 SequentialSubTasksDone* pst = space->par_seq_tasks();
5052 assert(pst->valid(), "Uninitialized use?");
5054 int nth_task = 0;
5055 int n_tasks = pst->n_tasks();
5057 HeapWord *start, *end;
5058 while (!pst->is_task_claimed(/* reference */ nth_task)) {
5059 // We claimed task # nth_task; compute its boundaries.
5060 if (chunk_top == 0) { // no samples were taken
5061 assert(nth_task == 0 && n_tasks == 1, "Can have only 1 EdenSpace task");
5062 start = space->bottom();
5063 end = space->top();
5064 } else if (nth_task == 0) {
5065 start = space->bottom();
5066 end = chunk_array[nth_task];
5067 } else if (nth_task < (jint)chunk_top) {
5068 assert(nth_task >= 1, "Control point invariant");
5069 start = chunk_array[nth_task - 1];
5070 end = chunk_array[nth_task];
5071 } else {
5072 assert(nth_task == (jint)chunk_top, "Control point invariant");
5073 start = chunk_array[chunk_top - 1];
5074 end = space->top();
5075 }
5076 MemRegion mr(start, end);
5077 // Verify that mr is in space
5078 assert(mr.is_empty() || space->used_region().contains(mr),
5079 "Should be in space");
5080 // Verify that "start" is an object boundary
5081 assert(mr.is_empty() || oop(mr.start())->is_oop(),
5082 "Should be an oop");
5083 space->par_oop_iterate(mr, cl);
5084 }
5085 pst->all_tasks_completed();
5086 }
5088 void
5089 CMSParRemarkTask::do_dirty_card_rescan_tasks(
5090 CompactibleFreeListSpace* sp, int i,
5091 Par_MarkRefsIntoAndScanClosure* cl) {
5092 // Until all tasks completed:
5093 // . claim an unclaimed task
5094 // . compute region boundaries corresponding to task claimed
5095 // . transfer dirty bits ct->mut for that region
5096 // . apply rescanclosure to dirty mut bits for that region
5098 ResourceMark rm;
5099 HandleMark hm;
5101 OopTaskQueue* work_q = work_queue(i);
5102 ModUnionClosure modUnionClosure(&(_collector->_modUnionTable));
5103 // CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION!
5104 // CAUTION: This closure has state that persists across calls to
5105 // the work method dirty_range_iterate_clear() in that it has
5106 // embedded in it a (subtype of) UpwardsObjectClosure. The
5107 // use of that state in the embedded UpwardsObjectClosure instance
5108 // assumes that the cards are always iterated (even if in parallel
5109 // by several threads) in monotonically increasing order per
5110 // thread. This is true of the implementation below which picks
5111 // card ranges (chunks) in monotonically increasing order globally
5112 // and, a fortiori, in monotonically increasing order per thread
5113 // (the latter order being a subsequence of the former).
5114 // If the work code below is ever reorganized into a more chaotic
5115 // work-partitioning form than the current "sequential tasks"
5116 // paradigm, the use of that persistent state will have to be
5117 // revisited and modified appropriately. See also related
5118 // bug 4756801 work on which should examine this code to make
5119 // sure that the changes there do not run counter to the
5120 // assumptions made here and necessary for correctness and
5121 // efficiency. Note also that this code might yield inefficient
5122 // behaviour in the case of very large objects that span one or
5123 // more work chunks. Such objects would potentially be scanned
5124 // several times redundantly. Work on 4756801 should try and
5125 // address that performance anomaly if at all possible. XXX
5126 MemRegion full_span = _collector->_span;
5127 CMSBitMap* bm = &(_collector->_markBitMap); // shared
5128 CMSMarkStack* rs = &(_collector->_revisitStack); // shared
5129 MarkFromDirtyCardsClosure
5130 greyRescanClosure(_collector, full_span, // entire span of interest
5131 sp, bm, work_q, rs, cl);
5133 SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
5134 assert(pst->valid(), "Uninitialized use?");
5135 int nth_task = 0;
5136 const int alignment = CardTableModRefBS::card_size * BitsPerWord;
5137 MemRegion span = sp->used_region();
5138 HeapWord* start_addr = span.start();
5139 HeapWord* end_addr = (HeapWord*)round_to((intptr_t)span.end(),
5140 alignment);
5141 const size_t chunk_size = sp->rescan_task_size(); // in HeapWord units
5142 assert((HeapWord*)round_to((intptr_t)start_addr, alignment) ==
5143 start_addr, "Check alignment");
5144 assert((size_t)round_to((intptr_t)chunk_size, alignment) ==
5145 chunk_size, "Check alignment");
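// Worked example (assuming 512-byte cards and 64-bit words):
//   alignment == CardTableModRefBS::card_size * BitsPerWord
//             == 512 * 64 == 32768 bytes,
// i.e. the span of heap covered by one full word of the MUT bitmap.
// Chunk boundaries at this alignment mean no two workers ever set or
// clear bits in the same MUT word, so the loop below needs no
// synchronization between gang-workers.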
5147 while (!pst->is_task_claimed(/* reference */ nth_task)) {
5148 // Having claimed the nth_task, compute corresponding mem-region,
5149 // which is a fortiori aligned correctly (i.e. at a MUT boundary).
5150 // The alignment restriction ensures that we do not need any
5151 // synchronization with other gang-workers while setting or
5152 // clearing bits in this chunk of the MUT.
5153 MemRegion this_span = MemRegion(start_addr + nth_task*chunk_size,
5154 start_addr + (nth_task+1)*chunk_size);
5155 // The last chunk's end might be way beyond end of the
5156 // used region. In that case pull back appropriately.
5157 if (this_span.end() > end_addr) {
5158 this_span.set_end(end_addr);
5159 assert(!this_span.is_empty(), "Program logic (calculation of n_tasks)");
5160 }
5161 // Iterate over the dirty cards covering this chunk, marking them
5162 // precleaned, and setting the corresponding bits in the mod union
5163 // table. Since we have been careful to partition at Card and MUT-word
5164 // boundaries no synchronization is needed between parallel threads.
5165 _collector->_ct->ct_bs()->dirty_card_iterate(this_span,
5166 &modUnionClosure);
5168 // Having transferred these marks into the modUnionTable,
5169 // rescan the marked objects on the dirty cards in the modUnionTable.
5170 // Even if this is at a synchronous collection, the initial marking
5171 // may have been done during an asynchronous collection so there
5172 // may be dirty bits in the mod-union table.
5173 _collector->_modUnionTable.dirty_range_iterate_clear(
5174 this_span, &greyRescanClosure);
5175 _collector->_modUnionTable.verifyNoOneBitsInRange(
5176 this_span.start(),
5177 this_span.end());
5178 }
5179 pst->all_tasks_completed(); // declare that i am done
5180 }
5182 // . see if we can share work_queues with ParNew? XXX
5183 void
5184 CMSParRemarkTask::do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl,
5185 int* seed) {
5186 OopTaskQueue* work_q = work_queue(i);
5187 NOT_PRODUCT(int num_steals = 0;)
5188 oop obj_to_scan;
5189 CMSBitMap* bm = &(_collector->_markBitMap);
5190 size_t num_from_overflow_list =
5191 MIN2((size_t)work_q->max_elems()/4,
5192 (size_t)ParGCDesiredObjsFromOverflowList);
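// Illustrative numbers (assumed values): with work_q->max_elems() ==
// 16384 and the default ParGCDesiredObjsFromOverflowList == 20, each
// refill below takes MIN2(4096, 20) == 20 objects, so no single
// thread drains the shared overflow list all at once.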
5194 while (true) {
5195 // Completely finish any left over work from (an) earlier round(s)
5196 cl->trim_queue(0);
5197 // Now check if there's any work in the overflow list
5198 if (_collector->par_take_from_overflow_list(num_from_overflow_list,
5199 work_q)) {
5200 // found something in global overflow list;
5201 // not yet ready to go stealing work from others.
5202 // We'd like to assert(work_q->size() != 0, ...)
5203 // because we just took work from the overflow list,
5204 // but of course we can't since all of that could have
5205 // been already stolen from us.
5206 // "He giveth and He taketh away."
5207 continue;
5208 }
5209 // Verify that we have no work before we resort to stealing
5210 assert(work_q->size() == 0, "Have work, shouldn't steal");
5211 // Try to steal from other queues that have work
5212 if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
5213 NOT_PRODUCT(num_steals++;)
5214 assert(obj_to_scan->is_oop(), "Oops, not an oop!");
5215 assert(bm->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
5216 // Do scanning work
5217 obj_to_scan->oop_iterate(cl);
5218 // Loop around, finish this work, and try to steal some more
5219 } else if (terminator()->offer_termination()) {
5220 break; // nirvana from the infinite cycle
5221 }
5222 }
5223 NOT_PRODUCT(
5224 if (PrintCMSStatistics != 0) {
5225 gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
5226 }
5227 )
5228 assert(work_q->size() == 0 && _collector->overflow_list_is_empty(),
5229 "Else our work is not yet done");
5230 }
5232 // Return a thread-local PLAB recording array, as appropriate.
5233 void* CMSCollector::get_data_recorder(int thr_num) {
5234 if (_survivor_plab_array != NULL &&
5235 (CMSPLABRecordAlways ||
5236 (_collectorState > Marking && _collectorState < FinalMarking))) {
5237 assert(thr_num < (int)ParallelGCThreads, "thr_num is out of bounds");
5238 ChunkArray* ca = &_survivor_plab_array[thr_num];
5239 ca->reset(); // clear it so that fresh data is recorded
5240 return (void*) ca;
5241 } else {
5242 return NULL;
5243 }
5244 }
5246 // Reset all the thread-local PLAB recording arrays
5247 void CMSCollector::reset_survivor_plab_arrays() {
5248 for (uint i = 0; i < ParallelGCThreads; i++) {
5249 _survivor_plab_array[i].reset();
5250 }
5251 }
5253 // Merge the per-thread plab arrays into the global survivor chunk
5254 // array which will provide the partitioning of the survivor space
5255 // for CMS rescan.
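// Illustrative example (hypothetical addresses): given two threads'
// recorded PLAB boundaries T0 == [0x1000, 0x3000] and T1 == [0x2000],
// the loop below performs an ascending k-way merge and leaves
// _survivor_chunk_array == [0x1000, 0x2000, 0x3000] with
// _survivor_chunk_index == 3.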
5256 void CMSCollector::merge_survivor_plab_arrays(ContiguousSpace* surv) {
5257 assert(_survivor_plab_array != NULL, "Error");
5258 assert(_survivor_chunk_array != NULL, "Error");
5259 assert(_collectorState == FinalMarking, "Error");
5260 for (uint j = 0; j < ParallelGCThreads; j++) {
5261 _cursor[j] = 0;
5262 }
5263 HeapWord* top = surv->top();
5264 size_t i;
5265 for (i = 0; i < _survivor_chunk_capacity; i++) { // all sca entries
5266 HeapWord* min_val = top; // Higher than any PLAB address
5267 uint min_tid = 0; // position of min_val this round
5268 for (uint j = 0; j < ParallelGCThreads; j++) {
5269 ChunkArray* cur_sca = &_survivor_plab_array[j];
5270 if (_cursor[j] == cur_sca->end()) {
5271 continue;
5272 }
5273 assert(_cursor[j] < cur_sca->end(), "ctl pt invariant");
5274 HeapWord* cur_val = cur_sca->nth(_cursor[j]);
5275 assert(surv->used_region().contains(cur_val), "Out of bounds value");
5276 if (cur_val < min_val) {
5277 min_tid = j;
5278 min_val = cur_val;
5279 } else {
5280 assert(cur_val < top, "All recorded addresses should be less");
5281 }
5282 }
5283 // At this point min_val and min_tid are respectively
5284 // the least address in _survivor_plab_array[j]->nth(_cursor[j])
5285 // and the thread (j) that witnesses that address.
5286 // We record this address in the _survivor_chunk_array[i]
5287 // and increment _cursor[min_tid] prior to the next round i.
5288 if (min_val == top) {
5289 break;
5290 }
5291 _survivor_chunk_array[i] = min_val;
5292 _cursor[min_tid]++;
5293 }
5294 // We are all done; record the size of the _survivor_chunk_array
5295 _survivor_chunk_index = i; // exclusive: [0, i)
5296 if (PrintCMSStatistics > 0) {
5297 gclog_or_tty->print(" (Survivor:" SIZE_FORMAT " chunks) ", i);
5298 }
5299 // Verify that we used up all the recorded entries
5300 #ifdef ASSERT
5301 size_t total = 0;
5302 for (uint j = 0; j < ParallelGCThreads; j++) {
5303 assert(_cursor[j] == _survivor_plab_array[j].end(), "Ctl pt invariant");
5304 total += _cursor[j];
5305 }
5306 assert(total == _survivor_chunk_index, "Ctl Pt Invariant");
5307 // Check that the merged array is in sorted order
5308 if (total > 0) {
5309 for (size_t i = 0; i < total - 1; i++) {
5310 if (PrintCMSStatistics > 0) {
5311 gclog_or_tty->print(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ",
5312 i, _survivor_chunk_array[i]);
5313 }
5314 assert(_survivor_chunk_array[i] < _survivor_chunk_array[i+1],
5315 "Not sorted");
5316 }
5317 }
5318 #endif // ASSERT
5319 }
5321 // Set up the space's par_seq_tasks structure for work claiming
5322 // for parallel rescan of young gen.
5323 // See ParRescanTask where this is currently used.
5324 void
5325 CMSCollector::
5326 initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
5327 assert(n_threads > 0, "Unexpected n_threads argument");
5328 DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
5330 // Eden space
5331 {
5332 SequentialSubTasksDone* pst = dng->eden()->par_seq_tasks();
5333 assert(!pst->valid(), "Clobbering existing data?");
5334 // Each valid entry in [0, _eden_chunk_index) represents a task.
5335 size_t n_tasks = _eden_chunk_index + 1;
5336 assert(n_tasks == 1 || _eden_chunk_array != NULL, "Error");
5337 pst->set_par_threads(n_threads);
5338 pst->set_n_tasks((int)n_tasks);
5339 }
5341 // Merge the survivor plab arrays into _survivor_chunk_array
5342 if (_survivor_plab_array != NULL) {
5343 merge_survivor_plab_arrays(dng->from());
5344 } else {
5345 assert(_survivor_chunk_index == 0, "Error");
5346 }
5348 // To space
5349 {
5350 SequentialSubTasksDone* pst = dng->to()->par_seq_tasks();
5351 assert(!pst->valid(), "Clobbering existing data?");
5352 pst->set_par_threads(n_threads);
5353 pst->set_n_tasks(1);
5354 assert(pst->valid(), "Error");
5355 }
5357 // From space
5358 {
5359 SequentialSubTasksDone* pst = dng->from()->par_seq_tasks();
5360 assert(!pst->valid(), "Clobbering existing data?");
5361 size_t n_tasks = _survivor_chunk_index + 1;
5362 assert(n_tasks == 1 || _survivor_chunk_array != NULL, "Error");
5363 pst->set_par_threads(n_threads);
5364 pst->set_n_tasks((int)n_tasks);
5365 assert(pst->valid(), "Error");
5366 }
5367 }
5369 // Parallel version of remark
5370 void CMSCollector::do_remark_parallel() {
5371 GenCollectedHeap* gch = GenCollectedHeap::heap();
5372 WorkGang* workers = gch->workers();
5373 assert(workers != NULL, "Need parallel worker threads.");
5374 int n_workers = workers->total_workers();
5375 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
5376 CompactibleFreeListSpace* perm_space = _permGen->cmsSpace();
5378 CMSParRemarkTask tsk(this,
5379 cms_space, perm_space,
5380 n_workers, workers, task_queues());
5382 // Set up for parallel process_strong_roots work.
5383 gch->set_par_threads(n_workers);
5384 gch->change_strong_roots_parity();
5385 // We won't be iterating over the cards in the card table updating
5386 // the younger_gen cards, so we shouldn't call the following; else
5387 // the verification code, as well as subsequent younger_refs_iterate
5388 // code, would get confused. XXX
5389 // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
5391 // The young gen rescan work will not be done as part of
5392 // process_strong_roots (which currently doesn't know how to
5393 // parallelize such a scan), but rather will be broken up into
5394 // a set of parallel tasks (via the sampling that the [abortable]
5395 // preclean phase did of EdenSpace, plus the [two] tasks of
5396 // scanning the [two] survivor spaces). Further fine-grain
5397 // parallelization of the scanning of the survivor spaces
5398 // themselves, and of precleaning of the younger gen itself
5399 // is deferred to the future.
5400 initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
5402 // The dirty card rescan work is broken up into a "sequence"
5403 // of parallel tasks (per constituent space) that are dynamically
5404 // claimed by the parallel threads.
5405 cms_space->initialize_sequential_subtasks_for_rescan(n_workers);
5406 perm_space->initialize_sequential_subtasks_for_rescan(n_workers);
5408 // It turns out that even when we're using 1 thread, doing the work in a
5409 // separate thread causes wide variance in run times. We can't help this
5410 // in the multi-threaded case, but we special-case n=1 here to get
5411 // repeatable measurements of the 1-thread overhead of the parallel code.
5412 if (n_workers > 1) {
5413 // Make refs discovery MT-safe
5414 ReferenceProcessorMTMutator mt(ref_processor(), true);
5415 workers->run_task(&tsk);
5416 } else {
5417 tsk.work(0);
5418 }
5419 gch->set_par_threads(0); // 0 ==> non-parallel.
5420 // restore, single-threaded for now, any preserved marks
5421 // as a result of work_q overflow
5422 restore_preserved_marks_if_any();
5423 }
5425 // Non-parallel version of remark
5426 void CMSCollector::do_remark_non_parallel() {
5427 ResourceMark rm;
5428 HandleMark hm;
5429 GenCollectedHeap* gch = GenCollectedHeap::heap();
5430 MarkRefsIntoAndScanClosure
5431 mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
5432 &_markStack, &_revisitStack, this,
5433 false /* should_yield */, false /* not precleaning */);
5434 MarkFromDirtyCardsClosure
5435 markFromDirtyCardsClosure(this, _span,
5436 NULL, // space is set further below
5437 &_markBitMap, &_markStack, &_revisitStack,
5438 &mrias_cl);
5439 {
5440 TraceTime t("grey object rescan", PrintGCDetails, false, gclog_or_tty);
5441 // Iterate over the dirty cards, marking them precleaned, and
5442 // setting the corresponding bits in the mod union table.
5443 {
5444 ModUnionClosure modUnionClosure(&_modUnionTable);
5445 _ct->ct_bs()->dirty_card_iterate(
5446 _cmsGen->used_region(),
5447 &modUnionClosure);
5448 _ct->ct_bs()->dirty_card_iterate(
5449 _permGen->used_region(),
5450 &modUnionClosure);
5451 }
5452 // Having transferred these marks into the modUnionTable, we just need
5453 // to rescan the marked objects on the dirty cards in the modUnionTable.
5454 // The initial marking may have been done during an asynchronous
5455 // collection so there may be dirty bits in the mod-union table.
5456 const int alignment =
5457 CardTableModRefBS::card_size * BitsPerWord;
5458 {
5459 // ... First handle dirty cards in CMS gen
5460 markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace());
5461 MemRegion ur = _cmsGen->used_region();
5462 HeapWord* lb = ur.start();
5463 HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
5464 MemRegion cms_span(lb, ub);
5465 _modUnionTable.dirty_range_iterate_clear(cms_span,
5466 &markFromDirtyCardsClosure);
5467 verify_work_stacks_empty();
5468 if (PrintCMSStatistics != 0) {
5469 gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in cms gen) ",
5470 markFromDirtyCardsClosure.num_dirty_cards());
5471 }
5472 }
5473 {
5474 // .. and then repeat for dirty cards in perm gen
5475 markFromDirtyCardsClosure.set_space(_permGen->cmsSpace());
5476 MemRegion ur = _permGen->used_region();
5477 HeapWord* lb = ur.start();
5478 HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
5479 MemRegion perm_span(lb, ub);
5480 _modUnionTable.dirty_range_iterate_clear(perm_span,
5481 &markFromDirtyCardsClosure);
5482 verify_work_stacks_empty();
5483 if (PrintCMSStatistics != 0) {
5484 gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in perm gen) ",
5485 markFromDirtyCardsClosure.num_dirty_cards());
5486 }
5487 }
5488 }
5489 if (VerifyDuringGC &&
5490 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5491 HandleMark hm; // Discard invalid handles created during verification
5492 Universe::verify(true);
5493 }
5494 {
5495 TraceTime t("root rescan", PrintGCDetails, false, gclog_or_tty);
5497 verify_work_stacks_empty();
5499 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
5500 gch->gen_process_strong_roots(_cmsGen->level(),
5501 true, // younger gens as roots
5502 true, // collecting perm gen
5503 SharedHeap::ScanningOption(roots_scanning_options()),
5504 NULL, &mrias_cl);
5505 }
5506 verify_work_stacks_empty();
5507 // Restore evacuated mark words, if any, used for overflow list links
5508 if (!CMSOverflowEarlyRestoration) {
5509 restore_preserved_marks_if_any();
5510 }
5511 verify_overflow_empty();
5512 }
5514 ////////////////////////////////////////////////////////
5515 // Parallel Reference Processing Task Proxy Class
5516 ////////////////////////////////////////////////////////
5517 class CMSRefProcTaskProxy: public AbstractGangTask {
5518 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
5519 CMSCollector* _collector;
5520 CMSBitMap* _mark_bit_map;
5521 const MemRegion _span;
5522 OopTaskQueueSet* _task_queues;
5523 ParallelTaskTerminator _term;
5524 ProcessTask& _task;
5526 public:
5527 CMSRefProcTaskProxy(ProcessTask& task,
5528 CMSCollector* collector,
5529 const MemRegion& span,
5530 CMSBitMap* mark_bit_map,
5531 int total_workers,
5532 OopTaskQueueSet* task_queues):
5533 AbstractGangTask("Process referents by policy in parallel"),
5534 _task(task),
5535 _collector(collector), _span(span), _mark_bit_map(mark_bit_map),
5536 _task_queues(task_queues),
5537 _term(total_workers, task_queues)
5538 {
5539 assert(_collector->_span.equals(_span) && !_span.is_empty(),
5540 "Inconsistency in _span");
5541 }
5543 OopTaskQueueSet* task_queues() { return _task_queues; }
5545 OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
5547 ParallelTaskTerminator* terminator() { return &_term; }
5549 void do_work_steal(int i,
5550 CMSParDrainMarkingStackClosure* drain,
5551 CMSParKeepAliveClosure* keep_alive,
5552 int* seed);
5554 virtual void work(int i);
5555 };
5557 void CMSRefProcTaskProxy::work(int i) {
5558 assert(_collector->_span.equals(_span), "Inconsistency in _span");
5559 CMSParKeepAliveClosure par_keep_alive(_collector, _span,
5560 _mark_bit_map, work_queue(i));
5561 CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span,
5562 _mark_bit_map, work_queue(i));
5563 CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map);
5564 _task.work(i, is_alive_closure, par_keep_alive, par_drain_stack);
5565 if (_task.marks_oops_alive()) {
5566 do_work_steal(i, &par_drain_stack, &par_keep_alive,
5567 _collector->hash_seed(i));
5568 }
5569 assert(work_queue(i)->size() == 0, "work_queue should be empty");
5570 assert(_collector->_overflow_list == NULL, "non-empty _overflow_list");
5571 }
5573 class CMSRefEnqueueTaskProxy: public AbstractGangTask {
5574 typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
5575 EnqueueTask& _task;
5577 public:
5578 CMSRefEnqueueTaskProxy(EnqueueTask& task)
5579 : AbstractGangTask("Enqueue reference objects in parallel"),
5580 _task(task)
5581 { }
5583 virtual void work(int i)
5584 {
5585 _task.work(i);
5586 }
5587 };
5589 CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector,
5590 MemRegion span, CMSBitMap* bit_map, OopTaskQueue* work_queue):
5591 _collector(collector),
5592 _span(span),
5593 _bit_map(bit_map),
5594 _work_queue(work_queue),
5595 _mark_and_push(collector, span, bit_map, work_queue),
5596 _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
5597 (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads)))
5598 { }
5600 // . see if we can share work_queues with ParNew? XXX
5601 void CMSRefProcTaskProxy::do_work_steal(int i,
5602 CMSParDrainMarkingStackClosure* drain,
5603 CMSParKeepAliveClosure* keep_alive,
5604 int* seed) {
5605 OopTaskQueue* work_q = work_queue(i);
5606 NOT_PRODUCT(int num_steals = 0;)
5607 oop obj_to_scan;
5608 size_t num_from_overflow_list =
5609 MIN2((size_t)work_q->max_elems()/4,
5610 (size_t)ParGCDesiredObjsFromOverflowList);
5612 while (true) {
5613 // Completely finish any left over work from (an) earlier round(s)
5614 drain->trim_queue(0);
5615 // Now check if there's any work in the overflow list
5616 if (_collector->par_take_from_overflow_list(num_from_overflow_list,
5617 work_q)) {
5618 // Found something in global overflow list;
5619 // not yet ready to go stealing work from others.
5620 // We'd like to assert(work_q->size() != 0, ...)
5621 // because we just took work from the overflow list,
5622 // but of course we can't, since all of that might have
5623 // been already stolen from us.
5624 continue;
5625 }
5626 // Verify that we have no work before we resort to stealing
5627 assert(work_q->size() == 0, "Have work, shouldn't steal");
5628 // Try to steal from other queues that have work
5629 if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
5630 NOT_PRODUCT(num_steals++;)
5631 assert(obj_to_scan->is_oop(), "Oops, not an oop!");
5632 assert(_mark_bit_map->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
5633 // Do scanning work
5634 obj_to_scan->oop_iterate(keep_alive);
5635 // Loop around, finish this work, and try to steal some more
5636 } else if (terminator()->offer_termination()) {
5637 break; // nirvana from the infinite cycle
5638 }
5639 }
5640 NOT_PRODUCT(
5641 if (PrintCMSStatistics != 0) {
5642 gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
5643 }
5644 )
5645 }
5647 void CMSRefProcTaskExecutor::execute(ProcessTask& task)
5648 {
5649 GenCollectedHeap* gch = GenCollectedHeap::heap();
5650 WorkGang* workers = gch->workers();
5651 assert(workers != NULL, "Need parallel worker threads.");
5652 int n_workers = workers->total_workers();
5653 CMSRefProcTaskProxy rp_task(task, &_collector,
5654 _collector.ref_processor()->span(),
5655 _collector.markBitMap(),
5656 n_workers, _collector.task_queues());
5657 workers->run_task(&rp_task);
5658 }
5660 void CMSRefProcTaskExecutor::execute(EnqueueTask& task)
5661 {
5663 GenCollectedHeap* gch = GenCollectedHeap::heap();
5664 WorkGang* workers = gch->workers();
5665 assert(workers != NULL, "Need parallel worker threads.");
5666 CMSRefEnqueueTaskProxy enq_task(task);
5667 workers->run_task(&enq_task);
5668 }
5670 void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
5672 ResourceMark rm;
5673 HandleMark hm;
5674 ReferencePolicy* soft_ref_policy;
5676 assert(!ref_processor()->enqueuing_is_done(), "Enqueuing should not be complete");
5677 // Process weak references.
5678 if (clear_all_soft_refs) {
5679 soft_ref_policy = new AlwaysClearPolicy();
5680 } else {
5681 #ifdef COMPILER2
5682 soft_ref_policy = new LRUMaxHeapPolicy();
5683 #else
5684 soft_ref_policy = new LRUCurrentHeapPolicy();
5685 #endif // COMPILER2
5686 }
5687 verify_work_stacks_empty();
5689 ReferenceProcessor* rp = ref_processor();
5690 assert(rp->span().equals(_span), "Spans should be equal");
5691 CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
5692 &_markStack);
5693 CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
5694 _span, &_markBitMap, &_markStack,
5695 &cmsKeepAliveClosure);
5696 {
5697 TraceTime t("weak refs processing", PrintGCDetails, false, gclog_or_tty);
5698 if (rp->processing_is_mt()) {
5699 CMSRefProcTaskExecutor task_executor(*this);
5700 rp->process_discovered_references(soft_ref_policy,
5701 &_is_alive_closure,
5702 &cmsKeepAliveClosure,
5703 &cmsDrainMarkingStackClosure,
5704 &task_executor);
5705 } else {
5706 rp->process_discovered_references(soft_ref_policy,
5707 &_is_alive_closure,
5708 &cmsKeepAliveClosure,
5709 &cmsDrainMarkingStackClosure,
5710 NULL);
5711 }
5712 verify_work_stacks_empty();
5713 }
5715 if (should_unload_classes()) {
5716 {
5717 TraceTime t("class unloading", PrintGCDetails, false, gclog_or_tty);
5719 // Follow SystemDictionary roots and unload classes
5720 bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
5722 // Follow CodeCache roots and unload any methods marked for unloading
5723 CodeCache::do_unloading(&_is_alive_closure,
5724 &cmsKeepAliveClosure,
5725 purged_class);
5727 cmsDrainMarkingStackClosure.do_void();
5728 verify_work_stacks_empty();
5730 // Update subklass/sibling/implementor links in KlassKlass descendants
5731 assert(!_revisitStack.isEmpty(), "revisit stack should not be empty");
5732 oop k;
5733 while ((k = _revisitStack.pop()) != NULL) {
5734 ((Klass*)(oopDesc*)k)->follow_weak_klass_links(
5735 &_is_alive_closure,
5736 &cmsKeepAliveClosure);
5737 }
5738 assert(!ClassUnloading ||
5739 (_markStack.isEmpty() && overflow_list_is_empty()),
5740 "Should not have found new reachable objects");
5741 assert(_revisitStack.isEmpty(), "revisit stack should have been drained");
5742 cmsDrainMarkingStackClosure.do_void();
5743 verify_work_stacks_empty();
5744 }
5746 {
5747 TraceTime t("scrub symbol & string tables", PrintGCDetails, false, gclog_or_tty);
5748 // Now clean up stale oops in SymbolTable and StringTable
5749 SymbolTable::unlink(&_is_alive_closure);
5750 StringTable::unlink(&_is_alive_closure);
5751 }
5752 }
5754 verify_work_stacks_empty();
5755 // Restore any preserved marks as a result of mark stack or
5756 // work queue overflow
5757 restore_preserved_marks_if_any(); // done single-threaded for now
5759 rp->set_enqueuing_is_done(true);
5760 if (rp->processing_is_mt()) {
5761 CMSRefProcTaskExecutor task_executor(*this);
5762 rp->enqueue_discovered_references(&task_executor);
5763 } else {
5764 rp->enqueue_discovered_references(NULL);
5765 }
5766 rp->verify_no_references_recorded();
5767 assert(!rp->discovery_enabled(), "should have been disabled");
5769 // JVMTI object tagging is based on JNI weak refs. If any of these
5770 // refs were cleared then JVMTI needs to update its maps and
5771 // maybe post ObjectFrees to agents.
5772 JvmtiExport::cms_ref_processing_epilogue();
5773 }
5775 #ifndef PRODUCT
5776 void CMSCollector::check_correct_thread_executing() {
5777 Thread* t = Thread::current();
5778 // Only the VM thread or the CMS thread should be here.
5779 assert(t->is_ConcurrentGC_thread() || t->is_VM_thread(),
5780 "Unexpected thread type");
5781 // If this is the vm thread, the foreground process
5782 // should not be waiting. Note that _foregroundGCIsActive is
5783 // true while the foreground collector is waiting.
5784 if (_foregroundGCShouldWait) {
5785 // We cannot be the VM thread
5786 assert(t->is_ConcurrentGC_thread(),
5787 "Should be CMS thread");
5788 } else {
5789 // We can be the CMS thread only if we are in a stop-world
5790 // phase of CMS collection.
5791 if (t->is_ConcurrentGC_thread()) {
5792 assert(_collectorState == InitialMarking ||
5793 _collectorState == FinalMarking,
5794 "Should be a stop-world phase");
5795 // The CMS thread should be holding the CMS_token.
5796 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5797 "Potential interference with concurrently "
5798 "executing VM thread");
5799 }
5800 }
5801 }
5802 #endif
5804 void CMSCollector::sweep(bool asynch) {
5805 assert(_collectorState == Sweeping, "just checking");
5806 check_correct_thread_executing();
5807 verify_work_stacks_empty();
5808 verify_overflow_empty();
5809 incrementSweepCount();
5810 _sweep_timer.stop();
5811 _sweep_estimate.sample(_sweep_timer.seconds());
5812 size_policy()->avg_cms_free_at_sweep()->sample(_cmsGen->free());
5814 // PermGen verification support: If perm gen sweeping is disabled in
5815 // this cycle, we preserve the perm gen object "deadness" information
5816 // in the perm_gen_verify_bit_map. In order to do that we traverse
5817 // all blocks in perm gen and mark all dead objects.
5818 if (verifying() && !should_unload_classes()) {
5819 assert(perm_gen_verify_bit_map()->sizeInBits() != 0,
5820 "Should have already been allocated");
5821 MarkDeadObjectsClosure mdo(this, _permGen->cmsSpace(),
5822 markBitMap(), perm_gen_verify_bit_map());
5823 if (asynch) {
5824 CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
5825 bitMapLock());
5826 _permGen->cmsSpace()->blk_iterate(&mdo);
5827 } else {
5828 // In the case of synchronous sweep, we already have
5829 // the requisite locks/tokens.
5830 _permGen->cmsSpace()->blk_iterate(&mdo);
5831 }
5832 }
5834 if (asynch) {
5835 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
5836 CMSPhaseAccounting pa(this, "sweep", !PrintGCDetails);
5837 // First sweep the old gen then the perm gen
5838 {
5839 CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
5840 bitMapLock());
5841 sweepWork(_cmsGen, asynch);
5842 }
5844 // Now repeat for perm gen
5845 if (should_unload_classes()) {
5846 CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
5847 bitMapLock());
5848 sweepWork(_permGen, asynch);
5849 }
5851 // Update Universe::_heap_*_at_gc figures.
5852 // We need all the free list locks to make the abstract state
5853 // transition from Sweeping to Resetting. See detailed note
5854 // further below.
5855 {
5856 CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
5857 _permGen->freelistLock());
5858 // Update heap occupancy information which is used as
5859 // input to soft ref clearing policy at the next gc.
5860 Universe::update_heap_info_at_gc();
5861 _collectorState = Resizing;
5862 }
5863 } else {
5864 // already have needed locks
5865 sweepWork(_cmsGen, asynch);
5867 if (should_unload_classes()) {
5868 sweepWork(_permGen, asynch);
5869 }
5870 // Update heap occupancy information which is used as
5871 // input to soft ref clearing policy at the next gc.
5872 Universe::update_heap_info_at_gc();
5873 _collectorState = Resizing;
5874 }
5875 verify_work_stacks_empty();
5876 verify_overflow_empty();
5878 _sweep_timer.reset();
5879 _sweep_timer.start();
5881 update_time_of_last_gc(os::javaTimeMillis());
5883 // NOTE on abstract state transitions:
5884 // Mutators allocate-live and/or mark the mod-union table dirty
5885 // based on the state of the collection. The former is done in
5886 // the interval [Marking, Sweeping] and the latter in the interval
5887 // [Marking, Sweeping). Thus the transitions into the Marking state
5888 // and out of the Sweeping state must be synchronously visible
5889 // globally to the mutators.
5890 // The transition into the Marking state happens with the world
5891 // stopped so the mutators will globally see it. Sweeping is
5892 // done asynchronously by the background collector so the transition
5893 // from the Sweeping state to the Resizing state must be done
5894 // under the freelistLock (as is the check for whether to
5895 // allocate-live and whether to dirty the mod-union table).
5896 assert(_collectorState == Resizing, "Change of collector state to"
5897 " Resizing must be done under the freelistLocks (plural)");
5899 // Now that sweeping has been completed, if the GCH's
5900 // incremental_collection_will_fail flag is set, clear it,
5901 // thus inviting a younger gen collection to promote into
5902 // this generation. If such a promotion may still fail,
5903 // the flag will be set again when a young collection is
5904 // attempted.
5905 // I think the incremental_collection_will_fail flag's use
5906 // is specific to a two-generation collection policy, so I'll
5907 // assert that that is the configuration we are operating within.
5908 // The use of the flag can and should be generalized appropriately
5909 // in the future to deal with a general n-generation system.
5911 GenCollectedHeap* gch = GenCollectedHeap::heap();
5912 assert(gch->collector_policy()->is_two_generation_policy(),
5913 "Resetting of incremental_collection_will_fail flag"
5914 " may be incorrect otherwise");
5915 gch->clear_incremental_collection_will_fail();
5916 gch->update_full_collections_completed(_collection_count_start);
5917 }
5919 // FIX ME!!! Looks like this belongs in CFLSpace, with
5920 // CMSGen merely delegating to it.
5921 void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
5922 double nearLargestPercent = 0.999;
5923 HeapWord* minAddr = _cmsSpace->bottom();
5924 HeapWord* largestAddr =
5925 (HeapWord*) _cmsSpace->dictionary()->findLargestDict();
5926 if (largestAddr == 0) {
5927 // The dictionary appears to be empty. In this case
5928 // try to coalesce at the end of the heap.
5929 largestAddr = _cmsSpace->end();
5930 }
5931 size_t largestOffset = pointer_delta(largestAddr, minAddr);
5932 size_t nearLargestOffset =
5933 (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
5934 _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
5935 }
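// Editor's note (illustrative, not part of the original source): a worked
// example of the arithmetic above, with hypothetical numbers. If the span
// from bottom() to the largest chunk is 1,000,000 heap words and
// MinChunkSize is 4 words, then
//   nearLargestOffset = (size_t)(1000000 * 0.999) - 4 = 998996
// so nearLargestChunk is set 1,004 words short of the largest chunk, and
// the sweeper will favor coalescing in that final stretch of the space.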
5937 bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) {
5938 return addr >= _cmsSpace->nearLargestChunk();
5939 }
5941 FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
5942 return _cmsSpace->find_chunk_at_end();
5943 }
5945 void ConcurrentMarkSweepGeneration::update_gc_stats(int current_level,
5946 bool full) {
5947 // The next lower level has been collected. Gather any statistics
5948 // that are of interest at this point.
5949 if (!full && (current_level + 1) == level()) {
5950 // Gather statistics on the young generation collection.
5951 collector()->stats().record_gc0_end(used());
5952 }
5953 }
5955 CMSAdaptiveSizePolicy* ConcurrentMarkSweepGeneration::size_policy() {
5956 GenCollectedHeap* gch = GenCollectedHeap::heap();
5957 assert(gch->kind() == CollectedHeap::GenCollectedHeap,
5958 "Wrong type of heap");
5959 CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
5960 gch->gen_policy()->size_policy();
5961 assert(sp->is_gc_cms_adaptive_size_policy(),
5962 "Wrong type of size policy");
5963 return sp;
5964 }
5966 void ConcurrentMarkSweepGeneration::rotate_debug_collection_type() {
5967 if (PrintGCDetails && Verbose) {
5968 gclog_or_tty->print("Rotate from %d ", _debug_collection_type);
5969 }
5970 _debug_collection_type = (CollectionTypes) (_debug_collection_type + 1);
5971 _debug_collection_type =
5972 (CollectionTypes) (_debug_collection_type % Unknown_collection_type);
5973 if (PrintGCDetails && Verbose) {
5974 gclog_or_tty->print_cr("to %d ", _debug_collection_type);
5975 }
5976 }
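// Editor's note (illustrative): the rotation above is plain modular
// arithmetic over the CollectionTypes enum. Assuming, hypothetically,
// three concrete types numbered 0..2 with Unknown_collection_type == 3,
// successive calls cycle the type 0 -> 1 -> 2 -> 0 -> ...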
5978 void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen,
5979 bool asynch) {
5980 // We iterate over the space(s) underlying this generation,
5981 // checking the mark bit map to see if the bits corresponding
5982 // to specific blocks are marked or not. Blocks that are
5983 // marked are live and are not swept up. All remaining blocks
5984 // are swept up, with coalescing on-the-fly as we sweep up
5985 // contiguous free and/or garbage blocks:
5986 // We need to ensure that the sweeper synchronizes with allocators
5987 // and stop-the-world collectors. In particular, the following
5988 // locks are used:
5989 // . CMS token: if this is held, a stop the world collection cannot occur
5990 // . freelistLock: if this is held no allocation can occur from this
5991 // generation by another thread
5992 // . bitMapLock: if this is held, no other thread can access or update
5993 //   the marking bit map
5995 // Note that we need to hold the freelistLock if we use
5996 // block iterate below; else the iterator might go awry if
5997 // a mutator (or promotion) causes block contents to change
5998 // (for instance if the allocator divvies up a block).
5999 // If we hold the free list lock, for all practical purposes
6000 // young generation GC's can't occur (they'll usually need to
6001 // promote), so we might as well prevent all young generation
6002 // GC's while we do a sweeping step. For the same reason, we might
6003 // as well take the bit map lock for the entire duration.
6005 // check that we hold the requisite locks
6006 assert(have_cms_token(), "Should hold cms token");
6007 assert( (asynch && ConcurrentMarkSweepThread::cms_thread_has_cms_token())
6008 || (!asynch && ConcurrentMarkSweepThread::vm_thread_has_cms_token()),
6009 "Should possess CMS token to sweep");
6010 assert_lock_strong(gen->freelistLock());
6011 assert_lock_strong(bitMapLock());
6013 assert(!_sweep_timer.is_active(), "Was switched off in an outer context");
6014 gen->cmsSpace()->beginSweepFLCensus((float)(_sweep_timer.seconds()),
6015 _sweep_estimate.padded_average());
6016 gen->setNearLargestChunk();
6018 {
6019 SweepClosure sweepClosure(this, gen, &_markBitMap,
6020 CMSYield && asynch);
6021 gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
6022 // We need to free-up/coalesce garbage/blocks from a
6023 // co-terminal free run. This is done in the SweepClosure
6024 // destructor; so, do not remove this scope, else the
6025 // end-of-sweep-census below will be off by a little bit.
6026 }
6027 gen->cmsSpace()->sweep_completed();
6028 gen->cmsSpace()->endSweepFLCensus(sweepCount());
6029 if (should_unload_classes()) { // unloaded classes this cycle,
6030 _concurrent_cycles_since_last_unload = 0; // ... reset count
6031 } else { // did not unload classes,
6032 _concurrent_cycles_since_last_unload++; // ... increment count
6033 }
6034 }
6036 // Reset CMS data structures (for now just the marking bit map)
6037 // preparatory for the next cycle.
6038 void CMSCollector::reset(bool asynch) {
6039 GenCollectedHeap* gch = GenCollectedHeap::heap();
6040 CMSAdaptiveSizePolicy* sp = size_policy();
6041 AdaptiveSizePolicyOutput(sp, gch->total_collections());
6042 if (asynch) {
6043 CMSTokenSyncWithLocks ts(true, bitMapLock());
6045 // If the state is not "Resetting", the foreground thread
6046 // has already done the collection and the reset.
6047 if (_collectorState != Resetting) {
6048 assert(_collectorState == Idling, "The state should only change"
6049 " because the foreground collector has finished the collection");
6050 return;
6051 }
6053 // Clear the mark bitmap (no grey objects to start with)
6054 // for the next cycle.
6055 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
6056 CMSPhaseAccounting cmspa(this, "reset", !PrintGCDetails);
6058 HeapWord* curAddr = _markBitMap.startWord();
6059 while (curAddr < _markBitMap.endWord()) {
6060 size_t remaining = pointer_delta(_markBitMap.endWord(), curAddr);
6061 MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
6062 _markBitMap.clear_large_range(chunk);
6063 if (ConcurrentMarkSweepThread::should_yield() &&
6064 !foregroundGCIsActive() &&
6065 CMSYield) {
6066 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6067 "CMS thread should hold CMS token");
6068 assert_lock_strong(bitMapLock());
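// Editor's note: the lines below are the standard CMS yield protocol
// used throughout this file: (1) release the leaf lock(s) held, (2)
// give up the CMS token via desynchronize(), (3) sleep briefly while a
// yield request is pending, then (4) re-acquire the token and the
// lock(s) before resuming work.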
6069 bitMapLock()->unlock();
6070 ConcurrentMarkSweepThread::desynchronize(true);
6071 ConcurrentMarkSweepThread::acknowledge_yield_request();
6072 stopTimer();
6073 if (PrintCMSStatistics != 0) {
6074 incrementYields();
6075 }
6076 icms_wait();
6078 // See the comment in coordinator_yield()
6079 for (unsigned i = 0; i < CMSYieldSleepCount &&
6080 ConcurrentMarkSweepThread::should_yield() &&
6081 !CMSCollector::foregroundGCIsActive(); ++i) {
6082 os::sleep(Thread::current(), 1, false);
6083 ConcurrentMarkSweepThread::acknowledge_yield_request();
6084 }
6086 ConcurrentMarkSweepThread::synchronize(true);
6087 bitMapLock()->lock_without_safepoint_check();
6088 startTimer();
6089 }
6090 curAddr = chunk.end();
6091 }
6092 _collectorState = Idling;
6093 } else {
6094 // already have the lock
6095 assert(_collectorState == Resetting, "just checking");
6096 assert_lock_strong(bitMapLock());
6097 _markBitMap.clear_all();
6098 _collectorState = Idling;
6099 }
6101 // Stop incremental mode after a cycle completes, so that any future cycles
6102 // are triggered by allocation.
6103 stop_icms();
6105 NOT_PRODUCT(
6106 if (RotateCMSCollectionTypes) {
6107 _cmsGen->rotate_debug_collection_type();
6108 }
6109 )
6110 }
6112 void CMSCollector::do_CMS_operation(CMS_op_type op) {
6113 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
6114 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
6115 TraceTime t("GC", PrintGC, !PrintGCDetails, gclog_or_tty);
6116 TraceCollectorStats tcs(counters());
6118 switch (op) {
6119 case CMS_op_checkpointRootsInitial: {
6120 checkpointRootsInitial(true); // asynch
6121 if (PrintGC) {
6122 _cmsGen->printOccupancy("initial-mark");
6123 }
6124 break;
6125 }
6126 case CMS_op_checkpointRootsFinal: {
6127 checkpointRootsFinal(true, // asynch
6128 false, // !clear_all_soft_refs
6129 false); // !init_mark_was_synchronous
6130 if (PrintGC) {
6131 _cmsGen->printOccupancy("remark");
6132 }
6133 break;
6134 }
6135 default:
6136 fatal("No such CMS_op");
6137 }
6138 }
6140 #ifndef PRODUCT
6141 size_t const CMSCollector::skip_header_HeapWords() {
6142 return FreeChunk::header_size();
6143 }
6145 // Try to collect here the conditions that should hold when the
6146 // CMS thread is exiting. The idea is that the foreground GC
6147 // thread should not be blocked if it wants to terminate
6148 // the CMS thread and yet continue to run the VM for a while
6149 // after that.
6150 void CMSCollector::verify_ok_to_terminate() const {
6151 assert(Thread::current()->is_ConcurrentGC_thread(),
6152 "should be called by CMS thread");
6153 assert(!_foregroundGCShouldWait, "should be false");
6154 // We could check here that all the various low-level locks
6155 // are not held by the CMS thread, but that is overkill; see
6156 // also CMSThread::verify_ok_to_terminate() where the CGC_lock
6157 // is checked.
6158 }
6159 #endif
6161 size_t CMSCollector::block_size_using_printezis_bits(HeapWord* addr) const {
6162 assert(_markBitMap.isMarked(addr) && _markBitMap.isMarked(addr + 1),
6163 "missing Printezis mark?");
6164 HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
6165 size_t size = pointer_delta(nextOneAddr + 1, addr);
6166 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6167 "alignment problem");
6168 assert(size >= 3, "Necessary for Printezis marks to work");
6169 return size;
6170 }
6172 // A variant of the above (block_size_using_printezis_bits()) except
6173 // that we return 0 if the P-bits are not yet set.
6174 size_t CMSCollector::block_size_if_printezis_bits(HeapWord* addr) const {
6175 if (_markBitMap.isMarked(addr)) {
6176 assert(_markBitMap.isMarked(addr + 1), "Missing Printezis bit?");
6177 HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
6178 size_t size = pointer_delta(nextOneAddr + 1, addr);
6179 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6180 "alignment problem");
6181 assert(size >= 3, "Necessary for Printezis marks to work");
6182 return size;
6183 } else {
6184 assert(!_markBitMap.isMarked(addr + 1), "Bit map inconsistency?");
6185 return 0;
6186 }
6187 }
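// Editor's note (illustrative, hypothetical numbers): the "Printezis"
// convention encodes the size of an otherwise unparsable block in the
// mark bit map itself: bits are set at addr and addr+1, with a third
// bit at the last word of the block. For example, if addr == 100 and
// the next marked word at or after addr+2 is 107, then
//   size = pointer_delta(107 + 1, 100) = 8 heap words,
// which is also why the "size >= 3" assertions above must hold.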
6189 HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const {
6190 size_t sz = 0;
6191 oop p = (oop)addr;
6192 if (p->klass_or_null() != NULL && p->is_parsable()) {
6193 sz = CompactibleFreeListSpace::adjustObjectSize(p->size());
6194 } else {
6195 sz = block_size_using_printezis_bits(addr);
6196 }
6197 assert(sz > 0, "size must be nonzero");
6198 HeapWord* next_block = addr + sz;
6199 HeapWord* next_card = (HeapWord*)round_to((uintptr_t)next_block,
6200 CardTableModRefBS::card_size);
6201 assert(round_down((uintptr_t)addr, CardTableModRefBS::card_size) <
6202 round_down((uintptr_t)next_card, CardTableModRefBS::card_size),
6203 "must be different cards");
6204 return next_card;
6205 }
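// Editor's note (illustrative, hypothetical numbers): with 512-byte
// cards and 8-byte heap words (64 words per card), a block starting at
// word offset 130 with size 20 gives next_block == 150, and rounding
// 150 up to a 64-word card boundary yields next_card == 192, the start
// of the first card wholly beyond the block.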
6208 // CMS Bit Map Wrapper /////////////////////////////////////////
6210 // Construct a CMS bit map infrastructure, but don't create the
6211 // bit vector itself. That is done by a separate call CMSBitMap::allocate()
6212 // further below.
6213 CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name):
6214 _bm(NULL,0),
6215 _shifter(shifter),
6216 _lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true) : NULL)
6217 {
6218 _bmStartWord = 0;
6219 _bmWordSize = 0;
6220 }
6222 bool CMSBitMap::allocate(MemRegion mr) {
6223 _bmStartWord = mr.start();
6224 _bmWordSize = mr.word_size();
6225 ReservedSpace brs(ReservedSpace::allocation_align_size_up(
6226 (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
6227 if (!brs.is_reserved()) {
6228 warning("CMS bit map allocation failure");
6229 return false;
6230 }
6231 // For now we'll just commit all of the bit map up front.
6232 // Later on we'll try to be more parsimonious with swap.
6233 if (!_virtual_space.initialize(brs, brs.size())) {
6234 warning("CMS bit map backing store failure");
6235 return false;
6236 }
6237 assert(_virtual_space.committed_size() == brs.size(),
6238 "didn't reserve backing store for all of CMS bit map?");
6239 _bm.set_map((uintptr_t*)_virtual_space.low());
6240 assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
6241 _bmWordSize, "inconsistency in bit map sizing");
6242 _bm.set_size(_bmWordSize >> _shifter);
6244 // bm.clear(); // can we rely on getting zero'd memory? verify below
6245 assert(isAllClear(),
6246 "Expected zero'd memory from ReservedSpace constructor");
6247 assert(_bm.size() == heapWordDiffToOffsetDiff(sizeInWords()),
6248 "consistency check");
6249 return true;
6250 }
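// Editor's note (illustrative, hypothetical numbers): the backing store
// reserved above is (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1
// bytes, i.e. one bit per (1 << _shifter) heap words plus a byte of
// slack. Assuming 8-byte heap words and _shifter == 0, a 1 GB space
// covers 2^27 heap words and needs 2^27 / 8 = 16 MB of bit map.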
6252 void CMSBitMap::dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl) {
6253 HeapWord *next_addr, *end_addr, *last_addr;
6254 assert_locked();
6255 assert(covers(mr), "out-of-range error");
6256 // XXX assert that start and end are appropriately aligned
6257 for (next_addr = mr.start(), end_addr = mr.end();
6258 next_addr < end_addr; next_addr = last_addr) {
6259 MemRegion dirty_region = getAndClearMarkedRegion(next_addr, end_addr);
6260 last_addr = dirty_region.end();
6261 if (!dirty_region.is_empty()) {
6262 cl->do_MemRegion(dirty_region);
6263 } else {
6264 assert(last_addr == end_addr, "program logic");
6265 return;
6266 }
6267 }
6268 }
6270 #ifndef PRODUCT
6271 void CMSBitMap::assert_locked() const {
6272 CMSLockVerifier::assert_locked(lock());
6273 }
6275 bool CMSBitMap::covers(MemRegion mr) const {
6276 // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
6277 assert((size_t)_bm.size() == (_bmWordSize >> _shifter),
6278 "size inconsistency");
6279 return (mr.start() >= _bmStartWord) &&
6280 (mr.end() <= endWord());
6281 }
6283 bool CMSBitMap::covers(HeapWord* start, size_t size) const {
6284 return (start >= _bmStartWord && (start + size) <= endWord());
6285 }
6287 void CMSBitMap::verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) {
6288 // verify that there are no 1 bits in the interval [left, right)
6289 FalseBitMapClosure falseBitMapClosure;
6290 iterate(&falseBitMapClosure, left, right);
6291 }
6293 void CMSBitMap::region_invariant(MemRegion mr)
6294 {
6295 assert_locked();
6296 // mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
6297 assert(!mr.is_empty(), "unexpected empty region");
6298 assert(covers(mr), "mr should be covered by bit map");
6299 // convert address range into offset range
6300 size_t start_ofs = heapWordToOffset(mr.start());
6301 // Make sure that end() is appropriately aligned
6302 assert(mr.end() == (HeapWord*)round_to((intptr_t)mr.end(),
6303 (1 << (_shifter+LogHeapWordSize))),
6304 "Misaligned mr.end()");
6305 size_t end_ofs = heapWordToOffset(mr.end());
6306 assert(end_ofs > start_ofs, "Should mark at least one bit");
6307 }
6309 #endif
6311 bool CMSMarkStack::allocate(size_t size) {
6312 // allocate a stack of the requisite depth
6313 ReservedSpace rs(ReservedSpace::allocation_align_size_up(
6314 size * sizeof(oop)));
6315 if (!rs.is_reserved()) {
6316 warning("CMSMarkStack allocation failure");
6317 return false;
6318 }
6319 if (!_virtual_space.initialize(rs, rs.size())) {
6320 warning("CMSMarkStack backing store failure");
6321 return false;
6322 }
6323 assert(_virtual_space.committed_size() == rs.size(),
6324 "didn't reserve backing store for all of CMS stack?");
6325 _base = (oop*)(_virtual_space.low());
6326 _index = 0;
6327 _capacity = size;
6328 NOT_PRODUCT(_max_depth = 0);
6329 return true;
6330 }
6332 // XXX FIX ME !!! In the MT case we come in here holding a
6333 // leaf lock. For printing we need to take a further lock
6334 // which has lower rank. We need to recalibrate the two
6335 // lock-ranks involved in order to be able to print the
6336 // messages below. (Or defer the printing to the caller.
6337 // For now we take the expedient path of just disabling the
6338 // messages for the problematic case.)
6339 void CMSMarkStack::expand() {
6340 assert(_capacity <= CMSMarkStackSizeMax, "stack bigger than permitted");
6341 if (_capacity == CMSMarkStackSizeMax) {
6342 if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
6343 // We print a warning message only once per CMS cycle.
6344 gclog_or_tty->print_cr(" (benign) Hit CMSMarkStack max size limit");
6345 }
6346 return;
6347 }
6348 // Double capacity if possible
6349 size_t new_capacity = MIN2(_capacity*2, CMSMarkStackSizeMax);
6350 // Do not give up existing stack until we have managed to
6351 // get the double capacity that we desired.
6352 ReservedSpace rs(ReservedSpace::allocation_align_size_up(
6353 new_capacity * sizeof(oop)));
6354 if (rs.is_reserved()) {
6355 // Release the backing store associated with old stack
6356 _virtual_space.release();
6357 // Reinitialize virtual space for new stack
6358 if (!_virtual_space.initialize(rs, rs.size())) {
6359 fatal("Not enough swap for expanded marking stack");
6360 }
6361 _base = (oop*)(_virtual_space.low());
6362 _index = 0;
6363 _capacity = new_capacity;
6364 } else if (_failed_double++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
6365 // Failed to double the capacity; continue with the existing stack.
6366 // We print a detail message only once per CMS cycle.
6367 gclog_or_tty->print(" (benign) Failed to expand marking stack from "SIZE_FORMAT"K to "
6368 SIZE_FORMAT"K",
6369 _capacity / K, new_capacity / K);
6370 }
6371 }
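// Editor's note (illustrative, hypothetical flag values): the doubling
// policy above yields a geometric growth sequence clamped at the flag;
// e.g. starting from a 32K-entry stack with CMSMarkStackSizeMax == 4M,
// capacities go 32K, 64K, ..., 4M, after which further overflows only
// bump _hit_limit.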
6374 // Closures
6375 // XXX: there seems to be a lot of code duplication here;
6376 // should refactor and consolidate common code.
6378 // This closure is used to mark refs into the CMS generation in
6379 // the CMS bit map. Called at the first checkpoint. This closure
6380 // assumes that we do not need to re-mark dirty cards; if the CMS
6381 // generation on which this is used is not an oldest (modulo perm gen)
6382 // generation then this will lose younger_gen cards!
6384 MarkRefsIntoClosure::MarkRefsIntoClosure(
6385 MemRegion span, CMSBitMap* bitMap, bool should_do_nmethods):
6386 _span(span),
6387 _bitMap(bitMap),
6388 _should_do_nmethods(should_do_nmethods)
6389 {
6390 assert(_ref_processor == NULL, "deliberately left NULL");
6391 assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
6392 }
6394 void MarkRefsIntoClosure::do_oop(oop obj) {
6395 // if obj points into _span, then mark the corresponding bit in _bitMap
6396 assert(obj->is_oop(), "expected an oop");
6397 HeapWord* addr = (HeapWord*)obj;
6398 if (_span.contains(addr)) {
6399 // this should be made more efficient
6400 _bitMap->mark(addr);
6401 }
6402 }
6404 void MarkRefsIntoClosure::do_oop(oop* p) { MarkRefsIntoClosure::do_oop_work(p); }
6405 void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
6407 // A variant of the above, used for CMS marking verification.
6408 MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
6409 MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm,
6410 bool should_do_nmethods):
6411 _span(span),
6412 _verification_bm(verification_bm),
6413 _cms_bm(cms_bm),
6414 _should_do_nmethods(should_do_nmethods) {
6415 assert(_ref_processor == NULL, "deliberately left NULL");
6416 assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
6417 }
6419 void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
6420 // if obj points into _span, then mark the corresponding bit in _verification_bm
6421 assert(obj->is_oop(), "expected an oop");
6422 HeapWord* addr = (HeapWord*)obj;
6423 if (_span.contains(addr)) {
6424 _verification_bm->mark(addr);
6425 if (!_cms_bm->isMarked(addr)) {
6426 oop(addr)->print();
6427 gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", addr);
6428 fatal("... aborting");
6429 }
6430 }
6431 }
6433 void MarkRefsIntoVerifyClosure::do_oop(oop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
6434 void MarkRefsIntoVerifyClosure::do_oop(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
6436 //////////////////////////////////////////////////
6437 // MarkRefsIntoAndScanClosure
6438 //////////////////////////////////////////////////
6440 MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
6441 ReferenceProcessor* rp,
6442 CMSBitMap* bit_map,
6443 CMSBitMap* mod_union_table,
6444 CMSMarkStack* mark_stack,
6445 CMSMarkStack* revisit_stack,
6446 CMSCollector* collector,
6447 bool should_yield,
6448 bool concurrent_precleaning):
6449 _collector(collector),
6450 _span(span),
6451 _bit_map(bit_map),
6452 _mark_stack(mark_stack),
6453 _pushAndMarkClosure(collector, span, rp, bit_map, mod_union_table,
6454 mark_stack, revisit_stack, concurrent_precleaning),
6455 _yield(should_yield),
6456 _concurrent_precleaning(concurrent_precleaning),
6457 _freelistLock(NULL)
6458 {
6459 _ref_processor = rp;
6460 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
6461 }
6463 // This closure is used to mark refs into the CMS generation at the
6464 // second (final) checkpoint, and to scan and transitively follow
6465 // the unmarked oops. It is also used during the concurrent precleaning
6466 // phase while scanning objects on dirty cards in the CMS generation.
6467 // The marks are made in the marking bit map and the marking stack is
6468 // used for keeping the (newly) grey objects during the scan.
6469 // The parallel version (Par_...) appears further below.
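// In tricolor terms (editor's gloss): marking the bit below makes the
// object grey; popping it off the stack and scanning its fields with
// _pushAndMarkClosure blackens it.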
6470 void MarkRefsIntoAndScanClosure::do_oop(oop obj) {
6471 if (obj != NULL) {
6472 assert(obj->is_oop(), "expected an oop");
6473 HeapWord* addr = (HeapWord*)obj;
6474 assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
6475 assert(_collector->overflow_list_is_empty(),
6476 "overflow list should be empty");
6477 if (_span.contains(addr) &&
6478 !_bit_map->isMarked(addr)) {
6479 // mark bit map (object is now grey)
6480 _bit_map->mark(addr);
6481 // push on marking stack (stack should be empty), and drain the
6482 // stack by applying this closure to the oops in the oops popped
6483 // from the stack (i.e. blacken the grey objects)
6484 bool res = _mark_stack->push(obj);
6485 assert(res, "Should have space to push on empty stack");
6486 do {
6487 oop new_oop = _mark_stack->pop();
6488 assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
6489 assert(new_oop->is_parsable(), "Found unparsable oop");
6490 assert(_bit_map->isMarked((HeapWord*)new_oop),
6491 "only grey objects on this stack");
6492 // iterate over the oops in this oop, marking and pushing
6493 // the ones in CMS heap (i.e. in _span).
6494 new_oop->oop_iterate(&_pushAndMarkClosure);
6495 // check if it's time to yield
6496 do_yield_check();
6497 } while (!_mark_stack->isEmpty() ||
6498 (!_concurrent_precleaning && take_from_overflow_list()));
6499 // (When the marking stack is empty and we are not precleaning,
6500 // the loop condition above also drains the overflow list.)
6501 }
6502 assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
6503 assert(_collector->overflow_list_is_empty(),
6504 "overflow list was drained above");
6505 // We could restore evacuated mark words, if any, used for
6506 // overflow list links here because the overflow list is
6507 // provably empty here. That would reduce the maximum
6508 // size requirements for preserved_{oop,mark}_stack.
6509 // But we'll just postpone it until we are all done
6510 // so we can just stream through.
6511 if (!_concurrent_precleaning && CMSOverflowEarlyRestoration) {
6512 _collector->restore_preserved_marks_if_any();
6513 assert(_collector->no_preserved_marks(), "No preserved marks");
6514 }
6515 assert(!CMSOverflowEarlyRestoration || _collector->no_preserved_marks(),
6516 "All preserved marks should have been restored above");
6517 }
6518 }
6520 void MarkRefsIntoAndScanClosure::do_oop(oop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
6521 void MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
6523 void MarkRefsIntoAndScanClosure::do_yield_work() {
6524 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6525 "CMS thread should hold CMS token");
6526 assert_lock_strong(_freelistLock);
6527 assert_lock_strong(_bit_map->lock());
6528 // relinquish the freelist lock and the bit map lock
6529 _bit_map->lock()->unlock();
6530 _freelistLock->unlock();
6531 ConcurrentMarkSweepThread::desynchronize(true);
6532 ConcurrentMarkSweepThread::acknowledge_yield_request();
6533 _collector->stopTimer();
6534 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
6535 if (PrintCMSStatistics != 0) {
6536 _collector->incrementYields();
6537 }
6538 _collector->icms_wait();
6540 // See the comment in coordinator_yield()
6541 for (unsigned i = 0;
6542 i < CMSYieldSleepCount &&
6543 ConcurrentMarkSweepThread::should_yield() &&
6544 !CMSCollector::foregroundGCIsActive();
6545 ++i) {
6546 os::sleep(Thread::current(), 1, false);
6547 ConcurrentMarkSweepThread::acknowledge_yield_request();
6548 }
6550 ConcurrentMarkSweepThread::synchronize(true);
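// Editor's note: the locks are re-taken in the canonical order used
// elsewhere in this file (freelistLock before the bit map lock), the
// reverse of the release order above, preserving the usual
// lock-ordering discipline.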
6551 _freelistLock->lock_without_safepoint_check();
6552 _bit_map->lock()->lock_without_safepoint_check();
6553 _collector->startTimer();
6554 }
6556 ///////////////////////////////////////////////////////////
6557 // Par_MarkRefsIntoAndScanClosure: a parallel version of
6558 // MarkRefsIntoAndScanClosure
6559 ///////////////////////////////////////////////////////////
6560 Par_MarkRefsIntoAndScanClosure::Par_MarkRefsIntoAndScanClosure(
6561 CMSCollector* collector, MemRegion span, ReferenceProcessor* rp,
6562 CMSBitMap* bit_map, OopTaskQueue* work_queue, CMSMarkStack* revisit_stack):
6563 _span(span),
6564 _bit_map(bit_map),
6565 _work_queue(work_queue),
6566 _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
6567 (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads))),
6568 _par_pushAndMarkClosure(collector, span, rp, bit_map, work_queue,
6569 revisit_stack)
6570 {
6571 _ref_processor = rp;
6572 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
6573 }
6575 // This closure is used to mark refs into the CMS generation at the
6576 // second (final) checkpoint, and to scan and transitively follow
6577 // the unmarked oops. The marks are made in the marking bit map and
6578 // the work_queue is used for keeping the (newly) grey objects during
6579 // the scan phase whence they are also available for stealing by parallel
6580 // threads. Since the marking bit map is shared, updates are
6581 // synchronized (via CAS).
6582 void Par_MarkRefsIntoAndScanClosure::do_oop(oop obj) {
6583 if (obj != NULL) {
6584 // Ignore mark word because this could be an already marked oop
6585 // that may be chained at the end of the overflow list.
6586 assert(obj->is_oop(true), "expected an oop");
6587 HeapWord* addr = (HeapWord*)obj;
6588 if (_span.contains(addr) &&
6589 !_bit_map->isMarked(addr)) {
6590 // mark bit map (object will become grey):
6591 // It is possible for several threads to be
6592 // trying to "claim" this object concurrently;
6593 // the unique thread that succeeds in marking the
6594 // object first will do the subsequent push on
6595 // to the work queue (or overflow list).
6596 if (_bit_map->par_mark(addr)) {
6597 // push on work_queue (which may not be empty), and trim the
6598 // queue to an appropriate length by applying this closure to
6599 // the oops in the oops popped from the stack (i.e. blacken the
6600 // grey objects)
6601 bool res = _work_queue->push(obj);
6602 assert(res, "Low water mark should be less than capacity?");
6603 trim_queue(_low_water_mark);
6604 } // Else, another thread claimed the object
6605 }
6606 }
6607 }
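// Editor's note: par_mark() above is the claiming step, an atomic
// test-and-set on the shared bit map, so exactly one of the racing
// threads sees "true" and becomes responsible for pushing the object;
// the losers simply drop it, keeping each object on at most one work
// queue.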
6609 void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
6610 void Par_MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
6612 // This closure is used to rescan the marked objects on the dirty cards
6613 // in the mod union table and the card table proper.
6614 size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
6615 oop p, MemRegion mr) {
6617 size_t size = 0;
6618 HeapWord* addr = (HeapWord*)p;
6619 DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6620 assert(_span.contains(addr), "we are scanning the CMS generation");
6621 // check if it's time to yield
6622 if (do_yield_check()) {
6623 // We yielded for some foreground stop-world work,
6624 // and we have been asked to abort this ongoing preclean cycle.
6625 return 0;
6626 }
6627 if (_bitMap->isMarked(addr)) {
6628 // it's marked; is it potentially uninitialized?
6629 if (p->klass_or_null() != NULL) {
6630 if (CMSPermGenPrecleaningEnabled && !p->is_parsable()) {
6631 // Signal precleaning to redirty the card since
6632 // the klass pointer is already installed.
6633 assert(size == 0, "Initial value");
6634 } else {
6635 assert(p->is_parsable(), "must be parsable.");
6636 // an initialized object; ignore mark word in verification below
6637 // since we are running concurrent with mutators
6638 assert(p->is_oop(true), "should be an oop");
6639 if (p->is_objArray()) {
6640 // objArrays are precisely marked; restrict scanning
6641 // to dirty cards only.
6642 size = CompactibleFreeListSpace::adjustObjectSize(
6643 p->oop_iterate(_scanningClosure, mr));
6644 } else {
6645 // A non-array may have been imprecisely marked; we need
6646 // to scan object in its entirety.
6647 size = CompactibleFreeListSpace::adjustObjectSize(
6648 p->oop_iterate(_scanningClosure));
6649 }
6650 #ifdef DEBUG
6651 size_t direct_size =
6652 CompactibleFreeListSpace::adjustObjectSize(p->size());
6653 assert(size == direct_size, "Inconsistency in size");
6654 assert(size >= 3, "Necessary for Printezis marks to work");
6655 if (!_bitMap->isMarked(addr+1)) {
6656 _bitMap->verifyNoOneBitsInRange(addr+2, addr+size);
6657 } else {
6658 _bitMap->verifyNoOneBitsInRange(addr+2, addr+size-1);
6659 assert(_bitMap->isMarked(addr+size-1),
6660 "inconsistent Printezis mark");
6661 }
6662 #endif // DEBUG
6663 }
6664 } else {
6665 // an uninitialized object
6666 assert(_bitMap->isMarked(addr+1), "missing Printezis mark?");
6667 HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
6668 size = pointer_delta(nextOneAddr + 1, addr);
6669 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6670 "alignment problem");
6671 // Note that pre-cleaning needn't redirty the card. OopDesc::set_klass()
6672 // will dirty the card when the klass pointer is installed in the
6673 // object (signalling the completion of initialization).
6674 }
6675 } else {
6676 // Either a not yet marked object or an uninitialized object
6677 if (p->klass_or_null() == NULL || !p->is_parsable()) {
6678 // An uninitialized object, skip to the next card, since
6679 // we may not be able to read its P-bits yet.
6680 assert(size == 0, "Initial value");
6681 } else {
6682 // An object not (yet) reached by marking: we merely need to
6683 // compute its size so as to go look at the next block.
6684 assert(p->is_oop(true), "should be an oop");
6685 size = CompactibleFreeListSpace::adjustObjectSize(p->size());
6686 }
6687 }
6688 DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6689 return size;
6690 }
6692 void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
6693 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6694 "CMS thread should hold CMS token");
6695 assert_lock_strong(_freelistLock);
6696 assert_lock_strong(_bitMap->lock());
6698 // relinquish the freelist lock and the bit map lock
6698 _bitMap->lock()->unlock();
6699 _freelistLock->unlock();
6700 ConcurrentMarkSweepThread::desynchronize(true);
6701 ConcurrentMarkSweepThread::acknowledge_yield_request();
6702 _collector->stopTimer();
6703 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
6704 if (PrintCMSStatistics != 0) {
6705 _collector->incrementYields();
6706 }
6707 _collector->icms_wait();
6709 // See the comment in coordinator_yield()
6710 for (unsigned i = 0; i < CMSYieldSleepCount &&
6711 ConcurrentMarkSweepThread::should_yield() &&
6712 !CMSCollector::foregroundGCIsActive(); ++i) {
6713 os::sleep(Thread::current(), 1, false);
6714 ConcurrentMarkSweepThread::acknowledge_yield_request();
6715 }
6717 ConcurrentMarkSweepThread::synchronize(true);
6718 _freelistLock->lock_without_safepoint_check();
6719 _bitMap->lock()->lock_without_safepoint_check();
6720 _collector->startTimer();
6721 }
6724 //////////////////////////////////////////////////////////////////
6725 // SurvivorSpacePrecleanClosure
6726 //////////////////////////////////////////////////////////////////
6727 // This (single-threaded) closure is used to preclean the oops in
6728 // the survivor spaces.
6729 size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {
6731 HeapWord* addr = (HeapWord*)p;
6732 DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6733 assert(!_span.contains(addr), "we are scanning the survivor spaces");
6734 assert(p->klass_or_null() != NULL, "object should be initialized");
6735 assert(p->is_parsable(), "must be parsable.");
6736 // an initialized object; ignore mark word in verification below
6737 // since we are running concurrent with mutators
6738 assert(p->is_oop(true), "should be an oop");
6739 // Note that we do not yield while we iterate over
6740 // the interior oops of p, pushing the relevant ones
6741 // on our marking stack.
6742 size_t size = p->oop_iterate(_scanning_closure);
6743 do_yield_check();
6744 // Observe that below, we do not abandon the preclean
6745 // phase as soon as we should; rather we empty the
6746 // marking stack before returning. This is to satisfy
6747 // some existing assertions. In general, it may be a
6748 // good idea to abort immediately and complete the marking
6749 // from the grey objects at a later time.
6750 while (!_mark_stack->isEmpty()) {
6751 oop new_oop = _mark_stack->pop();
6752 assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
6753 assert(new_oop->is_parsable(), "Found unparsable oop");
6754 assert(_bit_map->isMarked((HeapWord*)new_oop),
6755 "only grey objects on this stack");
6756 // iterate over the oops in this oop, marking and pushing
6757 // the ones in CMS heap (i.e. in _span).
6758 new_oop->oop_iterate(_scanning_closure);
6759 // check if it's time to yield
6760 do_yield_check();
6761 }
6762 unsigned int after_count =
6763 GenCollectedHeap::heap()->total_collections();
6764 bool abort = (_before_count != after_count) ||
6765 _collector->should_abort_preclean();
6766 return abort ? 0 : size;
6767 }
6769 void SurvivorSpacePrecleanClosure::do_yield_work() {
6770 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6771 "CMS thread should hold CMS token");
6772 assert_lock_strong(_bit_map->lock());
6773 // Relinquish the bit map lock
6774 _bit_map->lock()->unlock();
6775 ConcurrentMarkSweepThread::desynchronize(true);
6776 ConcurrentMarkSweepThread::acknowledge_yield_request();
6777 _collector->stopTimer();
6778 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
6779 if (PrintCMSStatistics != 0) {
6780 _collector->incrementYields();
6781 }
6782 _collector->icms_wait();
6784 // See the comment in coordinator_yield()
6785 for (unsigned i = 0; i < CMSYieldSleepCount &&
6786 ConcurrentMarkSweepThread::should_yield() &&
6787 !CMSCollector::foregroundGCIsActive(); ++i) {
6788 os::sleep(Thread::current(), 1, false);
6789 ConcurrentMarkSweepThread::acknowledge_yield_request();
6790 }
6792 ConcurrentMarkSweepThread::synchronize(true);
6793 _bit_map->lock()->lock_without_safepoint_check();
6794 _collector->startTimer();
6795 }
6797 // This closure is used to rescan the marked objects on the dirty cards
6798 // in the mod union table and the card table proper. In the parallel
6799 // case, although the bitMap is shared, we do a single read so the
6800 // isMarked() query is "safe".
6801 bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
6802 // Ignore mark word because we are running concurrent with mutators
6803 assert(p->is_oop_or_null(true), "expected an oop or null");
6804 HeapWord* addr = (HeapWord*)p;
6805 assert(_span.contains(addr), "we are scanning the CMS generation");
6806 bool is_obj_array = false;
6807 #ifdef DEBUG
6808 if (!_parallel) {
6809 assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
6810 assert(_collector->overflow_list_is_empty(),
6811 "overflow list should be empty");
6813 }
6814 #endif // DEBUG
6815 if (_bit_map->isMarked(addr)) {
6816 // Obj arrays are precisely marked, non-arrays are not;
6817 // so we scan objArrays precisely and non-arrays in their
6818 // entirety.
6819 if (p->is_objArray()) {
6820 is_obj_array = true;
6821 if (_parallel) {
6822 p->oop_iterate(_par_scan_closure, mr);
6823 } else {
6824 p->oop_iterate(_scan_closure, mr);
6825 }
6826 } else {
6827 if (_parallel) {
6828 p->oop_iterate(_par_scan_closure);
6829 } else {
6830 p->oop_iterate(_scan_closure);
6831 }
6832 }
6833 }
6834 #ifdef DEBUG
6835 if (!_parallel) {
6836 assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
6837 assert(_collector->overflow_list_is_empty(),
6838 "overflow list should be empty");
6840 }
6841 #endif // DEBUG
6842 return is_obj_array;
6843 }
6845 MarkFromRootsClosure::MarkFromRootsClosure(CMSCollector* collector,
6846 MemRegion span,
6847 CMSBitMap* bitMap, CMSMarkStack* markStack,
6848 CMSMarkStack* revisitStack,
6849 bool should_yield, bool verifying):
6850 _collector(collector),
6851 _span(span),
6852 _bitMap(bitMap),
6853 _mut(&collector->_modUnionTable),
6854 _markStack(markStack),
6855 _revisitStack(revisitStack),
6856 _yield(should_yield),
6857 _skipBits(0)
6858 {
6859 assert(_markStack->isEmpty(), "stack should be empty");
6860 _finger = _bitMap->startWord();
6861 _threshold = _finger;
6862 assert(_collector->_restart_addr == NULL, "Sanity check");
6863 assert(_span.contains(_finger), "Out of bounds _finger?");
6864 DEBUG_ONLY(_verifying = verifying;)
6865 }
6867 void MarkFromRootsClosure::reset(HeapWord* addr) {
6868 assert(_markStack->isEmpty(), "would cause duplicates on stack");
6869 assert(_span.contains(addr), "Out of bounds _finger?");
6870 _finger = addr;
6871 _threshold = (HeapWord*)round_to(
6872 (intptr_t)_finger, CardTableModRefBS::card_size);
6873 }
6875 // Should revisit to see if this should be restructured for
6876 // greater efficiency.
6877 void MarkFromRootsClosure::do_bit(size_t offset) {
6878 if (_skipBits > 0) {
6879 _skipBits--;
6880 return;
6881 }
6882 // convert offset into a HeapWord*
6883 HeapWord* addr = _bitMap->startWord() + offset;
6884 assert(addr >= _bitMap->startWord() && addr < _bitMap->endWord(),
6885 "address out of range");
6886 assert(_bitMap->isMarked(addr), "tautology");
6887 if (_bitMap->isMarked(addr+1)) {
6888 // this is an allocated but not yet initialized object
6889 assert(_skipBits == 0, "tautology");
6890 _skipBits = 2; // skip next two marked bits ("Printezis-marks")
6891 oop p = oop(addr);
6892 if (p->klass_or_null() == NULL || !p->is_parsable()) {
6893 DEBUG_ONLY(if (!_verifying) {)
6894 // We re-dirty the cards on which this object lies and increase
6895 // the _threshold so that we'll come back to scan this object
6896 // during the preclean or remark phase. (CMSCleanOnEnter)
6897 if (CMSCleanOnEnter) {
6898 size_t sz = _collector->block_size_using_printezis_bits(addr);
6899 HeapWord* end_card_addr = (HeapWord*)round_to(
6900 (intptr_t)(addr+sz), CardTableModRefBS::card_size);
6901 MemRegion redirty_range = MemRegion(addr, end_card_addr);
6902 assert(!redirty_range.is_empty(), "Arithmetical tautology");
6903 // Bump _threshold to end_card_addr; note that
6904 // _threshold cannot possibly exceed end_card_addr, anyhow.
6905 // This prevents future clearing of the card as the scan proceeds
6906 // to the right.
6907 assert(_threshold <= end_card_addr,
6908 "Because we are just scanning into this object");
6909 if (_threshold < end_card_addr) {
6910 _threshold = end_card_addr;
6911 }
6912 if (p->klass_or_null() != NULL) {
6913 // Redirty the range of cards...
6914 _mut->mark_range(redirty_range);
6915 } // ...else the setting of klass will dirty the card anyway.
6916 }
6917 DEBUG_ONLY(})
6918 return;
6919 }
6920 }
6921 scanOopsInOop(addr);
6922 }
6924 // We take a break if we've been at this for a while,
6925 // so as to avoid monopolizing the locks involved.
6926 void MarkFromRootsClosure::do_yield_work() {
6927 // First give up the locks, then yield, then re-lock
6928 // We should probably use a constructor/destructor idiom to
6929 // do this unlock/lock or modify the MutexUnlocker class to
6930 // serve our purpose. XXX
6931 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6932 "CMS thread should hold CMS token");
6933 assert_lock_strong(_bitMap->lock());
6934 _bitMap->lock()->unlock();
6935 ConcurrentMarkSweepThread::desynchronize(true);
6936 ConcurrentMarkSweepThread::acknowledge_yield_request();
6937 _collector->stopTimer();
6938 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
6939 if (PrintCMSStatistics != 0) {
6940 _collector->incrementYields();
6941 }
6942 _collector->icms_wait();
6944 // See the comment in coordinator_yield()
6945 for (unsigned i = 0; i < CMSYieldSleepCount &&
6946 ConcurrentMarkSweepThread::should_yield() &&
6947 !CMSCollector::foregroundGCIsActive(); ++i) {
6948 os::sleep(Thread::current(), 1, false);
6949 ConcurrentMarkSweepThread::acknowledge_yield_request();
6950 }
6952 ConcurrentMarkSweepThread::synchronize(true);
6953 _bitMap->lock()->lock_without_safepoint_check();
6954 _collector->startTimer();
6955 }
6957 void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
6958 assert(_bitMap->isMarked(ptr), "expected bit to be set");
6959 assert(_markStack->isEmpty(),
6960 "should drain stack to limit stack usage");
6961 // convert ptr to an oop preparatory to scanning
6962 oop obj = oop(ptr);
6963 // Ignore mark word in verification below, since we
6964 // may be running concurrent with mutators.
6965 assert(obj->is_oop(true), "should be an oop");
6966 assert(_finger <= ptr, "_finger runneth ahead");
6967 // advance the finger to right end of this object
6968 _finger = ptr + obj->size();
6969 assert(_finger > ptr, "we just incremented it above");
6970 // On large heaps, it may take us some time to get through
6971 // the marking phase (especially if running iCMS). During
6972 // this time it's possible that a lot of mutations have
6973 // accumulated in the card table and the mod union table --
6974 // these mutation records are redundant until we have
6975 // actually traced into the corresponding card.
6976 // Here, we check whether advancing the finger would make
6977 // us cross into a new card, and if so clear corresponding
6978 // cards in the MUT (preclean them in the card-table in the
6979 // future).
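// Editor's note (illustrative, hypothetical numbers): with 512-byte
// cards (64 heap words per card), if _threshold is at word offset 128
// and the finger advances to offset 300, the new _threshold becomes
// 320 (the next card boundary at or past the finger) and the MUT bits
// covering [128, 320) are cleared below.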
6981 DEBUG_ONLY(if (!_verifying) {)
6982 // The clean-on-enter optimization is disabled by default,
6983 // until we fix 6178663.
6984 if (CMSCleanOnEnter && (_finger > _threshold)) {
6985 // [_threshold, _finger) represents the interval
6986 // of cards to be cleared in MUT (or precleaned in card table).
6987 // The set of cards to be cleared is all those that overlap
6988 // with the interval [_threshold, _finger); note that
6989 // _threshold is always kept card-aligned but _finger isn't
6990 // always card-aligned.
6991 HeapWord* old_threshold = _threshold;
6992 assert(old_threshold == (HeapWord*)round_to(
6993 (intptr_t)old_threshold, CardTableModRefBS::card_size),
6994 "_threshold should always be card-aligned");
6995 _threshold = (HeapWord*)round_to(
6996 (intptr_t)_finger, CardTableModRefBS::card_size);
6997 MemRegion mr(old_threshold, _threshold);
6998 assert(!mr.is_empty(), "Control point invariant");
6999 assert(_span.contains(mr), "Should clear within span");
7000 // XXX When _finger crosses from old gen into perm gen
7001 // we may be doing unnecessary cleaning; do better in the
7002 // future by detecting that condition and clearing fewer
7003 // MUT/CT entries.
7004 _mut->clear_range(mr);
7005 }
7006 DEBUG_ONLY(})
7008 // Note: the finger doesn't advance while we drain
7009 // the stack below.
7010 PushOrMarkClosure pushOrMarkClosure(_collector,
7011 _span, _bitMap, _markStack,
7012 _revisitStack,
7013 _finger, this);
7014 bool res = _markStack->push(obj);
7015 assert(res, "Empty non-zero size stack should have space for single push");
7016 while (!_markStack->isEmpty()) {
7017 oop new_oop = _markStack->pop();
7018 // Skip verifying header mark word below because we are
7019 // running concurrent with mutators.
7020 assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
7021 // now scan this oop's oops
7022 new_oop->oop_iterate(&pushOrMarkClosure);
7023 do_yield_check();
7024 }
7025 assert(_markStack->isEmpty(), "tautology, emphasizing post-condition");
7026 }
7028 Par_MarkFromRootsClosure::Par_MarkFromRootsClosure(CMSConcMarkingTask* task,
7029 CMSCollector* collector, MemRegion span,
7030 CMSBitMap* bit_map,
7031 OopTaskQueue* work_queue,
7032 CMSMarkStack* overflow_stack,
7033 CMSMarkStack* revisit_stack,
7034 bool should_yield):
7035 _collector(collector),
7036 _whole_span(collector->_span),
7037 _span(span),
7038 _bit_map(bit_map),
7039 _mut(&collector->_modUnionTable),
7040 _work_queue(work_queue),
7041 _overflow_stack(overflow_stack),
7042 _revisit_stack(revisit_stack),
7043 _yield(should_yield),
7044 _skip_bits(0),
7045 _task(task)
7046 {
7047 assert(_work_queue->size() == 0, "work_queue should be empty");
7048 _finger = span.start();
7049 _threshold = _finger; // XXX Defer clear-on-enter optimization for now
7050 assert(_span.contains(_finger), "Out of bounds _finger?");
7051 }
7053 // Should revisit to see if this should be restructured for
7054 // greater efficiency.
7055 void Par_MarkFromRootsClosure::do_bit(size_t offset) {
7056 if (_skip_bits > 0) {
7057 _skip_bits--;
7058 return;
7059 }
7060 // convert offset into a HeapWord*
7061 HeapWord* addr = _bit_map->startWord() + offset;
7062 assert(addr >= _bit_map->startWord() && addr < _bit_map->endWord(),
7063 "address out of range");
7064 assert(_bit_map->isMarked(addr), "tautology");
7065 if (_bit_map->isMarked(addr+1)) {
7066 // this is an allocated object that might not yet be initialized
7067 assert(_skip_bits == 0, "tautology");
7068 _skip_bits = 2; // skip next two marked bits ("Printezis-marks")
7069 oop p = oop(addr);
7070 if (p->klass_or_null() == NULL || !p->is_parsable()) {
7071 // in the case of Clean-on-Enter optimization, redirty card
7072 // and avoid clearing card by increasing the threshold.
7073 return;
7074 }
7075 }
7076 scan_oops_in_oop(addr);
7077 }
7079 void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
7080 assert(_bit_map->isMarked(ptr), "expected bit to be set");
7081 // Should we assert that our work queue is empty or
7082 // below some drain limit?
7083 assert(_work_queue->size() == 0,
7084 "should drain stack to limit stack usage");
7085 // convert ptr to an oop preparatory to scanning
7086 oop obj = oop(ptr);
7087 // Ignore mark word in verification below, since we
7088 // may be running concurrent with mutators.
7089 assert(obj->is_oop(true), "should be an oop");
7090 assert(_finger <= ptr, "_finger runneth ahead");
7091 // advance the finger to right end of this object
7092 _finger = ptr + obj->size();
7093 assert(_finger > ptr, "we just incremented it above");
7094 // On large heaps, it may take us some time to get through
7095 // the marking phase (especially if running iCMS). During
7096 // this time it's possible that a lot of mutations have
7097 // accumulated in the card table and the mod union table --
7098 // these mutation records are redundant until we have
7099 // actually traced into the corresponding card.
7100 // Here, we check whether advancing the finger would make
7101 // us cross into a new card, and if so clear corresponding
7102 // cards in the MUT (preclean them in the card-table in the
7103 // future).
7105 // The clean-on-enter optimization is disabled by default,
7106 // until we fix 6178663.
7107 if (CMSCleanOnEnter && (_finger > _threshold)) {
7108 // [_threshold, _finger) represents the interval
7109 // of cards to be cleared in MUT (or precleaned in card table).
7110 // The set of cards to be cleared is all those that overlap
7111 // with the interval [_threshold, _finger); note that
7112 // _threshold is always kept card-aligned but _finger isn't
7113 // always card-aligned.
7114 HeapWord* old_threshold = _threshold;
7115 assert(old_threshold == (HeapWord*)round_to(
7116 (intptr_t)old_threshold, CardTableModRefBS::card_size),
7117 "_threshold should always be card-aligned");
7118 _threshold = (HeapWord*)round_to(
7119 (intptr_t)_finger, CardTableModRefBS::card_size);
7120 MemRegion mr(old_threshold, _threshold);
7121 assert(!mr.is_empty(), "Control point invariant");
7122 assert(_span.contains(mr), "Should clear within span"); // _whole_span ??
7123 // XXX When _finger crosses from old gen into perm gen
7124 // we may be doing unnecessary cleaning; do better in the
7125 // future by detecting that condition and clearing fewer
7126 // MUT/CT entries.
7127 _mut->clear_range(mr);
7128 }
7130 // Note: the local finger doesn't advance while we drain
7131 // the stack below, but the global finger sure can and will.
7132 HeapWord** gfa = _task->global_finger_addr();
7133 Par_PushOrMarkClosure pushOrMarkClosure(_collector,
7134 _span, _bit_map,
7135 _work_queue,
7136 _overflow_stack,
7137 _revisit_stack,
7138 _finger,
7139 gfa, this);
7140 bool res = _work_queue->push(obj); // overflow could occur here
7141 assert(res, "Will hold once we use workqueues");
7142 while (true) {
7143 oop new_oop;
7144 if (!_work_queue->pop_local(new_oop)) {
7145 // We emptied our work_queue; check if there's stuff that can
7146 // be gotten from the overflow stack.
7147 if (CMSConcMarkingTask::get_work_from_overflow_stack(
7148 _overflow_stack, _work_queue)) {
7149 do_yield_check();
7150 continue;
7151 } else { // done
7152 break;
7153 }
7154 }
7155 // Skip verifying header mark word below because we are
7156 // running concurrent with mutators.
7157 assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
7158 // now scan this oop's oops
7159 new_oop->oop_iterate(&pushOrMarkClosure);
7160 do_yield_check();
7161 }
7162 assert(_work_queue->size() == 0, "tautology, emphasizing post-condition");
7163 }
7165 // Yield in response to a request from VM Thread or
7166 // from mutators.
7167 void Par_MarkFromRootsClosure::do_yield_work() {
7168 assert(_task != NULL, "sanity");
7169 _task->yield();
7170 }
7172 // A variant of the above used for verifying CMS marking work.
7173 MarkFromRootsVerifyClosure::MarkFromRootsVerifyClosure(CMSCollector* collector,
7174 MemRegion span,
7175 CMSBitMap* verification_bm, CMSBitMap* cms_bm,
7176 CMSMarkStack* mark_stack):
7177 _collector(collector),
7178 _span(span),
7179 _verification_bm(verification_bm),
7180 _cms_bm(cms_bm),
7181 _mark_stack(mark_stack),
7182 _pam_verify_closure(collector, span, verification_bm, cms_bm,
7183 mark_stack)
7184 {
7185 assert(_mark_stack->isEmpty(), "stack should be empty");
7186 _finger = _verification_bm->startWord();
7187 assert(_collector->_restart_addr == NULL, "Sanity check");
7188 assert(_span.contains(_finger), "Out of bounds _finger?");
7189 }
7191 void MarkFromRootsVerifyClosure::reset(HeapWord* addr) {
7192 assert(_mark_stack->isEmpty(), "would cause duplicates on stack");
7193 assert(_span.contains(addr), "Out of bounds _finger?");
7194 _finger = addr;
7195 }
7197 // Should revisit to see if this should be restructured for
7198 // greater efficiency.
7199 void MarkFromRootsVerifyClosure::do_bit(size_t offset) {
7200 // convert offset into a HeapWord*
7201 HeapWord* addr = _verification_bm->startWord() + offset;
7202 assert(addr >= _verification_bm->startWord() && addr < _verification_bm->endWord(),
7203 "address out of range");
7204 assert(_verification_bm->isMarked(addr), "tautology");
7205 assert(_cms_bm->isMarked(addr), "tautology");
7207 assert(_mark_stack->isEmpty(),
7208 "should drain stack to limit stack usage");
7209 // convert addr to an oop preparatory to scanning
7210 oop obj = oop(addr);
7211 assert(obj->is_oop(), "should be an oop");
7212 assert(_finger <= addr, "_finger runneth ahead");
7213 // advance the finger to right end of this object
7214 _finger = addr + obj->size();
7215 assert(_finger > addr, "we just incremented it above");
7216 // Note: the finger doesn't advance while we drain
7217 // the stack below.
7218 bool res = _mark_stack->push(obj);
7219 assert(res, "Empty non-zero size stack should have space for single push");
7220 while (!_mark_stack->isEmpty()) {
7221 oop new_oop = _mark_stack->pop();
7222 assert(new_oop->is_oop(), "Oops! expected to pop an oop");
7223 // now scan this oop's oops
7224 new_oop->oop_iterate(&_pam_verify_closure);
7225 }
7226 assert(_mark_stack->isEmpty(), "tautology, emphasizing post-condition");
7227 }
7229 PushAndMarkVerifyClosure::PushAndMarkVerifyClosure(
7230 CMSCollector* collector, MemRegion span,
7231 CMSBitMap* verification_bm, CMSBitMap* cms_bm,
7232 CMSMarkStack* mark_stack):
7233 OopClosure(collector->ref_processor()),
7234 _collector(collector),
7235 _span(span),
7236 _verification_bm(verification_bm),
7237 _cms_bm(cms_bm),
7238 _mark_stack(mark_stack)
7239 { }
7241 void PushAndMarkVerifyClosure::do_oop(oop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
7242 void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
7244 // Upon stack overflow, we discard (part of) the stack,
7245 // remembering the least address amongst those discarded
7246 // in CMSCollector's _restart_addr.
7247 void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
7248 // Remember the least grey address discarded
7249 HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost);
7250 _collector->lower_restart_addr(ra);
7251 _mark_stack->reset(); // discard stack contents
7252 _mark_stack->expand(); // expand the stack if possible
7253 }
7255 void PushAndMarkVerifyClosure::do_oop(oop obj) {
7256 assert(obj->is_oop_or_null(), "expected an oop or NULL");
7257 HeapWord* addr = (HeapWord*)obj;
7258 if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
7259 // Oop lies in _span and isn't yet grey or black
7260 _verification_bm->mark(addr); // now grey
7261 if (!_cms_bm->isMarked(addr)) {
7262 oop(addr)->print();
7263 gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)",
7264 addr);
7265 fatal("... aborting");
7266 }
7268 if (!_mark_stack->push(obj)) { // stack overflow
7269 if (PrintCMSStatistics != 0) {
7270 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7271 SIZE_FORMAT, _mark_stack->capacity());
7272 }
7273 assert(_mark_stack->isFull(), "Else push should have succeeded");
7274 handle_stack_overflow(addr);
7275 }
7276 // anything including and to the right of _finger
7277 // will be scanned as we iterate over the remainder of the
7278 // bit map
7279 }
7280 }
7282 PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
7283 MemRegion span,
7284 CMSBitMap* bitMap, CMSMarkStack* markStack,
7285 CMSMarkStack* revisitStack,
7286 HeapWord* finger, MarkFromRootsClosure* parent) :
7287 OopClosure(collector->ref_processor()),
7288 _collector(collector),
7289 _span(span),
7290 _bitMap(bitMap),
7291 _markStack(markStack),
7292 _revisitStack(revisitStack),
7293 _finger(finger),
7294 _parent(parent),
7295 _should_remember_klasses(collector->should_unload_classes())
7296 { }
7298 Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
7299 MemRegion span,
7300 CMSBitMap* bit_map,
7301 OopTaskQueue* work_queue,
7302 CMSMarkStack* overflow_stack,
7303 CMSMarkStack* revisit_stack,
7304 HeapWord* finger,
7305 HeapWord** global_finger_addr,
7306 Par_MarkFromRootsClosure* parent) :
7307 OopClosure(collector->ref_processor()),
7308 _collector(collector),
7309 _whole_span(collector->_span),
7310 _span(span),
7311 _bit_map(bit_map),
7312 _work_queue(work_queue),
7313 _overflow_stack(overflow_stack),
7314 _revisit_stack(revisit_stack),
7315 _finger(finger),
7316 _global_finger_addr(global_finger_addr),
7317 _parent(parent),
7318 _should_remember_klasses(collector->should_unload_classes())
7319 { }
7321 // Assumes thread-safe access by callers, who are
7322 // responsible for mutual exclusion.
7323 void CMSCollector::lower_restart_addr(HeapWord* low) {
7324 assert(_span.contains(low), "Out of bounds addr");
7325 if (_restart_addr == NULL) {
7326 _restart_addr = low;
7327 } else {
7328 _restart_addr = MIN2(_restart_addr, low);
7329 }
7330 }
7332 // Upon stack overflow, we discard (part of) the stack,
7333 // remembering the least address amongst those discarded
7334 // in CMSCollector's _restart_addr.
7335 void PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
7336 // Remember the least grey address discarded
7337 HeapWord* ra = (HeapWord*)_markStack->least_value(lost);
7338 _collector->lower_restart_addr(ra);
7339 _markStack->reset(); // discard stack contents
7340 _markStack->expand(); // expand the stack if possible
7341 }
7343 // Upon stack overflow, we discard (part of) the stack,
7344 // remembering the least address amongst those discarded
7345 // in CMSCollector's _restart_addr.
7346 void Par_PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
7347 // We need to do this under a mutex to prevent other
7348 // workers from interfering with the work done below.
7349 MutexLockerEx ml(_overflow_stack->par_lock(),
7350 Mutex::_no_safepoint_check_flag);
7351 // Remember the least grey address discarded
7352 HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
7353 _collector->lower_restart_addr(ra);
7354 _overflow_stack->reset(); // discard stack contents
7355 _overflow_stack->expand(); // expand the stack if possible
7356 }
7358 void PushOrMarkClosure::do_oop(oop obj) {
7359 // Ignore the mark word because we are running concurrently with mutators.
7360 assert(obj->is_oop_or_null(true), "expected an oop or NULL");
7361 HeapWord* addr = (HeapWord*)obj;
7362 if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
7363 // Oop lies in _span and isn't yet grey or black
7364 _bitMap->mark(addr); // now grey
7365 if (addr < _finger) {
7366 // the bit map iteration has already either passed, or
7367 // sampled, this bit in the bit map; we'll need to
7368 // use the marking stack to scan this oop's oops.
7369 bool simulate_overflow = false;
7370 NOT_PRODUCT(
7371 if (CMSMarkStackOverflowALot &&
7372 _collector->simulate_overflow()) {
7373 // simulate a stack overflow
7374 simulate_overflow = true;
7375 }
7376 )
7377 if (simulate_overflow || !_markStack->push(obj)) { // stack overflow
7378 if (PrintCMSStatistics != 0) {
7379 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7380 SIZE_FORMAT, _markStack->capacity());
7381 }
7382 assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded");
7383 handle_stack_overflow(addr);
7384 }
7385 }
7386 // anything including and to the right of _finger
7387 // will be scanned as we iterate over the remainder of the
7388 // bit map
7389 do_yield_check();
7390 }
7391 }
7393 void PushOrMarkClosure::do_oop(oop* p) { PushOrMarkClosure::do_oop_work(p); }
7394 void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
7396 void Par_PushOrMarkClosure::do_oop(oop obj) {
7397 // Ignore the mark word because we are running concurrently with mutators.
7398 assert(obj->is_oop_or_null(true), "expected an oop or NULL");
7399 HeapWord* addr = (HeapWord*)obj;
7400 if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
7401 // Oop lies in _whole_span and isn't yet grey or black
7402 // We read the global_finger (volatile read) strictly after marking the oop
7403 bool res = _bit_map->par_mark(addr); // now grey
7404 volatile HeapWord** gfa = (volatile HeapWord**)_global_finger_addr;
7405 // Should we push this marked oop on our stack?
7406 // -- if someone else marked it, nothing to do
7407 // -- if target oop is above global finger nothing to do
7408 // -- if target oop is in chunk and above local finger
7409 // then nothing to do
7410 // -- else push on work queue
7411 if ( !res // someone else marked it, they will deal with it
7412 || (addr >= *gfa) // will be scanned in a later task
7413 || (_span.contains(addr) && addr >= _finger)) { // later in this chunk
7414 return;
7415 }
7416 // the bit map iteration has already either passed, or
7417 // sampled, this bit in the bit map; we'll need to
7418 // use the marking stack to scan this oop's oops.
7419 bool simulate_overflow = false;
7420 NOT_PRODUCT(
7421 if (CMSMarkStackOverflowALot &&
7422 _collector->simulate_overflow()) {
7423 // simulate a stack overflow
7424 simulate_overflow = true;
7425 }
7426 )
7427 if (simulate_overflow ||
7428 !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
7429 // stack overflow
7430 if (PrintCMSStatistics != 0) {
7431 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7432 SIZE_FORMAT, _overflow_stack->capacity());
7433 }
7434 // We cannot assert that the overflow stack is full because
7435 // it may have been emptied since.
7436 assert(simulate_overflow ||
7437 _work_queue->size() == _work_queue->max_elems(),
7438 "Else push should have succeeded");
7439 handle_stack_overflow(addr);
7440 }
7441 do_yield_check();
7442 }
7443 }
7445 void Par_PushOrMarkClosure::do_oop(oop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
7446 void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
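// An illustrative reading of the push decision in
// Par_PushOrMarkClosure::do_oop() above, with hypothetical addresses
// (not from the source): with the global finger at 0x1000 and this
// worker scanning chunk [0x0700, 0x0900) with its local finger at
// 0x0800, a freshly marked oop at 0x0750 is pushed (below both
// fingers); one at 0x0850 is left for later in this chunk (at or above
// the local finger); and one at 0x1200 is left for a later task (at or
// above the global finger).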
7448 PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
7449 MemRegion span,
7450 ReferenceProcessor* rp,
7451 CMSBitMap* bit_map,
7452 CMSBitMap* mod_union_table,
7453 CMSMarkStack* mark_stack,
7454 CMSMarkStack* revisit_stack,
7455 bool concurrent_precleaning):
7456 OopClosure(rp),
7457 _collector(collector),
7458 _span(span),
7459 _bit_map(bit_map),
7460 _mod_union_table(mod_union_table),
7461 _mark_stack(mark_stack),
7462 _revisit_stack(revisit_stack),
7463 _concurrent_precleaning(concurrent_precleaning),
7464 _should_remember_klasses(collector->should_unload_classes())
7465 {
7466 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7467 }
7469 // Grey object rescan during pre-cleaning and second checkpoint phases --
7470 // the non-parallel version (the parallel version appears further below).
7471 void PushAndMarkClosure::do_oop(oop obj) {
7472 // If _concurrent_precleaning, ignore mark word verification
7473 assert(obj->is_oop_or_null(_concurrent_precleaning),
7474 "expected an oop or NULL");
7475 HeapWord* addr = (HeapWord*)obj;
7476 // Check if oop points into the CMS generation
7477 // and is not marked
7478 if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
7479 // a white object ...
7480 _bit_map->mark(addr); // ... now grey
7481 // push on the marking stack (grey set)
7482 bool simulate_overflow = false;
7483 NOT_PRODUCT(
7484 if (CMSMarkStackOverflowALot &&
7485 _collector->simulate_overflow()) {
7486 // simulate a stack overflow
7487 simulate_overflow = true;
7488 }
7489 )
7490 if (simulate_overflow || !_mark_stack->push(obj)) {
7491 if (_concurrent_precleaning) {
7492 // During precleaning we can just dirty the appropriate card(s)
7493 // in the mod union table, thus ensuring that the object remains
7494 // in the grey set, and continue. In the case of object arrays
7495 // we need to dirty all of the cards that the object spans,
7496 // since the rescan of object arrays will be limited to the
7497 // dirty cards.
7498 // Note that no one can be interfering with us in this action
7499 // of dirtying the mod union table, so no locking or atomics
7500 // are required.
7501 if (obj->is_objArray()) {
7502 size_t sz = obj->size();
7503 HeapWord* end_card_addr = (HeapWord*)round_to(
7504 (intptr_t)(addr+sz), CardTableModRefBS::card_size);
7505 MemRegion redirty_range = MemRegion(addr, end_card_addr);
7506 assert(!redirty_range.is_empty(), "Arithmetical tautology");
7507 _mod_union_table->mark_range(redirty_range);
7508 } else {
7509 _mod_union_table->mark(addr);
7510 }
7511 _collector->_ser_pmc_preclean_ovflw++;
7512 } else {
7513 // During the remark phase, we need to remember this oop
7514 // in the overflow list.
7515 _collector->push_on_overflow_list(obj);
7516 _collector->_ser_pmc_remark_ovflw++;
7517 }
7518 }
7519 }
7520 }
7522 Par_PushAndMarkClosure::Par_PushAndMarkClosure(CMSCollector* collector,
7523 MemRegion span,
7524 ReferenceProcessor* rp,
7525 CMSBitMap* bit_map,
7526 OopTaskQueue* work_queue,
7527 CMSMarkStack* revisit_stack):
7528 OopClosure(rp),
7529 _collector(collector),
7530 _span(span),
7531 _bit_map(bit_map),
7532 _work_queue(work_queue),
7533 _revisit_stack(revisit_stack),
7534 _should_remember_klasses(collector->should_unload_classes())
7535 {
7536 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7537 }
7539 void PushAndMarkClosure::do_oop(oop* p) { PushAndMarkClosure::do_oop_work(p); }
7540 void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
7542 // Grey object rescan during second checkpoint phase --
7543 // the parallel version.
7544 void Par_PushAndMarkClosure::do_oop(oop obj) {
7545 // In the assert below, we ignore the mark word because
7546 // this oop may point to an already visited object that is
7547 // on the overflow stack (in which case the mark word has
7548 // been hijacked for chaining into the overflow stack --
7549 // if this is the last object in the overflow stack then
7550 // its mark word will be NULL). Because this object may
7551 // have been subsequently popped off the global overflow
7552 // stack, and the mark word possibly restored to the prototypical
7553 // value, by the time we get to examine this failing assert in
7554 // the debugger, is_oop_or_null(false) may have started
7555 // to hold.
7556 assert(obj->is_oop_or_null(true),
7557 "expected an oop or NULL");
7558 HeapWord* addr = (HeapWord*)obj;
7559 // Check if oop points into the CMS generation
7560 // and is not marked
7561 if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
7562 // a white object ...
7563 // If we manage to "claim" the object, by being the
7564 // first thread to mark it, then we push it on our
7565 // marking stack
7566 if (_bit_map->par_mark(addr)) { // ... now grey
7567 // push on work queue (grey set)
7568 bool simulate_overflow = false;
7569 NOT_PRODUCT(
7570 if (CMSMarkStackOverflowALot &&
7571 _collector->par_simulate_overflow()) {
7572 // simulate a stack overflow
7573 simulate_overflow = true;
7574 }
7575 )
7576 if (simulate_overflow || !_work_queue->push(obj)) {
7577 _collector->par_push_on_overflow_list(obj);
7578 _collector->_par_pmc_remark_ovflw++; // imprecise OK: no need to CAS
7579 }
7580 } // Else, some other thread got there first
7581 }
7582 }
7584 void Par_PushAndMarkClosure::do_oop(oop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
7585 void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
7587 void PushAndMarkClosure::remember_klass(Klass* k) {
7588 if (!_revisit_stack->push(oop(k))) {
7589 fatal("Revisit stack overflowed in PushAndMarkClosure");
7590 }
7591 }
7593 void Par_PushAndMarkClosure::remember_klass(Klass* k) {
7594 if (!_revisit_stack->par_push(oop(k))) {
7595 fatal("Revist stack overflowed in Par_PushAndMarkClosure");
7596 }
7597 }
7599 void CMSPrecleanRefsYieldClosure::do_yield_work() {
7600 Mutex* bml = _collector->bitMapLock();
7601 assert_lock_strong(bml);
7602 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7603 "CMS thread should hold CMS token");
7605 bml->unlock();
7606 ConcurrentMarkSweepThread::desynchronize(true);
7608 ConcurrentMarkSweepThread::acknowledge_yield_request();
7610 _collector->stopTimer();
7611 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
7612 if (PrintCMSStatistics != 0) {
7613 _collector->incrementYields();
7614 }
7615 _collector->icms_wait();
7617 // See the comment in coordinator_yield()
7618 for (unsigned i = 0; i < CMSYieldSleepCount &&
7619 ConcurrentMarkSweepThread::should_yield() &&
7620 !CMSCollector::foregroundGCIsActive(); ++i) {
7621 os::sleep(Thread::current(), 1, false);
7622 ConcurrentMarkSweepThread::acknowledge_yield_request();
7623 }
7625 ConcurrentMarkSweepThread::synchronize(true);
7626 bml->lock();
7628 _collector->startTimer();
7629 }
7631 bool CMSPrecleanRefsYieldClosure::should_return() {
7632 if (ConcurrentMarkSweepThread::should_yield()) {
7633 do_yield_work();
7634 }
7635 return _collector->foregroundGCIsActive();
7636 }
7638 void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) {
7639 assert(((size_t)mr.start()) % CardTableModRefBS::card_size == 0,
7640 "mr should be aligned to start at a card boundary");
7641 // We'd like to assert:
7642 // assert(mr.word_size()%CardTableModRefBS::card_size_in_words == 0,
7643 // "mr should be a range of cards");
7644 // However, that would be too strong in one case -- the last
7645 // partition ends at _unallocated_block which, in general, can be
7646 // an arbitrary boundary, not necessarily card aligned.
7647 if (PrintCMSStatistics != 0) {
7648 _num_dirty_cards +=
7649 mr.word_size()/CardTableModRefBS::card_size_in_words;
7650 }
7651 _space->object_iterate_mem(mr, &_scan_cl);
7652 }
7654 SweepClosure::SweepClosure(CMSCollector* collector,
7655 ConcurrentMarkSweepGeneration* g,
7656 CMSBitMap* bitMap, bool should_yield) :
7657 _collector(collector),
7658 _g(g),
7659 _sp(g->cmsSpace()),
7660 _limit(_sp->sweep_limit()),
7661 _freelistLock(_sp->freelistLock()),
7662 _bitMap(bitMap),
7663 _yield(should_yield),
7664 _inFreeRange(false), // No free range at beginning of sweep
7665 _freeRangeInFreeLists(false), // No free range at beginning of sweep
7666 _lastFreeRangeCoalesced(false),
7667 _freeFinger(g->used_region().start())
7668 {
7669 NOT_PRODUCT(
7670 _numObjectsFreed = 0;
7671 _numWordsFreed = 0;
7672 _numObjectsLive = 0;
7673 _numWordsLive = 0;
7674 _numObjectsAlreadyFree = 0;
7675 _numWordsAlreadyFree = 0;
7676 _last_fc = NULL;
7678 _sp->initializeIndexedFreeListArrayReturnedBytes();
7679 _sp->dictionary()->initializeDictReturnedBytes();
7680 )
7681 assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7682 "sweep _limit out of bounds");
7683 if (CMSTraceSweeper) {
7684 gclog_or_tty->print("\n====================\nStarting new sweep\n");
7685 }
7686 }
7688 // We need this destructor to reclaim any free space at the end
7689 // of the space, which do_blk_careful below may not have added back to
7690 // the free lists. [basically dealing with the "fringe effect"]
7691 SweepClosure::~SweepClosure() {
7692 assert_lock_strong(_freelistLock);
7693 // this should be treated as the end of a free run if any
7694 // The current free range should be returned to the free lists
7695 // as one coalesced chunk.
7696 if (inFreeRange()) {
7697 flushCurFreeChunk(freeFinger(),
7698 pointer_delta(_limit, freeFinger()));
7699 assert(freeFinger() < _limit, "the finger pointeth off base");
7700 if (CMSTraceSweeper) {
7701 gclog_or_tty->print("destructor:");
7702 gclog_or_tty->print("Sweep:put_free_blk 0x%x ("SIZE_FORMAT") "
7703 "[coalesced:"SIZE_FORMAT"]\n",
7704 freeFinger(), pointer_delta(_limit, freeFinger()),
7705 lastFreeRangeCoalesced());
7706 }
7707 }
7708 NOT_PRODUCT(
7709 if (Verbose && PrintGC) {
7710 gclog_or_tty->print("Collected "SIZE_FORMAT" objects, "
7711 SIZE_FORMAT " bytes",
7712 _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
7713 gclog_or_tty->print_cr("\nLive "SIZE_FORMAT" objects, "
7714 SIZE_FORMAT" bytes "
7715 "Already free "SIZE_FORMAT" objects, "SIZE_FORMAT" bytes",
7716 _numObjectsLive, _numWordsLive*sizeof(HeapWord),
7717 _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
7718 size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree) *
7719 sizeof(HeapWord);
7720 gclog_or_tty->print_cr("Total sweep: "SIZE_FORMAT" bytes", totalBytes);
7722 if (PrintCMSStatistics && CMSVerifyReturnedBytes) {
7723 size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
7724 size_t dictReturnedBytes = _sp->dictionary()->sumDictReturnedBytes();
7725 size_t returnedBytes = indexListReturnedBytes + dictReturnedBytes;
7726 gclog_or_tty->print("Returned "SIZE_FORMAT" bytes", returnedBytes);
7727 gclog_or_tty->print(" Indexed List Returned "SIZE_FORMAT" bytes",
7728 indexListReturnedBytes);
7729 gclog_or_tty->print_cr(" Dictionary Returned "SIZE_FORMAT" bytes",
7730 dictReturnedBytes);
7731 }
7732 }
7733 )
7734 // Now, in debug mode, just null out the sweep_limit
7735 NOT_PRODUCT(_sp->clear_sweep_limit();)
7736 if (CMSTraceSweeper) {
7737 gclog_or_tty->print("end of sweep\n================\n");
7738 }
7739 }
7741 void SweepClosure::initialize_free_range(HeapWord* freeFinger,
7742 bool freeRangeInFreeLists) {
7743 if (CMSTraceSweeper) {
7744 gclog_or_tty->print("---- Start free range 0x%x with free block [%d] (%d)\n",
7745 freeFinger, _sp->block_size(freeFinger),
7746 freeRangeInFreeLists);
7747 }
7748 assert(!inFreeRange(), "Trampling existing free range");
7749 set_inFreeRange(true);
7750 set_lastFreeRangeCoalesced(false);
7752 set_freeFinger(freeFinger);
7753 set_freeRangeInFreeLists(freeRangeInFreeLists);
7754 if (CMSTestInFreeList) {
7755 if (freeRangeInFreeLists) {
7756 FreeChunk* fc = (FreeChunk*) freeFinger;
7757 assert(fc->isFree(), "A chunk on the free list should be free.");
7758 assert(fc->size() > 0, "Free range should have a size");
7759 assert(_sp->verifyChunkInFreeLists(fc), "Chunk is not in free lists");
7760 }
7761 }
7762 }
7764 // Note that the sweeper runs concurrently with mutators. Thus,
7765 // it is possible for direct allocation in this generation to happen
7766 // in the middle of the sweep. Note that the sweeper also coalesces
7767 // contiguous free blocks. Thus, unless the sweeper and the allocator
7768 // synchronize appropriately, freshly allocated blocks may get swept up.
7769 // This is accomplished by the sweeper locking the free lists while
7770 // it is sweeping. Thus blocks that are determined to be free are
7771 // indeed free. There is however one additional complication:
7772 // blocks that have been allocated since the final checkpoint and
7773 // mark, will not have been marked and so would be treated as
7774 // unreachable and swept up. To prevent this, the allocator marks
7775 // the bit map when allocating during the sweep phase. This leads,
7776 // however, to a further complication -- objects may have been allocated
7777 // but not yet initialized -- in the sense that the header isn't yet
7778 // installed. The sweeper cannot then determine the size of the block
7779 // in order to skip over it. To deal with this case, we use a technique
7780 // (due to Printezis) to encode such uninitialized block sizes in the
7781 // bit map. Since the bit map uses a bit for every HeapWord, but the
7782 // CMS generation has a minimum object size of 3 HeapWords, it follows
7783 // that "normal marks" won't be adjacent in the bit map (there will
7784 // always be at least two 0 bits between successive 1 bits). We make use
7785 // of these "unused" bits to represent uninitialized blocks -- the bit
7786 // corresponding to the start of the uninitialized object and the next
7787 // bit are both set. Finally, a 1 bit marks the end of the object that
7788 // started with the two consecutive 1 bits to indicate its potentially
7789 // uninitialized state.
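//
// An illustrative decoding sketch (comments only, not collector code),
// mirroring the logic of doLiveChunk() below; it assumes a bit map "bm"
// with one bit per HeapWord and the helpers already used in this file:
//
//   if (bm->isMarked(addr + 1)) {
//     // [addr, addr+1] both set: a Printezis-marked, possibly
//     // uninitialized block whose last word carries the closing bit.
//     HeapWord* last = bm->getNextMarkedWordAddress(addr + 2);
//     size_t size = pointer_delta(last + 1, addr);   // block is [addr, last]
//   } else {
//     // a normal mark: the header is installed, so ask the object
//     size_t size = oop(addr)->size();
//   }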
7791 size_t SweepClosure::do_blk_careful(HeapWord* addr) {
7792 FreeChunk* fc = (FreeChunk*)addr;
7793 size_t res;
7795 // check if we are done sweeping
7796 if (addr == _limit) { // we have swept up to the limit, do nothing more
7797 assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7798 "sweep _limit out of bounds");
7799 // help the closure application finish
7800 return pointer_delta(_sp->end(), _limit);
7801 }
7802 assert(addr <= _limit, "sweep invariant");
7804 // check if we should yield
7805 do_yield_check(addr);
7806 if (fc->isFree()) {
7807 // Chunk that is already free
7808 res = fc->size();
7809 doAlreadyFreeChunk(fc);
7810 debug_only(_sp->verifyFreeLists());
7811 assert(res == fc->size(), "Don't expect the size to change");
7812 NOT_PRODUCT(
7813 _numObjectsAlreadyFree++;
7814 _numWordsAlreadyFree += res;
7815 )
7816 NOT_PRODUCT(_last_fc = fc;)
7817 } else if (!_bitMap->isMarked(addr)) {
7818 // Chunk is fresh garbage
7819 res = doGarbageChunk(fc);
7820 debug_only(_sp->verifyFreeLists());
7821 NOT_PRODUCT(
7822 _numObjectsFreed++;
7823 _numWordsFreed += res;
7824 )
7825 } else {
7826 // Chunk that is alive.
7827 res = doLiveChunk(fc);
7828 debug_only(_sp->verifyFreeLists());
7829 NOT_PRODUCT(
7830 _numObjectsLive++;
7831 _numWordsLive += res;
7832 )
7833 }
7834 return res;
7835 }
7837 // For the smart allocation, record following
7838 // split deaths - a free chunk is removed from its free list because
7839 // it is being split into two or more chunks.
7840 // split birth - a free chunk is being added to its free list because
7841 // a larger free chunk has been split and resulted in this free chunk.
7842 // coal death - a free chunk is being removed from its free list because
7843 // it is being coalesced into a large free chunk.
7844 // coal birth - a free chunk is being added to its free list because
7845 // it was created when two or more free chunks were coalesced into
7846 // this free chunk.
7847 //
7848 // These statistics are used to determine the desired number of free
7849 // chunks of a given size. The desired number is chosen to be relative
7850 // to the end of a CMS sweep. The desired number at the end of a sweep
7851 // is the
7852 // count-at-end-of-previous-sweep (an amount that was enough)
7853 // - count-at-beginning-of-current-sweep (the excess)
7854 // + split-births (gains in this size during interval)
7855 // - split-deaths (demands on this size during interval)
7856 // where the interval is from the end of one sweep to the end of the
7857 // next.
7858 //
7859 // When sweeping the sweeper maintains an accumulated chunk which is
7860 // the chunk that is made up of chunks that have been coalesced. That
7861 // will be termed the left-hand chunk. A new chunk of garbage that
7862 // is being considered for coalescing will be referred to as the
7863 // right-hand chunk.
7864 //
7865 // When making a decision on whether to coalesce a right-hand chunk with
7866 // the current left-hand chunk, the current count vs. the desired count
7867 // of the left-hand chunk is considered. Also if the right-hand chunk
7868 // is near the large chunk at the end of the heap (see
7869 // ConcurrentMarkSweepGeneration::isNearLargestChunk()), then the
7870 // left-hand chunk is coalesced.
7871 //
7872 // When making a decision about whether to split a chunk, the desired count
7873 // vs. the current count of the candidate to be split is also considered.
7874 // If the candidate is underpopulated (currently fewer chunks than desired)
7875 // a chunk of an overpopulated (currently more chunks than desired) size may
7876 // be chosen. The "hint" associated with a free list, if non-null, points
7877 // to a free list which may be overpopulated.
7878 //
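// An illustrative calculation with made-up numbers (not from any
// measurement): suppose chunks of a given size ended the previous sweep
// with a count of 100 (enough), begin the current sweep with 40 left
// over (the excess), and recorded 25 split-births and 10 split-deaths
// over the interval; the desired count at the end of this sweep would
// then be 100 - 40 + 25 - 10 = 75.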
7880 void SweepClosure::doAlreadyFreeChunk(FreeChunk* fc) {
7881 size_t size = fc->size();
7882 // Chunks that cannot be coalesced are not in the
7883 // free lists.
7884 if (CMSTestInFreeList && !fc->cantCoalesce()) {
7885 assert(_sp->verifyChunkInFreeLists(fc),
7886 "free chunk should be in free lists");
7887 }
7888 // a chunk that is already free, should not have been
7889 // marked in the bit map
7890 HeapWord* addr = (HeapWord*) fc;
7891 assert(!_bitMap->isMarked(addr), "free chunk should be unmarked");
7892 // Verify that the bit map has no bits marked between
7893 // addr and purported end of this block.
7894 _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
7896 // Some chunks cannot be coalesced under any circumstances.
7897 // See the definition of cantCoalesce().
7898 if (!fc->cantCoalesce()) {
7899 // This chunk can potentially be coalesced.
7900 if (_sp->adaptive_freelists()) {
7901 // All the work is done in doPostIsFreeOrGarbageChunk().
7902 doPostIsFreeOrGarbageChunk(fc, size);
7903 } else { // Not adaptive free lists
7904 // this is a free chunk that can potentially be coalesced by the sweeper;
7905 if (!inFreeRange()) {
7906 // if the next chunk is a free block that can't be coalesced,
7907 // it doesn't make sense to remove this chunk from the free lists
7908 FreeChunk* nextChunk = (FreeChunk*)(addr + size);
7909 assert((HeapWord*)nextChunk <= _limit, "sweep invariant");
7910 if ((HeapWord*)nextChunk < _limit && // there's a next chunk...
7911 nextChunk->isFree() && // which is free...
7912 nextChunk->cantCoalesce()) { // ... but can't be coalesced
7913 // nothing to do
7914 } else {
7915 // Potentially the start of a new free range:
7916 // Don't eagerly remove it from the free lists.
7917 // No need to remove it if it will just be put
7918 // back again. (Also, from a pragmatic point of view,
7919 // if it is a free block in a region that is beyond
7920 // any allocated blocks, an assertion will fail.)
7921 // Remember the start of a free run.
7922 initialize_free_range(addr, true);
7923 // end - can coalesce with next chunk
7924 }
7925 } else {
7926 // in the midst of a free range; we are coalescing
7927 debug_only(record_free_block_coalesced(fc);)
7928 if (CMSTraceSweeper) {
7929 gclog_or_tty->print(" -- pick up free block 0x%x (%d)\n", fc, size);
7930 }
7931 // remove it from the free lists
7932 _sp->removeFreeChunkFromFreeLists(fc);
7933 set_lastFreeRangeCoalesced(true);
7934 // If the chunk is being coalesced and the current free range is
7935 // in the free lists, remove the current free range so that it
7936 // will be returned to the free lists in its entirety - all
7937 // the coalesced pieces included.
7938 if (freeRangeInFreeLists()) {
7939 FreeChunk* ffc = (FreeChunk*) freeFinger();
7940 assert(ffc->size() == pointer_delta(addr, freeFinger()),
7941 "Size of free range is inconsistent with chunk size.");
7942 if (CMSTestInFreeList) {
7943 assert(_sp->verifyChunkInFreeLists(ffc),
7944 "free range is not in free lists");
7945 }
7946 _sp->removeFreeChunkFromFreeLists(ffc);
7947 set_freeRangeInFreeLists(false);
7948 }
7949 }
7950 }
7951 } else {
7952 // Code path common to both original and adaptive free lists.
7954 // can't coalesce with the previous block; this should be treated
7955 // as the end of a free run, if any
7956 if (inFreeRange()) {
7957 // we kicked some butt; time to pick up the garbage
7958 assert(freeFinger() < addr, "the finger pointeth off base");
7959 flushCurFreeChunk(freeFinger(), pointer_delta(addr, freeFinger()));
7960 }
7961 // else, nothing to do, just continue
7962 }
7963 }
7965 size_t SweepClosure::doGarbageChunk(FreeChunk* fc) {
7966 // This is a chunk of garbage. It is not in any free list.
7967 // Add it to a free list or let it possibly be coalesced into
7968 // a larger chunk.
7969 HeapWord* addr = (HeapWord*) fc;
7970 size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
7972 if (_sp->adaptive_freelists()) {
7973 // Verify that the bit map has no bits marked between
7974 // addr and purported end of just dead object.
7975 _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
7977 doPostIsFreeOrGarbageChunk(fc, size);
7978 } else {
7979 if (!inFreeRange()) {
7980 // start of a new free range
7981 assert(size > 0, "A free range should have a size");
7982 initialize_free_range(addr, false);
7984 } else {
7985 // this will be swept up when we hit the end of the
7986 // free range
7987 if (CMSTraceSweeper) {
7988 gclog_or_tty->print(" -- pick up garbage 0x%x (%d) \n", fc, size);
7989 }
7990 // If the chunk is being coalesced and the current free range is
7991 // in the free lists, remove the current free range so that it
7992 // will be returned to the free lists in its entirety - all
7993 // the coalesced pieces included.
7994 if (freeRangeInFreeLists()) {
7995 FreeChunk* ffc = (FreeChunk*)freeFinger();
7996 assert(ffc->size() == pointer_delta(addr, freeFinger()),
7997 "Size of free range is inconsistent with chunk size.");
7998 if (CMSTestInFreeList) {
7999 assert(_sp->verifyChunkInFreeLists(ffc),
8000 "free range is not in free lists");
8001 }
8002 _sp->removeFreeChunkFromFreeLists(ffc);
8003 set_freeRangeInFreeLists(false);
8004 }
8005 set_lastFreeRangeCoalesced(true);
8006 }
8007 // this will be swept up when we hit the end of the free range
8009 // Verify that the bit map has no bits marked between
8010 // addr and purported end of just dead object.
8011 _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
8012 }
8013 return size;
8014 }
8016 size_t SweepClosure::doLiveChunk(FreeChunk* fc) {
8017 HeapWord* addr = (HeapWord*) fc;
8018 // The sweeper has just found a live object. Return any accumulated
8019 // left hand chunk to the free lists.
8020 if (inFreeRange()) {
8021 if (_sp->adaptive_freelists()) {
8022 flushCurFreeChunk(freeFinger(),
8023 pointer_delta(addr, freeFinger()));
8024 } else { // not adaptive freelists
8025 set_inFreeRange(false);
8026 // Add the free range back to the free list if it is not already
8027 // there.
8028 if (!freeRangeInFreeLists()) {
8029 assert(freeFinger() < addr, "the finger pointeth off base");
8030 if (CMSTraceSweeper) {
8031 gclog_or_tty->print("Sweep:put_free_blk 0x%x (%d) "
8032 "[coalesced:%d]\n",
8033 freeFinger(), pointer_delta(addr, freeFinger()),
8034 lastFreeRangeCoalesced());
8035 }
8036 _sp->addChunkAndRepairOffsetTable(freeFinger(),
8037 pointer_delta(addr, freeFinger()), lastFreeRangeCoalesced());
8038 }
8039 }
8040 }
8042 // Common code path for original and adaptive free lists.
8044 // this object is live: we'd normally expect this to be
8045 // an oop, and would like to assert the following:
8046 // assert(oop(addr)->is_oop(), "live block should be an oop");
8047 // However, as we commented above, this may be an object whose
8048 // header hasn't yet been initialized.
8049 size_t size;
8050 assert(_bitMap->isMarked(addr), "Tautology for this control point");
8051 if (_bitMap->isMarked(addr + 1)) {
8052 // Determine the size from the bit map, rather than trying to
8053 // compute it from the object header.
8054 HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
8055 size = pointer_delta(nextOneAddr + 1, addr);
8056 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
8057 "alignment problem");
8059 #ifdef DEBUG
8060 if (oop(addr)->klass_or_null() != NULL &&
8061 ( !_collector->should_unload_classes()
8062 || oop(addr)->is_parsable())) {
8063 // Ignore the mark word because we are running concurrently with mutators
8064 assert(oop(addr)->is_oop(true), "live block should be an oop");
8065 assert(size ==
8066 CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
8067 "P-mark and computed size do not agree");
8068 }
8069 #endif
8071 } else {
8072 // This should be an initialized object that's alive.
8073 assert(oop(addr)->klass_or_null() != NULL &&
8074 (!_collector->should_unload_classes()
8075 || oop(addr)->is_parsable()),
8076 "Should be an initialized object");
8077 // Ignore the mark word because we are running concurrently with mutators
8078 assert(oop(addr)->is_oop(true), "live block should be an oop");
8079 // Verify that the bit map has no bits marked between
8080 // addr and purported end of this block.
8081 size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
8082 assert(size >= 3, "Necessary for Printezis marks to work");
8083 assert(!_bitMap->isMarked(addr+1), "Tautology for this control point");
8084 DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);)
8085 }
8086 return size;
8087 }
8089 void SweepClosure::doPostIsFreeOrGarbageChunk(FreeChunk* fc,
8090 size_t chunkSize) {
8091 // doPostIsFreeOrGarbageChunk() should only be called in the smart allocation
8092 // scheme.
8093 bool fcInFreeLists = fc->isFree();
8094 assert(_sp->adaptive_freelists(), "Should only be used in this case.");
8095 assert((HeapWord*)fc <= _limit, "sweep invariant");
8096 if (CMSTestInFreeList && fcInFreeLists) {
8097 assert(_sp->verifyChunkInFreeLists(fc),
8098 "free chunk is not in free lists");
8099 }
8102 if (CMSTraceSweeper) {
8103 gclog_or_tty->print_cr(" -- pick up another chunk at 0x%x (%d)", fc, chunkSize);
8104 }
8106 HeapWord* addr = (HeapWord*) fc;
8108 bool coalesce;
8109 size_t left = pointer_delta(addr, freeFinger());
8110 size_t right = chunkSize;
8111 switch (FLSCoalescePolicy) {
8112 // the numeric value forms a coalescing aggressiveness metric
8113 case 0: { // never coalesce
8114 coalesce = false;
8115 break;
8116 }
8117 case 1: { // coalesce if left & right chunks on overpopulated lists
8118 coalesce = _sp->coalOverPopulated(left) &&
8119 _sp->coalOverPopulated(right);
8120 break;
8121 }
8122 case 2: { // coalesce if left chunk on overpopulated list (default)
8123 coalesce = _sp->coalOverPopulated(left);
8124 break;
8125 }
8126 case 3: { // coalesce if left OR right chunk on overpopulated list
8127 coalesce = _sp->coalOverPopulated(left) ||
8128 _sp->coalOverPopulated(right);
8129 break;
8130 }
8131 case 4: { // always coalesce
8132 coalesce = true;
8133 break;
8134 }
8135 default:
8136 ShouldNotReachHere();
8137 }
8139 // Should the current free range be coalesced?
8140 // If the chunk is in a free range and either we decided to coalesce above
8141 // or the chunk is near the large block at the end of the heap
8142 // (isNearLargestChunk() returns true), then coalesce this chunk.
8143 bool doCoalesce = inFreeRange() &&
8144 (coalesce || _g->isNearLargestChunk((HeapWord*)fc));
8145 if (doCoalesce) {
8146 // Coalesce the current free range on the left with the new
8147 // chunk on the right. If either is on a free list,
8148 // it must be removed from the list and stashed in the closure.
8149 if (freeRangeInFreeLists()) {
8150 FreeChunk* ffc = (FreeChunk*)freeFinger();
8151 assert(ffc->size() == pointer_delta(addr, freeFinger()),
8152 "Size of free range is inconsistent with chunk size.");
8153 if (CMSTestInFreeList) {
8154 assert(_sp->verifyChunkInFreeLists(ffc),
8155 "Chunk is not in free lists");
8156 }
8157 _sp->coalDeath(ffc->size());
8158 _sp->removeFreeChunkFromFreeLists(ffc);
8159 set_freeRangeInFreeLists(false);
8160 }
8161 if (fcInFreeLists) {
8162 _sp->coalDeath(chunkSize);
8163 assert(fc->size() == chunkSize,
8164 "The chunk has the wrong size or is not in the free lists");
8165 _sp->removeFreeChunkFromFreeLists(fc);
8166 }
8167 set_lastFreeRangeCoalesced(true);
8168 } else { // not in a free range and/or should not coalesce
8169 // Return the current free range and start a new one.
8170 if (inFreeRange()) {
8171 // In a free range but cannot coalesce with the right hand chunk.
8172 // Put the current free range into the free lists.
8173 flushCurFreeChunk(freeFinger(),
8174 pointer_delta(addr, freeFinger()));
8175 }
8176 // Set up for new free range. Pass along whether the right hand
8177 // chunk is in the free lists.
8178 initialize_free_range((HeapWord*)fc, fcInFreeLists);
8179 }
8180 }
8181 void SweepClosure::flushCurFreeChunk(HeapWord* chunk, size_t size) {
8182 assert(inFreeRange(), "Should only be called if currently in a free range.");
8183 assert(size > 0,
8184 "A zero sized chunk cannot be added to the free lists.");
8185 if (!freeRangeInFreeLists()) {
8186 if (CMSTestInFreeList) {
8187 FreeChunk* fc = (FreeChunk*) chunk;
8188 fc->setSize(size);
8189 assert(!_sp->verifyChunkInFreeLists(fc),
8190 "chunk should not be in free lists yet");
8191 }
8192 if (CMSTraceSweeper) {
8193 gclog_or_tty->print_cr(" -- add free block 0x%x (%d) to free lists",
8194 chunk, size);
8195 }
8196 // A new free range is going to be starting. The current
8197 // free range has not been added to the free lists yet or
8198 // was removed so add it back.
8199 // If the current free range was coalesced, then the death
8200 // of the free range was recorded. Record a birth now.
8201 if (lastFreeRangeCoalesced()) {
8202 _sp->coalBirth(size);
8203 }
8204 _sp->addChunkAndRepairOffsetTable(chunk, size,
8205 lastFreeRangeCoalesced());
8206 }
8207 set_inFreeRange(false);
8208 set_freeRangeInFreeLists(false);
8209 }
8211 // We take a break if we've been at this for a while,
8212 // so as to avoid monopolizing the locks involved.
8213 void SweepClosure::do_yield_work(HeapWord* addr) {
8214 // Return current free chunk being used for coalescing (if any)
8215 // to the appropriate freelist. After yielding, the next
8216 // free block encountered will start a coalescing range of
8217 // free blocks. If the next free block is adjacent to the
8218 // chunk just flushed, they will need to wait for the next
8219 // sweep to be coalesced.
8220 if (inFreeRange()) {
8221 flushCurFreeChunk(freeFinger(), pointer_delta(addr, freeFinger()));
8222 }
8224 // First give up the locks, then yield, then re-lock.
8225 // We should probably use a constructor/destructor idiom to
8226 // do this unlock/lock or modify the MutexUnlocker class to
8227 // serve our purpose. XXX
8228 assert_lock_strong(_bitMap->lock());
8229 assert_lock_strong(_freelistLock);
8230 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
8231 "CMS thread should hold CMS token");
8232 _bitMap->lock()->unlock();
8233 _freelistLock->unlock();
8234 ConcurrentMarkSweepThread::desynchronize(true);
8235 ConcurrentMarkSweepThread::acknowledge_yield_request();
8236 _collector->stopTimer();
8237 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
8238 if (PrintCMSStatistics != 0) {
8239 _collector->incrementYields();
8240 }
8241 _collector->icms_wait();
8243 // See the comment in coordinator_yield()
8244 for (unsigned i = 0; i < CMSYieldSleepCount &&
8245 ConcurrentMarkSweepThread::should_yield() &&
8246 !CMSCollector::foregroundGCIsActive(); ++i) {
8247 os::sleep(Thread::current(), 1, false);
8248 ConcurrentMarkSweepThread::acknowledge_yield_request();
8249 }
8251 ConcurrentMarkSweepThread::synchronize(true);
8252 _freelistLock->lock();
8253 _bitMap->lock()->lock_without_safepoint_check();
8254 _collector->startTimer();
8255 }
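// A possible shape for the constructor/destructor idiom suggested in
// do_yield_work() above -- a hedged sketch only; neither this class nor
// any use of it exists here:
//
//   class ReverseBitMapLocker: public StackObj {
//     CMSBitMap* _bm;
//    public:
//     ReverseBitMapLocker(CMSBitMap* bm): _bm(bm) {
//       _bm->lock()->unlock();                         // release on entry
//     }
//     ~ReverseBitMapLocker() {
//       _bm->lock()->lock_without_safepoint_check();   // re-acquire on exit
//     }
//   };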
8257 #ifndef PRODUCT
8258 // This is actually very useful in a product build if it can
8259 // be called from the debugger. Compile it into the product
8260 // as needed.
8261 bool debug_verifyChunkInFreeLists(FreeChunk* fc) {
8262 return debug_cms_space->verifyChunkInFreeLists(fc);
8263 }
8265 void SweepClosure::record_free_block_coalesced(FreeChunk* fc) const {
8266 if (CMSTraceSweeper) {
8267 gclog_or_tty->print("Sweep:coal_free_blk 0x%x (%d)\n", fc, fc->size());
8268 }
8269 }
8270 #endif
8272 // CMSIsAliveClosure
8273 bool CMSIsAliveClosure::do_object_b(oop obj) {
8274 HeapWord* addr = (HeapWord*)obj;
8275 return addr != NULL &&
8276 (!_span.contains(addr) || _bit_map->isMarked(addr));
8277 }
8279 // CMSKeepAliveClosure: the serial version
8280 void CMSKeepAliveClosure::do_oop(oop obj) {
8281 HeapWord* addr = (HeapWord*)obj;
8282 if (_span.contains(addr) &&
8283 !_bit_map->isMarked(addr)) {
8284 _bit_map->mark(addr);
8285 bool simulate_overflow = false;
8286 NOT_PRODUCT(
8287 if (CMSMarkStackOverflowALot &&
8288 _collector->simulate_overflow()) {
8289 // simulate a stack overflow
8290 simulate_overflow = true;
8291 }
8292 )
8293 if (simulate_overflow || !_mark_stack->push(obj)) {
8294 _collector->push_on_overflow_list(obj);
8295 _collector->_ser_kac_ovflw++;
8296 }
8297 }
8298 }
8300 void CMSKeepAliveClosure::do_oop(oop* p) { CMSKeepAliveClosure::do_oop_work(p); }
8301 void CMSKeepAliveClosure::do_oop(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
8303 // CMSParKeepAliveClosure: a parallel version of the above.
8304 // The work queues are private to each closure (thread),
8305 // but (may be) available for stealing by other threads.
8306 void CMSParKeepAliveClosure::do_oop(oop obj) {
8307 HeapWord* addr = (HeapWord*)obj;
8308 if (_span.contains(addr) &&
8309 !_bit_map->isMarked(addr)) {
8310 // In general, during recursive tracing, several threads
8311 // may be concurrently getting here; the first one to
8312 // "tag" it, claims it.
8313 if (_bit_map->par_mark(addr)) {
8314 bool res = _work_queue->push(obj);
8315 assert(res, "Low water mark should be much less than capacity");
8316 // Do a recursive trim in the hope that this will keep
8317 // stack usage lower, but leave some oops for potential stealers
8318 trim_queue(_low_water_mark);
8319 } // Else, another thread got there first
8320 }
8321 }
8323 void CMSParKeepAliveClosure::do_oop(oop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
8324 void CMSParKeepAliveClosure::do_oop(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
8326 void CMSParKeepAliveClosure::trim_queue(uint max) {
8327 while (_work_queue->size() > max) {
8328 oop new_oop;
8329 if (_work_queue->pop_local(new_oop)) {
8330 assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
8331 assert(_bit_map->isMarked((HeapWord*)new_oop),
8332 "no white objects on this stack!");
8333 assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
8334 // iterate over the oops in this oop, marking and pushing
8335 // the ones in CMS heap (i.e. in _span).
8336 new_oop->oop_iterate(&_mark_and_push);
8337 }
8338 }
8339 }
8341 void CMSInnerParMarkAndPushClosure::do_oop(oop obj) {
8342 HeapWord* addr = (HeapWord*)obj;
8343 if (_span.contains(addr) &&
8344 !_bit_map->isMarked(addr)) {
8345 if (_bit_map->par_mark(addr)) {
8346 bool simulate_overflow = false;
8347 NOT_PRODUCT(
8348 if (CMSMarkStackOverflowALot &&
8349 _collector->par_simulate_overflow()) {
8350 // simulate a stack overflow
8351 simulate_overflow = true;
8352 }
8353 )
8354 if (simulate_overflow || !_work_queue->push(obj)) {
8355 _collector->par_push_on_overflow_list(obj);
8356 _collector->_par_kac_ovflw++;
8357 }
8358 } // Else another thread got there already
8359 }
8360 }
8362 void CMSInnerParMarkAndPushClosure::do_oop(oop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
8363 void CMSInnerParMarkAndPushClosure::do_oop(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
8365 //////////////////////////////////////////////////////////////////
8366 // CMSExpansionCause /////////////////////////////
8367 //////////////////////////////////////////////////////////////////
8368 const char* CMSExpansionCause::to_string(CMSExpansionCause::Cause cause) {
8369 switch (cause) {
8370 case _no_expansion:
8371 return "No expansion";
8372 case _satisfy_free_ratio:
8373 return "Free ratio";
8374 case _satisfy_promotion:
8375 return "Satisfy promotion";
8376 case _satisfy_allocation:
8377 return "allocation";
8378 case _allocate_par_lab:
8379 return "Par LAB";
8380 case _allocate_par_spooling_space:
8381 return "Par Spooling Space";
8382 case _adaptive_size_policy:
8383 return "Ergonomics";
8384 default:
8385 return "unknown";
8386 }
8387 }
8389 void CMSDrainMarkingStackClosure::do_void() {
8390 // the max number to take from overflow list at a time
8391 const size_t num = _mark_stack->capacity()/4;
8392 while (!_mark_stack->isEmpty() ||
8393 // if stack is empty, check the overflow list
8394 _collector->take_from_overflow_list(num, _mark_stack)) {
8395 oop obj = _mark_stack->pop();
8396 HeapWord* addr = (HeapWord*)obj;
8397 assert(_span.contains(addr), "Should be within span");
8398 assert(_bit_map->isMarked(addr), "Should be marked");
8399 assert(obj->is_oop(), "Should be an oop");
8400 obj->oop_iterate(_keep_alive);
8401 }
8402 }
8404 void CMSParDrainMarkingStackClosure::do_void() {
8405 // drain queue
8406 trim_queue(0);
8407 }
8409 // Trim our work_queue so its length is below max at return
8410 void CMSParDrainMarkingStackClosure::trim_queue(uint max) {
8411 while (_work_queue->size() > max) {
8412 oop new_oop;
8413 if (_work_queue->pop_local(new_oop)) {
8414 assert(new_oop->is_oop(), "Expected an oop");
8415 assert(_bit_map->isMarked((HeapWord*)new_oop),
8416 "no white objects on this stack!");
8417 assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
8418 // iterate over the oops in this oop, marking and pushing
8419 // the ones in CMS heap (i.e. in _span).
8420 new_oop->oop_iterate(&_mark_and_push);
8421 }
8422 }
8423 }
8425 ////////////////////////////////////////////////////////////////////
8426 // Support for Marking Stack Overflow list handling and related code
8427 ////////////////////////////////////////////////////////////////////
8428 // Much of the following code is similar in shape and spirit to the
8429 // code used in ParNewGC. We should try and share that code
8430 // as much as possible in the future.
8432 #ifndef PRODUCT
8433 // Debugging support for CMSStackOverflowALot
8435 // It's OK to call this multi-threaded; the worst thing
8436 // that can happen is that we'll get a bunch of closely
8437 // spaced simulated overflows, but that's OK, in fact
8438 // probably good as it would exercise the overflow code
8439 // under contention.
8440 bool CMSCollector::simulate_overflow() {
8441 if (_overflow_counter-- <= 0) { // just being defensive
8442 _overflow_counter = CMSMarkStackOverflowInterval;
8443 return true;
8444 } else {
8445 return false;
8446 }
8447 }
8449 bool CMSCollector::par_simulate_overflow() {
8450 return simulate_overflow();
8451 }
8452 #endif
8454 // Single-threaded
8455 bool CMSCollector::take_from_overflow_list(size_t num, CMSMarkStack* stack) {
8456 assert(stack->isEmpty(), "Expected precondition");
8457 assert(stack->capacity() > num, "Shouldn't bite more than we can chew");
8458 size_t i = num;
8459 oop cur = _overflow_list;
8460 const markOop proto = markOopDesc::prototype();
8461 NOT_PRODUCT(size_t n = 0;)
8462 for (oop next; i > 0 && cur != NULL; cur = next, i--) {
8463 next = oop(cur->mark());
8464 cur->set_mark(proto); // until proven otherwise
8465 assert(cur->is_oop(), "Should be an oop");
8466 bool res = stack->push(cur);
8467 assert(res, "Bit off more than can chew?");
8468 NOT_PRODUCT(n++;)
8469 }
8470 _overflow_list = cur;
8471 #ifndef PRODUCT
8472 assert(_num_par_pushes >= n, "Too many pops?");
8473 _num_par_pushes -= n;
8474 #endif
8475 return !stack->isEmpty();
8476 }
8478 // Multi-threaded; use CAS to break off a prefix
8479 bool CMSCollector::par_take_from_overflow_list(size_t num,
8480 OopTaskQueue* work_q) {
8481 assert(work_q->size() == 0, "That's the current policy");
8482 assert(num < work_q->max_elems(), "Can't bite more than we can chew");
8483 if (_overflow_list == NULL) {
8484 return false;
8485 }
8486 // Grab the entire list; we'll put back a suffix
8487 oop prefix = (oop)Atomic::xchg_ptr(NULL, &_overflow_list);
8488 if (prefix == NULL) { // someone grabbed it before we did ...
8489 // ... we could spin for a short while, but for now we don't
8490 return false;
8491 }
8492 size_t i = num;
8493 oop cur = prefix;
8494 for (; i > 1 && cur->mark() != NULL; cur = oop(cur->mark()), i--);
8495 if (cur->mark() != NULL) {
8496 oop suffix_head = cur->mark(); // suffix will be put back on global list
8497 cur->set_mark(NULL); // break off suffix
8498 // Find tail of suffix so we can prepend suffix to global list
8499 for (cur = suffix_head; cur->mark() != NULL; cur = (oop)(cur->mark()));
8500 oop suffix_tail = cur;
8501 assert(suffix_tail != NULL && suffix_tail->mark() == NULL,
8502 "Tautology");
8503 oop observed_overflow_list = _overflow_list;
8504 do {
8505 cur = observed_overflow_list;
8506 suffix_tail->set_mark(markOop(cur));
8507 observed_overflow_list =
8508 (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur);
8509 } while (cur != observed_overflow_list);
8510 }
8512 // Push the prefix elements on work_q
8513 assert(prefix != NULL, "control point invariant");
8514 const markOop proto = markOopDesc::prototype();
8515 oop next;
8516 NOT_PRODUCT(size_t n = 0;)
8517 for (cur = prefix; cur != NULL; cur = next) {
8518 next = oop(cur->mark());
8519 cur->set_mark(proto); // until proven otherwise
8520 assert(cur->is_oop(), "Should be an oop");
8521 bool res = work_q->push(cur);
8522 assert(res, "Bit off more than we can chew?");
8523 NOT_PRODUCT(n++;)
8524 }
8525 #ifndef PRODUCT
8526 assert(_num_par_pushes >= n, "Too many pops?");
8527 Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
8528 #endif
8529 return true;
8530 }
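// An illustrative walk-through with hypothetical objects A, B, C, D (not
// from the source): the overflow list is chained through mark words, so
// _overflow_list -> A, A->mark() == B, B->mark() == C, C->mark() == D,
// and D->mark() == NULL. A call with num == 2 exchanges the head to
// NULL, walks one hop from A to B, sees B->mark() == C is non-NULL, and
// breaks the chain with B->set_mark(NULL); it then pushes the prefix
// [A, B] on work_q (restoring prototypical mark words) and CASes the
// suffix [C, D] back onto the front of _overflow_list.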
8532 // Single-threaded
8533 void CMSCollector::push_on_overflow_list(oop p) {
8534 NOT_PRODUCT(_num_par_pushes++;)
8535 assert(p->is_oop(), "Not an oop");
8536 preserve_mark_if_necessary(p);
8537 p->set_mark((markOop)_overflow_list);
8538 _overflow_list = p;
8539 }
8541 // Multi-threaded; use CAS to prepend to overflow list
8542 void CMSCollector::par_push_on_overflow_list(oop p) {
8543 NOT_PRODUCT(Atomic::inc_ptr(&_num_par_pushes);)
8544 assert(p->is_oop(), "Not an oop");
8545 par_preserve_mark_if_necessary(p);
8546 oop observed_overflow_list = _overflow_list;
8547 oop cur_overflow_list;
8548 do {
8549 cur_overflow_list = observed_overflow_list;
8550 p->set_mark(markOop(cur_overflow_list));
8551 observed_overflow_list =
8552 (oop) Atomic::cmpxchg_ptr(p, &_overflow_list, cur_overflow_list);
8553 } while (cur_overflow_list != observed_overflow_list);
8554 }
8556 // Single threaded
8557 // General Note on GrowableArray: pushes may silently fail
8558 // because we are (temporarily) out of C-heap for expanding
8559 // the stack. The problem is quite ubiquitous and affects
8560 // a lot of code in the JVM. The prudent thing for GrowableArray
8561 // to do (for now) is to exit with an error. However, that may
8562 // be too draconian in some cases because the caller may be
8563 // able to recover without much harm. For such cases, we
8564 // should probably introduce a "soft_push" method which returns
8565 // an indication of success or failure with the assumption that
8566 // the caller may be able to recover from a failure; code in
8567 // the VM can then be changed, incrementally, to deal with such
8568 // failures where possible, thus, incrementally hardening the VM
8569 // in such low resource situations.
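// A hedged sketch of the "soft_push" suggested above -- a hypothetical
// method, not part of GrowableArray today -- assuming an internal
// try_grow() helper that reports C-heap exhaustion instead of exiting:
//
//   template <class E>
//   bool GrowableArray<E>::soft_push(const E& elem) {
//     if (_len == _max && !try_grow()) {
//       return false;            // let the caller decide how to recover
//     }
//     _data[_len++] = elem;
//     return true;
//   }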
8570 void CMSCollector::preserve_mark_work(oop p, markOop m) {
8571 int PreserveMarkStackSize = 128;
8573 if (_preserved_oop_stack == NULL) {
8574 assert(_preserved_mark_stack == NULL,
8575 "bijection with preserved_oop_stack");
8576 // Allocate the stacks
8577 _preserved_oop_stack = new (ResourceObj::C_HEAP)
8578 GrowableArray<oop>(PreserveMarkStackSize, true);
8579 _preserved_mark_stack = new (ResourceObj::C_HEAP)
8580 GrowableArray<markOop>(PreserveMarkStackSize, true);
8581 if (_preserved_oop_stack == NULL || _preserved_mark_stack == NULL) {
8582 vm_exit_out_of_memory(2 * PreserveMarkStackSize * sizeof(oop) /* punt */,
8583 "Preserved Mark/Oop Stack for CMS (C-heap)");
8584 }
8585 }
8586 _preserved_oop_stack->push(p);
8587 _preserved_mark_stack->push(m);
8588 assert(m == p->mark(), "Mark word changed");
8589 assert(_preserved_oop_stack->length() == _preserved_mark_stack->length(),
8590 "bijection");
8591 }
8593 // Single threaded
8594 void CMSCollector::preserve_mark_if_necessary(oop p) {
8595 markOop m = p->mark();
8596 if (m->must_be_preserved(p)) {
8597 preserve_mark_work(p, m);
8598 }
8599 }
8601 void CMSCollector::par_preserve_mark_if_necessary(oop p) {
8602 markOop m = p->mark();
8603 if (m->must_be_preserved(p)) {
8604 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
8605 // Even though we read the mark word without holding
8606 // the lock, we are assured that it will not change
8607 // because we "own" this oop, so no other thread can
8608 // be trying to push it on the overflow list; see
8609 // the assertion in preserve_mark_work() that checks
8610 // that m == p->mark().
8611 preserve_mark_work(p, m);
8612 }
8613 }
8615 // We should be able to do this multi-threaded,
8616 // a chunk of stack being a task (this is
8617 // correct because each oop only ever appears
8618 // once in the overflow list). However, it's
8619 // not very easy to completely overlap this with
8620 // other operations, so it will generally not be done
8621 // until all work's been completed. Because we
8622 // expect the preserved oop stack (set) to be small,
8623 // it's probably fine to do this single-threaded.
8624 // We can explore cleverer concurrent/overlapped/parallel
8625 // processing of preserved marks if we feel the
8626 // need for this in the future. Stack overflow should
8627 // be so rare in practice and, when it happens, its
8628 // effect on performance so great that this will
8629 // likely just be in the noise anyway.
8630 void CMSCollector::restore_preserved_marks_if_any() {
8631 if (_preserved_oop_stack == NULL) {
8632 assert(_preserved_mark_stack == NULL,
8633 "bijection with preserved_oop_stack");
8634 return;
8635 }
8637 assert(SafepointSynchronize::is_at_safepoint(),
8638 "world should be stopped");
8639 assert(Thread::current()->is_ConcurrentGC_thread() ||
8640 Thread::current()->is_VM_thread(),
8641 "should be single-threaded");
8643 int length = _preserved_oop_stack->length();
8644 assert(_preserved_mark_stack->length() == length, "bijection");
8645 for (int i = 0; i < length; i++) {
8646 oop p = _preserved_oop_stack->at(i);
8647 assert(p->is_oop(), "Should be an oop");
8648 assert(_span.contains(p), "oop should be in _span");
8649 assert(p->mark() == markOopDesc::prototype(),
8650 "Set when taken from overflow list");
8651 markOop m = _preserved_mark_stack->at(i);
8652 p->set_mark(m);
8653 }
8654 _preserved_mark_stack->clear();
8655 _preserved_oop_stack->clear();
8656 assert(_preserved_mark_stack->is_empty() &&
8657 _preserved_oop_stack->is_empty(),
8658 "stacks were cleared above");
8659 }
8661 #ifndef PRODUCT
8662 bool CMSCollector::no_preserved_marks() const {
8663 return ( ( _preserved_mark_stack == NULL
8664 && _preserved_oop_stack == NULL)
8665 || ( _preserved_mark_stack->is_empty()
8666 && _preserved_oop_stack->is_empty()));
8667 }
8668 #endif
8670 CMSAdaptiveSizePolicy* ASConcurrentMarkSweepGeneration::cms_size_policy() const
8671 {
8672 GenCollectedHeap* gch = (GenCollectedHeap*) GenCollectedHeap::heap();
8673 CMSAdaptiveSizePolicy* size_policy =
8674 (CMSAdaptiveSizePolicy*) gch->gen_policy()->size_policy();
8675 assert(size_policy->is_gc_cms_adaptive_size_policy(),
8676 "Wrong type for size policy");
8677 return size_policy;
8678 }
8680 void ASConcurrentMarkSweepGeneration::resize(size_t cur_promo_size,
8681 size_t desired_promo_size) {
8682 if (cur_promo_size < desired_promo_size) {
8683 size_t expand_bytes = desired_promo_size - cur_promo_size;
8684 if (PrintAdaptiveSizePolicy && Verbose) {
8685 gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
8686 "Expanding tenured generation by " SIZE_FORMAT " (bytes)",
8687 expand_bytes);
8688 }
8689 expand(expand_bytes,
8690 MinHeapDeltaBytes,
8691 CMSExpansionCause::_adaptive_size_policy);
8692 } else if (desired_promo_size < cur_promo_size) {
8693 size_t shrink_bytes = cur_promo_size - desired_promo_size;
8694 if (PrintAdaptiveSizePolicy && Verbose) {
8695 gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
8696 "Shrinking tenured generation by " SIZE_FORMAT " (bytes)",
8697 shrink_bytes);
8698 }
8699 shrink(shrink_bytes);
8700 }
8701 }
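// Worked example for resize() above (illustrative numbers): with
// cur_promo_size of 64M and desired_promo_size of 80M, the first branch
// expands the tenured generation by 16M; with the values reversed, the
// second branch shrinks it by 16M; equal sizes fall through unchanged.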
8703 CMSGCAdaptivePolicyCounters* ASConcurrentMarkSweepGeneration::gc_adaptive_policy_counters() {
8704 GenCollectedHeap* gch = GenCollectedHeap::heap();
8705 CMSGCAdaptivePolicyCounters* counters =
8706 (CMSGCAdaptivePolicyCounters*) gch->collector_policy()->counters();
8707 assert(counters->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
8708 "Wrong kind of counters");
8709 return counters;
8710 }
8713 void ASConcurrentMarkSweepGeneration::update_counters() {
8714 if (UsePerfData) {
8715 _space_counters->update_all();
8716 _gen_counters->update_all();
8717 CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
8718 GenCollectedHeap* gch = GenCollectedHeap::heap();
8719 CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
8720 assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
8721 "Wrong gc statistics type");
8722 counters->update_counters(gc_stats_l);
8723 }
8724 }
8726 void ASConcurrentMarkSweepGeneration::update_counters(size_t used) {
8727 if (UsePerfData) {
8728 _space_counters->update_used(used);
8729 _space_counters->update_capacity();
8730 _gen_counters->update_all();
8732 CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
8733 GenCollectedHeap* gch = GenCollectedHeap::heap();
8734 CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
8735 assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
8736 "Wrong gc statistics type");
8737 counters->update_counters(gc_stats_l);
8738 }
8739 }
8741 // The desired expansion delta is computed so that:
8742 // . the desired free percentage (or greater) is attained
8743 void ASConcurrentMarkSweepGeneration::compute_new_size() {
8744 assert_locked_or_safepoint(Heap_lock);
8746 GenCollectedHeap* gch = (GenCollectedHeap*) GenCollectedHeap::heap();
8748 // If incremental collection failed, we just want to expand
8749 // to the limit.
8750 if (incremental_collection_failed()) {
8751 clear_incremental_collection_failed();
8752 grow_to_reserved();
8753 return;
8754 }
8756 assert(UseAdaptiveSizePolicy, "Should be using adaptive sizing");
8758 assert(gch->kind() == CollectedHeap::GenCollectedHeap,
8759 "Wrong type of heap");
8760 int prev_level = level() - 1;
8761 assert(prev_level >= 0, "The cms generation is the lowest generation");
8762 Generation* prev_gen = gch->get_gen(prev_level);
8763 assert(prev_gen->kind() == Generation::ASParNew,
8764 "Wrong type of young generation");
8765 ParNewGeneration* younger_gen = (ParNewGeneration*) prev_gen;
8766 size_t cur_eden = younger_gen->eden()->capacity();
8767 CMSAdaptiveSizePolicy* size_policy = cms_size_policy();
8768 size_t cur_promo = free();
8769 size_policy->compute_tenured_generation_free_space(cur_promo,
8770 max_available(),
8771 cur_eden);
8772 resize(cur_promo, size_policy->promo_size());
8774 // Record the new size of the space in the cms generation
8775 // that is available for promotions. This is temporary.
8776 // It should be the desired promo size.
8777 size_policy->avg_cms_promo()->sample(free());
8778 size_policy->avg_old_live()->sample(used());
8780 if (UsePerfData) {
8781 CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
8782 counters->update_cms_capacity_counter(capacity());
8783 }
8784 }
8786 void ASConcurrentMarkSweepGeneration::shrink_by(size_t desired_bytes) {
8787 assert_locked_or_safepoint(Heap_lock);
8788 assert_lock_strong(freelistLock());
8789 HeapWord* old_end = _cmsSpace->end();
8790 HeapWord* unallocated_start = _cmsSpace->unallocated_block();
8791 assert(old_end >= unallocated_start, "Miscalculation of unallocated_start");
8792 FreeChunk* chunk_at_end = find_chunk_at_end();
8793 if (chunk_at_end == NULL) {
8794 // No room to shrink
8795 if (PrintGCDetails && Verbose) {
8796 gclog_or_tty->print_cr("No room to shrink: old_end "
8797 PTR_FORMAT " unallocated_start " PTR_FORMAT
8798 " chunk_at_end " PTR_FORMAT,
8799 old_end, unallocated_start, chunk_at_end);
8800 }
8801 return;
8802 } else {
8804 // Use the chunk at the end of the space (found above) to determine
8805 // how much the space can be shrunk.
8806 size_t shrinkable_size_in_bytes = chunk_at_end->size();
8807 size_t aligned_shrinkable_size_in_bytes =
8808 align_size_down(shrinkable_size_in_bytes, os::vm_page_size());
8809 assert(unallocated_start <= chunk_at_end->end(),
8810 "Inconsistent chunk at end of space");
8811 size_t bytes = MIN2(desired_bytes, aligned_shrinkable_size_in_bytes);
8812 size_t word_size_before = heap_word_size(_virtual_space.committed_size());
8814 // Shrink the underlying space
8815 _virtual_space.shrink_by(bytes);
8816 if (PrintGCDetails && Verbose) {
8817 gclog_or_tty->print_cr("ConcurrentMarkSweepGeneration::shrink_by:"
8818 " desired_bytes " SIZE_FORMAT
8819 " shrinkable_size_in_bytes " SIZE_FORMAT
8820 " aligned_shrinkable_size_in_bytes " SIZE_FORMAT
8821 " bytes " SIZE_FORMAT,
8822 desired_bytes, shrinkable_size_in_bytes,
8823 aligned_shrinkable_size_in_bytes, bytes);
8824 gclog_or_tty->print_cr(" old_end " SIZE_FORMAT
8825 " unallocated_start " SIZE_FORMAT,
8826 old_end, unallocated_start);
8827 }
8829 // If the space did shrink (shrinking is not guaranteed),
8830 // shrink the chunk at the end by the appropriate amount.
8831 if (((HeapWord*)_virtual_space.high()) < old_end) {
8832 size_t new_word_size =
8833 heap_word_size(_virtual_space.committed_size());
8835 // Have to remove the chunk from the dictionary because it is changing
8836 // size and might belong elsewhere in the dictionary.
8838 // Get the chunk at end, shrink it, and put it
8839 // back.
8840 _cmsSpace->removeChunkFromDictionary(chunk_at_end);
8841 size_t word_size_change = word_size_before - new_word_size;
8842 size_t chunk_at_end_old_size = chunk_at_end->size();
8843 assert(chunk_at_end_old_size >= word_size_change,
8844 "Shrink is too large");
8845 chunk_at_end->setSize(chunk_at_end_old_size -
8846 word_size_change);
8847 _cmsSpace->freed((HeapWord*) chunk_at_end->end(),
8848 word_size_change);
8850 _cmsSpace->returnChunkToDictionary(chunk_at_end);
8852 MemRegion mr(_cmsSpace->bottom(), new_word_size);
8853 _bts->resize(new_word_size); // resize the block offset shared array
8854 Universe::heap()->barrier_set()->resize_covered_region(mr);
8855 _cmsSpace->assert_locked();
8856 _cmsSpace->set_end((HeapWord*)_virtual_space.high());
8858 NOT_PRODUCT(_cmsSpace->dictionary()->verify());
8860 // update the space and generation capacity counters
8861 if (UsePerfData) {
8862 _space_counters->update_capacity();
8863 _gen_counters->update_all();
8864 }
8866 if (Verbose && PrintGCDetails) {
8867 size_t new_mem_size = _virtual_space.committed_size();
8868 size_t old_mem_size = new_mem_size + bytes;
8869 gclog_or_tty->print_cr("Shrinking %s from %ldK by %ldK to %ldK",
8870 name(), old_mem_size/K, bytes/K, new_mem_size/K);
8871 }
8872 }
8874 assert(_cmsSpace->unallocated_block() <= _cmsSpace->end(),
8875 "Inconsistency at end of space");
8876 assert(chunk_at_end->end() == _cmsSpace->end(),
8877 "Shrinking is inconsistent");
8878 return;
8879 }
8880 }
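// Worked example for shrink_by() above (illustrative numbers only,
// treating the terminal chunk size as a byte count): with a 4K page
// size, a 10000-byte terminal chunk aligns down to 8192 bytes, so a
// desired_bytes of 1M is clipped to 8192:
//
//   size_t aligned = align_size_down((size_t)10000, (size_t)4096); // 8192
//   size_t bytes   = MIN2((size_t)(1*M), aligned);                 // 8192
//
// Only if the virtual space then actually shrinks is the terminal chunk
// re-sized by the corresponding word delta and returned to the dictionary.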
8882 // Transfer some number of overflowed objects from the overflow list
8883 // to the regular marking stack. Return true if any were transferred.
8884 bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
8885 size_t num = MIN2((size_t)_mark_stack->capacity()/4,
8886 (size_t)ParGCDesiredObjsFromOverflowList);
8888 bool res = _collector->take_from_overflow_list(num, _mark_stack);
8889 assert(_collector->overflow_list_is_empty() || res,
8890 "If list is not empty, we should have taken something");
8891 assert(!res || !_mark_stack->isEmpty(),
8892 "If we took something, it should now be on our stack");
8893 return res;
8894 }
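// For instance, with a mark stack capacity of 4096 and
// ParGCDesiredObjsFromOverflowList at its (assumed) default of 20, num
// is MIN2(4096/4, 20) = 20, so at most 20 objects are drained per call;
// a very small stack would instead be bounded by a quarter of its
// capacity, leaving headroom for the objects' transitive scans.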
8896 size_t MarkDeadObjectsClosure::do_blk(HeapWord* addr) {
8897 size_t res = _sp->block_size_no_stall(addr, _collector);
8898 assert(res != 0, "Should always be able to compute a size");
8899 if (_sp->block_is_obj(addr)) {
8900 if (_live_bit_map->isMarked(addr)) {
8901 // It can't have been dead in a previous cycle
8902 guarantee(!_dead_bit_map->isMarked(addr), "No resurrection!");
8903 } else {
8904 _dead_bit_map->mark(addr); // mark the dead object
8905 }
8906 }
8907 return res;
8908 }
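// Illustrative use of this closure; the iteration loop and the set-up of
// the space, region and bit maps are assumptions for the sketch, not
// code from this file:
//
//   MarkDeadObjectsClosure cl(collector, sp, live_bit_map, dead_bit_map);
//   HeapWord* cur = mr.start();
//   while (cur < mr.end()) {
//     cur += cl.do_blk(cur);   // do_blk() returns the block size in words
//   }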