Fri, 27 Feb 2009 13:27:09 -0800
6810672: Comment typos
Summary: I have collected some typos I have found while looking at the code.
Reviewed-by: kvn, never
1 /*
2 * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
25 # include "incls/_precompiled.incl"
26 # include "incls/_concurrentMarkSweepGeneration.cpp.incl"
28 // statics
29 CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
30 bool CMSCollector::_full_gc_requested = false;
32 //////////////////////////////////////////////////////////////////
33 // In support of CMS/VM thread synchronization
34 //////////////////////////////////////////////////////////////////
35 // We split use of the CGC_lock into 2 "levels".
36 // The low-level locking is of the usual CGC_lock monitor. We introduce
37 // a higher level "token" (hereafter "CMS token") built on top of the
38 // low level monitor (hereafter "CGC lock").
39 // The token-passing protocol gives priority to the VM thread. The
40 // CMS-lock doesn't provide any fairness guarantees, but clients
41 // should ensure that it is only held for very short, bounded
42 // durations.
43 //
44 // When either the CMS thread or the VM thread is involved
45 // collection operations during which it does not want the other
46 // thread to interfere, it obtains the CMS token.
47 //
48 // If either thread tries to get the token while the other has
49 // it, that thread waits. However, if the VM thread and CMS thread
50 // both want the token, then the VM thread gets priority while the
51 // CMS thread waits. This ensures, for instance, that the "concurrent"
52 // phases of the CMS thread's work do not block out the VM thread
53 // for long periods of time as the CMS thread continues to hog
54 // the token. (See bug 4616232).
55 //
56 // The baton-passing functions are, however, controlled by the
57 // flags _foregroundGCShouldWait and _foregroundGCIsActive,
58 // and here the low-level CMS lock, not the high level token,
59 // ensures mutual exclusion.
60 //
61 // Two important conditions that we have to satisfy:
62 // 1. if a thread does a low-level wait on the CMS lock, then it
63 //    relinquishes the CMS token if it was holding that token
64 // when it acquired the low-level CMS lock.
65 // 2. any low-level notifications on the low-level lock
66 // should only be sent when a thread has relinquished the token.
67 //
68 // In the absence of either property, we'd have potential deadlock.
69 //
70 // We protect each of the CMS (concurrent and sequential) phases
71 // with the CMS _token_, not the CMS _lock_.
72 //
73 // The only code protected by CMS lock is the token acquisition code
74 // itself, see ConcurrentMarkSweepThread::[de]synchronize(), and the
75 // baton-passing code.
76 //
77 // Unfortunately, I couldn't come up with a good abstraction to factor and
78 // hide the naked CGC_lock manipulation in the baton-passing code
79 // further below. That's something we should try to do. Also, the proof
80 // of correctness of this 2-level locking scheme is far from obvious,
81 // and potentially quite slippery. We have an uneasy suspicion, for instance,
82 // that there may be a theoretical possibility of delay/starvation in the
83 // low-level lock/wait/notify scheme used for the baton-passing because of
84 // potential interference with the priority scheme embodied in the
85 // CMS-token-passing protocol. See related comments at a CGC_lock->wait()
86 // invocation further below and marked with "XXX 20011219YSR".
87 // Indeed, as we note elsewhere, this may become yet more slippery
88 // in the presence of multiple CMS and/or multiple VM threads. XXX
90 class CMSTokenSync: public StackObj {
91 private:
92 bool _is_cms_thread;
93 public:
94 CMSTokenSync(bool is_cms_thread):
95 _is_cms_thread(is_cms_thread) {
96 assert(is_cms_thread == Thread::current()->is_ConcurrentGC_thread(),
97 "Incorrect argument to constructor");
98 ConcurrentMarkSweepThread::synchronize(_is_cms_thread);
99 }
101 ~CMSTokenSync() {
102 assert(_is_cms_thread ?
103 ConcurrentMarkSweepThread::cms_thread_has_cms_token() :
104 ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
105 "Incorrect state");
106 ConcurrentMarkSweepThread::desynchronize(_is_cms_thread);
107 }
108 };
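For orientation, a hypothetical use of the scope object above; example_cms_phase() and cms_phase_work() are stand-in names for illustration, not functions in this file:

void example_cms_phase() {                   // illustrative sketch only
  CMSTokenSync ts(true /* is_cms_thread */);
  cms_phase_work();                          // runs while holding the CMS token
}                                            // token released by ~CMSTokenSync()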
110 // Convenience class that does a CMSTokenSync, and then acquires
111 // up to three locks.
112 class CMSTokenSyncWithLocks: public CMSTokenSync {
113 private:
114 // Note: locks are acquired in textual declaration order
115 // and released in the opposite order
116 MutexLockerEx _locker1, _locker2, _locker3;
117 public:
118 CMSTokenSyncWithLocks(bool is_cms_thread, Mutex* mutex1,
119 Mutex* mutex2 = NULL, Mutex* mutex3 = NULL):
120 CMSTokenSync(is_cms_thread),
121 _locker1(mutex1, Mutex::_no_safepoint_check_flag),
122 _locker2(mutex2, Mutex::_no_safepoint_check_flag),
123 _locker3(mutex3, Mutex::_no_safepoint_check_flag)
124 { }
125 };
128 // Wrapper class to temporarily disable icms during a foreground cms collection.
129 class ICMSDisabler: public StackObj {
130 public:
131 // The ctor disables icms and wakes up the thread so it notices the change;
132 // the dtor re-enables icms. Note that the CMSCollector methods will check
133 // CMSIncrementalMode.
134 ICMSDisabler() { CMSCollector::disable_icms(); CMSCollector::start_icms(); }
135 ~ICMSDisabler() { CMSCollector::enable_icms(); }
136 };
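A similar hypothetical sketch for the disabler (the function name is a stand-in): icms stays off for exactly the scope of the foreground collection.

void example_foreground_collection() {       // illustrative sketch only
  ICMSDisabler disabler;                     // icms disabled, CMS thread woken
  // ... perform the foreground collection ...
}                                            // icms re-enabled by ~ICMSDisabler()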
138 //////////////////////////////////////////////////////////////////
139 // Concurrent Mark-Sweep Generation /////////////////////////////
140 //////////////////////////////////////////////////////////////////
142 NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;)
144 // This struct contains per-thread things necessary to support parallel
145 // young-gen collection.
146 class CMSParGCThreadState: public CHeapObj {
147 public:
148 CFLS_LAB lab;
149 PromotionInfo promo;
151 // Constructor.
152 CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
153 promo.setSpace(cfls);
154 }
155 };
157 ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
158 ReservedSpace rs, size_t initial_byte_size, int level,
159 CardTableRS* ct, bool use_adaptive_freelists,
160 FreeBlockDictionary::DictionaryChoice dictionaryChoice) :
161 CardGeneration(rs, initial_byte_size, level, ct),
162 _dilatation_factor(((double)MinChunkSize)/((double)(oopDesc::header_size()))),
163 _debug_collection_type(Concurrent_collection_type)
164 {
165 HeapWord* bottom = (HeapWord*) _virtual_space.low();
166 HeapWord* end = (HeapWord*) _virtual_space.high();
168 _direct_allocated_words = 0;
169 NOT_PRODUCT(
170 _numObjectsPromoted = 0;
171 _numWordsPromoted = 0;
172 _numObjectsAllocated = 0;
173 _numWordsAllocated = 0;
174 )
176 _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end),
177 use_adaptive_freelists,
178 dictionaryChoice);
179 NOT_PRODUCT(debug_cms_space = _cmsSpace;)
180 if (_cmsSpace == NULL) {
181 vm_exit_during_initialization(
182 "CompactibleFreeListSpace allocation failure");
183 }
184 _cmsSpace->_gen = this;
186 _gc_stats = new CMSGCStats();
188 // Verify the assumption that FreeChunk::_prev and OopDesc::_klass
189 // offsets match. The ability to tell free chunks from objects
190 // depends on this property.
191 debug_only(
192 FreeChunk* junk = NULL;
193 assert(UseCompressedOops ||
194 junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
195 "Offset of FreeChunk::_prev within FreeChunk must match"
196 " that of OopDesc::_klass within OopDesc");
197 )
198 if (ParallelGCThreads > 0) {
199 typedef CMSParGCThreadState* CMSParGCThreadStatePtr;
200 _par_gc_thread_states =
201 NEW_C_HEAP_ARRAY(CMSParGCThreadStatePtr, ParallelGCThreads);
202 if (_par_gc_thread_states == NULL) {
203 vm_exit_during_initialization("Could not allocate par gc structs");
204 }
205 for (uint i = 0; i < ParallelGCThreads; i++) {
206 _par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace());
207 if (_par_gc_thread_states[i] == NULL) {
208 vm_exit_during_initialization("Could not allocate par gc structs");
209 }
210 }
211 } else {
212 _par_gc_thread_states = NULL;
213 }
214 _incremental_collection_failed = false;
215 // The "dilatation_factor" is the expansion that can occur on
216 // account of the fact that the minimum object size in the CMS
217 // generation may be larger than that in, say, a contiguous young
218 // generation.
219 // Ideally, in the calculation below, we'd compute the dilatation
220 // factor as: MinChunkSize/(promoting_gen's min object size).
221 // Since we do not have such a general query interface for the
222 // promoting generation, we'll instead just use the minimum
223 // object size (which today is a header's worth of space);
224 // note that all arithmetic is in units of HeapWords.
225 assert(MinChunkSize >= oopDesc::header_size(), "just checking");
226 assert(_dilatation_factor >= 1.0, "from previous assert");
227 }
230 // The field "_initiating_occupancy" represents the occupancy percentage
231 // at which we trigger a new collection cycle. Unless explicitly specified
232 // via CMSInitiating[Perm]OccupancyFraction (argument "io" below), it
233 // is calculated by:
234 //
235 // Let "f" be MinHeapFreeRatio in
236 //
237 //   _initiating_occupancy = 100-f +
238 // f * (CMSTrigger[Perm]Ratio/100)
239 // where CMSTrigger[Perm]Ratio is the argument "tr" below.
240 //
241 // That is, if we assume the heap is at its desired maximum occupancy at the
242 // end of a collection, we let CMSTrigger[Perm]Ratio of the (purported) free
243 // space be allocated before initiating a new collection cycle.
244 //
245 void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, intx tr) {
246 assert(io <= 100 && tr >= 0 && tr <= 100, "Check the arguments");
247 if (io >= 0) {
248 _initiating_occupancy = (double)io / 100.0;
249 } else {
250 _initiating_occupancy = ((100 - MinHeapFreeRatio) +
251 (double)(tr * MinHeapFreeRatio) / 100.0)
252 / 100.0;
253 }
254 }
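As a worked example of the io < 0 branch, a minimal standalone sketch; the flag values below are illustrative inputs, not necessarily the JVM defaults:

#include <cstdio>

int main() {
  const double f  = 40.0;   // MinHeapFreeRatio
  const double tr = 80.0;   // CMSTrigger[Perm]Ratio
  // (100 - f) + f * (tr/100), expressed as a fraction of the heap:
  double initiating_occupancy = ((100.0 - f) + tr * f / 100.0) / 100.0;
  printf("initiating occupancy = %.2f\n", initiating_occupancy);  // 0.92
  return 0;
}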
257 void ConcurrentMarkSweepGeneration::ref_processor_init() {
258 assert(collector() != NULL, "no collector");
259 collector()->ref_processor_init();
260 }
262 void CMSCollector::ref_processor_init() {
263 if (_ref_processor == NULL) {
264 // Allocate and initialize a reference processor
265 _ref_processor = ReferenceProcessor::create_ref_processor(
266 _span, // span
267 _cmsGen->refs_discovery_is_atomic(), // atomic_discovery
268 _cmsGen->refs_discovery_is_mt(), // mt_discovery
269 &_is_alive_closure,
270 ParallelGCThreads,
271 ParallelRefProcEnabled);
272 // Initialize the _ref_processor field of CMSGen
273 _cmsGen->set_ref_processor(_ref_processor);
275 // Allocate a dummy ref processor for perm gen.
276 ReferenceProcessor* rp2 = new ReferenceProcessor();
277 if (rp2 == NULL) {
278 vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
279 }
280 _permGen->set_ref_processor(rp2);
281 }
282 }
284 CMSAdaptiveSizePolicy* CMSCollector::size_policy() {
285 GenCollectedHeap* gch = GenCollectedHeap::heap();
286 assert(gch->kind() == CollectedHeap::GenCollectedHeap,
287 "Wrong type of heap");
288 CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
289 gch->gen_policy()->size_policy();
290 assert(sp->is_gc_cms_adaptive_size_policy(),
291 "Wrong type of size policy");
292 return sp;
293 }
295 CMSGCAdaptivePolicyCounters* CMSCollector::gc_adaptive_policy_counters() {
296 CMSGCAdaptivePolicyCounters* results =
297 (CMSGCAdaptivePolicyCounters*) collector_policy()->counters();
298 assert(
299 results->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
300 "Wrong gc policy counter kind");
301 return results;
302 }
305 void ConcurrentMarkSweepGeneration::initialize_performance_counters() {
307 const char* gen_name = "old";
309 // Generation Counters - generation 1, 1 subspace
310 _gen_counters = new GenerationCounters(gen_name, 1, 1, &_virtual_space);
312 _space_counters = new GSpaceCounters(gen_name, 0,
313 _virtual_space.reserved_size(),
314 this, _gen_counters);
315 }
317 CMSStats::CMSStats(ConcurrentMarkSweepGeneration* cms_gen, unsigned int alpha):
318 _cms_gen(cms_gen)
319 {
320 assert(alpha <= 100, "bad value");
321 _saved_alpha = alpha;
323 // Initialize the alphas to the bootstrap value of 100.
324 _gc0_alpha = _cms_alpha = 100;
326 _cms_begin_time.update();
327 _cms_end_time.update();
329 _gc0_duration = 0.0;
330 _gc0_period = 0.0;
331 _gc0_promoted = 0;
333 _cms_duration = 0.0;
334 _cms_period = 0.0;
335 _cms_allocated = 0;
337 _cms_used_at_gc0_begin = 0;
338 _cms_used_at_gc0_end = 0;
339 _allow_duty_cycle_reduction = false;
340 _valid_bits = 0;
341 _icms_duty_cycle = CMSIncrementalDutyCycle;
342 }
344 // If promotion failure handling is on, use
345 // the padded average size of the promotion for each
346 // young generation collection.
347 double CMSStats::time_until_cms_gen_full() const {
348 size_t cms_free = _cms_gen->cmsSpace()->free();
349 GenCollectedHeap* gch = GenCollectedHeap::heap();
350 size_t expected_promotion = gch->get_gen(0)->capacity();
351 if (HandlePromotionFailure) {
352 expected_promotion = MIN2(
353 (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average(),
354 expected_promotion);
355 }
356 if (cms_free > expected_promotion) {
357 // Start a cms collection if there isn't enough space to promote
358 // for the next minor collection. Use the padded average as
359 // a safety factor.
360 cms_free -= expected_promotion;
362 // Adjust by the safety factor.
363 double cms_free_dbl = (double)cms_free;
364 cms_free_dbl = cms_free_dbl * (100.0 - CMSIncrementalSafetyFactor) / 100.0;
366 if (PrintGCDetails && Verbose) {
367 gclog_or_tty->print_cr("CMSStats::time_until_cms_gen_full: cms_free "
368 SIZE_FORMAT " expected_promotion " SIZE_FORMAT,
369 cms_free, expected_promotion);
370 gclog_or_tty->print_cr(" cms_free_dbl %f cms_consumption_rate %f",
371 cms_free_dbl, cms_consumption_rate() + 1.0);
372 }
373 // Add 1 in case the consumption rate goes to zero.
374 return cms_free_dbl / (cms_consumption_rate() + 1.0);
375 }
376 return 0.0;
377 }
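A minimal standalone model of the estimate above, with made-up inputs (all byte and rate values are illustrative):

#include <cstdio>

int main() {
  double cms_free           = 200.0 * 1024 * 1024;  // free bytes in the cms gen
  double expected_promotion =  64.0 * 1024 * 1024;  // padded-average promotion
  double safety_factor      =  10.0;                // CMSIncrementalSafetyFactor
  double consumption_rate   =   4.0 * 1024 * 1024;  // bytes consumed per second
  if (cms_free > expected_promotion) {
    cms_free -= expected_promotion;                 // reserve promotion headroom
    cms_free *= (100.0 - safety_factor) / 100.0;    // apply the safety factor
    // +1 guards against a zero consumption rate, as in the code above.
    printf("time until full: %.1f s\n", cms_free / (consumption_rate + 1.0));
  } else {
    printf("time until full: 0.0 s\n");
  }
  return 0;
}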
379 // Compare the duration of the cms collection to the
380 // time remaining before the cms generation is full.
381 // Note that the time from the start of the cms collection
382 // to the start of the cms sweep (less than the total
383 // duration of the cms collection) could be used instead. This
384 // was tried, and some applications experienced
385 // promotion failures early in execution, possibly
386 // because the averages were not accurate
387 // enough at the beginning.
388 double CMSStats::time_until_cms_start() const {
389 // We add "gc0_period" to the "work" calculation
390 // below because this query is done (mostly) at the
391 // end of a scavenge, so we need to conservatively
392 // account for that much possible delay
393 // in the query so as to avoid concurrent mode failures
394 // due to starting the collection just a wee bit too
395 // late.
396 double work = cms_duration() + gc0_period();
397 double deadline = time_until_cms_gen_full();
398 if (work > deadline) {
399 if (Verbose && PrintGCDetails) {
400 gclog_or_tty->print(
401 " CMSCollector: collect because of anticipated promotion "
402 "before full %3.7f + %3.7f > %3.7f ", cms_duration(),
403 gc0_period(), time_until_cms_gen_full());
404 }
405 return 0.0;
406 }
407 return work - deadline;
408 }
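A minimal standalone model of the start-now test above, with made-up timings:

#include <cstdio>

int main() {
  double cms_duration = 4.0;   // estimated seconds for a full cms cycle
  double gc0_period   = 1.5;   // seconds between scavenges
  double deadline     = 30.0;  // time_until_cms_gen_full(), seconds
  double work = cms_duration + gc0_period;
  // start immediately if the remaining work no longer fits before the deadline
  printf("%s\n", work > deadline ? "start now" : "can wait");
  return 0;
}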
410 // Return a duty cycle based on old_duty_cycle and new_duty_cycle, limiting the
411 // amount of change to prevent wild oscillation.
412 unsigned int CMSStats::icms_damped_duty_cycle(unsigned int old_duty_cycle,
413 unsigned int new_duty_cycle) {
414 assert(old_duty_cycle <= 100, "bad input value");
415 assert(new_duty_cycle <= 100, "bad input value");
417 // Note: use subtraction with caution since it may underflow (values are
418 // unsigned). Addition is safe since we're in the range 0-100.
419 unsigned int damped_duty_cycle = new_duty_cycle;
420 if (new_duty_cycle < old_duty_cycle) {
421 const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 5U);
422 if (new_duty_cycle + largest_delta < old_duty_cycle) {
423 damped_duty_cycle = old_duty_cycle - largest_delta;
424 }
425 } else if (new_duty_cycle > old_duty_cycle) {
426 const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 15U);
427 if (new_duty_cycle > old_duty_cycle + largest_delta) {
428 damped_duty_cycle = MIN2(old_duty_cycle + largest_delta, 100U);
429 }
430 }
431 assert(damped_duty_cycle <= 100, "invalid duty cycle computed");
433 if (CMSTraceIncrementalPacing) {
434 gclog_or_tty->print(" [icms_damped_duty_cycle(%d,%d) = %d] ",
435 old_duty_cycle, new_duty_cycle, damped_duty_cycle);
436 }
437 return damped_duty_cycle;
438 }
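A minimal standalone model of the damping above, exercised with two made-up inputs:

#include <algorithm>
#include <cstdio>

unsigned int damped(unsigned int old_dc, unsigned int new_dc) {
  unsigned int result = new_dc;
  if (new_dc < old_dc) {            // limit how fast the duty cycle drops
    unsigned int delta = std::max(old_dc / 4, 5u);
    if (new_dc + delta < old_dc) result = old_dc - delta;
  } else if (new_dc > old_dc) {     // limit how fast the duty cycle rises
    unsigned int delta = std::max(old_dc / 4, 15u);
    if (new_dc > old_dc + delta) result = std::min(old_dc + delta, 100u);
  }
  return result;
}

int main() {
  printf("%u\n", damped(60, 10));   // 45: drop capped at 60/4 = 15
  printf("%u\n", damped(20, 90));   // 35: rise capped at max(20/4, 15) = 15
  return 0;
}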
440 unsigned int CMSStats::icms_update_duty_cycle_impl() {
441 assert(CMSIncrementalPacing && valid(),
442 "should be handled in icms_update_duty_cycle()");
444 double cms_time_so_far = cms_timer().seconds();
445 double scaled_duration = cms_duration_per_mb() * _cms_used_at_gc0_end / M;
446 double scaled_duration_remaining = fabsd(scaled_duration - cms_time_so_far);
448 // Avoid division by 0.
449 double time_until_full = MAX2(time_until_cms_gen_full(), 0.01);
450 double duty_cycle_dbl = 100.0 * scaled_duration_remaining / time_until_full;
452 unsigned int new_duty_cycle = MIN2((unsigned int)duty_cycle_dbl, 100U);
453 if (new_duty_cycle > _icms_duty_cycle) {
454 // Avoid very small duty cycles (1 or 2); 0 is allowed.
455 if (new_duty_cycle > 2) {
456 _icms_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle,
457 new_duty_cycle);
458 }
459 } else if (_allow_duty_cycle_reduction) {
460 // The duty cycle is reduced only once per cms cycle (see record_cms_end()).
461 new_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle, new_duty_cycle);
462 // Respect the minimum duty cycle.
463 unsigned int min_duty_cycle = (unsigned int)CMSIncrementalDutyCycleMin;
464 _icms_duty_cycle = MAX2(new_duty_cycle, min_duty_cycle);
465 }
467 if (PrintGCDetails || CMSTraceIncrementalPacing) {
468 gclog_or_tty->print(" icms_dc=%d ", _icms_duty_cycle);
469 }
471 _allow_duty_cycle_reduction = false;
472 return _icms_duty_cycle;
473 }
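A minimal standalone sketch of the core ratio above, with made-up inputs:

#include <algorithm>
#include <cstdio>

int main() {
  double scaled_duration_remaining = 2.0;  // estimated seconds of cms work left
  double time_until_full           = 8.0;  // seconds until the cms gen fills
  time_until_full = std::max(time_until_full, 0.01);  // avoid division by 0
  double duty_cycle = 100.0 * scaled_duration_remaining / time_until_full;
  printf("duty cycle = %.0f%%\n", std::min(duty_cycle, 100.0));  // 25%
  return 0;
}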
475 #ifndef PRODUCT
476 void CMSStats::print_on(outputStream *st) const {
477 st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
478 st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
479 gc0_duration(), gc0_period(), gc0_promoted());
480 st->print(",cms_dur=%g,cms_dur_per_mb=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
481 cms_duration(), cms_duration_per_mb(),
482 cms_period(), cms_allocated());
483 st->print(",cms_since_beg=%g,cms_since_end=%g",
484 cms_time_since_begin(), cms_time_since_end());
485 st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
486 _cms_used_at_gc0_begin, _cms_used_at_gc0_end);
487 if (CMSIncrementalMode) {
488 st->print(",dc=%d", icms_duty_cycle());
489 }
491 if (valid()) {
492 st->print(",promo_rate=%g,cms_alloc_rate=%g",
493 promotion_rate(), cms_allocation_rate());
494 st->print(",cms_consumption_rate=%g,time_until_full=%g",
495 cms_consumption_rate(), time_until_cms_gen_full());
496 }
497 st->print(" ");
498 }
499 #endif // #ifndef PRODUCT
501 CMSCollector::CollectorState CMSCollector::_collectorState =
502 CMSCollector::Idling;
503 bool CMSCollector::_foregroundGCIsActive = false;
504 bool CMSCollector::_foregroundGCShouldWait = false;
506 CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
507 ConcurrentMarkSweepGeneration* permGen,
508 CardTableRS* ct,
509 ConcurrentMarkSweepPolicy* cp):
510 _cmsGen(cmsGen),
511 _permGen(permGen),
512 _ct(ct),
513 _ref_processor(NULL), // will be set later
514 _conc_workers(NULL), // may be set later
515 _abort_preclean(false),
516 _start_sampling(false),
517 _between_prologue_and_epilogue(false),
518 _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
519 _perm_gen_verify_bit_map(0, -1 /* no mutex */, "No_lock"),
520 _modUnionTable((CardTableModRefBS::card_shift - LogHeapWordSize),
521 -1 /* lock-free */, "No_lock" /* dummy */),
522 _modUnionClosure(&_modUnionTable),
523 _modUnionClosurePar(&_modUnionTable),
524 // Adjust my span to cover old (cms) gen and perm gen
525 _span(cmsGen->reserved()._union(permGen->reserved())),
526 // Construct the is_alive_closure with _span & markBitMap
527 _is_alive_closure(_span, &_markBitMap),
528 _restart_addr(NULL),
529 _overflow_list(NULL),
530 _preserved_oop_stack(NULL),
531 _preserved_mark_stack(NULL),
532 _stats(cmsGen),
533 _eden_chunk_array(NULL), // may be set in ctor body
534 _eden_chunk_capacity(0), // -- ditto --
535 _eden_chunk_index(0), // -- ditto --
536 _survivor_plab_array(NULL), // -- ditto --
537 _survivor_chunk_array(NULL), // -- ditto --
538 _survivor_chunk_capacity(0), // -- ditto --
539 _survivor_chunk_index(0), // -- ditto --
540 _ser_pmc_preclean_ovflw(0),
541 _ser_kac_preclean_ovflw(0),
542 _ser_pmc_remark_ovflw(0),
543 _par_pmc_remark_ovflw(0),
544 _ser_kac_ovflw(0),
545 _par_kac_ovflw(0),
546 #ifndef PRODUCT
547 _num_par_pushes(0),
548 #endif
549 _collection_count_start(0),
550 _verifying(false),
551 _icms_start_limit(NULL),
552 _icms_stop_limit(NULL),
553 _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
554 _completed_initialization(false),
555 _collector_policy(cp),
556 _should_unload_classes(false),
557 _concurrent_cycles_since_last_unload(0),
558 _sweep_estimate(CMS_SweepWeight, CMS_SweepPadding)
559 {
560 if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
561 ExplicitGCInvokesConcurrent = true;
562 }
563 // Now expand the span and allocate the collection support structures
564 // (MUT, marking bit map etc.) to cover both generations subject to
565 // collection.
567 // First check that _permGen is adjacent to _cmsGen and above it.
568 assert( _cmsGen->reserved().word_size() > 0
569 && _permGen->reserved().word_size() > 0,
570 "generations should not be of zero size");
571 assert(_cmsGen->reserved().intersection(_permGen->reserved()).is_empty(),
572 "_cmsGen and _permGen should not overlap");
573 assert(_cmsGen->reserved().end() == _permGen->reserved().start(),
574 "_cmsGen->end() different from _permGen->start()");
576 // For use by dirty card to oop closures.
577 _cmsGen->cmsSpace()->set_collector(this);
578 _permGen->cmsSpace()->set_collector(this);
580 // Allocate MUT and marking bit map
581 {
582 MutexLockerEx x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag);
583 if (!_markBitMap.allocate(_span)) {
584 warning("Failed to allocate CMS Bit Map");
585 return;
586 }
587 assert(_markBitMap.covers(_span), "_markBitMap inconsistency?");
588 }
589 {
590 _modUnionTable.allocate(_span);
591 assert(_modUnionTable.covers(_span), "_modUnionTable inconsistency?");
592 }
594 if (!_markStack.allocate(CMSMarkStackSize)) {
595 warning("Failed to allocate CMS Marking Stack");
596 return;
597 }
598 if (!_revisitStack.allocate(CMSRevisitStackSize)) {
599 warning("Failed to allocate CMS Revisit Stack");
600 return;
601 }
603 // Support for multi-threaded concurrent phases
604 if (ParallelGCThreads > 0 && CMSConcurrentMTEnabled) {
605 if (FLAG_IS_DEFAULT(ParallelCMSThreads)) {
606 // just for now
607 FLAG_SET_DEFAULT(ParallelCMSThreads, (ParallelGCThreads + 3)/4);
608 }
609 if (ParallelCMSThreads > 1) {
610 _conc_workers = new YieldingFlexibleWorkGang("Parallel CMS Threads",
611 ParallelCMSThreads, true);
612 if (_conc_workers == NULL) {
613 warning("GC/CMS: _conc_workers allocation failure: "
614 "forcing -CMSConcurrentMTEnabled");
615 CMSConcurrentMTEnabled = false;
616 }
617 } else {
618 CMSConcurrentMTEnabled = false;
619 }
620 }
621 if (!CMSConcurrentMTEnabled) {
622 ParallelCMSThreads = 0;
623 } else {
624 // Turn off CMSCleanOnEnter optimization temporarily for
625 // the MT case where it's not fixed yet; see 6178663.
626 CMSCleanOnEnter = false;
627 }
628 assert((_conc_workers != NULL) == (ParallelCMSThreads > 1),
629 "Inconsistency");
631 // Parallel task queues; these are shared for the
632 // concurrent and stop-world phases of CMS, but
633 // are not shared with parallel scavenge (ParNew).
634 {
635 uint i;
636 uint num_queues = (uint) MAX2(ParallelGCThreads, ParallelCMSThreads);
638 if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled
639 || ParallelRefProcEnabled)
640 && num_queues > 0) {
641 _task_queues = new OopTaskQueueSet(num_queues);
642 if (_task_queues == NULL) {
643 warning("task_queues allocation failure.");
644 return;
645 }
646 _hash_seed = NEW_C_HEAP_ARRAY(int, num_queues);
647 if (_hash_seed == NULL) {
648 warning("_hash_seed array allocation failure");
649 return;
650 }
652 // XXX use a global constant instead of 64!
653 typedef struct OopTaskQueuePadded {
654 OopTaskQueue work_queue;
655 char pad[64 - sizeof(OopTaskQueue)]; // prevent false sharing
656 } OopTaskQueuePadded;
658 for (i = 0; i < num_queues; i++) {
659 OopTaskQueuePadded *q_padded = new OopTaskQueuePadded();
660 if (q_padded == NULL) {
661 warning("work_queue allocation failure.");
662 return;
663 }
664 _task_queues->register_queue(i, &q_padded->work_queue);
665 }
666 for (i = 0; i < num_queues; i++) {
667 _task_queues->queue(i)->initialize();
668 _hash_seed[i] = 17; // copied from ParNew
669 }
670 }
671 }
673 _cmsGen ->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);
674 _permGen->init_initiating_occupancy(CMSInitiatingPermOccupancyFraction, CMSTriggerPermRatio);
676 // Clip CMSBootstrapOccupancy between 0 and 100.
677 _bootstrap_occupancy = ((double)MIN2((uintx)100, MAX2((uintx)0, CMSBootstrapOccupancy)))
678 /(double)100;
680 _full_gcs_since_conc_gc = 0;
682 // Now tell CMS generations the identity of their collector
683 ConcurrentMarkSweepGeneration::set_collector(this);
685 // Create & start a CMS thread for this CMS collector
686 _cmsThread = ConcurrentMarkSweepThread::start(this);
687 assert(cmsThread() != NULL, "CMS Thread should have been created");
688 assert(cmsThread()->collector() == this,
689 "CMS Thread should refer to this gen");
690 assert(CGC_lock != NULL, "Where's the CGC_lock?");
692 // Support for parallelizing young gen rescan
693 GenCollectedHeap* gch = GenCollectedHeap::heap();
694 _young_gen = gch->prev_gen(_cmsGen);
695 if (gch->supports_inline_contig_alloc()) {
696 _top_addr = gch->top_addr();
697 _end_addr = gch->end_addr();
698 assert(_young_gen != NULL, "no _young_gen");
699 _eden_chunk_index = 0;
700 _eden_chunk_capacity = (_young_gen->max_capacity()+CMSSamplingGrain)/CMSSamplingGrain;
701 _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity);
702 if (_eden_chunk_array == NULL) {
703 _eden_chunk_capacity = 0;
704 warning("GC/CMS: _eden_chunk_array allocation failure");
705 }
706 }
707 assert(_eden_chunk_array != NULL || _eden_chunk_capacity == 0, "Error");
709 // Support for parallelizing survivor space rescan
710 if (CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) {
711 size_t max_plab_samples = MaxNewSize/((SurvivorRatio+2)*MinTLABSize);
712 _survivor_plab_array = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads);
713 _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, 2*max_plab_samples);
714 _cursor = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads);
715 if (_survivor_plab_array == NULL || _survivor_chunk_array == NULL
716 || _cursor == NULL) {
717 warning("Failed to allocate survivor plab/chunk array");
718 if (_survivor_plab_array != NULL) {
719 FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array);
720 _survivor_plab_array = NULL;
721 }
722 if (_survivor_chunk_array != NULL) {
723 FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array);
724 _survivor_chunk_array = NULL;
725 }
726 if (_cursor != NULL) {
727 FREE_C_HEAP_ARRAY(size_t, _cursor);
728 _cursor = NULL;
729 }
730 } else {
731 _survivor_chunk_capacity = 2*max_plab_samples;
732 for (uint i = 0; i < ParallelGCThreads; i++) {
733 HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples);
734 if (vec == NULL) {
735 warning("Failed to allocate survivor plab array");
736 for (int j = i; j > 0; j--) {
737 FREE_C_HEAP_ARRAY(HeapWord*, _survivor_plab_array[j-1].array());
738 }
739 FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array);
740 FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array);
741 _survivor_plab_array = NULL;
742 _survivor_chunk_array = NULL;
743 _survivor_chunk_capacity = 0;
744 break;
745 } else {
746 ChunkArray* cur =
747 ::new (&_survivor_plab_array[i]) ChunkArray(vec,
748 max_plab_samples);
749 assert(cur->end() == 0, "Should be 0");
750 assert(cur->array() == vec, "Should be vec");
751 assert(cur->capacity() == max_plab_samples, "Error");
752 }
753 }
754 }
755 }
756 assert( ( _survivor_plab_array != NULL
757 && _survivor_chunk_array != NULL)
758 || ( _survivor_chunk_capacity == 0
759 && _survivor_chunk_index == 0),
760 "Error");
762 // Choose which strong roots should be scanned, depending on verification options
763 // and perm gen collection mode.
764 if (!CMSClassUnloadingEnabled) {
765 // If class unloading is disabled, we want to include all classes in the root set.
766 add_root_scanning_option(SharedHeap::SO_AllClasses);
767 } else {
768 add_root_scanning_option(SharedHeap::SO_SystemClasses);
769 }
771 NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
772 _gc_counters = new CollectorCounters("CMS", 1);
773 _completed_initialization = true;
774 _sweep_timer.start(); // start of time
775 }
777 const char* ConcurrentMarkSweepGeneration::name() const {
778 return "concurrent mark-sweep generation";
779 }
780 void ConcurrentMarkSweepGeneration::update_counters() {
781 if (UsePerfData) {
782 _space_counters->update_all();
783 _gen_counters->update_all();
784 }
785 }
787 // This is an optimized version of update_counters(); it takes the
788 // used value as a parameter rather than computing it.
789 //
790 void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
791 if (UsePerfData) {
792 _space_counters->update_used(used);
793 _space_counters->update_capacity();
794 _gen_counters->update_all();
795 }
796 }
798 void ConcurrentMarkSweepGeneration::print() const {
799 Generation::print();
800 cmsSpace()->print();
801 }
803 #ifndef PRODUCT
804 void ConcurrentMarkSweepGeneration::print_statistics() {
805 cmsSpace()->printFLCensus(0);
806 }
807 #endif
809 void ConcurrentMarkSweepGeneration::printOccupancy(const char *s) {
810 GenCollectedHeap* gch = GenCollectedHeap::heap();
811 if (PrintGCDetails) {
812 if (Verbose) {
813 gclog_or_tty->print(" [%d %s-%s: "SIZE_FORMAT"("SIZE_FORMAT")]",
814 level(), short_name(), s, used(), capacity());
815 } else {
816 gclog_or_tty->print(" [%d %s-%s: "SIZE_FORMAT"K("SIZE_FORMAT"K)]",
817 level(), short_name(), s, used() / K, capacity() / K);
818 }
819 }
820 if (Verbose) {
821 gclog_or_tty->print(" "SIZE_FORMAT"("SIZE_FORMAT")",
822 gch->used(), gch->capacity());
823 } else {
824 gclog_or_tty->print(" "SIZE_FORMAT"K("SIZE_FORMAT"K)",
825 gch->used() / K, gch->capacity() / K);
826 }
827 }
829 size_t
830 ConcurrentMarkSweepGeneration::contiguous_available() const {
831 // dld proposes an improvement in precision here. If the committed
832 // part of the space ends in a free block, we should add that to the
833 // uncommitted size in the calculation below. Will make this
834 // change later, staying with the approximation below for the
835 // time being. -- ysr.
836 return MAX2(_virtual_space.uncommitted_size(), unsafe_max_alloc_nogc());
837 }
839 size_t
840 ConcurrentMarkSweepGeneration::unsafe_max_alloc_nogc() const {
841 return _cmsSpace->max_alloc_in_words() * HeapWordSize;
842 }
844 size_t ConcurrentMarkSweepGeneration::max_available() const {
845 return free() + _virtual_space.uncommitted_size();
846 }
848 bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(
849 size_t max_promotion_in_bytes,
850 bool younger_handles_promotion_failure) const {
852 // This is the most conservative test. Full promotion is
853 // guaranteed if this is used. The multiplicative factor is to
854 // account for the worst case "dilatation".
855 double adjusted_max_promo_bytes = _dilatation_factor * max_promotion_in_bytes;
856 if (adjusted_max_promo_bytes > (double)max_uintx) { // larger than size_t
857 adjusted_max_promo_bytes = (double)max_uintx;
858 }
859 bool result = (max_contiguous_available() >= (size_t)adjusted_max_promo_bytes);
861 if (younger_handles_promotion_failure && !result) {
862 // Full promotion is not guaranteed because fragmentation
863 // of the cms generation can prevent the full promotion.
864 result = (max_available() >= (size_t)adjusted_max_promo_bytes);
866 if (!result) {
867 // With promotion failure handling the test for the ability
868 // to support the promotion does not have to be guaranteed.
869 // Use an average of the amount promoted.
870 result = max_available() >= (size_t)
871 gc_stats()->avg_promoted()->padded_average();
872 if (PrintGC && Verbose && result) {
873 gclog_or_tty->print_cr(
874 "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe"
875 " max_available: " SIZE_FORMAT
876 " avg_promoted: " SIZE_FORMAT,
877 max_available(), (size_t)
878 gc_stats()->avg_promoted()->padded_average());
879 }
880 } else {
881 if (PrintGC && Verbose) {
882 gclog_or_tty->print_cr(
883 "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe"
884 " max_available: " SIZE_FORMAT
885 " adj_max_promo_bytes: " SIZE_FORMAT,
886 max_available(), (size_t)adjusted_max_promo_bytes);
887 }
888 }
889 } else {
890 if (PrintGC && Verbose) {
891 gclog_or_tty->print_cr(
892 "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe"
893 " contiguous_available: " SIZE_FORMAT
894 " adj_max_promo_bytes: " SIZE_FORMAT,
895 max_contiguous_available(), (size_t)adjusted_max_promo_bytes);
896 }
897 }
898 return result;
899 }
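A minimal standalone model of the first two stages of the test above; all sizes are made-up example values, in bytes:

#include <cstdio>

int main() {
  double dilatation_factor        = 1.25;  // worst-case expansion on promotion
  double max_promotion_in_bytes   = 40e6;
  double adjusted                 = dilatation_factor * max_promotion_in_bytes;
  double max_contiguous_available = 30e6;
  double max_available            = 55e6;
  bool safe = max_contiguous_available >= adjusted;   // 30e6 < 50e6: fails
  if (!safe) {
    // with promotion failure handling, fall back to total free space
    // (the code above also has a third stage using the padded average)
    safe = max_available >= adjusted;                 // 55e6 >= 50e6: passes
  }
  printf("promotion attempt %s\n", safe ? "is safe" : "may fail");
  return 0;
}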
901 CompactibleSpace*
902 ConcurrentMarkSweepGeneration::first_compaction_space() const {
903 return _cmsSpace;
904 }
906 void ConcurrentMarkSweepGeneration::reset_after_compaction() {
907 // Clear the promotion information. These pointers can be adjusted
908 // along with all the other pointers into the heap, but
909 // compaction is expected to be a rare event with
910 // a heap using cms, so don't do it without seeing the need.
911 if (ParallelGCThreads > 0) {
912 for (uint i = 0; i < ParallelGCThreads; i++) {
913 _par_gc_thread_states[i]->promo.reset();
914 }
915 }
916 }
918 void ConcurrentMarkSweepGeneration::space_iterate(SpaceClosure* blk, bool usedOnly) {
919 blk->do_space(_cmsSpace);
920 }
922 void ConcurrentMarkSweepGeneration::compute_new_size() {
923 assert_locked_or_safepoint(Heap_lock);
925 // If incremental collection failed, we just want to expand
926 // to the limit.
927 if (incremental_collection_failed()) {
928 clear_incremental_collection_failed();
929 grow_to_reserved();
930 return;
931 }
933 size_t expand_bytes = 0;
934 double free_percentage = ((double) free()) / capacity();
935 double desired_free_percentage = (double) MinHeapFreeRatio / 100;
936 double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;
938 // compute expansion delta needed for reaching desired free percentage
939 if (free_percentage < desired_free_percentage) {
940 size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
941 assert(desired_capacity >= capacity(), "invalid expansion size");
942 expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
943 }
944 if (expand_bytes > 0) {
945 if (PrintGCDetails && Verbose) {
946 size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
947 gclog_or_tty->print_cr("\nFrom compute_new_size: ");
948 gclog_or_tty->print_cr(" Free fraction %f", free_percentage);
949 gclog_or_tty->print_cr(" Desired free fraction %f",
950 desired_free_percentage);
951 gclog_or_tty->print_cr(" Maximum free fraction %f",
952 maximum_free_percentage);
953 gclog_or_tty->print_cr(" Capactiy "SIZE_FORMAT, capacity()/1000);
954 gclog_or_tty->print_cr(" Desired capacity "SIZE_FORMAT,
955 desired_capacity/1000);
956 int prev_level = level() - 1;
957 if (prev_level >= 0) {
958 size_t prev_size = 0;
959 GenCollectedHeap* gch = GenCollectedHeap::heap();
960 Generation* prev_gen = gch->_gens[prev_level];
961 prev_size = prev_gen->capacity();
962 gclog_or_tty->print_cr(" Younger gen size "SIZE_FORMAT,
963 prev_size/1000);
964 }
965 gclog_or_tty->print_cr(" unsafe_max_alloc_nogc "SIZE_FORMAT,
966 unsafe_max_alloc_nogc()/1000);
967 gclog_or_tty->print_cr(" contiguous available "SIZE_FORMAT,
968 contiguous_available()/1000);
969 gclog_or_tty->print_cr(" Expand by "SIZE_FORMAT" (bytes)",
970 expand_bytes);
971 }
972 // safe if expansion fails
973 expand(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
974 if (PrintGCDetails && Verbose) {
975 gclog_or_tty->print_cr(" Expanded free fraction %f",
976 ((double) free()) / capacity());
977 }
978 }
979 }
981 Mutex* ConcurrentMarkSweepGeneration::freelistLock() const {
982 return cmsSpace()->freelistLock();
983 }
985 HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size,
986 bool tlab) {
987 CMSSynchronousYieldRequest yr;
988 MutexLockerEx x(freelistLock(),
989 Mutex::_no_safepoint_check_flag);
990 return have_lock_and_allocate(size, tlab);
991 }
993 HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
994 bool tlab) {
995 assert_lock_strong(freelistLock());
996 size_t adjustedSize = CompactibleFreeListSpace::adjustObjectSize(size);
997 HeapWord* res = cmsSpace()->allocate(adjustedSize);
998 // Allocate the object live (grey) if the background collector has
999 // started marking. This is necessary because the marker may
1000 // have passed this address and consequently this object will
1001 // not otherwise be greyed and would be incorrectly swept up.
1002 // Note that if this object contains references, the writing
1003 // of those references will dirty the card containing this object
1004 // allowing the object to be blackened (and its references scanned)
1005 // either during a preclean phase or at the final checkpoint.
1006 if (res != NULL) {
1007 collector()->direct_allocated(res, adjustedSize);
1008 _direct_allocated_words += adjustedSize;
1009 // allocation counters
1010 NOT_PRODUCT(
1011 _numObjectsAllocated++;
1012 _numWordsAllocated += (int)adjustedSize;
1013 )
1014 }
1015 return res;
1016 }
1018 // In the case of direct allocation by mutators in a generation that
1019 // is being concurrently collected, the object must be allocated
1020 // live (grey) if the background collector has started marking.
1021 // This is necessary because the marker may
1022 // have passed this address and consequently this object will
1023 // not otherwise be greyed and would be incorrectly swept up.
1024 // Note that if this object contains references, the writing
1025 // of those references will dirty the card containing this object
1026 // allowing the object to be blackened (and its references scanned)
1027 // either during a preclean phase or at the final checkpoint.
1028 void CMSCollector::direct_allocated(HeapWord* start, size_t size) {
1029 assert(_markBitMap.covers(start, size), "Out of bounds");
1030 if (_collectorState >= Marking) {
1031 MutexLockerEx y(_markBitMap.lock(),
1032 Mutex::_no_safepoint_check_flag);
1033 // [see comments preceding SweepClosure::do_blk() below for details]
1034 // 1. need to mark the object as live so it isn't collected
1035 // 2. need to mark the 2nd bit to indicate the object may be uninitialized
1036 // 3. need to mark the end of the object so sweeper can skip over it
1037 // if it's uninitialized when the sweeper reaches it.
1038 _markBitMap.mark(start); // object is live
1039 _markBitMap.mark(start + 1); // object is potentially uninitialized?
1040 _markBitMap.mark(start + size - 1);
1041 // mark end of object
1042 }
1043 // check that oop looks uninitialized
1044 assert(oop(start)->klass_or_null() == NULL, "_klass should be NULL");
1045 }
1047 void CMSCollector::promoted(bool par, HeapWord* start,
1048 bool is_obj_array, size_t obj_size) {
1049 assert(_markBitMap.covers(start), "Out of bounds");
1050 // See comment in direct_allocated() about when objects should
1051 // be allocated live.
1052 if (_collectorState >= Marking) {
1053 // we already hold the marking bit map lock, taken in
1054 // the prologue
1055 if (par) {
1056 _markBitMap.par_mark(start);
1057 } else {
1058 _markBitMap.mark(start);
1059 }
1060 // We don't need to mark the object as uninitialized (as
1061 // in direct_allocated above) because this is being done with the
1062 // world stopped and the object will be initialized by the
1063 // time the sweeper gets to look at it.
1064 assert(SafepointSynchronize::is_at_safepoint(),
1065 "expect promotion only at safepoints");
1067 if (_collectorState < Sweeping) {
1068 // Mark the appropriate cards in the modUnionTable, so that
1069 // this object gets scanned before the sweep. If this is
1070 // not done, CMS generation references in the object might
1071 // not get marked.
1072 // For the case of arrays, which are otherwise precisely
1073 // marked, we need to dirty the entire array, not just its head.
1074 if (is_obj_array) {
1075 // The [par_]mark_range() method expects mr.end() below to
1076 // be aligned to the granularity of a bit's representation
1077 // in the heap. In the case of the MUT below, that's a
1078 // card size.
1079 MemRegion mr(start,
1080 (HeapWord*)round_to((intptr_t)(start + obj_size),
1081 CardTableModRefBS::card_size /* bytes */));
1082 if (par) {
1083 _modUnionTable.par_mark_range(mr);
1084 } else {
1085 _modUnionTable.mark_range(mr);
1086 }
1087 } else { // not an obj array; we can just mark the head
1088 if (par) {
1089 _modUnionTable.par_mark(start);
1090 } else {
1091 _modUnionTable.mark(start);
1092 }
1093 }
1094 }
1095 }
1096 }
1098 static inline size_t percent_of_space(Space* space, HeapWord* addr)
1099 {
1100 size_t delta = pointer_delta(addr, space->bottom());
1101 return (size_t)(delta * 100.0 / (space->capacity() / HeapWordSize));
1102 }
1104 void CMSCollector::icms_update_allocation_limits()
1105 {
1106 Generation* gen0 = GenCollectedHeap::heap()->get_gen(0);
1107 EdenSpace* eden = gen0->as_DefNewGeneration()->eden();
1109 const unsigned int duty_cycle = stats().icms_update_duty_cycle();
1110 if (CMSTraceIncrementalPacing) {
1111 stats().print();
1112 }
1114 assert(duty_cycle <= 100, "invalid duty cycle");
1115 if (duty_cycle != 0) {
1116 // The duty_cycle is a percentage between 0 and 100; convert to words and
1117 // then compute the offset from the endpoints of the space.
1118 size_t free_words = eden->free() / HeapWordSize;
1119 double free_words_dbl = (double)free_words;
1120 size_t duty_cycle_words = (size_t)(free_words_dbl * duty_cycle / 100.0);
1121 size_t offset_words = (free_words - duty_cycle_words) / 2;
1123 _icms_start_limit = eden->top() + offset_words;
1124 _icms_stop_limit = eden->end() - offset_words;
1126 // The limits may be adjusted (shifted to the right) by
1127 // CMSIncrementalOffset, to allow the application more mutator time after a
1128 // young gen gc (when all mutators were stopped) and before CMS starts and
1129 // takes away one or more cpus.
1130 if (CMSIncrementalOffset != 0) {
1131 double adjustment_dbl = free_words_dbl * CMSIncrementalOffset / 100.0;
1132 size_t adjustment = (size_t)adjustment_dbl;
1133 HeapWord* tmp_stop = _icms_stop_limit + adjustment;
1134 if (tmp_stop > _icms_stop_limit && tmp_stop < eden->end()) {
1135 _icms_start_limit += adjustment;
1136 _icms_stop_limit = tmp_stop;
1137 }
1138 }
1139 }
1140 if (duty_cycle == 0 || (_icms_start_limit == _icms_stop_limit)) {
1141 _icms_start_limit = _icms_stop_limit = eden->end();
1142 }
1144 // Install the new start limit.
1145 eden->set_soft_end(_icms_start_limit);
1147 if (CMSTraceIncrementalMode) {
1148 gclog_or_tty->print(" icms alloc limits: "
1149 PTR_FORMAT "," PTR_FORMAT
1150 " (" SIZE_FORMAT "%%," SIZE_FORMAT "%%) ",
1151 _icms_start_limit, _icms_stop_limit,
1152 percent_of_space(eden, _icms_start_limit),
1153 percent_of_space(eden, _icms_stop_limit));
1154 if (Verbose) {
1155 gclog_or_tty->print("eden: ");
1156 eden->print_on(gclog_or_tty);
1157 }
1158 }
1159 }
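A minimal standalone model of the window computation above; the eden free size and duty cycle are made-up example values (sizes in words):

#include <cstddef>
#include <cstdio>

int main() {
  size_t free_words = 1000000;      // eden->free() / HeapWordSize
  unsigned int duty_cycle = 20;     // percent
  size_t duty_cycle_words = (size_t)((double)free_words * duty_cycle / 100.0);
  size_t offset_words = (free_words - duty_cycle_words) / 2;
  // start limit = top + offset_words; stop limit = end - offset_words,
  // which centers the duty-cycle window in eden's free space.
  printf("window = %zu words, offset = %zu words\n",
         duty_cycle_words, offset_words);
  return 0;
}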
1161 // Any changes here should try to maintain the invariant
1162 // that if this method is called with _icms_start_limit
1163 // and _icms_stop_limit both NULL, then it should return NULL
1164 // and not notify the icms thread.
1165 HeapWord*
1166 CMSCollector::allocation_limit_reached(Space* space, HeapWord* top,
1167 size_t word_size)
1168 {
1169 // A start_limit equal to end() means the duty cycle is 0, so treat that as a
1170 // nop.
1171 if (CMSIncrementalMode && _icms_start_limit != space->end()) {
1172 if (top <= _icms_start_limit) {
1173 if (CMSTraceIncrementalMode) {
1174 space->print_on(gclog_or_tty);
1175 gclog_or_tty->stamp();
1176 gclog_or_tty->print_cr(" start limit top=" PTR_FORMAT
1177 ", new limit=" PTR_FORMAT
1178 " (" SIZE_FORMAT "%%)",
1179 top, _icms_stop_limit,
1180 percent_of_space(space, _icms_stop_limit));
1181 }
1182 ConcurrentMarkSweepThread::start_icms();
1183 assert(top < _icms_stop_limit, "Tautology");
1184 if (word_size < pointer_delta(_icms_stop_limit, top)) {
1185 return _icms_stop_limit;
1186 }
1188 // The allocation will cross both the _start and _stop limits, so do the
1189 // stop notification also and return end().
1190 if (CMSTraceIncrementalMode) {
1191 space->print_on(gclog_or_tty);
1192 gclog_or_tty->stamp();
1193 gclog_or_tty->print_cr(" +stop limit top=" PTR_FORMAT
1194 ", new limit=" PTR_FORMAT
1195 " (" SIZE_FORMAT "%%)",
1196 top, space->end(),
1197 percent_of_space(space, space->end()));
1198 }
1199 ConcurrentMarkSweepThread::stop_icms();
1200 return space->end();
1201 }
1203 if (top <= _icms_stop_limit) {
1204 if (CMSTraceIncrementalMode) {
1205 space->print_on(gclog_or_tty);
1206 gclog_or_tty->stamp();
1207 gclog_or_tty->print_cr(" stop limit top=" PTR_FORMAT
1208 ", new limit=" PTR_FORMAT
1209 " (" SIZE_FORMAT "%%)",
1210 top, space->end(),
1211 percent_of_space(space, space->end()));
1212 }
1213 ConcurrentMarkSweepThread::stop_icms();
1214 return space->end();
1215 }
1217 if (CMSTraceIncrementalMode) {
1218 space->print_on(gclog_or_tty);
1219 gclog_or_tty->stamp();
1220 gclog_or_tty->print_cr(" end limit top=" PTR_FORMAT
1221 ", new limit=" PTR_FORMAT,
1222 top, NULL);
1223 }
1224 }
1226 return NULL;
1227 }
1229 oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
1230 assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1231 // allocate, copy and if necessary update promoinfo --
1232 // delegate to underlying space.
1233 assert_lock_strong(freelistLock());
1235 #ifndef PRODUCT
1236 if (Universe::heap()->promotion_should_fail()) {
1237 return NULL;
1238 }
1239 #endif // #ifndef PRODUCT
1241 oop res = _cmsSpace->promote(obj, obj_size);
1242 if (res == NULL) {
1243 // expand and retry
1244 size_t s = _cmsSpace->expansionSpaceRequired(obj_size); // HeapWords
1245 expand(s*HeapWordSize, MinHeapDeltaBytes,
1246 CMSExpansionCause::_satisfy_promotion);
1247 // Since there's currently no next generation, we don't try to promote
1248 // into a more senior generation.
1249 assert(next_gen() == NULL, "assumption, based upon which no attempt "
1250 "is made to pass on a possibly failing "
1251 "promotion to next generation");
1252 res = _cmsSpace->promote(obj, obj_size);
1253 }
1254 if (res != NULL) {
1255 // See comment in allocate() about when objects should
1256 // be allocated live.
1257 assert(obj->is_oop(), "Will dereference klass pointer below");
1258 collector()->promoted(false, // Not parallel
1259 (HeapWord*)res, obj->is_objArray(), obj_size);
1260 // promotion counters
1261 NOT_PRODUCT(
1262 _numObjectsPromoted++;
1263 _numWordsPromoted +=
1264 (int)(CompactibleFreeListSpace::adjustObjectSize(obj->size()));
1265 )
1266 }
1267 return res;
1268 }
1271 HeapWord*
1272 ConcurrentMarkSweepGeneration::allocation_limit_reached(Space* space,
1273 HeapWord* top,
1274 size_t word_sz)
1275 {
1276 return collector()->allocation_limit_reached(space, top, word_sz);
1277 }
1279 // Things to support parallel young-gen collection.
1280 oop
1281 ConcurrentMarkSweepGeneration::par_promote(int thread_num,
1282 oop old, markOop m,
1283 size_t word_sz) {
1284 #ifndef PRODUCT
1285 if (Universe::heap()->promotion_should_fail()) {
1286 return NULL;
1287 }
1288 #endif // #ifndef PRODUCT
1290 CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1291 PromotionInfo* promoInfo = &ps->promo;
1292 // If we are tracking promotions, then first ensure space for
1293 // promotion (including spooling space for saving the header if necessary),
1294 // then allocate and copy, then track promoted info if needed.
1295 // When tracking (see PromotionInfo::track()), the mark word may
1296 // be displaced and in this case restoration of the mark word
1297 // occurs in the (oop_since_save_marks_)iterate phase.
1298 if (promoInfo->tracking() && !promoInfo->ensure_spooling_space()) {
1299 // Out of space for allocating spooling buffers;
1300 // try expanding and allocating spooling buffers.
1301 if (!expand_and_ensure_spooling_space(promoInfo)) {
1302 return NULL;
1303 }
1304 }
1305 assert(promoInfo->has_spooling_space(), "Control point invariant");
1306 HeapWord* obj_ptr = ps->lab.alloc(word_sz);
1307 if (obj_ptr == NULL) {
1308 obj_ptr = expand_and_par_lab_allocate(ps, word_sz);
1309 if (obj_ptr == NULL) {
1310 return NULL;
1311 }
1312 }
1313 oop obj = oop(obj_ptr);
1314 assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1315 // Otherwise, copy the object. Here we must be careful to insert the
1316 // klass pointer last, since this marks the block as an allocated object.
1317 // Except with compressed oops it's the mark word.
1318 HeapWord* old_ptr = (HeapWord*)old;
1319 if (word_sz > (size_t)oopDesc::header_size()) {
1320 Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(),
1321 obj_ptr + oopDesc::header_size(),
1322 word_sz - oopDesc::header_size());
1323 }
1325 if (UseCompressedOops) {
1326 // Copy gap missed by (aligned) header size calculation above
1327 obj->set_klass_gap(old->klass_gap());
1328 }
1330 // Restore the mark word copied above.
1331 obj->set_mark(m);
1333 // Now we can track the promoted object, if necessary. We take care
1334 // to delay the transition from uninitialized to full object
1335 // (i.e., insertion of klass pointer) until after, so that it
1336 // atomically becomes a promoted object.
1337 if (promoInfo->tracking()) {
1338 promoInfo->track((PromotedObject*)obj, old->klass());
1339 }
1341 // Finally, install the klass pointer (this should be volatile).
1342 obj->set_klass(old->klass());
1344 assert(old->is_oop(), "Will dereference klass ptr below");
1345 collector()->promoted(true, // parallel
1346 obj_ptr, old->is_objArray(), word_sz);
1348 NOT_PRODUCT(
1349 Atomic::inc(&_numObjectsPromoted);
1350 Atomic::add((jint)CompactibleFreeListSpace::adjustObjectSize(obj->size()),
1351 &_numWordsPromoted);
1352 )
1354 return obj;
1355 }
1357 void
1358 ConcurrentMarkSweepGeneration::
1359 par_promote_alloc_undo(int thread_num,
1360 HeapWord* obj, size_t word_sz) {
1361 // CMS does not support promotion undo.
1362 ShouldNotReachHere();
1363 }
1365 void
1366 ConcurrentMarkSweepGeneration::
1367 par_promote_alloc_done(int thread_num) {
1368 CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1369 ps->lab.retire();
1370 #if CFLS_LAB_REFILL_STATS
1371 if (thread_num == 0) {
1372 _cmsSpace->print_par_alloc_stats();
1373 }
1374 #endif
1375 }
1377 void
1378 ConcurrentMarkSweepGeneration::
1379 par_oop_since_save_marks_iterate_done(int thread_num) {
1380 CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1381 ParScanWithoutBarrierClosure* dummy_cl = NULL;
1382 ps->promo.promoted_oops_iterate_nv(dummy_cl);
1383 }
1385 // XXXPERM
1386 bool ConcurrentMarkSweepGeneration::should_collect(bool full,
1387 size_t size,
1388 bool tlab)
1389 {
1390 // We allow a STW collection only if a full
1391 // collection was requested.
1392 return full || should_allocate(size, tlab); // FIX ME !!!
1393 // This and promotion failure handling are connected at the
1394 // hip and should be fixed by untying them.
1395 }
1397 bool CMSCollector::shouldConcurrentCollect() {
1398 if (_full_gc_requested) {
1399 assert(ExplicitGCInvokesConcurrent, "Unexpected state");
1400 if (Verbose && PrintGCDetails) {
1401 gclog_or_tty->print_cr("CMSCollector: collect because of explicit "
1402 " gc request");
1403 }
1404 return true;
1405 }
1407 // For debugging purposes, change the type of collection.
1408 // If the rotation is not on the concurrent collection
1409 // type, don't start a concurrent collection.
1410 NOT_PRODUCT(
1411 if (RotateCMSCollectionTypes &&
1412 (_cmsGen->debug_collection_type() !=
1413 ConcurrentMarkSweepGeneration::Concurrent_collection_type)) {
1414 assert(_cmsGen->debug_collection_type() !=
1415 ConcurrentMarkSweepGeneration::Unknown_collection_type,
1416 "Bad cms collection type");
1417 return false;
1418 }
1419 )
1421 FreelistLocker x(this);
1422 // ------------------------------------------------------------------
1423 // Print out lots of information which affects the initiation of
1424 // a collection.
1425 if (PrintCMSInitiationStatistics && stats().valid()) {
1426 gclog_or_tty->print("CMSCollector shouldConcurrentCollect: ");
1427 gclog_or_tty->stamp();
1428 gclog_or_tty->print_cr("");
1429 stats().print_on(gclog_or_tty);
1430 gclog_or_tty->print_cr("time_until_cms_gen_full %3.7f",
1431 stats().time_until_cms_gen_full());
1432 gclog_or_tty->print_cr("free="SIZE_FORMAT, _cmsGen->free());
1433 gclog_or_tty->print_cr("contiguous_available="SIZE_FORMAT,
1434 _cmsGen->contiguous_available());
1435 gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate());
1436 gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate());
1437 gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy());
1438 gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
1439 gclog_or_tty->print_cr("initiatingPermOccupancy=%3.7f", _permGen->initiating_occupancy());
1440 }
1441 // ------------------------------------------------------------------
1443 // If the estimated time to complete a cms collection (cms_duration())
1444 // is less than the estimated time remaining until the cms generation
1445 // is full, start a collection.
1446 if (!UseCMSInitiatingOccupancyOnly) {
1447 if (stats().valid()) {
1448 if (stats().time_until_cms_start() == 0.0) {
1449 return true;
1450 }
1451 } else {
1452 // We want to conservatively collect somewhat early in order
1453 // to try to "bootstrap" our CMS/promotion statistics;
1454 // this branch will not fire after the first successful CMS
1455 // collection because the stats should then be valid.
1456 if (_cmsGen->occupancy() >= _bootstrap_occupancy) {
1457 if (Verbose && PrintGCDetails) {
1458 gclog_or_tty->print_cr(
1459 " CMSCollector: collect for bootstrapping statistics:"
1460 " occupancy = %f, boot occupancy = %f", _cmsGen->occupancy(),
1461 _bootstrap_occupancy);
1462 }
1463 return true;
1464 }
1465 }
1466 }
1468 // Otherwise, we start a collection cycle if either the perm gen or
1469 // old gen wants a collection cycle started. Each may use
1470 // an appropriate criterion for making this decision.
1471 // XXX We need to make sure that the gen expansion
1472 // criterion dovetails well with this. XXX NEED TO FIX THIS
1473 if (_cmsGen->should_concurrent_collect()) {
1474 if (Verbose && PrintGCDetails) {
1475 gclog_or_tty->print_cr("CMS old gen initiated");
1476 }
1477 return true;
1478 }
1480 // We start a collection if we believe an incremental collection may fail;
1481 // this is not likely to be productive in practice because it's probably too
1482 // late anyway.
1483 GenCollectedHeap* gch = GenCollectedHeap::heap();
1484 assert(gch->collector_policy()->is_two_generation_policy(),
1485 "You may want to check the correctness of the following");
1486 if (gch->incremental_collection_will_fail()) {
1487 if (PrintGCDetails && Verbose) {
1488 gclog_or_tty->print("CMSCollector: collect because incremental collection will fail ");
1489 }
1490 return true;
1491 }
1493 if (CMSClassUnloadingEnabled && _permGen->should_concurrent_collect()) {
1494 bool res = update_should_unload_classes();
1495 if (res) {
1496 if (Verbose && PrintGCDetails) {
1497 gclog_or_tty->print_cr("CMS perm gen initiated");
1498 }
1499 return true;
1500 }
1501 }
1502 return false;
1503 }
1505 // Clear _expansion_cause fields of constituent generations
1506 void CMSCollector::clear_expansion_cause() {
1507 _cmsGen->clear_expansion_cause();
1508 _permGen->clear_expansion_cause();
1509 }
1511 // We should be conservative in starting a collection cycle. Starting
1512 // too eagerly runs the risk of collecting far too often in the
1513 // extreme. Collecting too rarely falls back on full collections,
1514 // which works, even if not optimal in terms of concurrent work.
1515 // As a workaround for collecting too eagerly, use the flag
1516 // UseCMSInitiatingOccupancyOnly. This also has the advantage of
1517 // giving the user an easily understandable way of controlling the
1518 // collections.
1519 // We want to start a new collection cycle if any of the following
1520 // conditions hold:
1521 // . our current occupancy exceeds the configured initiating occupancy
1522 // for this generation, or
1523 // . we recently needed to expand this space and have not, since that
1524 // expansion, done a collection of this generation, or
1525 // . the underlying space believes that it may be a good idea to initiate
1526 // a concurrent collection (this may be based on criteria such as the
1527 // following: the space uses linear allocation and linear allocation is
1528 // going to fail, or there is believed to be excessive fragmentation in
1529 // the generation, etc... or ...
1530 // [.(currently done by CMSCollector::shouldConcurrentCollect() only for
1531 // the case of the old generation, not the perm generation; see CR 6543076):
1532 // we may be approaching a point at which allocation requests may fail because
1533 // we will run out of sufficient free space, given allocation rate estimates.]
1534 bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const {
1536 assert_lock_strong(freelistLock());
1537 if (occupancy() > initiating_occupancy()) {
1538 if (PrintGCDetails && Verbose) {
1539 gclog_or_tty->print(" %s: collect because of occupancy %f / %f ",
1540 short_name(), occupancy(), initiating_occupancy());
1541 }
1542 return true;
1543 }
1544 if (UseCMSInitiatingOccupancyOnly) {
1545 return false;
1546 }
1547 if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
1548 if (PrintGCDetails && Verbose) {
1549 gclog_or_tty->print(" %s: collect because expanded for allocation ",
1550 short_name());
1551 }
1552 return true;
1553 }
1554 if (_cmsSpace->should_concurrent_collect()) {
1555 if (PrintGCDetails && Verbose) {
1556 gclog_or_tty->print(" %s: collect because cmsSpace says so ",
1557 short_name());
1558 }
1559 return true;
1560 }
1561 return false;
1562 }
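// A minimal standalone sketch (not HotSpot code; names and numbers are
// illustrative) of the occupancy trigger implemented above. In HotSpot
// the initiating occupancy is ultimately derived from flags such as
// CMSInitiatingOccupancyFraction; here it is simply a parameter:
//
//   #include <cstdio>
//
//   static bool should_concurrent_collect(double used_bytes,
//                                         double capacity_bytes,
//                                         double initiating_occupancy) {
//     // Trigger a cycle once occupancy exceeds the configured fraction.
//     return (used_bytes / capacity_bytes) > initiating_occupancy;
//   }
//
//   int main() {
//     // e.g. 70% full with a 65% trigger => start a concurrent cycle.
//     std::printf("%d\n",
//                 should_concurrent_collect(70.0, 100.0, 0.65)); // prints 1
//     return 0;
//   }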
1564 void ConcurrentMarkSweepGeneration::collect(bool full,
1565 bool clear_all_soft_refs,
1566 size_t size,
1567 bool tlab)
1568 {
1569 collector()->collect(full, clear_all_soft_refs, size, tlab);
1570 }
1572 void CMSCollector::collect(bool full,
1573 bool clear_all_soft_refs,
1574 size_t size,
1575 bool tlab)
1576 {
1577 if (!UseCMSCollectionPassing && _collectorState > Idling) {
1578 // For debugging purposes, skip the collection if the state
1579 // is not currently idle.
1580 if (TraceCMSState) {
1581 gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " skipped full:%d CMS state %d",
1582 Thread::current(), full, _collectorState);
1583 }
1584 return;
1585 }
1587 // The following "if" branch is present for defensive reasons.
1588 // In the current uses of this interface, it can be replaced with:
1589 // assert(!GC_locker::is_active(), "Can't be called otherwise");
1590 // But I am not placing that assert here to allow future
1591 // generality in invoking this interface.
1592 if (GC_locker::is_active()) {
1593 // A consistency test for GC_locker
1594 assert(GC_locker::needs_gc(), "Should have been set already");
1595 // Skip this foreground collection, instead
1596 // expanding the heap if necessary.
1597 // Need the free list locks for the call to free() in compute_new_size()
1598 compute_new_size();
1599 return;
1600 }
1601 acquire_control_and_collect(full, clear_all_soft_refs);
1602 _full_gcs_since_conc_gc++;
1604 }
1606 void CMSCollector::request_full_gc(unsigned int full_gc_count) {
1607 GenCollectedHeap* gch = GenCollectedHeap::heap();
1608 unsigned int gc_count = gch->total_full_collections();
1609 if (gc_count == full_gc_count) {
1610 MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
1611 _full_gc_requested = true;
1612 CGC_lock->notify(); // nudge CMS thread
1613 }
1614 }
1617 // The foreground and background collectors need to coordinate in order
1618 // to make sure that they do not mutually interfere with CMS collections.
1619 // When a background collection is active,
1620 // the foreground collector may need to take over (preempt) and
1621 // synchronously complete an ongoing collection. Depending on the
1622 // frequency of the background collections and the heap usage
1623 // of the application, this preemption can be rare or frequent.
1624 // There are only certain
1625 // points in the background collection at which the "collection-baton"
1626 // can be passed to the foreground collector.
1627 //
1628 // The foreground collector will wait for the baton before
1629 // starting any part of the collection. The foreground collector
1630 // will only wait at one location.
1631 //
1632 // The background collector will yield the baton before starting a new
1633 // phase of the collection (e.g., before initial marking, marking from roots,
1634 // precleaning, final re-mark, sweep etc.) This is normally done at the head
1635 // of the loop which switches the phases. The background collector does some
1636 // of the phases (initial mark, final re-mark) with the world stopped.
1637 // Because of locking involved in stopping the world,
1638 // the foreground collector should not block waiting for the background
1639 // collector when it is doing a stop-the-world phase. The background
1640 // collector will yield the baton at an additional point just before
1641 // it enters a stop-the-world phase. Once the world is stopped, the
1642 // background collector checks the phase of the collection. If the
1643 // phase has not changed, it proceeds with the collection. If the
1644 // phase has changed, it skips that phase of the collection. See
1645 // the comments on the use of the Heap_lock in collect_in_background().
1646 //
1647 // Variables used in baton passing.
1648 // _foregroundGCIsActive - Set to true by the foreground collector when
1649 // it wants the baton. The foreground clears it when it has finished
1650 // the collection.
1651 // _foregroundGCShouldWait - Set to true by the background collector
1652 // when it is running. The foreground collector waits while
1653 // _foregroundGCShouldWait is true.
1654 // CGC_lock - monitor used to protect access to the above variables
1655 // and to notify the foreground and background collectors.
1656 // _collectorState - current state of the CMS collection.
1657 //
1658 // The foreground collector
1659 // acquires the CGC_lock
1660 // sets _foregroundGCIsActive
1661 // waits on the CGC_lock for _foregroundGCShouldWait to be false
1662 // various locks acquired in preparation for the collection
1663 // are released so as not to block the background collector
1664 // that is in the midst of a collection
1665 // proceeds with the collection
1666 // clears _foregroundGCIsActive
1667 // returns
1668 //
1669 // The background collector in a loop iterating on the phases of the
1670 // collection
1671 // acquires the CGC_lock
1672 // sets _foregroundGCShouldWait
1673 // if _foregroundGCIsActive is set
1674 // clears _foregroundGCShouldWait, notifies CGC_lock
1675 // waits on CGC_lock for _foregroundGCIsActive to become false
1676 // and exits the loop.
1677 // otherwise
1678 // proceed with that phase of the collection
1679 // if the phase is a stop-the-world phase,
1680 // yield the baton once more just before enqueueing
1681 // the stop-world CMS operation (executed by the VM thread).
1682 // returns after all phases of the collection are done
1683 //
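// The hand-off above can be modeled in a few lines of standard C++.
// This is only an illustrative sketch of the flag/notify protocol,
// assuming a single monitor guards both flags (the CMS token and the
// safepoint machinery are deliberately omitted); it is not HotSpot code:
//
//   #include <mutex>
//   #include <condition_variable>
//
//   struct Baton {
//     std::mutex m;                  // plays the role of CGC_lock
//     std::condition_variable cv;
//     bool fg_is_active = false;     // _foregroundGCIsActive
//     bool fg_should_wait = false;   // _foregroundGCShouldWait
//
//     void foreground_acquire() {    // VM thread wants the baton
//       std::unique_lock<std::mutex> l(m);
//       fg_is_active = true;
//       cv.notify_all();             // wake a background thread in wait()
//       cv.wait(l, [this] { return !fg_should_wait; });
//     }
//
//     void foreground_release() {    // foreground collection finished
//       std::lock_guard<std::mutex> l(m);
//       fg_is_active = false;
//       cv.notify_all();
//     }
//
//     bool background_yield_if_requested() {  // at each phase boundary
//       std::unique_lock<std::mutex> l(m);
//       fg_should_wait = true;
//       if (fg_is_active) {
//         fg_should_wait = false;    // let the foreground proceed
//         cv.notify_all();
//         cv.wait(l, [this] { return !fg_is_active; });
//         return true;               // yielded; abandon this cycle
//       }
//       return false;                // proceed with the next phase
//     }
//   };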
1685 void CMSCollector::acquire_control_and_collect(bool full,
1686 bool clear_all_soft_refs) {
1687 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
1688 assert(!Thread::current()->is_ConcurrentGC_thread(),
1689 "shouldn't try to acquire control from self!");
1691 // Start the protocol for acquiring control of the
1692 // collection from the background collector (aka CMS thread).
1693 assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1694 "VM thread should have CMS token");
1695 // Remember the possibly interrupted state of an ongoing
1696 // concurrent collection
1697 CollectorState first_state = _collectorState;
1699 // Signal to a possibly ongoing concurrent collection that
1700 // we want to do a foreground collection.
1701 _foregroundGCIsActive = true;
1703 // Disable incremental mode during a foreground collection.
1704 ICMSDisabler icms_disabler;
1706 // Release locks and wait for a notify from the background collector;
1707 // releasing the locks is only necessary for phases which
1708 // yield to improve the granularity of the collection.
1709 assert_lock_strong(bitMapLock());
1710 // We need to lock the Free list lock for the space that we are
1711 // currently collecting.
1712 assert(haveFreelistLocks(), "Must be holding free list locks");
1713 bitMapLock()->unlock();
1714 releaseFreelistLocks();
1715 {
1716 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1717 if (_foregroundGCShouldWait) {
1718 // We are going to be waiting for action from the CMS thread;
1719 // it had better not be gone (for instance at shutdown)!
1720 assert(ConcurrentMarkSweepThread::cmst() != NULL,
1721 "CMS thread must be running");
1722 // Wait here until the background collector gives us the go-ahead
1723 ConcurrentMarkSweepThread::clear_CMS_flag(
1724 ConcurrentMarkSweepThread::CMS_vm_has_token); // release token
1725 // Get a possibly blocked CMS thread going:
1726 // Note that we set _foregroundGCIsActive true above,
1727 // without protection of the CGC_lock.
1728 CGC_lock->notify();
1729 assert(!ConcurrentMarkSweepThread::vm_thread_wants_cms_token(),
1730 "Possible deadlock");
1731 while (_foregroundGCShouldWait) {
1732 // wait for notification
1733 CGC_lock->wait(Mutex::_no_safepoint_check_flag);
1734 // Possibility of delay/starvation here, since the CMS token does
1735 // not know to give priority to the VM thread? Actually, I think
1736 // there wouldn't be any delay/starvation, but the proof of
1737 // that "fact" (?) appears non-trivial. XXX 20011219YSR
1738 }
1739 ConcurrentMarkSweepThread::set_CMS_flag(
1740 ConcurrentMarkSweepThread::CMS_vm_has_token);
1741 }
1742 }
1743 // The CMS token is already held. Get back the other locks.
1744 assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1745 "VM thread should have CMS token");
1746 getFreelistLocks();
1747 bitMapLock()->lock_without_safepoint_check();
1748 if (TraceCMSState) {
1749 gclog_or_tty->print_cr("CMS foreground collector has asked for control "
1750 INTPTR_FORMAT " with first state %d", Thread::current(), first_state);
1751 gclog_or_tty->print_cr(" gets control with state %d", _collectorState);
1752 }
1754 // Check if we need to do a compaction, or if not, whether
1755 // we need to start the mark-sweep from scratch.
1756 bool should_compact = false;
1757 bool should_start_over = false;
1758 decide_foreground_collection_type(clear_all_soft_refs,
1759 &should_compact, &should_start_over);
1761 NOT_PRODUCT(
1762 if (RotateCMSCollectionTypes) {
1763 if (_cmsGen->debug_collection_type() ==
1764 ConcurrentMarkSweepGeneration::MSC_foreground_collection_type) {
1765 should_compact = true;
1766 } else if (_cmsGen->debug_collection_type() ==
1767 ConcurrentMarkSweepGeneration::MS_foreground_collection_type) {
1768 should_compact = false;
1769 }
1770 }
1771 )
1773 if (PrintGCDetails && first_state > Idling) {
1774 GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause();
1775 if (GCCause::is_user_requested_gc(cause) ||
1776 GCCause::is_serviceability_requested_gc(cause)) {
1777 gclog_or_tty->print(" (concurrent mode interrupted)");
1778 } else {
1779 gclog_or_tty->print(" (concurrent mode failure)");
1780 }
1781 }
1783 if (should_compact) {
1784 // If the collection is being acquired from the background
1785 // collector, there may be references on the discovered
1786 // references lists that have NULL referents (being those
1787 // that were concurrently cleared by a mutator) or
1788 // that are no longer active (having been enqueued concurrently
1789 // by the mutator).
1790 // Scrub the list of those references because Mark-Sweep-Compact
1791 // code assumes referents are not NULL and that all discovered
1792 // Reference objects are active.
1793 ref_processor()->clean_up_discovered_references();
1795 do_compaction_work(clear_all_soft_refs);
1797 // Has the GC time limit been exceeded?
1798 check_gc_time_limit();
1800 } else {
1801 do_mark_sweep_work(clear_all_soft_refs, first_state,
1802 should_start_over);
1803 }
1804 // Reset the expansion cause, now that we just completed
1805 // a collection cycle.
1806 clear_expansion_cause();
1807 _foregroundGCIsActive = false;
1808 return;
1809 }
1811 void CMSCollector::check_gc_time_limit() {
1813 // Ignore explicit GCs. Exiting here does not set the flag and
1814 // does not reset the count. Updating of the averages for system
1815 // GCs is still controlled by UseAdaptiveSizePolicyWithSystemGC.
1816 GCCause::Cause gc_cause = GenCollectedHeap::heap()->gc_cause();
1817 if (GCCause::is_user_requested_gc(gc_cause) ||
1818 GCCause::is_serviceability_requested_gc(gc_cause)) {
1819 return;
1820 }
1822 // Calculate the fraction of the CMS generation that was freed during
1823 // the last collection.
1824 // Only consider the STW compacting cost for now.
1825 //
1826 // Note that the gc time limit test only works for the collections
1827 // of the young gen + tenured gen and not for collections of the
1828 // permanent gen. That is because the calculation of the space
1829 // freed by the collection is the free space in the young gen +
1830 // tenured gen.
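// Worked example of the test below, assuming the usual HotSpot defaults
// GCTimeLimit=98 and GCHeapFreeLimit=2 (check your build's defaults):
// with compacting_gc_cost() == 0.99 we have 100.0 * 0.99 == 99 > 98,
// and with free() == 10M against max_capacity() == 1024M the free
// fraction is ~0.98% < 2%, so this collection counts toward the
// overhead limit; once the count passes
// AdaptiveSizePolicyGCTimeLimitThreshold, the limit is declared exceeded.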
1832 double fraction_free =
1833 ((double)_cmsGen->free())/((double)_cmsGen->max_capacity());
1834 if ((100.0 * size_policy()->compacting_gc_cost()) >
1835 ((double) GCTimeLimit) &&
1836 ((fraction_free * 100) < GCHeapFreeLimit)) {
1837 size_policy()->inc_gc_time_limit_count();
1838 if (UseGCOverheadLimit &&
1839 (size_policy()->gc_time_limit_count() >
1840 AdaptiveSizePolicyGCTimeLimitThreshold)) {
1841 size_policy()->set_gc_time_limit_exceeded(true);
1842 // Avoid consecutive OOM due to the gc time limit by resetting
1843 // the counter.
1844 size_policy()->reset_gc_time_limit_count();
1845 if (PrintGCDetails) {
1846 gclog_or_tty->print_cr(" GC is exceeding overhead limit "
1847 "of %d%%", GCTimeLimit);
1848 }
1849 } else {
1850 if (PrintGCDetails) {
1851 gclog_or_tty->print_cr(" GC would exceed overhead limit "
1852 "of %d%%", GCTimeLimit);
1853 }
1854 }
1855 } else {
1856 size_policy()->reset_gc_time_limit_count();
1857 }
1858 }
1860 // Resize the perm generation and the tenured generation
1861 // after obtaining the free list locks for the
1862 // two generations.
1863 void CMSCollector::compute_new_size() {
1864 assert_locked_or_safepoint(Heap_lock);
1865 FreelistLocker z(this);
1866 _permGen->compute_new_size();
1867 _cmsGen->compute_new_size();
1868 }
1870 // A work method used by the foreground collector to determine
1871 // what type of collection (compacting or not, continuing or fresh)
1872 // it should do.
1873 // NOTE: the intent is to make UseCMSCompactAtFullCollection
1874 // and CMSCompactWhenClearAllSoftRefs the default in the future
1875 // and do away with the flags after a suitable period.
1876 void CMSCollector::decide_foreground_collection_type(
1877 bool clear_all_soft_refs, bool* should_compact,
1878 bool* should_start_over) {
1879 // Normally, we'll compact only if the UseCMSCompactAtFullCollection
1880 // flag is set, and either we have requested a System.gc() or
1881 // the number of full GCs since the last concurrent cycle
1882 // has exceeded the threshold set by CMSFullGCsBeforeCompaction,
1883 // or an incremental collection has failed.
1884 GenCollectedHeap* gch = GenCollectedHeap::heap();
1885 assert(gch->collector_policy()->is_two_generation_policy(),
1886 "You may want to check the correctness of the following");
1887 // Inform the cms gen if this was due to a partial collection failing.
1888 // The CMS gen may use this fact to determine its expansion policy.
1889 if (gch->incremental_collection_will_fail()) {
1890 assert(!_cmsGen->incremental_collection_failed(),
1891 "Should have been noticed, reacted to and cleared");
1892 _cmsGen->set_incremental_collection_failed();
1893 }
1894 *should_compact =
1895 UseCMSCompactAtFullCollection &&
1896 ((_full_gcs_since_conc_gc >= CMSFullGCsBeforeCompaction) ||
1897 GCCause::is_user_requested_gc(gch->gc_cause()) ||
1898 gch->incremental_collection_will_fail());
1899 *should_start_over = false;
1900 if (clear_all_soft_refs && !*should_compact) {
1901 // We are about to do a last-ditch collection attempt
1902 // so it would normally make sense to do a compaction
1903 // to reclaim as much space as possible.
1904 if (CMSCompactWhenClearAllSoftRefs) {
1905 // Default: The rationale is that in this case either
1906 // we are past the final marking phase, in which case
1907 // we'd have to start over, or so little has been done
1908 // that there's little point in saving that work. Compaction
1909 // appears to be the sensible choice in either case.
1910 *should_compact = true;
1911 } else {
1912 // We have been asked to clear all soft refs, but not to
1913 // compact. Make sure that we aren't past the final checkpoint
1914 // phase, for that is where we process soft refs. If we are already
1915 // past that phase, we'll need to redo the refs discovery phase and
1916 // if necessary clear soft refs that weren't previously
1917 // cleared. We do so by remembering the phase in which
1918 // we came in, and if we are past the refs processing
1919 // phase, we'll choose to just redo the mark-sweep
1920 // collection from scratch.
1921 if (_collectorState > FinalMarking) {
1922 // We are past the refs processing phase;
1923 // start over and do a fresh synchronous CMS cycle
1924 _collectorState = Resetting; // skip to reset to start new cycle
1925 reset(false /* == !asynch */);
1926 *should_start_over = true;
1927 } // else we can continue a possibly ongoing current cycle
1928 }
1929 }
1930 }
1932 // A work method used by the foreground collector to do
1933 // a mark-sweep-compact.
1934 void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
1935 GenCollectedHeap* gch = GenCollectedHeap::heap();
1936 TraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, gclog_or_tty);
1937 if (PrintGC && Verbose && !(GCCause::is_user_requested_gc(gch->gc_cause()))) {
1938 gclog_or_tty->print_cr("Compact ConcurrentMarkSweepGeneration after %d "
1939 "collections passed to foreground collector", _full_gcs_since_conc_gc);
1940 }
1942 // Sample collection interval time and reset for collection pause.
1943 if (UseAdaptiveSizePolicy) {
1944 size_policy()->msc_collection_begin();
1945 }
1947 // Temporarily widen the span of the weak reference processing to
1948 // the entire heap.
1949 MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
1950 ReferenceProcessorSpanMutator x(ref_processor(), new_span);
1952 // Temporarily clear the "is_alive_non_header" field of the
1953 // reference processor.
1954 ReferenceProcessorIsAliveMutator y(ref_processor(), NULL);
1956 // Temporarily make reference _processing_ single threaded (non-MT).
1957 ReferenceProcessorMTProcMutator z(ref_processor(), false);
1959 // Temporarily make refs discovery atomic
1960 ReferenceProcessorAtomicMutator w(ref_processor(), true);
1962 ref_processor()->set_enqueuing_is_done(false);
1963 ref_processor()->enable_discovery();
1964 ref_processor()->setup_policy(clear_all_soft_refs);
1965 // If an asynchronous collection finishes, the _modUnionTable is
1966 // all clear. If we are taking over from an ongoing asynchronous
1967 // collection, clear the _modUnionTable.
1968 assert(_collectorState != Idling || _modUnionTable.isAllClear(),
1969 "_modUnionTable should be clear if the baton was not passed");
1970 _modUnionTable.clear_all();
1972 // We must adjust the allocation statistics being maintained
1973 // in the free list space. We do so by reading and clearing
1974 // the sweep timer and updating the block flux rate estimates below.
1975 assert(_sweep_timer.is_active(), "We should never see the timer inactive");
1976 _sweep_timer.stop();
1977 // Note that we do not use this sample to update the _sweep_estimate.
1978 _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_sweep_timer.seconds()),
1979 _sweep_estimate.padded_average());
1981 GenMarkSweep::invoke_at_safepoint(_cmsGen->level(),
1982 ref_processor(), clear_all_soft_refs);
1983 #ifdef ASSERT
1984 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
1985 size_t free_size = cms_space->free();
1986 assert(free_size ==
1987 pointer_delta(cms_space->end(), cms_space->compaction_top())
1988 * HeapWordSize,
1989 "All the free space should be compacted into one chunk at top");
1990 assert(cms_space->dictionary()->totalChunkSize(
1991 debug_only(cms_space->freelistLock())) == 0 ||
1992 cms_space->totalSizeInIndexedFreeLists() == 0,
1993 "All the free space should be in a single chunk");
1994 size_t num = cms_space->totalCount();
1995 assert((free_size == 0 && num == 0) ||
1996 (free_size > 0 && (num == 1 || num == 2)),
1997 "There should be at most 2 free chunks after compaction");
1998 #endif // ASSERT
1999 _collectorState = Resetting;
2000 assert(_restart_addr == NULL,
2001 "Should have been NULL'd before baton was passed");
2002 reset(false /* == !asynch */);
2003 _cmsGen->reset_after_compaction();
2004 _concurrent_cycles_since_last_unload = 0;
2006 if (verifying() && !should_unload_classes()) {
2007 perm_gen_verify_bit_map()->clear_all();
2008 }
2010 // Clear any data recorded in the PLAB chunk arrays.
2011 if (_survivor_plab_array != NULL) {
2012 reset_survivor_plab_arrays();
2013 }
2015 // Adjust the per-size allocation stats for the next epoch.
2016 _cmsGen->cmsSpace()->endSweepFLCensus(sweepCount() /* fake */);
2017 // Restart the "sweep timer" for next epoch.
2018 _sweep_timer.reset();
2019 _sweep_timer.start();
2021 // Sample collection pause time and reset for collection interval.
2022 if (UseAdaptiveSizePolicy) {
2023 size_policy()->msc_collection_end(gch->gc_cause());
2024 }
2026 // For a mark-sweep-compact, compute_new_size() will be called
2027 // in the heap's do_collection() method.
2028 }
2030 // A work method used by the foreground collector to do
2031 // a mark-sweep, after taking over from a possibly ongoing
2032 // concurrent mark-sweep collection.
2033 void CMSCollector::do_mark_sweep_work(bool clear_all_soft_refs,
2034 CollectorState first_state, bool should_start_over) {
2035 if (PrintGC && Verbose) {
2036 gclog_or_tty->print_cr("Pass concurrent collection to foreground "
2037 "collector with count %d",
2038 _full_gcs_since_conc_gc);
2039 }
2040 switch (_collectorState) {
2041 case Idling:
2042 if (first_state == Idling || should_start_over) {
2043 // The background GC was not active, or should be
2044 // restarted from scratch; start the cycle.
2045 _collectorState = InitialMarking;
2046 }
2047 // If first_state was not Idling, then a background GC
2048 // was in progress and has now finished. No need to do it
2049 // again. Leave the state as Idling.
2050 break;
2051 case Precleaning:
2052 // In the foreground case, don't do the precleaning since
2053 // it is not done concurrently and there is extra work
2054 // required.
2055 _collectorState = FinalMarking;
2056 }
2057 if (PrintGCDetails &&
2058 (_collectorState > Idling ||
2059 !GCCause::is_user_requested_gc(GenCollectedHeap::heap()->gc_cause()))) {
2060 gclog_or_tty->print(" (concurrent mode failure)");
2061 }
2062 collect_in_foreground(clear_all_soft_refs);
2064 // For a mark-sweep, compute_new_size() will be called
2065 // in the heap's do_collection() method.
2066 }
2069 void CMSCollector::getFreelistLocks() const {
2070 // Get locks for all free lists in all generations that this
2071 // collector is responsible for
2072 _cmsGen->freelistLock()->lock_without_safepoint_check();
2073 _permGen->freelistLock()->lock_without_safepoint_check();
2074 }
2076 void CMSCollector::releaseFreelistLocks() const {
2077 // Release locks for all free lists in all generations that this
2078 // collector is responsible for
2079 _cmsGen->freelistLock()->unlock();
2080 _permGen->freelistLock()->unlock();
2081 }
2083 bool CMSCollector::haveFreelistLocks() const {
2084 // Check locks for all free lists in all generations that this
2085 // collector is responsible for
2086 assert_lock_strong(_cmsGen->freelistLock());
2087 assert_lock_strong(_permGen->freelistLock());
2088 PRODUCT_ONLY(ShouldNotReachHere());
2089 return true;
2090 }
2092 // A utility class that is used by the CMS collector to
2093 // temporarily "release" the foreground collector from its
2094 // usual obligation to wait for the background collector to
2095 // complete an ongoing phase before proceeding.
2096 class ReleaseForegroundGC: public StackObj {
2097 private:
2098 CMSCollector* _c;
2099 public:
2100 ReleaseForegroundGC(CMSCollector* c) : _c(c) {
2101 assert(_c->_foregroundGCShouldWait, "Else should not need to call");
2102 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2103 // allow a potentially blocked foreground collector to proceed
2104 _c->_foregroundGCShouldWait = false;
2105 if (_c->_foregroundGCIsActive) {
2106 CGC_lock->notify();
2107 }
2108 assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2109 "Possible deadlock");
2110 }
2112 ~ReleaseForegroundGC() {
2113 assert(!_c->_foregroundGCShouldWait, "Usage protocol violation?");
2114 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2115 _c->_foregroundGCShouldWait = true;
2116 }
2117 };
2119 // There are separate collect_in_background and collect_in_foreground methods
2120 // because of the different locking requirements of the background collector
2121 // and the foreground collector. There was originally an attempt to share
2122 // one "collect" method between the background collector and the foreground
2123 // collector, but the if-then-else logic required made it cleaner to have
2124 // separate methods.
2125 void CMSCollector::collect_in_background(bool clear_all_soft_refs) {
2126 assert(Thread::current()->is_ConcurrentGC_thread(),
2127 "A CMS asynchronous collection is only allowed on a CMS thread.");
2129 GenCollectedHeap* gch = GenCollectedHeap::heap();
2130 {
2131 bool safepoint_check = Mutex::_no_safepoint_check_flag;
2132 MutexLockerEx hl(Heap_lock, safepoint_check);
2133 FreelistLocker fll(this);
2134 MutexLockerEx x(CGC_lock, safepoint_check);
2135 if (_foregroundGCIsActive || !UseAsyncConcMarkSweepGC) {
2136 // The foreground collector is active or we're
2137 // not using asynchronous collections. Skip this
2138 // background collection.
2139 assert(!_foregroundGCShouldWait, "Should be clear");
2140 return;
2141 } else {
2142 assert(_collectorState == Idling, "Should be idling before start.");
2143 _collectorState = InitialMarking;
2144 // Reset the expansion cause, now that we are about to begin
2145 // a new cycle.
2146 clear_expansion_cause();
2147 }
2148 // Decide if we want to enable class unloading as part of the
2149 // ensuing concurrent GC cycle.
2150 update_should_unload_classes();
2151 _full_gc_requested = false; // acks all outstanding full gc requests
2152 // Signal that we are about to start a collection
2153 gch->increment_total_full_collections(); // ... starting a collection cycle
2154 _collection_count_start = gch->total_full_collections();
2155 }
2157 // Used for PrintGC
2158 size_t prev_used;
2159 if (PrintGC && Verbose) {
2160 prev_used = _cmsGen->used(); // XXXPERM
2161 }
2163 // The change of the collection state is normally done at this level;
2164 // the exceptions are phases that are executed while the world is
2165 // stopped. For those phases the change of state is done while the
2166 // world is stopped. For baton passing purposes this allows the
2167 // background collector to finish the phase and change state atomically.
2168 // The foreground collector cannot wait on a phase that is done
2169 // while the world is stopped because the foreground collector already
2170 // has the world stopped and would deadlock.
2171 while (_collectorState != Idling) {
2172 if (TraceCMSState) {
2173 gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
2174 Thread::current(), _collectorState);
2175 }
2176 // The foreground collector
2177 // holds the Heap_lock throughout its collection.
2178 // holds the CMS token (but not the lock)
2179 // except while it is waiting for the background collector to yield.
2180 //
2181 // The foreground collector should be blocked (not for long)
2182 // if the background collector is about to start a phase
2183 // executed with world stopped. If the background
2184 // collector has already started such a phase, the
2185 // foreground collector is blocked waiting for the
2186 // Heap_lock. The stop-world phases (InitialMarking and FinalMarking)
2187 // are executed in the VM thread.
2188 //
2189 // The locking order is
2190 // PendingListLock (PLL) -- if applicable (FinalMarking)
2191 // Heap_lock (both this & PLL locked in VM_CMS_Operation::prologue())
2192 // CMS token (claimed in
2193 // stop_world_and_do() -->
2194 // safepoint_synchronize() -->
2195 // CMSThread::synchronize())
2197 {
2198 // Check if the FG collector wants us to yield.
2199 CMSTokenSync x(true); // is cms thread
2200 if (waitForForegroundGC()) {
2201 // We yielded to a foreground GC, nothing more to be
2202 // done this round.
2203 assert(_foregroundGCShouldWait == false, "We set it to false in "
2204 "waitForForegroundGC()");
2205 if (TraceCMSState) {
2206 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2207 " exiting collection CMS state %d",
2208 Thread::current(), _collectorState);
2209 }
2210 return;
2211 } else {
2212 // The background collector can run but check to see if the
2213 // foreground collector has done a collection while the
2214 // background collector was waiting to get the CGC_lock
2215 // above. If yes, break so that _foregroundGCShouldWait
2216 // is cleared before returning.
2217 if (_collectorState == Idling) {
2218 break;
2219 }
2220 }
2221 }
2223 assert(_foregroundGCShouldWait, "Foreground collector, if active, "
2224 "should be waiting");
2226 switch (_collectorState) {
2227 case InitialMarking:
2228 {
2229 ReleaseForegroundGC x(this);
2230 stats().record_cms_begin();
2232 VM_CMS_Initial_Mark initial_mark_op(this);
2233 VMThread::execute(&initial_mark_op);
2234 }
2235 // The collector state may be any legal state at this point
2236 // since the background collector may have yielded to the
2237 // foreground collector.
2238 break;
2239 case Marking:
2240 // initial marking in checkpointRootsInitialWork has been completed
2241 if (markFromRoots(true)) { // we were successful
2242 assert(_collectorState == Precleaning, "Collector state should "
2243 "have changed");
2244 } else {
2245 assert(_foregroundGCIsActive, "Internal state inconsistency");
2246 }
2247 break;
2248 case Precleaning:
2249 if (UseAdaptiveSizePolicy) {
2250 size_policy()->concurrent_precleaning_begin();
2251 }
2252 // marking from roots in markFromRoots has been completed
2253 preclean();
2254 if (UseAdaptiveSizePolicy) {
2255 size_policy()->concurrent_precleaning_end();
2256 }
2257 assert(_collectorState == AbortablePreclean ||
2258 _collectorState == FinalMarking,
2259 "Collector state should have changed");
2260 break;
2261 case AbortablePreclean:
2262 if (UseAdaptiveSizePolicy) {
2263 size_policy()->concurrent_phases_resume();
2264 }
2265 abortable_preclean();
2266 if (UseAdaptiveSizePolicy) {
2267 size_policy()->concurrent_precleaning_end();
2268 }
2269 assert(_collectorState == FinalMarking, "Collector state should "
2270 "have changed");
2271 break;
2272 case FinalMarking:
2273 {
2274 ReleaseForegroundGC x(this);
2276 VM_CMS_Final_Remark final_remark_op(this);
2277 VMThread::execute(&final_remark_op);
2278 }
2279 assert(_foregroundGCShouldWait, "block post-condition");
2280 break;
2281 case Sweeping:
2282 if (UseAdaptiveSizePolicy) {
2283 size_policy()->concurrent_sweeping_begin();
2284 }
2285 // final marking in checkpointRootsFinal has been completed
2286 sweep(true);
2287 assert(_collectorState == Resizing, "Collector state change "
2288 "to Resizing must be done under the free_list_lock");
2289 _full_gcs_since_conc_gc = 0;
2291 // Stop the timers for adaptive size policy for the concurrent phases
2292 if (UseAdaptiveSizePolicy) {
2293 size_policy()->concurrent_sweeping_end();
2294 size_policy()->concurrent_phases_end(gch->gc_cause(),
2295 gch->prev_gen(_cmsGen)->capacity(),
2296 _cmsGen->free());
2297 }
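// Note: no break here -- after sweeping, control intentionally
// falls through into the Resizing case.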
2299 case Resizing: {
2300 // Sweeping has been completed...
2301 // At this point the background collection has completed.
2302 // Don't move the call to compute_new_size() down
2303 // into code that might be executed if the background
2304 // collection was preempted.
2305 {
2306 ReleaseForegroundGC x(this); // unblock FG collection
2307 MutexLockerEx y(Heap_lock, Mutex::_no_safepoint_check_flag);
2308 CMSTokenSync z(true); // not strictly needed.
2309 if (_collectorState == Resizing) {
2310 compute_new_size();
2311 _collectorState = Resetting;
2312 } else {
2313 assert(_collectorState == Idling, "The state should only change"
2314 " because the foreground collector has finished the collection");
2315 }
2316 }
2317 break;
2318 }
2319 case Resetting:
2320 // CMS heap resizing has been completed
2321 reset(true);
2322 assert(_collectorState == Idling, "Collector state should "
2323 "have changed");
2324 stats().record_cms_end();
2325 // Don't move the concurrent_phases_end() and compute_new_size()
2326 // calls to here because a preempted background collection
2327 // has its state set to "Resetting".
2328 break;
2329 case Idling:
2330 default:
2331 ShouldNotReachHere();
2332 break;
2333 }
2334 if (TraceCMSState) {
2335 gclog_or_tty->print_cr(" Thread " INTPTR_FORMAT " done - next CMS state %d",
2336 Thread::current(), _collectorState);
2337 }
2338 assert(_foregroundGCShouldWait, "block post-condition");
2339 }
2341 // Should this be in gc_epilogue?
2342 collector_policy()->counters()->update_counters();
2344 {
2345 // Clear _foregroundGCShouldWait and, in the event that the
2346 // foreground collector is waiting, notify it, before
2347 // returning.
2348 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2349 _foregroundGCShouldWait = false;
2350 if (_foregroundGCIsActive) {
2351 CGC_lock->notify();
2352 }
2353 assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2354 "Possible deadlock");
2355 }
2356 if (TraceCMSState) {
2357 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2358 " exiting collection CMS state %d",
2359 Thread::current(), _collectorState);
2360 }
2361 if (PrintGC && Verbose) {
2362 _cmsGen->print_heap_change(prev_used);
2363 }
2364 }
2366 void CMSCollector::collect_in_foreground(bool clear_all_soft_refs) {
2367 assert(_foregroundGCIsActive && !_foregroundGCShouldWait,
2368 "Foreground collector should be waiting, not executing");
2369 assert(Thread::current()->is_VM_thread(), "A foreground collection"
2370 "may only be done by the VM Thread with the world stopped");
2371 assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
2372 "VM thread should have CMS token");
2374 NOT_PRODUCT(TraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose,
2375 true, gclog_or_tty);)
2376 if (UseAdaptiveSizePolicy) {
2377 size_policy()->ms_collection_begin();
2378 }
2379 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
2381 HandleMark hm; // Discard invalid handles created during verification
2383 if (VerifyBeforeGC &&
2384 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2385 Universe::verify(true);
2386 }
2388 // Snapshot the soft reference policy to be used in this collection cycle.
2389 ref_processor()->setup_policy(clear_all_soft_refs);
2391 bool init_mark_was_synchronous = false; // until proven otherwise
2392 while (_collectorState != Idling) {
2393 if (TraceCMSState) {
2394 gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
2395 Thread::current(), _collectorState);
2396 }
2397 switch (_collectorState) {
2398 case InitialMarking:
2399 init_mark_was_synchronous = true; // fact to be exploited in re-mark
2400 checkpointRootsInitial(false);
2401 assert(_collectorState == Marking, "Collector state should have changed"
2402 " within checkpointRootsInitial()");
2403 break;
2404 case Marking:
2405 // initial marking in checkpointRootsInitialWork has been completed
2406 if (VerifyDuringGC &&
2407 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2408 gclog_or_tty->print("Verify before initial mark: ");
2409 Universe::verify(true);
2410 }
2411 {
2412 bool res = markFromRoots(false);
2413 assert(res && _collectorState == FinalMarking, "Collector state should "
2414 "have changed");
2415 break;
2416 }
2417 case FinalMarking:
2418 if (VerifyDuringGC &&
2419 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2420 gclog_or_tty->print("Verify before re-mark: ");
2421 Universe::verify(true);
2422 }
2423 checkpointRootsFinal(false, clear_all_soft_refs,
2424 init_mark_was_synchronous);
2425 assert(_collectorState == Sweeping, "Collector state should not "
2426 "have changed within checkpointRootsFinal()");
2427 break;
2428 case Sweeping:
2429 // final marking in checkpointRootsFinal has been completed
2430 if (VerifyDuringGC &&
2431 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2432 gclog_or_tty->print("Verify before sweep: ");
2433 Universe::verify(true);
2434 }
2435 sweep(false);
2436 assert(_collectorState == Resizing, "Incorrect state");
2437 break;
2438 case Resizing: {
2439 // Sweeping has been completed; the actual resize in this case
2440 // is done separately; nothing to be done in this state.
2441 _collectorState = Resetting;
2442 break;
2443 }
2444 case Resetting:
2445 // The heap has been resized.
2446 if (VerifyDuringGC &&
2447 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2448 gclog_or_tty->print("Verify before reset: ");
2449 Universe::verify(true);
2450 }
2451 reset(false);
2452 assert(_collectorState == Idling, "Collector state should "
2453 "have changed");
2454 break;
2455 case Precleaning:
2456 case AbortablePreclean:
2457 // Elide the preclean phase
2458 _collectorState = FinalMarking;
2459 break;
2460 default:
2461 ShouldNotReachHere();
2462 }
2463 if (TraceCMSState) {
2464 gclog_or_tty->print_cr(" Thread " INTPTR_FORMAT " done - next CMS state %d",
2465 Thread::current(), _collectorState);
2466 }
2467 }
2469 if (UseAdaptiveSizePolicy) {
2470 GenCollectedHeap* gch = GenCollectedHeap::heap();
2471 size_policy()->ms_collection_end(gch->gc_cause());
2472 }
2474 if (VerifyAfterGC &&
2475 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2476 Universe::verify(true);
2477 }
2478 if (TraceCMSState) {
2479 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2480 " exiting collection CMS state %d",
2481 Thread::current(), _collectorState);
2482 }
2483 }
2485 bool CMSCollector::waitForForegroundGC() {
2486 bool res = false;
2487 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2488 "CMS thread should have CMS token");
2489 // Block the foreground collector until the
2490 // background collector decides whether to
2491 // yield.
2492 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2493 _foregroundGCShouldWait = true;
2494 if (_foregroundGCIsActive) {
2495 // The background collector yields to the
2496 // foreground collector and returns a value
2497 // indicating that it has yielded. The foreground
2498 // collector can proceed.
2499 res = true;
2500 _foregroundGCShouldWait = false;
2501 ConcurrentMarkSweepThread::clear_CMS_flag(
2502 ConcurrentMarkSweepThread::CMS_cms_has_token);
2503 ConcurrentMarkSweepThread::set_CMS_flag(
2504 ConcurrentMarkSweepThread::CMS_cms_wants_token);
2505 // Get a possibly blocked foreground thread going
2506 CGC_lock->notify();
2507 if (TraceCMSState) {
2508 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " waiting at CMS state %d",
2509 Thread::current(), _collectorState);
2510 }
2511 while (_foregroundGCIsActive) {
2512 CGC_lock->wait(Mutex::_no_safepoint_check_flag);
2513 }
2514 ConcurrentMarkSweepThread::set_CMS_flag(
2515 ConcurrentMarkSweepThread::CMS_cms_has_token);
2516 ConcurrentMarkSweepThread::clear_CMS_flag(
2517 ConcurrentMarkSweepThread::CMS_cms_wants_token);
2518 }
2519 if (TraceCMSState) {
2520 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " continuing at CMS state %d",
2521 Thread::current(), _collectorState);
2522 }
2523 return res;
2524 }
2526 // Because of the need to lock the free lists and other structures in
2527 // the collector, common to all the generations that the collector is
2528 // collecting, we need the gc_prologues of individual CMS generations
2529 // to delegate to their collector. It may have been simpler had the
2530 // current infrastructure allowed one to call a prologue on a
2531 // collector. In the absence of that we have the generation's
2532 // prologue delegate to the collector, which delegates back
2533 // some "local" work to a worker method in the individual generations
2534 // that it's responsible for collecting, while itself doing any
2535 // work common to all generations it's responsible for. A similar
2536 // comment applies to the gc_epilogue()'s.
2537 // The role of the variable _between_prologue_and_epilogue is to
2538 // enforce the invocation protocol.
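// A minimal sketch (standard C++, not HotSpot code; names illustrative)
// of the delegation and guard pattern just described:
//
//   struct Collector {
//     bool between_prologue_and_epilogue = false;
//
//     void gc_prologue() {
//       if (between_prologue_and_epilogue) return;  // later delegations no-op
//       between_prologue_and_epilogue = true;
//       // ... claim common locks, then per-generation prologue work ...
//     }
//
//     void gc_epilogue() {
//       if (!between_prologue_and_epilogue) return;
//       // ... per-generation epilogue work, release common locks ...
//       between_prologue_and_epilogue = false;      // ready for next cycle
//     }
//   };
//
//   struct Generation {
//     Collector* c;
//     void gc_prologue() { c->gc_prologue(); }  // each gen delegates here
//     void gc_epilogue() { c->gc_epilogue(); }
//   };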
2539 void CMSCollector::gc_prologue(bool full) {
2540 // Call gc_prologue_work() for each CMSGen and PermGen that
2541 // we are responsible for.
2543 // The following locking discipline assumes that we are only called
2544 // when the world is stopped.
2545 assert(SafepointSynchronize::is_at_safepoint(), "world is stopped assumption");
2547 // The CMSCollector prologue must call the gc_prologues for the
2548 // "generations" (including PermGen if any) that it's responsible
2549 // for.
2551 assert( Thread::current()->is_VM_thread()
2552 || ( CMSScavengeBeforeRemark
2553 && Thread::current()->is_ConcurrentGC_thread()),
2554 "Incorrect thread type for prologue execution");
2556 if (_between_prologue_and_epilogue) {
2557 // We have already been invoked; this is a gc_prologue delegation
2558 // from yet another CMS generation that we are responsible for, just
2559 // ignore it since all relevant work has already been done.
2560 return;
2561 }
2563 // set a bit saying prologue has been called; cleared in epilogue
2564 _between_prologue_and_epilogue = true;
2565 // Claim locks for common data structures, then call gc_prologue_work()
2566 // for each CMSGen and PermGen that we are responsible for.
2568 getFreelistLocks(); // gets free list locks on constituent spaces
2569 bitMapLock()->lock_without_safepoint_check();
2571 // Should call gc_prologue_work() for all cms gens we are responsible for
2572 bool registerClosure = _collectorState >= Marking
2573 && _collectorState < Sweeping;
2574 ModUnionClosure* muc = ParallelGCThreads > 0 ? &_modUnionClosurePar
2575 : &_modUnionClosure;
2576 _cmsGen->gc_prologue_work(full, registerClosure, muc);
2577 _permGen->gc_prologue_work(full, registerClosure, muc);
2579 if (!full) {
2580 stats().record_gc0_begin();
2581 }
2582 }
2584 void ConcurrentMarkSweepGeneration::gc_prologue(bool full) {
2585 // Delegate to CMScollector which knows how to coordinate between
2586 // this and any other CMS generations that it is responsible for
2587 // collecting.
2588 collector()->gc_prologue(full);
2589 }
2591 // This is a "private" interface for use by this generation's CMSCollector.
2592 // Not to be called directly by any other entity (for instance,
2593 // GenCollectedHeap, which calls the "public" gc_prologue method above).
2594 void ConcurrentMarkSweepGeneration::gc_prologue_work(bool full,
2595 bool registerClosure, ModUnionClosure* modUnionClosure) {
2596 assert(!incremental_collection_failed(), "Shouldn't be set yet");
2597 assert(cmsSpace()->preconsumptionDirtyCardClosure() == NULL,
2598 "Should be NULL");
2599 if (registerClosure) {
2600 cmsSpace()->setPreconsumptionDirtyCardClosure(modUnionClosure);
2601 }
2602 cmsSpace()->gc_prologue();
2603 // Clear stat counters
2604 NOT_PRODUCT(
2605 assert(_numObjectsPromoted == 0, "check");
2606 assert(_numWordsPromoted == 0, "check");
2607 if (Verbose && PrintGC) {
2608 gclog_or_tty->print("Allocated "SIZE_FORMAT" objects, "
2609 SIZE_FORMAT" bytes concurrently",
2610 _numObjectsAllocated, _numWordsAllocated*sizeof(HeapWord));
2611 }
2612 _numObjectsAllocated = 0;
2613 _numWordsAllocated = 0;
2614 )
2615 }
2617 void CMSCollector::gc_epilogue(bool full) {
2618 // The following locking discipline assumes that we are only called
2619 // when the world is stopped.
2620 assert(SafepointSynchronize::is_at_safepoint(),
2621 "world is stopped assumption");
2623 // Currently the CMS epilogue (see CompactibleFreeListSpace) merely checks
2624 // if linear allocation blocks need to be appropriately marked to allow
2625 // the blocks to be parsable. We also check here whether we need to nudge the
2626 // CMS collector thread to start a new cycle (if it's not already active).
2627 assert( Thread::current()->is_VM_thread()
2628 || ( CMSScavengeBeforeRemark
2629 && Thread::current()->is_ConcurrentGC_thread()),
2630 "Incorrect thread type for epilogue execution");
2632 if (!_between_prologue_and_epilogue) {
2633 // We have already been invoked; this is a gc_epilogue delegation
2634 // from yet another CMS generation that we are responsible for, just
2635 // ignore it since all relevant work has already been done.
2636 return;
2637 }
2638 assert(haveFreelistLocks(), "must have freelist locks");
2639 assert_lock_strong(bitMapLock());
2641 _cmsGen->gc_epilogue_work(full);
2642 _permGen->gc_epilogue_work(full);
2644 if (_collectorState == AbortablePreclean || _collectorState == Precleaning) {
2645 // in case sampling was not already enabled, enable it
2646 _start_sampling = true;
2647 }
2648 // reset _eden_chunk_array so sampling starts afresh
2649 _eden_chunk_index = 0;
2651 size_t cms_used = _cmsGen->cmsSpace()->used();
2652 size_t perm_used = _permGen->cmsSpace()->used();
2654 // update performance counters - this uses a special version of
2655 // update_counters() that allows the utilization to be passed as a
2656 // parameter, avoiding multiple calls to used().
2657 //
2658 _cmsGen->update_counters(cms_used);
2659 _permGen->update_counters(perm_used);
2661 if (CMSIncrementalMode) {
2662 icms_update_allocation_limits();
2663 }
2665 bitMapLock()->unlock();
2666 releaseFreelistLocks();
2668 _between_prologue_and_epilogue = false; // ready for next cycle
2669 }
2671 void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) {
2672 collector()->gc_epilogue(full);
2674 // Also reset promotion tracking in par gc thread states.
2675 if (ParallelGCThreads > 0) {
2676 for (uint i = 0; i < ParallelGCThreads; i++) {
2677 _par_gc_thread_states[i]->promo.stopTrackingPromotions();
2678 }
2679 }
2680 }
2682 void ConcurrentMarkSweepGeneration::gc_epilogue_work(bool full) {
2683 assert(!incremental_collection_failed(), "Should have been cleared");
2684 cmsSpace()->setPreconsumptionDirtyCardClosure(NULL);
2685 cmsSpace()->gc_epilogue();
2686 // Print stat counters
2687 NOT_PRODUCT(
2688 assert(_numObjectsAllocated == 0, "check");
2689 assert(_numWordsAllocated == 0, "check");
2690 if (Verbose && PrintGC) {
2691 gclog_or_tty->print("Promoted "SIZE_FORMAT" objects, "
2692 SIZE_FORMAT" bytes",
2693 _numObjectsPromoted, _numWordsPromoted*sizeof(HeapWord));
2694 }
2695 _numObjectsPromoted = 0;
2696 _numWordsPromoted = 0;
2697 )
2699 if (PrintGC && Verbose) {
2700 // The call down the chain in contiguous_available needs the freelistLock,
2701 // so print this out before releasing the freelistLock.
2702 gclog_or_tty->print(" Contiguous available "SIZE_FORMAT" bytes ",
2703 contiguous_available());
2704 }
2705 }
2707 #ifndef PRODUCT
2708 bool CMSCollector::have_cms_token() {
2709 Thread* thr = Thread::current();
2710 if (thr->is_VM_thread()) {
2711 return ConcurrentMarkSweepThread::vm_thread_has_cms_token();
2712 } else if (thr->is_ConcurrentGC_thread()) {
2713 return ConcurrentMarkSweepThread::cms_thread_has_cms_token();
2714 } else if (thr->is_GC_task_thread()) {
2715 return ConcurrentMarkSweepThread::vm_thread_has_cms_token() &&
2716 ParGCRareEvent_lock->owned_by_self();
2717 }
2718 return false;
2719 }
2720 #endif
2722 // Check reachability of the given heap address in CMS generation,
2723 // treating all other generations as roots.
2724 bool CMSCollector::is_cms_reachable(HeapWord* addr) {
2725 // We could "guarantee" below, rather than assert, but i'll
2726 // leave these as "asserts" so that an adventurous debugger
2727 // could try this in the product build provided some subset of
2728 // the conditions were met, provided they were intersted in the
2729 // results and knew that the computation below wouldn't interfere
2730 // with other concurrent computations mutating the structures
2731 // being read or written.
2732 assert(SafepointSynchronize::is_at_safepoint(),
2733 "Else mutations in object graph will make answer suspect");
2734 assert(have_cms_token(), "Should hold cms token");
2735 assert(haveFreelistLocks(), "must hold free list locks");
2736 assert_lock_strong(bitMapLock());
2738 // Clear the marking bit map array before starting, but, just
2739 // for kicks, first report if the given address is already marked
2740 gclog_or_tty->print_cr("Start: Address 0x%x is%s marked", addr,
2741 _markBitMap.isMarked(addr) ? "" : " not");
2743 if (verify_after_remark()) {
2744 MutexLockerEx x(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2745 bool result = verification_mark_bm()->isMarked(addr);
2746 gclog_or_tty->print_cr("TransitiveMark: Address 0x%x %s marked", addr,
2747 result ? "IS" : "is NOT");
2748 return result;
2749 } else {
2750 gclog_or_tty->print_cr("Could not compute result");
2751 return false;
2752 }
2753 }
2755 ////////////////////////////////////////////////////////
2756 // CMS Verification Support
2757 ////////////////////////////////////////////////////////
2758 // Following the remark phase, the following invariant
2759 // should hold -- each object in the CMS heap which is
2760 // marked in the verification_mark_bm() should also be marked in the markBitMap().
2762 class VerifyMarkedClosure: public BitMapClosure {
2763 CMSBitMap* _marks;
2764 bool _failed;
2766 public:
2767 VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {}
2769 bool do_bit(size_t offset) {
2770 HeapWord* addr = _marks->offsetToHeapWord(offset);
2771 if (!_marks->isMarked(addr)) {
2772 oop(addr)->print();
2773 gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr);
2774 _failed = true;
2775 }
2776 return true;
2777 }
2779 bool failed() { return _failed; }
2780 };
2782 bool CMSCollector::verify_after_remark() {
2783 gclog_or_tty->print(" [Verifying CMS Marking... ");
2784 MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2785 static bool init = false;
2787 assert(SafepointSynchronize::is_at_safepoint(),
2788 "Else mutations in object graph will make answer suspect");
2789 assert(have_cms_token(),
2790 "Else there may be mutual interference in use of "
2791 " verification data structures");
2792 assert(_collectorState > Marking && _collectorState <= Sweeping,
2793 "Else marking info checked here may be obsolete");
2794 assert(haveFreelistLocks(), "must hold free list locks");
2795 assert_lock_strong(bitMapLock());
2798 // Allocate marking bit map if not already allocated
2799 if (!init) { // first time
2800 if (!verification_mark_bm()->allocate(_span)) {
2801 return false;
2802 }
2803 init = true;
2804 }
2806 assert(verification_mark_stack()->isEmpty(), "Should be empty");
2808 // Turn off refs discovery -- so we will be tracing through refs.
2809 // This is as intended, because by this time
2810 // GC must already have cleared any refs that need to be cleared,
2811 // and traced those that need to be marked; moreover,
2812 // the marking done here is not going to interfere in any
2813 // way with the marking information used by GC.
2814 NoRefDiscovery no_discovery(ref_processor());
2816 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
2818 // Clear any marks from a previous round
2819 verification_mark_bm()->clear_all();
2820 assert(verification_mark_stack()->isEmpty(), "markStack should be empty");
2821 assert(overflow_list_is_empty(), "overflow list should be empty");
2823 GenCollectedHeap* gch = GenCollectedHeap::heap();
2824 gch->ensure_parsability(false); // fill TLABs, but no need to retire them
2825 // Update the saved marks which may affect the root scans.
2826 gch->save_marks();
2828 if (CMSRemarkVerifyVariant == 1) {
2829 // In this first variant of verification, we complete
2830 // all marking, then check if the new marks-vector is
2831 // a subset of the CMS marks-vector.
2832 verify_after_remark_work_1();
2833 } else if (CMSRemarkVerifyVariant == 2) {
2834 // In this second variant of verification, we flag an error
2835 // (i.e. an object reachable in the new marks-vector not reachable
2836 // in the CMS marks-vector) immediately, also indicating the
2837 // identity of an object (A) that references the unmarked object (B) --
2838 // presumably, a mutation to A failed to be picked up by preclean/remark?
2839 verify_after_remark_work_2();
2840 } else {
2841 warning("Unrecognized value %d for CMSRemarkVerifyVariant",
2842 CMSRemarkVerifyVariant);
2843 }
2844 gclog_or_tty->print(" done] ");
2845 return true;
2846 }
2848 void CMSCollector::verify_after_remark_work_1() {
2849 ResourceMark rm;
2850 HandleMark hm;
2851 GenCollectedHeap* gch = GenCollectedHeap::heap();
2853 // Mark from roots one level into CMS
2854 MarkRefsIntoClosure notOlder(_span, verification_mark_bm(), true /* nmethods */);
2855 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2857 gch->gen_process_strong_roots(_cmsGen->level(),
2858 true, // younger gens are roots
2859 true, // collecting perm gen
2860 SharedHeap::ScanningOption(roots_scanning_options()),
2861 NULL, &notOlder);
2863 // Now mark from the roots
2864 assert(_revisitStack.isEmpty(), "Should be empty");
2865 MarkFromRootsClosure markFromRootsClosure(this, _span,
2866 verification_mark_bm(), verification_mark_stack(), &_revisitStack,
2867 false /* don't yield */, true /* verifying */);
2868 assert(_restart_addr == NULL, "Expected pre-condition");
2869 verification_mark_bm()->iterate(&markFromRootsClosure);
2870 while (_restart_addr != NULL) {
2871 // Deal with stack overflow: by restarting at the indicated
2872 // address.
2873 HeapWord* ra = _restart_addr;
2874 markFromRootsClosure.reset(ra);
2875 _restart_addr = NULL;
2876 verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2877 }
2878 assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2879 verify_work_stacks_empty();
2880 // Should reset the revisit stack above, since no class tree
2881 // surgery is forthcoming.
2882 _revisitStack.reset(); // throwing away all contents
2884 // Marking completed -- now verify that each bit marked in
2885 // verification_mark_bm() is also marked in markBitMap(); flag all
2886 // errors by printing corresponding objects.
2887 VerifyMarkedClosure vcl(markBitMap());
2888 verification_mark_bm()->iterate(&vcl);
2889 if (vcl.failed()) {
2890 gclog_or_tty->print("Verification failed");
2891 Universe::heap()->print();
2892 fatal(" ... aborting");
2893 }
2894 }
2896 void CMSCollector::verify_after_remark_work_2() {
2897 ResourceMark rm;
2898 HandleMark hm;
2899 GenCollectedHeap* gch = GenCollectedHeap::heap();
2901 // Mark from roots one level into CMS
2902 MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
2903 markBitMap(), true /* nmethods */);
2904 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2905 gch->gen_process_strong_roots(_cmsGen->level(),
2906 true, // younger gens are roots
2907 true, // collecting perm gen
2908 SharedHeap::ScanningOption(roots_scanning_options()),
2909                                 NULL, &notOlder);
2911 // Now mark from the roots
2912 assert(_revisitStack.isEmpty(), "Should be empty");
2913 MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
2914 verification_mark_bm(), markBitMap(), verification_mark_stack());
2915 assert(_restart_addr == NULL, "Expected pre-condition");
2916 verification_mark_bm()->iterate(&markFromRootsClosure);
2917 while (_restart_addr != NULL) {
2918 // Deal with stack overflow: by restarting at the indicated
2919 // address.
2920 HeapWord* ra = _restart_addr;
2921 markFromRootsClosure.reset(ra);
2922 _restart_addr = NULL;
2923 verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2924 }
2925 assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2926 verify_work_stacks_empty();
2927 // Should reset the revisit stack above, since no class tree
2928 // surgery is forthcoming.
2929 _revisitStack.reset(); // throwing away all contents
2931 // Marking completed -- now verify that each bit marked in
2932 // verification_mark_bm() is also marked in markBitMap(); flag all
2933 // errors by printing corresponding objects.
2934 VerifyMarkedClosure vcl(markBitMap());
2935 verification_mark_bm()->iterate(&vcl);
2936 assert(!vcl.failed(), "Else verification above should not have succeeded");
2937 }
2939 void ConcurrentMarkSweepGeneration::save_marks() {
2940 // delegate to CMS space
2941 cmsSpace()->save_marks();
2942 for (uint i = 0; i < ParallelGCThreads; i++) {
2943 _par_gc_thread_states[i]->promo.startTrackingPromotions();
2944 }
2945 }
2947 bool ConcurrentMarkSweepGeneration::no_allocs_since_save_marks() {
2948 return cmsSpace()->no_allocs_since_save_marks();
2949 }
2951 #define CMS_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
2952 \
2953 void ConcurrentMarkSweepGeneration:: \
2954 oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
2955 cl->set_generation(this); \
2956 cmsSpace()->oop_since_save_marks_iterate##nv_suffix(cl); \
2957 cl->reset_generation(); \
2958 save_marks(); \
2959 }
2961 ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DEFN)
2963 void
2964 ConcurrentMarkSweepGeneration::object_iterate_since_last_GC(ObjectClosure* blk)
2965 {
2966 // Not currently implemented; need to do the following. -- ysr.
2967 // dld -- I think that is used for some sort of allocation profiler. So it
2968 // really means the objects allocated by the mutator since the last
2969 // GC. We could potentially implement this cheaply by recording only
2970 // the direct allocations in a side data structure.
2971 //
2972 // I think we probably ought not to be required to support these
2973 // iterations at any arbitrary point; I think there ought to be some
2974 // call to enable/disable allocation profiling in a generation/space,
2975 // and the iterator ought to return the objects allocated in the
2976 // gen/space since the enable call, or the last iterator call (which
2977 // will probably be at a GC.) That way, for gens like CM&S that would
2978 // require some extra data structure to support this, we only pay the
2979 // cost when it's in use...
2980 cmsSpace()->object_iterate_since_last_GC(blk);
2981 }
2983 void
2984 ConcurrentMarkSweepGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
2985 cl->set_generation(this);
2986 younger_refs_in_space_iterate(_cmsSpace, cl);
2987 cl->reset_generation();
2988 }
2990 void
2991 ConcurrentMarkSweepGeneration::oop_iterate(MemRegion mr, OopClosure* cl) {
2992 if (freelistLock()->owned_by_self()) {
2993 Generation::oop_iterate(mr, cl);
2994 } else {
2995 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2996 Generation::oop_iterate(mr, cl);
2997 }
2998 }
3000 void
3001 ConcurrentMarkSweepGeneration::oop_iterate(OopClosure* cl) {
3002 if (freelistLock()->owned_by_self()) {
3003 Generation::oop_iterate(cl);
3004 } else {
3005 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3006 Generation::oop_iterate(cl);
3007 }
3008 }
3010 void
3011 ConcurrentMarkSweepGeneration::object_iterate(ObjectClosure* cl) {
3012 if (freelistLock()->owned_by_self()) {
3013 Generation::object_iterate(cl);
3014 } else {
3015 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3016 Generation::object_iterate(cl);
3017 }
3018 }
3020 void
3021 ConcurrentMarkSweepGeneration::safe_object_iterate(ObjectClosure* cl) {
3022 if (freelistLock()->owned_by_self()) {
3023 Generation::safe_object_iterate(cl);
3024 } else {
3025 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3026 Generation::safe_object_iterate(cl);
3027 }
3028 }
3030 void
3031 ConcurrentMarkSweepGeneration::pre_adjust_pointers() {
3032 }
3034 void
3035 ConcurrentMarkSweepGeneration::post_compact() {
3036 }
3038 void
3039 ConcurrentMarkSweepGeneration::prepare_for_verify() {
3040 // Fix the linear allocation blocks to look like free blocks.
3042 // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
3043 // are not called when the heap is verified during universe initialization and
3044 // at vm shutdown.
3045 if (freelistLock()->owned_by_self()) {
3046 cmsSpace()->prepare_for_verify();
3047 } else {
3048 MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
3049 cmsSpace()->prepare_for_verify();
3050 }
3051 }
3053 void
3054 ConcurrentMarkSweepGeneration::verify(bool allow_dirty /* ignored */) {
3055 // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
3056 // are not called when the heap is verified during universe initialization and
3057 // at vm shutdown.
3058 if (freelistLock()->owned_by_self()) {
3059 cmsSpace()->verify(false /* ignored */);
3060 } else {
3061 MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
3062 cmsSpace()->verify(false /* ignored */);
3063 }
3064 }
3066 void CMSCollector::verify(bool allow_dirty /* ignored */) {
3067 _cmsGen->verify(allow_dirty);
3068 _permGen->verify(allow_dirty);
3069 }
3071 #ifndef PRODUCT
3072 bool CMSCollector::overflow_list_is_empty() const {
3073 assert(_num_par_pushes >= 0, "Inconsistency");
3074 if (_overflow_list == NULL) {
3075 assert(_num_par_pushes == 0, "Inconsistency");
3076 }
3077 return _overflow_list == NULL;
3078 }
3080 // The methods verify_work_stacks_empty() and verify_overflow_empty()
3081 // merely consolidate assertion checks that appear to occur together frequently.
3082 void CMSCollector::verify_work_stacks_empty() const {
3083 assert(_markStack.isEmpty(), "Marking stack should be empty");
3084 assert(overflow_list_is_empty(), "Overflow list should be empty");
3085 }
3087 void CMSCollector::verify_overflow_empty() const {
3088 assert(overflow_list_is_empty(), "Overflow list should be empty");
3089 assert(no_preserved_marks(), "No preserved marks");
3090 }
3091 #endif // PRODUCT
3093 // Decide if we want to enable class unloading as part of the
3094 // ensuing concurrent GC cycle. We will collect the perm gen and
3095 // unload classes if it's the case that:
3096 // (1) an explicit gc request has been made and the flag
3097 // ExplicitGCInvokesConcurrentAndUnloadsClasses is set, OR
3098 // (2) (a) class unloading is enabled at the command line, and
3099 // (b) (i) perm gen threshold has been crossed, or
3100 // (ii) old gen is getting really full, or
3101 // (iii) the previous N CMS collections did not collect the
3102 // perm gen
3103 // NOTE: Provided there is no change in the state of the heap between
3104 // calls to this method, it should have idempotent results. Moreover,
3105 // its results should be monotonically increasing (i.e. going from 0 to 1,
3106 // but not 1 to 0) across successive calls between which the heap was
3107 // not collected. The implementation below thus relies on
3108 // the property that concurrent_cycles_since_last_unload()
3109 // will not decrease unless a collection cycle happened and that
3110 // _permGen->should_concurrent_collect() and _cmsGen->is_too_full() are
3111 // themselves also monotonic in that sense. See check_monotonicity()
3112 // below.
3113 bool CMSCollector::update_should_unload_classes() {
3114 _should_unload_classes = false;
3115 // Condition 1 above
3116 if (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) {
3117 _should_unload_classes = true;
3118 } else if (CMSClassUnloadingEnabled) { // Condition 2.a above
3119 // Disjuncts 2.b.(i,ii,iii) above
3120 _should_unload_classes = (concurrent_cycles_since_last_unload() >=
3121 CMSClassUnloadingMaxInterval)
3122 || _permGen->should_concurrent_collect()
3123 || _cmsGen->is_too_full();
3124 }
3125 return _should_unload_classes;
3126 }
3128 bool ConcurrentMarkSweepGeneration::is_too_full() const {
3129 bool res = should_concurrent_collect();
3130 res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0);
3131 return res;
3132 }
3134 void CMSCollector::setup_cms_unloading_and_verification_state() {
3135 const bool should_verify = VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
3136 || VerifyBeforeExit;
3137 const int rso = SharedHeap::SO_Symbols | SharedHeap::SO_Strings
3138 | SharedHeap::SO_CodeCache;
3140 if (should_unload_classes()) { // Should unload classes this cycle
3141 remove_root_scanning_option(rso); // Shrink the root set appropriately
3142 set_verifying(should_verify); // Set verification state for this cycle
3143 return; // Nothing else needs to be done at this time
3144 }
3146 // Not unloading classes this cycle
3147   assert(!should_unload_classes(), "Inconsistency!");
3148 if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
3149 // We were not verifying, or we _were_ unloading classes in the last cycle,
3150 // AND some verification options are enabled this cycle; in this case,
3151 // we must make sure that the deadness map is allocated if not already so,
3152 // and cleared (if already allocated previously --
3153 // CMSBitMap::sizeInBits() is used to determine if it's allocated).
3154 if (perm_gen_verify_bit_map()->sizeInBits() == 0) {
3155 if (!perm_gen_verify_bit_map()->allocate(_permGen->reserved())) {
3156 warning("Failed to allocate permanent generation verification CMS Bit Map;\n"
3157 "permanent generation verification disabled");
3158 return; // Note that we leave verification disabled, so we'll retry this
3159 // allocation next cycle. We _could_ remember this failure
3160 // and skip further attempts and permanently disable verification
3161 // attempts if that is considered more desirable.
3162 }
3163 assert(perm_gen_verify_bit_map()->covers(_permGen->reserved()),
3164 "_perm_gen_ver_bit_map inconsistency?");
3165 } else {
3166 perm_gen_verify_bit_map()->clear_all();
3167 }
3168 // Include symbols, strings and code cache elements to prevent their resurrection.
3169 add_root_scanning_option(rso);
3170 set_verifying(true);
3171 } else if (verifying() && !should_verify) {
3172 // We were verifying, but some verification flags got disabled.
3173 set_verifying(false);
3174 // Exclude symbols, strings and code cache elements from root scanning to
3175 // reduce IM and RM pauses.
3176 remove_root_scanning_option(rso);
3177 }
3178 }
3181 #ifndef PRODUCT
3182 HeapWord* CMSCollector::block_start(const void* p) const {
3183 const HeapWord* addr = (HeapWord*)p;
3184 if (_span.contains(p)) {
3185 if (_cmsGen->cmsSpace()->is_in_reserved(addr)) {
3186 return _cmsGen->cmsSpace()->block_start(p);
3187 } else {
3188 assert(_permGen->cmsSpace()->is_in_reserved(addr),
3189 "Inconsistent _span?");
3190 return _permGen->cmsSpace()->block_start(p);
3191 }
3192 }
3193 return NULL;
3194 }
3195 #endif
3197 HeapWord*
3198 ConcurrentMarkSweepGeneration::expand_and_allocate(size_t word_size,
3199 bool tlab,
3200 bool parallel) {
3201 assert(!tlab, "Can't deal with TLAB allocation");
3202 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3203 expand(word_size*HeapWordSize, MinHeapDeltaBytes,
3204 CMSExpansionCause::_satisfy_allocation);
3205 if (GCExpandToAllocateDelayMillis > 0) {
3206 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3207 }
3208 return have_lock_and_allocate(word_size, tlab);
3209 }
3211 // YSR: All of this generation expansion/shrinking stuff is an exact copy of
3212 // OneContigSpaceCardGeneration, which makes me wonder if we should move this
3213 // to CardGeneration and share it...
3214 bool ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes) {
3215 return CardGeneration::expand(bytes, expand_bytes);
3216 }
3218 void ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes,
3219 CMSExpansionCause::Cause cause)
3220 {
3222 bool success = expand(bytes, expand_bytes);
3224 // remember why we expanded; this information is used
3225 // by shouldConcurrentCollect() when making decisions on whether to start
3226 // a new CMS cycle.
3227 if (success) {
3228 set_expansion_cause(cause);
3229 if (PrintGCDetails && Verbose) {
3230 gclog_or_tty->print_cr("Expanded CMS gen for %s",
3231 CMSExpansionCause::to_string(cause));
3232 }
3233 }
3234 }
3236 HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) {
3237 HeapWord* res = NULL;
3238 MutexLocker x(ParGCRareEvent_lock);
3239 while (true) {
3240 // Expansion by some other thread might make alloc OK now:
3241 res = ps->lab.alloc(word_sz);
3242 if (res != NULL) return res;
3243 // If there's not enough expansion space available, give up.
3244 if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) {
3245 return NULL;
3246 }
3247 // Otherwise, we try expansion.
3248 expand(word_sz*HeapWordSize, MinHeapDeltaBytes,
3249 CMSExpansionCause::_allocate_par_lab);
3250 // Now go around the loop and try alloc again;
3251 // A competing par_promote might beat us to the expansion space,
3252     // so we may go around the loop again if promotion fails again.
3253 if (GCExpandToAllocateDelayMillis > 0) {
3254 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3255 }
3256 }
3257 }
3260 bool ConcurrentMarkSweepGeneration::expand_and_ensure_spooling_space(
3261 PromotionInfo* promo) {
3262 MutexLocker x(ParGCRareEvent_lock);
3263 size_t refill_size_bytes = promo->refillSize() * HeapWordSize;
3264 while (true) {
3265 // Expansion by some other thread might make alloc OK now:
3266 if (promo->ensure_spooling_space()) {
3267 assert(promo->has_spooling_space(),
3268 "Post-condition of successful ensure_spooling_space()");
3269 return true;
3270 }
3271 // If there's not enough expansion space available, give up.
3272 if (_virtual_space.uncommitted_size() < refill_size_bytes) {
3273 return false;
3274 }
3275 // Otherwise, we try expansion.
3276 expand(refill_size_bytes, MinHeapDeltaBytes,
3277 CMSExpansionCause::_allocate_par_spooling_space);
3278 // Now go around the loop and try alloc again;
3279 // A competing allocation might beat us to the expansion space,
3280 // so we may go around the loop again if allocation fails again.
3281 if (GCExpandToAllocateDelayMillis > 0) {
3282 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3283 }
3284 }
3285 }
3289 void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
3290 assert_locked_or_safepoint(Heap_lock);
3291 size_t size = ReservedSpace::page_align_size_down(bytes);
3292 if (size > 0) {
3293 shrink_by(size);
3294 }
3295 }
3297 bool ConcurrentMarkSweepGeneration::grow_by(size_t bytes) {
3298 assert_locked_or_safepoint(Heap_lock);
3299 bool result = _virtual_space.expand_by(bytes);
3300 if (result) {
3301 HeapWord* old_end = _cmsSpace->end();
3302 size_t new_word_size =
3303 heap_word_size(_virtual_space.committed_size());
3304 MemRegion mr(_cmsSpace->bottom(), new_word_size);
3305 _bts->resize(new_word_size); // resize the block offset shared array
3306 Universe::heap()->barrier_set()->resize_covered_region(mr);
3307 // Hmmmm... why doesn't CFLS::set_end verify locking?
3308 // This is quite ugly; FIX ME XXX
3309 _cmsSpace->assert_locked();
3310 _cmsSpace->set_end((HeapWord*)_virtual_space.high());
3312 // update the space and generation capacity counters
3313 if (UsePerfData) {
3314 _space_counters->update_capacity();
3315 _gen_counters->update_all();
3316 }
3318 if (Verbose && PrintGC) {
3319 size_t new_mem_size = _virtual_space.committed_size();
3320 size_t old_mem_size = new_mem_size - bytes;
3321 gclog_or_tty->print_cr("Expanding %s from %ldK by %ldK to %ldK",
3322 name(), old_mem_size/K, bytes/K, new_mem_size/K);
3323 }
3324 }
3325 return result;
3326 }
3328 bool ConcurrentMarkSweepGeneration::grow_to_reserved() {
3329 assert_locked_or_safepoint(Heap_lock);
3330 bool success = true;
3331 const size_t remaining_bytes = _virtual_space.uncommitted_size();
3332 if (remaining_bytes > 0) {
3333 success = grow_by(remaining_bytes);
3334 DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
3335 }
3336 return success;
3337 }
3339 void ConcurrentMarkSweepGeneration::shrink_by(size_t bytes) {
3340 assert_locked_or_safepoint(Heap_lock);
3341 assert_lock_strong(freelistLock());
3342 // XXX Fix when compaction is implemented.
3343 warning("Shrinking of CMS not yet implemented");
3344 return;
3345 }
3348 // Simple ctor/dtor wrapper for accounting & timer chores around concurrent
3349 // phases.
3350 class CMSPhaseAccounting: public StackObj {
3351 public:
3352 CMSPhaseAccounting(CMSCollector *collector,
3353 const char *phase,
3354 bool print_cr = true);
3355 ~CMSPhaseAccounting();
3357 private:
3358 CMSCollector *_collector;
3359 const char *_phase;
3360 elapsedTimer _wallclock;
3361 bool _print_cr;
3363 public:
3364   // Not MT-safe, so do not pass around these StackObj's
3365 // where they may be accessed by other threads.
3366 jlong wallclock_millis() {
3367 assert(_wallclock.is_active(), "Wall clock should not stop");
3368 _wallclock.stop(); // to record time
3369 jlong ret = _wallclock.milliseconds();
3370 _wallclock.start(); // restart
3371 return ret;
3372 }
3373 };
3375 CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
3376 const char *phase,
3377 bool print_cr) :
3378 _collector(collector), _phase(phase), _print_cr(print_cr) {
3380 if (PrintCMSStatistics != 0) {
3381 _collector->resetYields();
3382 }
3383 if (PrintGCDetails && PrintGCTimeStamps) {
3384 gclog_or_tty->date_stamp(PrintGCDateStamps);
3385 gclog_or_tty->stamp();
3386 gclog_or_tty->print_cr(": [%s-concurrent-%s-start]",
3387 _collector->cmsGen()->short_name(), _phase);
3388 }
3389 _collector->resetTimer();
3390 _wallclock.start();
3391 _collector->startTimer();
3392 }
3394 CMSPhaseAccounting::~CMSPhaseAccounting() {
3395 assert(_wallclock.is_active(), "Wall clock should not have stopped");
3396 _collector->stopTimer();
3397 _wallclock.stop();
3398 if (PrintGCDetails) {
3399 gclog_or_tty->date_stamp(PrintGCDateStamps);
3400 if (PrintGCTimeStamps) {
3401 gclog_or_tty->stamp();
3402 gclog_or_tty->print(": ");
3403 }
3404 gclog_or_tty->print("[%s-concurrent-%s: %3.3f/%3.3f secs]",
3405 _collector->cmsGen()->short_name(),
3406 _phase, _collector->timerValue(), _wallclock.seconds());
3407 if (_print_cr) {
3408 gclog_or_tty->print_cr("");
3409 }
3410 if (PrintCMSStatistics != 0) {
3411 gclog_or_tty->print_cr(" (CMS-concurrent-%s yielded %d times)", _phase,
3412 _collector->yields());
3413 }
3414 }
3415 }
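// Typical (stack-allocated) usage, as in markFromRoots() and preclean()
// below:
//   {
//     CMSPhaseAccounting pa(this, "mark", !PrintGCDetails);
//     ... do the concurrent work for the phase ...
//   } // destructor prints the "[...-concurrent-mark: .../... secs]" line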
3417 // CMS work
3419 // Checkpoint the roots into this generation from outside
3420 // this generation. [Note this initial checkpoint need only
3421 // be approximate -- we'll do a catch up phase subsequently.]
3422 void CMSCollector::checkpointRootsInitial(bool asynch) {
3423 assert(_collectorState == InitialMarking, "Wrong collector state");
3424 check_correct_thread_executing();
3425 ReferenceProcessor* rp = ref_processor();
3426 SpecializationStats::clear();
3427 assert(_restart_addr == NULL, "Control point invariant");
3428 if (asynch) {
3429 // acquire locks for subsequent manipulations
3430 MutexLockerEx x(bitMapLock(),
3431 Mutex::_no_safepoint_check_flag);
3432 checkpointRootsInitialWork(asynch);
3433 rp->verify_no_references_recorded();
3434 rp->enable_discovery(); // enable ("weak") refs discovery
3435 _collectorState = Marking;
3436 } else {
3437     // (Weak) Refs discovery: this is controlled from GenCollectedHeap::do_collection
3438 // which recognizes if we are a CMS generation, and doesn't try to turn on
3439 // discovery; verify that they aren't meddling.
3440 assert(!rp->discovery_is_atomic(),
3441 "incorrect setting of discovery predicate");
3442 assert(!rp->discovery_enabled(), "genCollectedHeap shouldn't control "
3443 "ref discovery for this generation kind");
3444 // already have locks
3445 checkpointRootsInitialWork(asynch);
3446 rp->enable_discovery(); // now enable ("weak") refs discovery
3447 _collectorState = Marking;
3448 }
3449 SpecializationStats::print();
3450 }
3452 void CMSCollector::checkpointRootsInitialWork(bool asynch) {
3453 assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
3454 assert(_collectorState == InitialMarking, "just checking");
3456 // If there has not been a GC[n-1] since last GC[n] cycle completed,
3457 // precede our marking with a collection of all
3458 // younger generations to keep floating garbage to a minimum.
3459 // XXX: we won't do this for now -- it's an optimization to be done later.
3461 // already have locks
3462 assert_lock_strong(bitMapLock());
3463 assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");
3465 // Setup the verification and class unloading state for this
3466 // CMS collection cycle.
3467 setup_cms_unloading_and_verification_state();
3469 NOT_PRODUCT(TraceTime t("\ncheckpointRootsInitialWork",
3470 PrintGCDetails && Verbose, true, gclog_or_tty);)
3471 if (UseAdaptiveSizePolicy) {
3472 size_policy()->checkpoint_roots_initial_begin();
3473 }
3475 // Reset all the PLAB chunk arrays if necessary.
3476 if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
3477 reset_survivor_plab_arrays();
3478 }
3480 ResourceMark rm;
3481 HandleMark hm;
3483 FalseClosure falseClosure;
3484 // In the case of a synchronous collection, we will elide the
3485 // remark step, so it's important to catch all the nmethod oops
3486   // in this step; hence the last argument to the constructor below.
3487 MarkRefsIntoClosure notOlder(_span, &_markBitMap, !asynch /* nmethods */);
3488 GenCollectedHeap* gch = GenCollectedHeap::heap();
3490 verify_work_stacks_empty();
3491 verify_overflow_empty();
3493 gch->ensure_parsability(false); // fill TLABs, but no need to retire them
3494 // Update the saved marks which may affect the root scans.
3495 gch->save_marks();
3497 // weak reference processing has not started yet.
3498 ref_processor()->set_enqueuing_is_done(false);
3500 {
3501 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
3502 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3503 gch->gen_process_strong_roots(_cmsGen->level(),
3504 true, // younger gens are roots
3505 true, // collecting perm gen
3506 SharedHeap::ScanningOption(roots_scanning_options()),
3507                                   NULL, &notOlder);
3508 }
3510 // Clear mod-union table; it will be dirtied in the prologue of
3511   // the CMS generation at each younger generation collection.
3513 assert(_modUnionTable.isAllClear(),
3514 "Was cleared in most recent final checkpoint phase"
3515 " or no bits are set in the gc_prologue before the start of the next "
3516 "subsequent marking phase.");
3518 // Temporarily disabled, since pre/post-consumption closures don't
3519 // care about precleaned cards
3520 #if 0
3521 {
3522 MemRegion mr = MemRegion((HeapWord*)_virtual_space.low(),
3523 (HeapWord*)_virtual_space.high());
3524 _ct->ct_bs()->preclean_dirty_cards(mr);
3525 }
3526 #endif
3528 // Save the end of the used_region of the constituent generations
3529 // to be used to limit the extent of sweep in each generation.
3530 save_sweep_limits();
3531 if (UseAdaptiveSizePolicy) {
3532 size_policy()->checkpoint_roots_initial_end(gch->gc_cause());
3533 }
3534 verify_overflow_empty();
3535 }
3537 bool CMSCollector::markFromRoots(bool asynch) {
3538 // we might be tempted to assert that:
3539 // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
3540 // "inconsistent argument?");
3541 // However that wouldn't be right, because it's possible that
3542 // a safepoint is indeed in progress as a younger generation
3543 // stop-the-world GC happens even as we mark in this generation.
3544 assert(_collectorState == Marking, "inconsistent state?");
3545 check_correct_thread_executing();
3546 verify_overflow_empty();
3548 bool res;
3549 if (asynch) {
3551 // Start the timers for adaptive size policy for the concurrent phases
3552 // Do it here so that the foreground MS can use the concurrent
3553     // timer since a foreground MS might have the sweep done concurrently
3554 // or STW.
3555 if (UseAdaptiveSizePolicy) {
3556 size_policy()->concurrent_marking_begin();
3557 }
3559 // Weak ref discovery note: We may be discovering weak
3560     // refs in this generation concurrently (but interleaved) with
3561 // weak ref discovery by a younger generation collector.
3563 CMSTokenSyncWithLocks ts(true, bitMapLock());
3564 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
3565 CMSPhaseAccounting pa(this, "mark", !PrintGCDetails);
3566 res = markFromRootsWork(asynch);
3567 if (res) {
3568 _collectorState = Precleaning;
3569 } else { // We failed and a foreground collection wants to take over
3570 assert(_foregroundGCIsActive, "internal state inconsistency");
3571 assert(_restart_addr == NULL, "foreground will restart from scratch");
3572 if (PrintGCDetails) {
3573 gclog_or_tty->print_cr("bailing out to foreground collection");
3574 }
3575 }
3576 if (UseAdaptiveSizePolicy) {
3577 size_policy()->concurrent_marking_end();
3578 }
3579 } else {
3580 assert(SafepointSynchronize::is_at_safepoint(),
3581 "inconsistent with asynch == false");
3582 if (UseAdaptiveSizePolicy) {
3583 size_policy()->ms_collection_marking_begin();
3584 }
3585 // already have locks
3586 res = markFromRootsWork(asynch);
3587 _collectorState = FinalMarking;
3588 if (UseAdaptiveSizePolicy) {
3589 GenCollectedHeap* gch = GenCollectedHeap::heap();
3590 size_policy()->ms_collection_marking_end(gch->gc_cause());
3591 }
3592 }
3593 verify_overflow_empty();
3594 return res;
3595 }
3597 bool CMSCollector::markFromRootsWork(bool asynch) {
3598 // iterate over marked bits in bit map, doing a full scan and mark
3599 // from these roots using the following algorithm:
3600 // . if oop is to the right of the current scan pointer,
3601 // mark corresponding bit (we'll process it later)
3602 // . else (oop is to left of current scan pointer)
3603 // push oop on marking stack
3604 // . drain the marking stack
3606 // Note that when we do a marking step we need to hold the
3607 // bit map lock -- recall that direct allocation (by mutators)
3608 // and promotion (by younger generation collectors) is also
3609 // marking the bit map. [the so-called allocate live policy.]
3610 // Because the implementation of bit map marking is not
3611 // robust wrt simultaneous marking of bits in the same word,
3612   // we need to make sure that there is no interference
3613   // between such concurrent updates.
3615 // already have locks
3616 assert_lock_strong(bitMapLock());
3618 // Clear the revisit stack, just in case there are any
3619 // obsolete contents from a short-circuited previous CMS cycle.
3620 _revisitStack.reset();
3621 verify_work_stacks_empty();
3622 verify_overflow_empty();
3623 assert(_revisitStack.isEmpty(), "tabula rasa");
3625 bool result = false;
3626 if (CMSConcurrentMTEnabled && ParallelCMSThreads > 0) {
3627 result = do_marking_mt(asynch);
3628 } else {
3629 result = do_marking_st(asynch);
3630 }
3631 return result;
3632 }
3634 // Forward decl
3635 class CMSConcMarkingTask;
3637 class CMSConcMarkingTerminator: public ParallelTaskTerminator {
3638 CMSCollector* _collector;
3639 CMSConcMarkingTask* _task;
3640 bool _yield;
3641 protected:
3642 virtual void yield();
3643 public:
3644 // "n_threads" is the number of threads to be terminated.
3645 // "queue_set" is a set of work queues of other threads.
3646 // "collector" is the CMS collector associated with this task terminator.
3647 // "yield" indicates whether we need the gang as a whole to yield.
3648 CMSConcMarkingTerminator(int n_threads, TaskQueueSetSuper* queue_set,
3649 CMSCollector* collector, bool yield) :
3650 ParallelTaskTerminator(n_threads, queue_set),
3651 _collector(collector),
3652 _yield(yield) { }
3654 void set_task(CMSConcMarkingTask* task) {
3655 _task = task;
3656 }
3657 };
3659 // MT Concurrent Marking Task
3660 class CMSConcMarkingTask: public YieldingFlexibleGangTask {
3661 CMSCollector* _collector;
3662 YieldingFlexibleWorkGang* _workers; // the whole gang
3663 int _n_workers; // requested/desired # workers
3664 bool _asynch;
3665 bool _result;
3666 CompactibleFreeListSpace* _cms_space;
3667 CompactibleFreeListSpace* _perm_space;
3668 HeapWord* _global_finger;
3669 HeapWord* _restart_addr;
3671 // Exposed here for yielding support
3672 Mutex* const _bit_map_lock;
3674 // The per thread work queues, available here for stealing
3675 OopTaskQueueSet* _task_queues;
3676 CMSConcMarkingTerminator _term;
3678 public:
3679 CMSConcMarkingTask(CMSCollector* collector,
3680 CompactibleFreeListSpace* cms_space,
3681 CompactibleFreeListSpace* perm_space,
3682 bool asynch, int n_workers,
3683 YieldingFlexibleWorkGang* workers,
3684 OopTaskQueueSet* task_queues):
3685 YieldingFlexibleGangTask("Concurrent marking done multi-threaded"),
3686 _collector(collector),
3687 _cms_space(cms_space),
3688 _perm_space(perm_space),
3689 _asynch(asynch), _n_workers(n_workers), _result(true),
3690 _workers(workers), _task_queues(task_queues),
3691 _term(n_workers, task_queues, _collector, asynch),
3692 _bit_map_lock(collector->bitMapLock())
3693 {
3694 assert(n_workers <= workers->total_workers(),
3695 "Else termination won't work correctly today"); // XXX FIX ME!
3696 _requested_size = n_workers;
3697 _term.set_task(this);
3698 assert(_cms_space->bottom() < _perm_space->bottom(),
3699 "Finger incorrectly initialized below");
3700 _restart_addr = _global_finger = _cms_space->bottom();
3701 }
3704 OopTaskQueueSet* task_queues() { return _task_queues; }
3706 OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
3708 HeapWord** global_finger_addr() { return &_global_finger; }
3710 CMSConcMarkingTerminator* terminator() { return &_term; }
3712 void work(int i);
3714 virtual void coordinator_yield(); // stuff done by coordinator
3715 bool result() { return _result; }
3717 void reset(HeapWord* ra) {
3718 assert(_global_finger >= _cms_space->end(), "Postcondition of ::work(i)");
3719 assert(_global_finger >= _perm_space->end(), "Postcondition of ::work(i)");
3720 assert(ra < _perm_space->end(), "ra too large");
3721 _restart_addr = _global_finger = ra;
3722 _term.reset_for_reuse();
3723 }
3725 static bool get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3726 OopTaskQueue* work_q);
3728 private:
3729 void do_scan_and_mark(int i, CompactibleFreeListSpace* sp);
3730 void do_work_steal(int i);
3731 void bump_global_finger(HeapWord* f);
3732 };
3734 void CMSConcMarkingTerminator::yield() {
3735 if (ConcurrentMarkSweepThread::should_yield() &&
3736 !_collector->foregroundGCIsActive() &&
3737 _yield) {
3738 _task->yield();
3739 } else {
3740 ParallelTaskTerminator::yield();
3741 }
3742 }
3744 ////////////////////////////////////////////////////////////////
3745 // Concurrent Marking Algorithm Sketch
3746 ////////////////////////////////////////////////////////////////
3747 // Until all tasks exhausted (both spaces):
3748 // -- claim next available chunk
3749 // -- bump global finger via CAS
3750 // -- find first object that starts in this chunk
3751 // and start scanning bitmap from that position
3752 // -- scan marked objects for oops
3753 // -- CAS-mark target, and if successful:
3754 // . if target oop is above global finger (volatile read)
3755 // nothing to do
3756 // . if target oop is in chunk and above local finger
3757 // then nothing to do
3758 // . else push on work-queue
3759 // -- Deal with possible overflow issues:
3760 // . local work-queue overflow causes stuff to be pushed on
3761 // global (common) overflow queue
3762 // . always first empty local work queue
3763 // . then get a batch of oops from global work queue if any
3764 // . then do work stealing
3765 // -- When all tasks claimed (both spaces)
3766 // and local work queue empty,
3767 // then in a loop do:
3768 // . check global overflow stack; steal a batch of oops and trace
3769 //    . try to steal from other threads if GOS is empty
3770 // . if neither is available, offer termination
3771 // -- Terminate and return result
3772 //
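// For example (numbers illustrative): if marking_task_size() is M words
// and the space bottom is B, the n-th claimed task covers the chunk
// [B + n*M, B + (n+1)*M); the claimant CAS-bumps the global finger to
// the chunk's end before scanning it, so a newly marked oop at or above
// the finger can safely be left for a later task rather than being
// pushed on a work queue (see do_scan_and_mark() below).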
3773 void CMSConcMarkingTask::work(int i) {
3774 elapsedTimer _timer;
3775 ResourceMark rm;
3776 HandleMark hm;
3778 DEBUG_ONLY(_collector->verify_overflow_empty();)
3780 // Before we begin work, our work queue should be empty
3781 assert(work_queue(i)->size() == 0, "Expected to be empty");
3782 // Scan the bitmap covering _cms_space, tracing through grey objects.
3783 _timer.start();
3784 do_scan_and_mark(i, _cms_space);
3785 _timer.stop();
3786 if (PrintCMSStatistics != 0) {
3787 gclog_or_tty->print_cr("Finished cms space scanning in %dth thread: %3.3f sec",
3788 i, _timer.seconds()); // XXX: need xxx/xxx type of notation, two timers
3789 }
3791 // ... do the same for the _perm_space
3792 _timer.reset();
3793 _timer.start();
3794 do_scan_and_mark(i, _perm_space);
3795 _timer.stop();
3796 if (PrintCMSStatistics != 0) {
3797 gclog_or_tty->print_cr("Finished perm space scanning in %dth thread: %3.3f sec",
3798 i, _timer.seconds()); // XXX: need xxx/xxx type of notation, two timers
3799 }
3801 // ... do work stealing
3802 _timer.reset();
3803 _timer.start();
3804 do_work_steal(i);
3805 _timer.stop();
3806 if (PrintCMSStatistics != 0) {
3807 gclog_or_tty->print_cr("Finished work stealing in %dth thread: %3.3f sec",
3808 i, _timer.seconds()); // XXX: need xxx/xxx type of notation, two timers
3809 }
3810 assert(_collector->_markStack.isEmpty(), "Should have been emptied");
3811 assert(work_queue(i)->size() == 0, "Should have been emptied");
3812 // Note that under the current task protocol, the
3814   // following assertion is true even if the spaces have
3814 // expanded since the completion of the concurrent
3815 // marking. XXX This will likely change under a strict
3816 // ABORT semantics.
3817 assert(_global_finger > _cms_space->end() &&
3818 _global_finger >= _perm_space->end(),
3819 "All tasks have been completed");
3820 DEBUG_ONLY(_collector->verify_overflow_empty();)
3821 }
3823 void CMSConcMarkingTask::bump_global_finger(HeapWord* f) {
3824 HeapWord* read = _global_finger;
3825 HeapWord* cur = read;
3826 while (f > read) {
3827 cur = read;
3828 read = (HeapWord*) Atomic::cmpxchg_ptr(f, &_global_finger, cur);
3829 if (cur == read) {
3830 // our cas succeeded
3831 assert(_global_finger >= f, "protocol consistency");
3832 break;
3833 }
3834 }
3835 }
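// Note that the CAS loop above only ever advances _global_finger: if the
// cmpxchg fails, another thread has moved the finger in the interim, and
// we retry only while the value we observe is still below f; losing the
// race is therefore benign.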
3837 // This is really inefficient, and should be redone by
3838 // using (not yet available) block-read and -write interfaces to the
3839 // stack and the work_queue. XXX FIX ME !!!
3840 bool CMSConcMarkingTask::get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3841 OopTaskQueue* work_q) {
3842 // Fast lock-free check
3843 if (ovflw_stk->length() == 0) {
3844 return false;
3845 }
3846 assert(work_q->size() == 0, "Shouldn't steal");
3847 MutexLockerEx ml(ovflw_stk->par_lock(),
3848 Mutex::_no_safepoint_check_flag);
3849 // Grab up to 1/4 the size of the work queue
3850 size_t num = MIN2((size_t)work_q->max_elems()/4,
3851 (size_t)ParGCDesiredObjsFromOverflowList);
3852 num = MIN2(num, ovflw_stk->length());
3853 for (int i = (int) num; i > 0; i--) {
3854 oop cur = ovflw_stk->pop();
3855 assert(cur != NULL, "Counted wrong?");
3856 work_q->push(cur);
3857 }
3858 return num > 0;
3859 }
3861 void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
3862 SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
3863 int n_tasks = pst->n_tasks();
3864 // We allow that there may be no tasks to do here because
3865 // we are restarting after a stack overflow.
3866 assert(pst->valid() || n_tasks == 0, "Uninitialized use?");
3867 int nth_task = 0;
3869 HeapWord* aligned_start = sp->bottom();
3870 if (sp->used_region().contains(_restart_addr)) {
3871 // Align down to a card boundary for the start of 0th task
3872 // for this space.
3873 aligned_start =
3874 (HeapWord*)align_size_down((uintptr_t)_restart_addr,
3875 CardTableModRefBS::card_size);
3876 }
3878 size_t chunk_size = sp->marking_task_size();
3879 while (!pst->is_task_claimed(/* reference */ nth_task)) {
3880 // Having claimed the nth task in this space,
3881 // compute the chunk that it corresponds to:
3882 MemRegion span = MemRegion(aligned_start + nth_task*chunk_size,
3883 aligned_start + (nth_task+1)*chunk_size);
3884 // Try and bump the global finger via a CAS;
3885 // note that we need to do the global finger bump
3886 // _before_ taking the intersection below, because
3887 // the task corresponding to that region will be
3888 // deemed done even if the used_region() expands
3889 // because of allocation -- as it almost certainly will
3890 // during start-up while the threads yield in the
3891 // closure below.
3892 HeapWord* finger = span.end();
3893 bump_global_finger(finger); // atomically
3894 // There are null tasks here corresponding to chunks
3895 // beyond the "top" address of the space.
3896 span = span.intersection(sp->used_region());
3897 if (!span.is_empty()) { // Non-null task
3898 HeapWord* prev_obj;
3899 assert(!span.contains(_restart_addr) || nth_task == 0,
3900 "Inconsistency");
3901 if (nth_task == 0) {
3902 // For the 0th task, we'll not need to compute a block_start.
3903 if (span.contains(_restart_addr)) {
3904 // In the case of a restart because of stack overflow,
3905 // we might additionally skip a chunk prefix.
3906 prev_obj = _restart_addr;
3907 } else {
3908 prev_obj = span.start();
3909 }
3910 } else {
3911 // We want to skip the first object because
3912 // the protocol is to scan any object in its entirety
3913 // that _starts_ in this span; a fortiori, any
3914 // object starting in an earlier span is scanned
3915 // as part of an earlier claimed task.
3916 // Below we use the "careful" version of block_start
3917 // so we do not try to navigate uninitialized objects.
3918 prev_obj = sp->block_start_careful(span.start());
3919 // Below we use a variant of block_size that uses the
3920 // Printezis bits to avoid waiting for allocated
3921 // objects to become initialized/parsable.
3922 while (prev_obj < span.start()) {
3923 size_t sz = sp->block_size_no_stall(prev_obj, _collector);
3924 if (sz > 0) {
3925 prev_obj += sz;
3926 } else {
3927 // In this case we may end up doing a bit of redundant
3928 // scanning, but that appears unavoidable, short of
3929 // locking the free list locks; see bug 6324141.
3930 break;
3931 }
3932 }
3933 }
3934 if (prev_obj < span.end()) {
3935 MemRegion my_span = MemRegion(prev_obj, span.end());
3936 // Do the marking work within a non-empty span --
3937 // the last argument to the constructor indicates whether the
3938 // iteration should be incremental with periodic yields.
3939 Par_MarkFromRootsClosure cl(this, _collector, my_span,
3940 &_collector->_markBitMap,
3941 work_queue(i),
3942 &_collector->_markStack,
3943 &_collector->_revisitStack,
3944 _asynch);
3945 _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end());
3946 } // else nothing to do for this task
3947 } // else nothing to do for this task
3948 }
3949 // We'd be tempted to assert here that since there are no
3950 // more tasks left to claim in this space, the global_finger
3951 // must exceed space->top() and a fortiori space->end(). However,
3952 // that would not quite be correct because the bumping of
3953 // global_finger occurs strictly after the claiming of a task,
3954 // so by the time we reach here the global finger may not yet
3955 // have been bumped up by the thread that claimed the last
3956 // task.
3957 pst->all_tasks_completed();
3958 }
3960 class Par_ConcMarkingClosure: public OopClosure {
3961 private:
3962 CMSCollector* _collector;
3963 MemRegion _span;
3964 CMSBitMap* _bit_map;
3965 CMSMarkStack* _overflow_stack;
3966 CMSMarkStack* _revisit_stack; // XXXXXX Check proper use
3967 OopTaskQueue* _work_queue;
3968 protected:
3969 DO_OOP_WORK_DEFN
3970 public:
3971 Par_ConcMarkingClosure(CMSCollector* collector, OopTaskQueue* work_queue,
3972 CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
3973 _collector(collector),
3974 _span(_collector->_span),
3975 _work_queue(work_queue),
3976 _bit_map(bit_map),
3977 _overflow_stack(overflow_stack) { } // need to initialize revisit stack etc.
3978 virtual void do_oop(oop* p);
3979 virtual void do_oop(narrowOop* p);
3980 void trim_queue(size_t max);
3981 void handle_stack_overflow(HeapWord* lost);
3982 };
3984 // Grey object scanning during work stealing phase --
3985 // the salient assumption here is that any references
3986 // that are in these stolen objects being scanned must
3987 // already have been initialized (else they would not have
3988 // been published), so we do not need to check for
3989 // uninitialized objects before pushing here.
3990 void Par_ConcMarkingClosure::do_oop(oop obj) {
3991 assert(obj->is_oop_or_null(true), "expected an oop or NULL");
3992 HeapWord* addr = (HeapWord*)obj;
3993 // Check if oop points into the CMS generation
3994 // and is not marked
3995 if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
3996 // a white object ...
3997 // If we manage to "claim" the object, by being the
3998 // first thread to mark it, then we push it on our
3999 // marking stack
4000 if (_bit_map->par_mark(addr)) { // ... now grey
4001 // push on work queue (grey set)
4002 bool simulate_overflow = false;
4003 NOT_PRODUCT(
4004 if (CMSMarkStackOverflowALot &&
4005 _collector->simulate_overflow()) {
4006 // simulate a stack overflow
4007 simulate_overflow = true;
4008 }
4009 )
4010 if (simulate_overflow ||
4011 !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
4012 // stack overflow
4013 if (PrintCMSStatistics != 0) {
4014 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
4015 SIZE_FORMAT, _overflow_stack->capacity());
4016 }
4017 // We cannot assert that the overflow stack is full because
4018 // it may have been emptied since.
4019 assert(simulate_overflow ||
4020 _work_queue->size() == _work_queue->max_elems(),
4021 "Else push should have succeeded");
4022 handle_stack_overflow(addr);
4023 }
4024 } // Else, some other thread got there first
4025 }
4026 }
4028 void Par_ConcMarkingClosure::do_oop(oop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
4029 void Par_ConcMarkingClosure::do_oop(narrowOop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
4031 void Par_ConcMarkingClosure::trim_queue(size_t max) {
4032 while (_work_queue->size() > max) {
4033 oop new_oop;
4034 if (_work_queue->pop_local(new_oop)) {
4035 assert(new_oop->is_oop(), "Should be an oop");
4036 assert(_bit_map->isMarked((HeapWord*)new_oop), "Grey object");
4037 assert(_span.contains((HeapWord*)new_oop), "Not in span");
4038 assert(new_oop->is_parsable(), "Should be parsable");
4039 new_oop->oop_iterate(this); // do_oop() above
4040 }
4041 }
4042 }
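// Note: a max of 0, as passed from do_work_steal() below, drains the
// work queue completely before any stealing or termination is attempted.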
4044 // Upon stack overflow, we discard (part of) the stack,
4045 // remembering the least address amongst those discarded
4046 // in CMSCollector's _restart_address.
4047 void Par_ConcMarkingClosure::handle_stack_overflow(HeapWord* lost) {
4048 // We need to do this under a mutex to prevent other
4049 // workers from interfering with the work done below.
4050 MutexLockerEx ml(_overflow_stack->par_lock(),
4051 Mutex::_no_safepoint_check_flag);
4052 // Remember the least grey address discarded
4053 HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
4054 _collector->lower_restart_addr(ra);
4055 _overflow_stack->reset(); // discard stack contents
4056 _overflow_stack->expand(); // expand the stack if possible
4057 }
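// The restart address recorded above is consumed by do_marking_mt()
// below: when the marking task completes with _restart_addr non-NULL,
// marking is restarted from that address, so the grey objects discarded
// here are eventually revisited.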
4060 void CMSConcMarkingTask::do_work_steal(int i) {
4061 OopTaskQueue* work_q = work_queue(i);
4062 oop obj_to_scan;
4063 CMSBitMap* bm = &(_collector->_markBitMap);
4064 CMSMarkStack* ovflw = &(_collector->_markStack);
4065 int* seed = _collector->hash_seed(i);
4066 Par_ConcMarkingClosure cl(_collector, work_q, bm, ovflw);
4067 while (true) {
4068 cl.trim_queue(0);
4069 assert(work_q->size() == 0, "Should have been emptied above");
4070 if (get_work_from_overflow_stack(ovflw, work_q)) {
4071 // Can't assert below because the work obtained from the
4072 // overflow stack may already have been stolen from us.
4073 // assert(work_q->size() > 0, "Work from overflow stack");
4074 continue;
4075 } else if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
4076 assert(obj_to_scan->is_oop(), "Should be an oop");
4077 assert(bm->isMarked((HeapWord*)obj_to_scan), "Grey object");
4078 obj_to_scan->oop_iterate(&cl);
4079 } else if (terminator()->offer_termination()) {
4080 assert(work_q->size() == 0, "Impossible!");
4081 break;
4082 }
4083 }
4084 }
4086 // This is run by the CMS (coordinator) thread.
4087 void CMSConcMarkingTask::coordinator_yield() {
4088 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
4089 "CMS thread should hold CMS token");
4091 // First give up the locks, then yield, then re-lock
4092 // We should probably use a constructor/destructor idiom to
4093 // do this unlock/lock or modify the MutexUnlocker class to
4094 // serve our purpose. XXX
4095 assert_lock_strong(_bit_map_lock);
4096 _bit_map_lock->unlock();
4097 ConcurrentMarkSweepThread::desynchronize(true);
4098 ConcurrentMarkSweepThread::acknowledge_yield_request();
4099 _collector->stopTimer();
4100 if (PrintCMSStatistics != 0) {
4101 _collector->incrementYields();
4102 }
4103 _collector->icms_wait();
4105 // It is possible for whichever thread initiated the yield request
4106 // not to get a chance to wake up and take the bitmap lock between
4107 // this thread releasing it and reacquiring it. So, while the
4108 // should_yield() flag is on, let's sleep for a bit to give the
4109 // other thread a chance to wake up. The limit imposed on the number
4110   // of iterations is defensive, to avoid any unforeseen circumstances
4111 // putting us into an infinite loop. Since it's always been this
4112 // (coordinator_yield()) method that was observed to cause the
4113 // problem, we are using a parameter (CMSCoordinatorYieldSleepCount)
4114 // which is by default non-zero. For the other seven methods that
4115   // also perform the yield operation, we are using a different
4116   // parameter (CMSYieldSleepCount), which is by default zero. This way we
4117 // can enable the sleeping for those methods too, if necessary.
4118 // See 6442774.
4119 //
4120 // We really need to reconsider the synchronization between the GC
4121 // thread and the yield-requesting threads in the future and we
4122 // should really use wait/notify, which is the recommended
4123 // way of doing this type of interaction. Additionally, we should
4124   // consolidate the eight methods that do the yield operation, which
4125   // are almost identical, into one for better maintainability and
4126   // readability. See 6445193.
4127 //
4128 // Tony 2006.06.29
4129 for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
4130 ConcurrentMarkSweepThread::should_yield() &&
4131 !CMSCollector::foregroundGCIsActive(); ++i) {
4132 os::sleep(Thread::current(), 1, false);
4133 ConcurrentMarkSweepThread::acknowledge_yield_request();
4134 }
4136 ConcurrentMarkSweepThread::synchronize(true);
4137 _bit_map_lock->lock_without_safepoint_check();
4138 _collector->startTimer();
4139 }
4141 bool CMSCollector::do_marking_mt(bool asynch) {
4142 assert(ParallelCMSThreads > 0 && conc_workers() != NULL, "precondition");
4143 // In the future this would be determined ergonomically, based
4144 // on #cpu's, # active mutator threads (and load), and mutation rate.
4145 int num_workers = ParallelCMSThreads;
4147 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
4148 CompactibleFreeListSpace* perm_space = _permGen->cmsSpace();
4150 CMSConcMarkingTask tsk(this, cms_space, perm_space,
4151 asynch, num_workers /* number requested XXX */,
4152 conc_workers(), task_queues());
4154 // Since the actual number of workers we get may be different
4155 // from the number we requested above, do we need to do anything different
4156   // below? In particular, maybe we need to subclass the SequentialSubTasksDone
4157 // class?? XXX
4158 cms_space ->initialize_sequential_subtasks_for_marking(num_workers);
4159 perm_space->initialize_sequential_subtasks_for_marking(num_workers);
4161 // Refs discovery is already non-atomic.
4162 assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic");
4163 // Mutate the Refs discovery so it is MT during the
4164 // multi-threaded marking phase.
4165 ReferenceProcessorMTMutator mt(ref_processor(), num_workers > 1);
4167 conc_workers()->start_task(&tsk);
4168 while (tsk.yielded()) {
4169 tsk.coordinator_yield();
4170 conc_workers()->continue_task(&tsk);
4171 }
4172 // If the task was aborted, _restart_addr will be non-NULL
4173 assert(tsk.completed() || _restart_addr != NULL, "Inconsistency");
4174 while (_restart_addr != NULL) {
4175 // XXX For now we do not make use of ABORTED state and have not
4176 // yet implemented the right abort semantics (even in the original
4177 // single-threaded CMS case). That needs some more investigation
4178 // and is deferred for now; see CR# TBF. 07252005YSR. XXX
4179 assert(!CMSAbortSemantics || tsk.aborted(), "Inconsistency");
4180 // If _restart_addr is non-NULL, a marking stack overflow
4181 // occurred; we need to do a fresh marking iteration from the
4182 // indicated restart address.
4183 if (_foregroundGCIsActive && asynch) {
4184 // We may be running into repeated stack overflows, having
4185 // reached the limit of the stack size, while making very
4186 // slow forward progress. It may be best to bail out and
4187 // let the foreground collector do its job.
4188 // Clear _restart_addr, so that foreground GC
4189 // works from scratch. This avoids the headache of
4190 // a "rescan" which would otherwise be needed because
4191 // of the dirty mod union table & card table.
4192 _restart_addr = NULL;
4193 return false;
4194 }
4195 // Adjust the task to restart from _restart_addr
4196 tsk.reset(_restart_addr);
4197 cms_space ->initialize_sequential_subtasks_for_marking(num_workers,
4198 _restart_addr);
4199 perm_space->initialize_sequential_subtasks_for_marking(num_workers,
4200 _restart_addr);
4201 _restart_addr = NULL;
4202 // Get the workers going again
4203 conc_workers()->start_task(&tsk);
4204 while (tsk.yielded()) {
4205 tsk.coordinator_yield();
4206 conc_workers()->continue_task(&tsk);
4207 }
4208 }
4209 assert(tsk.completed(), "Inconsistency");
4210 assert(tsk.result() == true, "Inconsistency");
4211 return true;
4212 }
4214 bool CMSCollector::do_marking_st(bool asynch) {
4215 ResourceMark rm;
4216 HandleMark hm;
4218 MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap,
4219 &_markStack, &_revisitStack, CMSYield && asynch);
4220 // the last argument to iterate indicates whether the iteration
4221 // should be incremental with periodic yields.
4222 _markBitMap.iterate(&markFromRootsClosure);
4223 // If _restart_addr is non-NULL, a marking stack overflow
4224 // occurred; we need to do a fresh iteration from the
4225 // indicated restart address.
4226 while (_restart_addr != NULL) {
4227 if (_foregroundGCIsActive && asynch) {
4228 // We may be running into repeated stack overflows, having
4229 // reached the limit of the stack size, while making very
4230 // slow forward progress. It may be best to bail out and
4231 // let the foreground collector do its job.
4232 // Clear _restart_addr, so that foreground GC
4233 // works from scratch. This avoids the headache of
4234 // a "rescan" which would otherwise be needed because
4235 // of the dirty mod union table & card table.
4236 _restart_addr = NULL;
4237 return false; // indicating failure to complete marking
4238 }
4239 // Deal with stack overflow:
4240 // we restart marking from _restart_addr
4241 HeapWord* ra = _restart_addr;
4242 markFromRootsClosure.reset(ra);
4243 _restart_addr = NULL;
4244 _markBitMap.iterate(&markFromRootsClosure, ra, _span.end());
4245 }
4246 return true;
4247 }
4249 void CMSCollector::preclean() {
4250 check_correct_thread_executing();
4251 assert(Thread::current()->is_ConcurrentGC_thread(), "Wrong thread");
4252 verify_work_stacks_empty();
4253 verify_overflow_empty();
4254 _abort_preclean = false;
4255 if (CMSPrecleaningEnabled) {
4256 _eden_chunk_index = 0;
4257 size_t used = get_eden_used();
4258 size_t capacity = get_eden_capacity();
4259 // Don't start sampling unless we will get sufficiently
4260 // many samples.
4261 if (used < (capacity/(CMSScheduleRemarkSamplingRatio * 100)
4262 * CMSScheduleRemarkEdenPenetration)) {
4263 _start_sampling = true;
4264 } else {
4265 _start_sampling = false;
4266 }
4267 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
4268 CMSPhaseAccounting pa(this, "preclean", !PrintGCDetails);
4269 preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
4270 }
4271 CMSTokenSync x(true); // is cms thread
4272 if (CMSPrecleaningEnabled) {
4273 sample_eden();
4274 _collectorState = AbortablePreclean;
4275 } else {
4276 _collectorState = FinalMarking;
4277 }
4278 verify_work_stacks_empty();
4279 verify_overflow_empty();
4280 }
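// Illustrative arithmetic for the sampling decision above: with, say,
// CMSScheduleRemarkSamplingRatio == 5 and
// CMSScheduleRemarkEdenPenetration == 50, sampling starts only when
// Eden is less than 50/(5*100) = 1/10 full, i.e. only while enough
// allocation remains to yield a useful number of samples.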
4282 // Try and schedule the remark such that young gen
4283 // occupancy is CMSScheduleRemarkEdenPenetration %.
4284 void CMSCollector::abortable_preclean() {
4285 check_correct_thread_executing();
4286 assert(CMSPrecleaningEnabled, "Inconsistent control state");
4287 assert(_collectorState == AbortablePreclean, "Inconsistent control state");
4289 // If Eden's current occupancy is below this threshold,
4290 // immediately schedule the remark; else preclean
4291 // past the next scavenge in an effort to
4292   // schedule the pause as described above. By choosing
4293 // CMSScheduleRemarkEdenSizeThreshold >= max eden size
4294 // we will never do an actual abortable preclean cycle.
4295 if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
4296 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
4297 CMSPhaseAccounting pa(this, "abortable-preclean", !PrintGCDetails);
4298 // We need more smarts in the abortable preclean
4299 // loop below to deal with cases where allocation
4300 // in young gen is very very slow, and our precleaning
4301 // is running a losing race against a horde of
4302 // mutators intent on flooding us with CMS updates
4303 // (dirty cards).
4304 // One, admittedly dumb, strategy is to give up
4305 // after a certain number of abortable precleaning loops
4306 // or after a certain maximum time. We want to make
4307 // this smarter in the next iteration.
4308 // XXX FIX ME!!! YSR
4309 size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0;
4310 while (!(should_abort_preclean() ||
4311 ConcurrentMarkSweepThread::should_terminate())) {
4312 workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2);
4313 cumworkdone += workdone;
4314 loops++;
4315 // Voluntarily terminate abortable preclean phase if we have
4316 // been at it for too long.
4317 if ((CMSMaxAbortablePrecleanLoops != 0) &&
4318 loops >= CMSMaxAbortablePrecleanLoops) {
4319 if (PrintGCDetails) {
4320 gclog_or_tty->print(" CMS: abort preclean due to loops ");
4321 }
4322 break;
4323 }
4324 if (pa.wallclock_millis() > CMSMaxAbortablePrecleanTime) {
4325 if (PrintGCDetails) {
4326 gclog_or_tty->print(" CMS: abort preclean due to time ");
4327 }
4328 break;
4329 }
4330 // If we are doing little work each iteration, we should
4331 // take a short break.
4332 if (workdone < CMSAbortablePrecleanMinWorkPerIteration) {
4333 // Sleep for some time, waiting for work to accumulate
4334 stopTimer();
4335 cmsThread()->wait_on_cms_lock(CMSAbortablePrecleanWaitMillis);
4336 startTimer();
4337 waited++;
4338 }
4339 }
4340 if (PrintCMSStatistics > 0) {
4341 gclog_or_tty->print(" [%d iterations, %d waits, %d cards)] ",
4342 loops, waited, cumworkdone);
4343 }
4344 }
4345 CMSTokenSync x(true); // is cms thread
4346 if (_collectorState != Idling) {
4347 assert(_collectorState == AbortablePreclean,
4348 "Spontaneous state transition?");
4349 _collectorState = FinalMarking;
4350 } // Else, a foreground collection completed this CMS cycle.
4351 return;
4352 }
4354 // Respond to an Eden sampling opportunity
4355 void CMSCollector::sample_eden() {
4356 // Make sure a young gc cannot sneak in between our
4357 // reading and recording of a sample.
4358 assert(Thread::current()->is_ConcurrentGC_thread(),
4359 "Only the cms thread may collect Eden samples");
4360 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
4361 "Should collect samples while holding CMS token");
4362 if (!_start_sampling) {
4363 return;
4364 }
4365 if (_eden_chunk_array) {
4366 if (_eden_chunk_index < _eden_chunk_capacity) {
4367 _eden_chunk_array[_eden_chunk_index] = *_top_addr; // take sample
4368 assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
4369 "Unexpected state of Eden");
4370 // We'd like to check that what we just sampled is an oop-start address;
4371 // however, we cannot do that here since the object may not yet have been
4372 // initialized. So we'll instead do the check when we _use_ this sample
4373 // later.
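// The grain check below spaces samples out; for example (illustrative
// value), with CMSSamplingGrain = 16K heap words a new eden-top sample
// is committed only if it lies at least 16K words beyond the previously
// committed sample, so rescan chunks are never finer than the grain.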
4374 if (_eden_chunk_index == 0 ||
4375 (pointer_delta(_eden_chunk_array[_eden_chunk_index],
4376 _eden_chunk_array[_eden_chunk_index-1])
4377 >= CMSSamplingGrain)) {
4378 _eden_chunk_index++; // commit sample
4379 }
4380 }
4381 }
4382 if ((_collectorState == AbortablePreclean) && !_abort_preclean) {
4383 size_t used = get_eden_used();
4384 size_t capacity = get_eden_capacity();
4385 assert(used <= capacity, "Unexpected state of Eden");
4386 if (used > (capacity/100 * CMSScheduleRemarkEdenPenetration)) {
4387 _abort_preclean = true;
4388 }
4389 }
4390 }
4393 size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
4394 assert(_collectorState == Precleaning ||
4395 _collectorState == AbortablePreclean, "incorrect state");
4396 ResourceMark rm;
4397 HandleMark hm;
4398 // Do one pass of scrubbing the discovered reference lists
4399 // to remove any reference objects with strongly-reachable
4400 // referents.
4401 if (clean_refs) {
4402 ReferenceProcessor* rp = ref_processor();
4403 CMSPrecleanRefsYieldClosure yield_cl(this);
4404 assert(rp->span().equals(_span), "Spans should be equal");
4405 CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
4406 &_markStack, true /* preclean */);
4407 CMSDrainMarkingStackClosure complete_trace(this,
4408 _span, &_markBitMap, &_markStack,
4409 &keep_alive, true /* preclean */);
4411 // We don't want this step to interfere with a young
4412 // collection because we don't want to take CPU
4413 // or memory bandwidth away from the young GC threads
4414 // (which may be as many as there are CPUs).
4415 // Note that we don't need to protect ourselves from
4416 // interference with mutators because they can't
4417 // manipulate the discovered reference lists nor affect
4418 // the computed reachability of the referents, the
4419 // only properties manipulated by the precleaning
4420 // of these reference lists.
4421 stopTimer();
4422 CMSTokenSyncWithLocks x(true /* is cms thread */,
4423 bitMapLock());
4424 startTimer();
4425 sample_eden();
4426 // The following will yield to allow foreground
4427 // collection to proceed promptly. XXX YSR:
4428 // The code in this method may need further
4429 // tweaking for better performance and some restructuring
4430 // for cleaner interfaces.
4431 rp->preclean_discovered_references(
4432 rp->is_alive_non_header(), &keep_alive, &complete_trace,
4433 &yield_cl);
4434 }
4436 if (clean_survivor) { // preclean the active survivor space(s)
4437 assert(_young_gen->kind() == Generation::DefNew ||
4438 _young_gen->kind() == Generation::ParNew ||
4439 _young_gen->kind() == Generation::ASParNew,
4440 "incorrect type for cast");
4441 DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
4442 PushAndMarkClosure pam_cl(this, _span, ref_processor(),
4443 &_markBitMap, &_modUnionTable,
4444 &_markStack, &_revisitStack,
4445 true /* precleaning phase */);
4446 stopTimer();
4447 CMSTokenSyncWithLocks ts(true /* is cms thread */,
4448 bitMapLock());
4449 startTimer();
4450 unsigned int before_count =
4451 GenCollectedHeap::heap()->total_collections();
4452 SurvivorSpacePrecleanClosure
4453 sss_cl(this, _span, &_markBitMap, &_markStack,
4454 &pam_cl, before_count, CMSYield);
4455 dng->from()->object_iterate_careful(&sss_cl);
4456 dng->to()->object_iterate_careful(&sss_cl);
4457 }
4458 MarkRefsIntoAndScanClosure
4459 mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
4460 &_markStack, &_revisitStack, this, CMSYield,
4461 true /* precleaning phase */);
4462 // CAUTION: The following closure has persistent state that may need to
4463 // be reset upon a decrease in the sequence of addresses it
4464 // processes.
4465 ScanMarkedObjectsAgainCarefullyClosure
4466 smoac_cl(this, _span,
4467 &_markBitMap, &_markStack, &_revisitStack, &mrias_cl, CMSYield);
4469 // Preclean dirty cards in ModUnionTable and CardTable using
4470 // appropriate convergence criterion;
4471 // repeat CMSPrecleanIter times unless we find that
4472 // we are losing.
4473 assert(CMSPrecleanIter < 10, "CMSPrecleanIter is too large");
4474 assert(CMSPrecleanNumerator < CMSPrecleanDenominator,
4475 "Bad convergence multiplier");
4476 assert(CMSPrecleanThreshold >= 100,
4477 "Unreasonably low CMSPrecleanThreshold");
4479 size_t numIter, cumNumCards, lastNumCards, curNumCards;
4480 for (numIter = 0, cumNumCards = lastNumCards = curNumCards = 0;
4481 numIter < CMSPrecleanIter;
4482 numIter++, lastNumCards = curNumCards, cumNumCards += curNumCards) {
4483 curNumCards = preclean_mod_union_table(_cmsGen, &smoac_cl);
4484 if (CMSPermGenPrecleaningEnabled) {
4485 curNumCards += preclean_mod_union_table(_permGen, &smoac_cl);
4486 }
4487 if (Verbose && PrintGCDetails) {
4488 gclog_or_tty->print(" (modUnionTable: %d cards)", curNumCards);
4489 }
4490 // Either there are very few dirty cards, so re-mark
4491 // pause will be small anyway, or our pre-cleaning isn't
4492 // that much faster than the rate at which cards are being
4493 // dirtied, so we might as well stop and re-mark since
4494 // precleaning won't improve our re-mark time by much.
4495 if (curNumCards <= CMSPrecleanThreshold ||
4496 (numIter > 0 &&
4497 (curNumCards * CMSPrecleanDenominator >
4498 lastNumCards * CMSPrecleanNumerator))) {
4499 numIter++;
4500 cumNumCards += curNumCards;
4501 break;
4502 }
4503 }
4504 curNumCards = preclean_card_table(_cmsGen, &smoac_cl);
4505 if (CMSPermGenPrecleaningEnabled) {
4506 curNumCards += preclean_card_table(_permGen, &smoac_cl);
4507 }
4508 cumNumCards += curNumCards;
4509 if (PrintGCDetails && PrintCMSStatistics != 0) {
4510 gclog_or_tty->print_cr(" (cardTable: %d cards, re-scanned %d cards, %d iterations)",
4511 curNumCards, cumNumCards, numIter);
4512 }
4513 return cumNumCards; // as a measure of useful work done
4514 }
4516 // PRECLEANING NOTES:
4517 // Precleaning involves:
4518 // . reading the bits of the modUnionTable and clearing the set bits.
4519 // . For the cards corresponding to the set bits, we scan the
4520 // objects on those cards. This means we need the free_list_lock
4521 // so that we can safely iterate over the CMS space when scanning
4522 // for oops.
4523 // . When we scan the objects, we'll be both reading and setting
4524 // marks in the marking bit map, so we'll need the marking bit map.
4525 // . For protecting _collector_state transitions, we take the CGC_lock.
4526 // Note that any races in the reading of card table entries by the
4527 // CMS thread on the one hand and the clearing of those entries by the
4528 // VM thread or the setting of those entries by the mutator threads on the
4529 // other are quite benign. However, for efficiency it makes sense to keep
4530 // the VM thread from racing with the CMS thread while the latter is
4531 // transferring dirty card info to the modUnionTable. We therefore also use the
4532 // CGC_lock to protect the reading of the card table and the mod union
4533 // table by the CMS thread.
4534 // . We run concurrently with mutator updates, so scanning
4535 // needs to be done carefully -- we should not try to scan
4536 // potentially uninitialized objects.
4537 //
4538 // Locking strategy: While holding the CGC_lock, we scan over and
4539 // reset a maximal dirty range of the mod union / card tables, then lock
4540 // the free_list_lock and bitmap lock to do a full marking, then
4541 // release these locks; and repeat the cycle. This allows for a
4542 // certain amount of fairness in the sharing of these locks between
4543 // the CMS collector on the one hand, and the VM thread and the
4544 // mutators on the other.
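// A minimal schematic of that cycle (illustrative only -- the helpers
// named here are hypothetical; see preclean_mod_union_table() and
// preclean_card_table() below for the real code):
//
//   while (more_dirty_cards) {
//     { CMSTokenSync ts(true);               // CGC_lock-based token
//       dirty = get_and_reset_maximal_dirty_range(); }
//     { CMSTokenSyncWithLocks ts(true, freelistLock(), bitMapLock());
//       scan_and_mark_objects_on(dirty); }   // full marking
//   }                                        // locks released; repeat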
4546 // NOTE: preclean_mod_union_table() and preclean_card_table()
4547 // further below are largely identical; if you need to modify
4548 // one of these methods, please check the other method too.
4550 size_t CMSCollector::preclean_mod_union_table(
4551 ConcurrentMarkSweepGeneration* gen,
4552 ScanMarkedObjectsAgainCarefullyClosure* cl) {
4553 verify_work_stacks_empty();
4554 verify_overflow_empty();
4556 // strategy: starting with the first card, accumulate contiguous
4557 // ranges of dirty cards; clear these cards, then scan the region
4558 // covered by these cards.
4560 // Since all of the MUT is committed ahead of time, we can just use
4561 // that, in case the generations expand while we are precleaning.
4562 // It might also be fine to just use the committed part of the
4563 // generation, but we might potentially miss cards when the
4564 // generation is rapidly expanding while we are in the midst
4565 // of precleaning.
4566 HeapWord* startAddr = gen->reserved().start();
4567 HeapWord* endAddr = gen->reserved().end();
4569 cl->setFreelistLock(gen->freelistLock()); // needed for yielding
4571 size_t numDirtyCards, cumNumDirtyCards;
4572 HeapWord *nextAddr, *lastAddr;
4573 for (cumNumDirtyCards = numDirtyCards = 0,
4574 nextAddr = lastAddr = startAddr;
4575 nextAddr < endAddr;
4576 nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
4578 ResourceMark rm;
4579 HandleMark hm;
4581 MemRegion dirtyRegion;
4582 {
4583 stopTimer();
4584 CMSTokenSync ts(true);
4585 startTimer();
4586 sample_eden();
4587 // Get dirty region starting at nextAddr (inclusive),
4588 // simultaneously clearing it.
4589 dirtyRegion =
4590 _modUnionTable.getAndClearMarkedRegion(nextAddr, endAddr);
4591 assert(dirtyRegion.start() >= nextAddr,
4592 "returned region inconsistent?");
4593 }
4594 // Remember where the next search should begin.
4595 // The returned region (if non-empty) is a right open interval,
4596 // so lastAddr is obtained from the right end of that
4597 // interval.
4598 lastAddr = dirtyRegion.end();
4599 // Should do something more transparent and less hacky XXX
4600 numDirtyCards =
4601 _modUnionTable.heapWordDiffToOffsetDiff(dirtyRegion.word_size());
4603 // We'll scan the cards in the dirty region (with periodic
4604 // yields for foreground GC as needed).
4605 if (!dirtyRegion.is_empty()) {
4606 assert(numDirtyCards > 0, "consistency check");
4607 HeapWord* stop_point = NULL;
4608 stopTimer();
4609 CMSTokenSyncWithLocks ts(true, gen->freelistLock(),
4610 bitMapLock());
4611 startTimer();
4612 {
4613 verify_work_stacks_empty();
4614 verify_overflow_empty();
4615 sample_eden();
4616 stop_point =
4617 gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4618 }
4619 if (stop_point != NULL) {
4620 // The careful iteration stopped early either because it found an
4621 // uninitialized object, or because we were in the midst of an
4622 // "abortable preclean", which should now be aborted. Redirty
4623 // the bits corresponding to the partially-scanned or unscanned
4624 // cards. We'll either restart at the next block boundary or
4625 // abort the preclean.
4626 assert((CMSPermGenPrecleaningEnabled && (gen == _permGen)) ||
4627 (_collectorState == AbortablePreclean && should_abort_preclean()),
4628 "Unparsable objects should only be in perm gen.");
4629 _modUnionTable.mark_range(MemRegion(stop_point, dirtyRegion.end()));
4630 if (should_abort_preclean()) {
4631 break; // out of preclean loop
4632 } else {
4633 // Compute the next address at which preclean should pick up;
4634 // might need bitMapLock in order to read P-bits.
4635 lastAddr = next_card_start_after_block(stop_point);
4636 }
4637 }
4638 } else {
4639 assert(lastAddr == endAddr, "consistency check");
4640 assert(numDirtyCards == 0, "consistency check");
4641 break;
4642 }
4643 }
4644 verify_work_stacks_empty();
4645 verify_overflow_empty();
4646 return cumNumDirtyCards;
4647 }
4649 // NOTE: preclean_mod_union_table() above and preclean_card_table()
4650 // below are largely identical; if you need to modify
4651 // one of these methods, please check the other method too.
4653 size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* gen,
4654 ScanMarkedObjectsAgainCarefullyClosure* cl) {
4655 // strategy: it's similar to preclean_mod_union_table above, in that
4656 // we accumulate contiguous ranges of dirty cards, mark these cards
4657 // precleaned, then scan the region covered by these cards.
4658 HeapWord* endAddr = (HeapWord*)(gen->_virtual_space.high());
4659 HeapWord* startAddr = (HeapWord*)(gen->_virtual_space.low());
4661 cl->setFreelistLock(gen->freelistLock()); // needed for yielding
4663 size_t numDirtyCards, cumNumDirtyCards;
4664 HeapWord *lastAddr, *nextAddr;
4666 for (cumNumDirtyCards = numDirtyCards = 0,
4667 nextAddr = lastAddr = startAddr;
4668 nextAddr < endAddr;
4669 nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
4671 ResourceMark rm;
4672 HandleMark hm;
4674 MemRegion dirtyRegion;
4675 {
4676 // See comments in "Precleaning notes" above on why we
4677 // do this locking. XXX Could the locking overheads be
4678 // too high when dirty cards are sparse? [I don't think so.]
4679 stopTimer();
4680 CMSTokenSync x(true); // is cms thread
4681 startTimer();
4682 sample_eden();
4683 // Get and clear dirty region from card table
4684 dirtyRegion = _ct->ct_bs()->dirty_card_range_after_reset(
4685 MemRegion(nextAddr, endAddr),
4686 true,
4687 CardTableModRefBS::precleaned_card_val());
4689 assert(dirtyRegion.start() >= nextAddr,
4690 "returned region inconsistent?");
4691 }
4692 lastAddr = dirtyRegion.end();
4693 numDirtyCards =
4694 dirtyRegion.word_size()/CardTableModRefBS::card_size_in_words;
4696 if (!dirtyRegion.is_empty()) {
4697 stopTimer();
4698 CMSTokenSyncWithLocks ts(true, gen->freelistLock(), bitMapLock());
4699 startTimer();
4700 sample_eden();
4701 verify_work_stacks_empty();
4702 verify_overflow_empty();
4703 HeapWord* stop_point =
4704 gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4705 if (stop_point != NULL) {
4706 // The careful iteration stopped early because it found an
4707 // uninitialized object. Redirty the bits corresponding to the
4708 // partially-scanned or unscanned cards, and start again at the
4709 // next block boundary.
4710 assert(CMSPermGenPrecleaningEnabled ||
4711 (_collectorState == AbortablePreclean && should_abort_preclean()),
4712 "Unparsable objects should only be in perm gen.");
4713 _ct->ct_bs()->invalidate(MemRegion(stop_point, dirtyRegion.end()));
4714 if (should_abort_preclean()) {
4715 break; // out of preclean loop
4716 } else {
4717 // Compute the next address at which preclean should pick up.
4718 lastAddr = next_card_start_after_block(stop_point);
4719 }
4720 }
4721 } else {
4722 break;
4723 }
4724 }
4725 verify_work_stacks_empty();
4726 verify_overflow_empty();
4727 return cumNumDirtyCards;
4728 }
4730 void CMSCollector::checkpointRootsFinal(bool asynch,
4731 bool clear_all_soft_refs, bool init_mark_was_synchronous) {
4732 assert(_collectorState == FinalMarking, "incorrect state transition?");
4733 check_correct_thread_executing();
4734 // world is stopped at this checkpoint
4735 assert(SafepointSynchronize::is_at_safepoint(),
4736 "world should be stopped");
4737 verify_work_stacks_empty();
4738 verify_overflow_empty();
4740 SpecializationStats::clear();
4741 if (PrintGCDetails) {
4742 gclog_or_tty->print("[YG occupancy: "SIZE_FORMAT" K ("SIZE_FORMAT" K)]",
4743 _young_gen->used() / K,
4744 _young_gen->capacity() / K);
4745 }
4746 if (asynch) {
4747 if (CMSScavengeBeforeRemark) {
4748 GenCollectedHeap* gch = GenCollectedHeap::heap();
4749 // Temporarily set the flag to false; GCH->do_collection() expects
4750 // it to be false and will set it to true.
4751 FlagSetting fl(gch->_is_gc_active, false);
4752 NOT_PRODUCT(TraceTime t("Scavenge-Before-Remark",
4753 PrintGCDetails && Verbose, true, gclog_or_tty);)
4754 int level = _cmsGen->level() - 1;
4755 if (level >= 0) {
4756 gch->do_collection(true, // full (i.e. force, see below)
4757 false, // !clear_all_soft_refs
4758 0, // size
4759 false, // is_tlab
4760 level // max_level
4761 );
4762 }
4763 }
4764 FreelistLocker x(this);
4765 MutexLockerEx y(bitMapLock(),
4766 Mutex::_no_safepoint_check_flag);
4767 assert(!init_mark_was_synchronous, "but that's impossible!");
4768 checkpointRootsFinalWork(asynch, clear_all_soft_refs, false);
4769 } else {
4770 // already have all the locks
4771 checkpointRootsFinalWork(asynch, clear_all_soft_refs,
4772 init_mark_was_synchronous);
4773 }
4774 verify_work_stacks_empty();
4775 verify_overflow_empty();
4776 SpecializationStats::print();
4777 }
4779 void CMSCollector::checkpointRootsFinalWork(bool asynch,
4780 bool clear_all_soft_refs, bool init_mark_was_synchronous) {
4782 NOT_PRODUCT(TraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, gclog_or_tty);)
4784 assert(haveFreelistLocks(), "must have free list locks");
4785 assert_lock_strong(bitMapLock());
4787 if (UseAdaptiveSizePolicy) {
4788 size_policy()->checkpoint_roots_final_begin();
4789 }
4791 ResourceMark rm;
4792 HandleMark hm;
4794 GenCollectedHeap* gch = GenCollectedHeap::heap();
4796 if (should_unload_classes()) {
4797 CodeCache::gc_prologue();
4798 }
4799 assert(haveFreelistLocks(), "must have free list locks");
4800 assert_lock_strong(bitMapLock());
4802 if (!init_mark_was_synchronous) {
4803 // We might assume that we need not fill TLAB's when
4804 // CMSScavengeBeforeRemark is set, because we may have just done
4805 // a scavenge which would have filled all TLAB's -- and besides
4806 // Eden would be empty. This however may not always be the case --
4807 // for instance although we asked for a scavenge, it may not have
4808 // happened because of a JNI critical section. We probably need
4809 // a policy for deciding whether we can in that case wait until
4810 // the critical section releases and then do the remark following
4811 // the scavenge, and skip it here. In the absence of that policy,
4812 // or of an indication of whether the scavenge did indeed occur,
4813 // we cannot rely on TLAB's having been filled and must do
4814 // so here just in case a scavenge did not happen.
4815 gch->ensure_parsability(false); // fill TLAB's, but no need to retire them
4816 // Update the saved marks which may affect the root scans.
4817 gch->save_marks();
4819 {
4820 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
4822 // Note on the role of the mod union table:
4823 // Since the marker in "markFromRoots" marks concurrently with
4824 // mutators, it is possible for some reachable objects not to have been
4825 // scanned. For instance, an only reference to an object A was
4826 // placed in object B after the marker scanned B. Unless B is rescanned,
4827 // A would be collected. Such updates to references in marked objects
4828 // are detected via the mod union table which is the set of all cards
4829 // dirtied since the first checkpoint in this GC cycle and prior to
4830 // the most recent young generation GC, minus those cleaned up by the
4831 // concurrent precleaning.
4832 if (CMSParallelRemarkEnabled && ParallelGCThreads > 0) {
4833 TraceTime t("Rescan (parallel) ", PrintGCDetails, false, gclog_or_tty);
4834 do_remark_parallel();
4835 } else {
4836 TraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
4837 gclog_or_tty);
4838 do_remark_non_parallel();
4839 }
4840 }
4841 } else {
4842 assert(!asynch, "Can't have init_mark_was_synchronous in asynch mode");
4843 // The initial mark was stop-world, so there's no rescanning to
4844 // do; go straight on to the next step below.
4845 }
4846 verify_work_stacks_empty();
4847 verify_overflow_empty();
4849 {
4850 NOT_PRODUCT(TraceTime ts("refProcessingWork", PrintGCDetails, false, gclog_or_tty);)
4851 refProcessingWork(asynch, clear_all_soft_refs);
4852 }
4853 verify_work_stacks_empty();
4854 verify_overflow_empty();
4856 if (should_unload_classes()) {
4857 CodeCache::gc_epilogue();
4858 }
4860 // If we encountered any (marking stack / work queue) overflow
4861 // events during the current CMS cycle, take appropriate
4862 // remedial measures, where possible, so as to try and avoid
4863 // recurrence of that condition.
4864 assert(_markStack.isEmpty(), "No grey objects");
4865 size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
4866 _ser_kac_ovflw + _ser_kac_preclean_ovflw;
4867 if (ser_ovflw > 0) {
4868 if (PrintCMSStatistics != 0) {
4869 gclog_or_tty->print_cr("Marking stack overflow (benign) "
4870 "(pmc_pc="SIZE_FORMAT", pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT
4871 ", kac_preclean="SIZE_FORMAT")",
4872 _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw,
4873 _ser_kac_ovflw, _ser_kac_preclean_ovflw);
4874 }
4875 _markStack.expand();
4876 _ser_pmc_remark_ovflw = 0;
4877 _ser_pmc_preclean_ovflw = 0;
4878 _ser_kac_preclean_ovflw = 0;
4879 _ser_kac_ovflw = 0;
4880 }
4881 if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) {
4882 if (PrintCMSStatistics != 0) {
4883 gclog_or_tty->print_cr("Work queue overflow (benign) "
4884 "(pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT")",
4885 _par_pmc_remark_ovflw, _par_kac_ovflw);
4886 }
4887 _par_pmc_remark_ovflw = 0;
4888 _par_kac_ovflw = 0;
4889 }
4890 if (PrintCMSStatistics != 0) {
4891 if (_markStack._hit_limit > 0) {
4892 gclog_or_tty->print_cr(" (benign) Hit max stack size limit ("SIZE_FORMAT")",
4893 _markStack._hit_limit);
4894 }
4895 if (_markStack._failed_double > 0) {
4896 gclog_or_tty->print_cr(" (benign) Failed stack doubling ("SIZE_FORMAT"),"
4897 " current capacity "SIZE_FORMAT,
4898 _markStack._failed_double,
4899 _markStack.capacity());
4900 }
4901 }
4902 _markStack._hit_limit = 0;
4903 _markStack._failed_double = 0;
4905 if ((VerifyAfterGC || VerifyDuringGC) &&
4906 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
4907 verify_after_remark();
4908 }
4910 // Change under the freelistLocks.
4911 _collectorState = Sweeping;
4912 // Call isAllClear() under bitMapLock
4913 assert(_modUnionTable.isAllClear(), "Should be clear by end of the"
4914 " final marking");
4915 if (UseAdaptiveSizePolicy) {
4916 size_policy()->checkpoint_roots_final_end(gch->gc_cause());
4917 }
4918 }
4920 // Parallel remark task
4921 class CMSParRemarkTask: public AbstractGangTask {
4922 CMSCollector* _collector;
4923 WorkGang* _workers;
4924 int _n_workers;
4925 CompactibleFreeListSpace* _cms_space;
4926 CompactibleFreeListSpace* _perm_space;
4928 // The per-thread work queues, available here for stealing.
4929 OopTaskQueueSet* _task_queues;
4930 ParallelTaskTerminator _term;
4932 public:
4933 CMSParRemarkTask(CMSCollector* collector,
4934 CompactibleFreeListSpace* cms_space,
4935 CompactibleFreeListSpace* perm_space,
4936 int n_workers, WorkGang* workers,
4937 OopTaskQueueSet* task_queues):
4938 AbstractGangTask("Rescan roots and grey objects in parallel"),
4939 _collector(collector),
4940 _cms_space(cms_space), _perm_space(perm_space),
4941 _n_workers(n_workers),
4942 _workers(workers),
4943 _task_queues(task_queues),
4944 _term(workers->total_workers(), task_queues) { }
4946 OopTaskQueueSet* task_queues() { return _task_queues; }
4948 OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
4950 ParallelTaskTerminator* terminator() { return &_term; }
4952 void work(int i);
4954 private:
4955 // Work method in support of parallel rescan ... of young gen spaces
4956 void do_young_space_rescan(int i, Par_MarkRefsIntoAndScanClosure* cl,
4957 ContiguousSpace* space,
4958 HeapWord** chunk_array, size_t chunk_top);
4960 // ... of dirty cards in old space
4961 void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i,
4962 Par_MarkRefsIntoAndScanClosure* cl);
4964 // ... work stealing for the above
4965 void do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl, int* seed);
4966 };
4968 void CMSParRemarkTask::work(int i) {
4969 elapsedTimer _timer;
4970 ResourceMark rm;
4971 HandleMark hm;
4973 // ---------- rescan from roots --------------
4974 _timer.start();
4975 GenCollectedHeap* gch = GenCollectedHeap::heap();
4976 Par_MarkRefsIntoAndScanClosure par_mrias_cl(_collector,
4977 _collector->_span, _collector->ref_processor(),
4978 &(_collector->_markBitMap),
4979 work_queue(i), &(_collector->_revisitStack));
4981 // Rescan young gen roots first since these are likely
4982 // coarsely partitioned and may, on that account, constitute
4983 // the critical path; thus, it's best to start off that
4984 // work first.
4985 // ---------- young gen roots --------------
4986 {
4987 DefNewGeneration* dng = _collector->_young_gen->as_DefNewGeneration();
4988 EdenSpace* eden_space = dng->eden();
4989 ContiguousSpace* from_space = dng->from();
4990 ContiguousSpace* to_space = dng->to();
4992 HeapWord** eca = _collector->_eden_chunk_array;
4993 size_t ect = _collector->_eden_chunk_index;
4994 HeapWord** sca = _collector->_survivor_chunk_array;
4995 size_t sct = _collector->_survivor_chunk_index;
4997 assert(ect <= _collector->_eden_chunk_capacity, "out of bounds");
4998 assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds");
5000 do_young_space_rescan(i, &par_mrias_cl, to_space, NULL, 0);
5001 do_young_space_rescan(i, &par_mrias_cl, from_space, sca, sct);
5002 do_young_space_rescan(i, &par_mrias_cl, eden_space, eca, ect);
5004 _timer.stop();
5005 if (PrintCMSStatistics != 0) {
5006 gclog_or_tty->print_cr(
5007 "Finished young gen rescan work in %dth thread: %3.3f sec",
5008 i, _timer.seconds());
5009 }
5010 }
5012 // ---------- remaining roots --------------
5013 _timer.reset();
5014 _timer.start();
5015 gch->gen_process_strong_roots(_collector->_cmsGen->level(),
5016 false, // yg was scanned above
5017 true, // collecting perm gen
5018 SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
5019 NULL, &par_mrias_cl);
5020 _timer.stop();
5021 if (PrintCMSStatistics != 0) {
5022 gclog_or_tty->print_cr(
5023 "Finished remaining root rescan work in %dth thread: %3.3f sec",
5024 i, _timer.seconds());
5025 }
5027 // ---------- rescan dirty cards ------------
5028 _timer.reset();
5029 _timer.start();
5031 // Do the rescan tasks for each of the two spaces
5032 // (cms_space and perm_space) in turn.
5033 do_dirty_card_rescan_tasks(_cms_space, i, &par_mrias_cl);
5034 do_dirty_card_rescan_tasks(_perm_space, i, &par_mrias_cl);
5035 _timer.stop();
5036 if (PrintCMSStatistics != 0) {
5037 gclog_or_tty->print_cr(
5038 "Finished dirty card rescan work in %dth thread: %3.3f sec",
5039 i, _timer.seconds());
5040 }
5042 // ---------- steal work from other threads ...
5043 // ---------- ... and drain overflow list.
5044 _timer.reset();
5045 _timer.start();
5046 do_work_steal(i, &par_mrias_cl, _collector->hash_seed(i));
5047 _timer.stop();
5048 if (PrintCMSStatistics != 0) {
5049 gclog_or_tty->print_cr(
5050 "Finished work stealing in %dth thread: %3.3f sec",
5051 i, _timer.seconds());
5052 }
5053 }
5055 void
5056 CMSParRemarkTask::do_young_space_rescan(int i,
5057 Par_MarkRefsIntoAndScanClosure* cl, ContiguousSpace* space,
5058 HeapWord** chunk_array, size_t chunk_top) {
5059 // Until all tasks completed:
5060 // . claim an unclaimed task
5061 // . compute region boundaries corresponding to task claimed
5062 // using chunk_array
5063 // . par_oop_iterate(cl) over that region
5065 ResourceMark rm;
5066 HandleMark hm;
5068 SequentialSubTasksDone* pst = space->par_seq_tasks();
5069 assert(pst->valid(), "Uninitialized use?");
5071 int nth_task = 0;
5072 int n_tasks = pst->n_tasks();
5074 HeapWord *start, *end;
5075 while (!pst->is_task_claimed(/* reference */ nth_task)) {
5076 // We claimed task # nth_task; compute its boundaries.
5077 if (chunk_top == 0) { // no samples were taken
5078 assert(nth_task == 0 && n_tasks == 1, "Can have only 1 EdenSpace task");
5079 start = space->bottom();
5080 end = space->top();
5081 } else if (nth_task == 0) {
5082 start = space->bottom();
5083 end = chunk_array[nth_task];
5084 } else if (nth_task < (jint)chunk_top) {
5085 assert(nth_task >= 1, "Control point invariant");
5086 start = chunk_array[nth_task - 1];
5087 end = chunk_array[nth_task];
5088 } else {
5089 assert(nth_task == (jint)chunk_top, "Control point invariant");
5090 start = chunk_array[chunk_top - 1];
5091 end = space->top();
5092 }
5093 MemRegion mr(start, end);
5094 // Verify that mr is in space
5095 assert(mr.is_empty() || space->used_region().contains(mr),
5096 "Should be in space");
5097 // Verify that "start" is an object boundary
5098 assert(mr.is_empty() || oop(mr.start())->is_oop(),
5099 "Should be an oop");
5100 space->par_oop_iterate(mr, cl);
5101 }
5102 pst->all_tasks_completed();
5103 }
5105 void
5106 CMSParRemarkTask::do_dirty_card_rescan_tasks(
5107 CompactibleFreeListSpace* sp, int i,
5108 Par_MarkRefsIntoAndScanClosure* cl) {
5109 // Until all tasks completed:
5110 // . claim an unclaimed task
5111 // . compute region boundaries corresponding to task claimed
5112 // . transfer dirty bits ct->mut for that region
5113 // . apply rescanclosure to dirty mut bits for that region
5115 ResourceMark rm;
5116 HandleMark hm;
5118 OopTaskQueue* work_q = work_queue(i);
5119 ModUnionClosure modUnionClosure(&(_collector->_modUnionTable));
5120 // CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION!
5121 // CAUTION: This closure has state that persists across calls to
5122 // the work method dirty_range_iterate_clear() in that it has
5123 // embedded in it a (subtype of) UpwardsObjectClosure. The
5124 // use of that state in the embedded UpwardsObjectClosure instance
5125 // assumes that the cards are always iterated (even if in parallel
5126 // by several threads) in monotonically increasing order per each
5127 // thread. This is true of the implementation below which picks
5128 // card ranges (chunks) in monotonically increasing order globally
5129 // and, a fortiori, in monotonically increasing order per thread
5130 // (the latter order being a subsequence of the former).
5131 // If the work code below is ever reorganized into a more chaotic
5132 // work-partitioning form than the current "sequential tasks"
5133 // paradigm, the use of that persistent state will have to be
5134 // revisited and modified appropriately. See also related
5135 // bug 4756801 work on which should examine this code to make
5136 // sure that the changes there do not run counter to the
5137 // assumptions made here and necessary for correctness and
5138 // efficiency. Note also that this code might yield inefficient
5139 // behaviour in the case of very large objects that span one or
5140 // more work chunks. Such objects would potentially be scanned
5141 // several times redundantly. Work on 4756801 should try and
5142 // address that performance anomaly if at all possible. XXX
5143 MemRegion full_span = _collector->_span;
5144 CMSBitMap* bm = &(_collector->_markBitMap); // shared
5145 CMSMarkStack* rs = &(_collector->_revisitStack); // shared
5146 MarkFromDirtyCardsClosure
5147 greyRescanClosure(_collector, full_span, // entire span of interest
5148 sp, bm, work_q, rs, cl);
5150 SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
5151 assert(pst->valid(), "Uninitialized use?");
5152 int nth_task = 0;
5153 const int alignment = CardTableModRefBS::card_size * BitsPerWord;
5154 MemRegion span = sp->used_region();
5155 HeapWord* start_addr = span.start();
5156 HeapWord* end_addr = (HeapWord*)round_to((intptr_t)span.end(),
5157 alignment);
5158 const size_t chunk_size = sp->rescan_task_size(); // in HeapWord units
5159 assert((HeapWord*)round_to((intptr_t)start_addr, alignment) ==
5160 start_addr, "Check alignment");
5161 assert((size_t)round_to((intptr_t)chunk_size, alignment) ==
5162 chunk_size, "Check alignment");
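// For example (illustrative figures, assuming 512-byte cards and a
// 64-bit VM with BitsPerWord == 64): alignment is 512 * 64 = 32K bytes,
// exactly the span of heap covered by one mod union table word, so no
// two workers ever set or clear bits in the same MUT word.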
5164 while (!pst->is_task_claimed(/* reference */ nth_task)) {
5165 // Having claimed the nth_task, compute corresponding mem-region,
5166 // which is a fortiori aligned correctly (i.e. at a MUT boundary).
5167 // The alignment restriction ensures that we do not need any
5168 // synchronization with other gang-workers while setting or
5169 // clearing bits in this chunk of the MUT.
5170 MemRegion this_span = MemRegion(start_addr + nth_task*chunk_size,
5171 start_addr + (nth_task+1)*chunk_size);
5172 // The last chunk's end might be way beyond end of the
5173 // used region. In that case pull back appropriately.
5174 if (this_span.end() > end_addr) {
5175 this_span.set_end(end_addr);
5176 assert(!this_span.is_empty(), "Program logic (calculation of n_tasks)");
5177 }
5178 // Iterate over the dirty cards covering this chunk, marking them
5179 // precleaned, and setting the corresponding bits in the mod union
5180 // table. Since we have been careful to partition at Card and MUT-word
5181 // boundaries no synchronization is needed between parallel threads.
5182 _collector->_ct->ct_bs()->dirty_card_iterate(this_span,
5183 &modUnionClosure);
5185 // Having transferred these marks into the modUnionTable,
5186 // rescan the marked objects on the dirty cards in the modUnionTable.
5187 // Even if this is a synchronous collection, the initial marking
5188 // may have been done during an asynchronous collection so there
5189 // may be dirty bits in the mod-union table.
5190 _collector->_modUnionTable.dirty_range_iterate_clear(
5191 this_span, &greyRescanClosure);
5192 _collector->_modUnionTable.verifyNoOneBitsInRange(
5193 this_span.start(),
5194 this_span.end());
5195 }
5196 pst->all_tasks_completed(); // declare that i am done
5197 }
5199 // . see if we can share work_queues with ParNew? XXX
5200 void
5201 CMSParRemarkTask::do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl,
5202 int* seed) {
5203 OopTaskQueue* work_q = work_queue(i);
5204 NOT_PRODUCT(int num_steals = 0;)
5205 oop obj_to_scan;
5206 CMSBitMap* bm = &(_collector->_markBitMap);
5207 size_t num_from_overflow_list =
5208 MIN2((size_t)work_q->max_elems()/4,
5209 (size_t)ParGCDesiredObjsFromOverflowList);
5211 while (true) {
5212 // Completely finish any left over work from (an) earlier round(s)
5213 cl->trim_queue(0);
5214 // Now check if there's any work in the overflow list
5215 if (_collector->par_take_from_overflow_list(num_from_overflow_list,
5216 work_q)) {
5217 // found something in global overflow list;
5218 // not yet ready to go stealing work from others.
5219 // We'd like to assert(work_q->size() != 0, ...)
5220 // because we just took work from the overflow list,
5221 // but of course we can't since all of that could have
5222 // been already stolen from us.
5223 // "He giveth and He taketh away."
5224 continue;
5225 }
5226 // Verify that we have no work before we resort to stealing
5227 assert(work_q->size() == 0, "Have work, shouldn't steal");
5228 // Try to steal from other queues that have work
5229 if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
5230 NOT_PRODUCT(num_steals++;)
5231 assert(obj_to_scan->is_oop(), "Oops, not an oop!");
5232 assert(bm->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
5233 // Do scanning work
5234 obj_to_scan->oop_iterate(cl);
5235 // Loop around, finish this work, and try to steal some more
5236 } else if (terminator()->offer_termination()) {
5237 break; // nirvana from the infinite cycle
5238 }
5239 }
5240 NOT_PRODUCT(
5241 if (PrintCMSStatistics != 0) {
5242 gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
5243 }
5244 )
5245 assert(work_q->size() == 0 && _collector->overflow_list_is_empty(),
5246 "Else our work is not yet done");
5247 }
5249 // Return a thread-local PLAB recording array, as appropriate.
5250 void* CMSCollector::get_data_recorder(int thr_num) {
5251 if (_survivor_plab_array != NULL &&
5252 (CMSPLABRecordAlways ||
5253 (_collectorState > Marking && _collectorState < FinalMarking))) {
5254 assert(thr_num < (int)ParallelGCThreads, "thr_num is out of bounds");
5255 ChunkArray* ca = &_survivor_plab_array[thr_num];
5256 ca->reset(); // clear it so that fresh data is recorded
5257 return (void*) ca;
5258 } else {
5259 return NULL;
5260 }
5261 }
5263 // Reset all the thread-local PLAB recording arrays
5264 void CMSCollector::reset_survivor_plab_arrays() {
5265 for (uint i = 0; i < ParallelGCThreads; i++) {
5266 _survivor_plab_array[i].reset();
5267 }
5268 }
5270 // Merge the per-thread plab arrays into the global survivor chunk
5271 // array which will provide the partitioning of the survivor space
5272 // for CMS rescan.
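// The loop below is a k-way merge (k == ParallelGCThreads) of k
// individually sorted per-thread PLAB arrays: each round selects the
// smallest not-yet-consumed address across all arrays. A sketch in
// plain C++ of the same idea (illustrative only; plab, cursor and end
// are stand-ins for the fields used below):
//
//   size_t i = 0;
//   for (;;) {
//     HeapWord* min_val = top;      // sentinel above any PLAB address
//     uint      min_tid = 0;
//     for (uint j = 0; j < k; j++) {
//       if (cursor[j] < end[j] && plab[j][cursor[j]] < min_val) {
//         min_val = plab[j][cursor[j]]; min_tid = j;
//       }
//     }
//     if (min_val == top) break;    // all arrays exhausted
//     chunk_array[i++] = min_val; cursor[min_tid]++;
//   }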
5273 void CMSCollector::merge_survivor_plab_arrays(ContiguousSpace* surv) {
5274 assert(_survivor_plab_array != NULL, "Error");
5275 assert(_survivor_chunk_array != NULL, "Error");
5276 assert(_collectorState == FinalMarking, "Error");
5277 for (uint j = 0; j < ParallelGCThreads; j++) {
5278 _cursor[j] = 0;
5279 }
5280 HeapWord* top = surv->top();
5281 size_t i;
5282 for (i = 0; i < _survivor_chunk_capacity; i++) { // all sca entries
5283 HeapWord* min_val = top; // Higher than any PLAB address
5284 uint min_tid = 0; // position of min_val this round
5285 for (uint j = 0; j < ParallelGCThreads; j++) {
5286 ChunkArray* cur_sca = &_survivor_plab_array[j];
5287 if (_cursor[j] == cur_sca->end()) {
5288 continue;
5289 }
5290 assert(_cursor[j] < cur_sca->end(), "ctl pt invariant");
5291 HeapWord* cur_val = cur_sca->nth(_cursor[j]);
5292 assert(surv->used_region().contains(cur_val), "Out of bounds value");
5293 if (cur_val < min_val) {
5294 min_tid = j;
5295 min_val = cur_val;
5296 } else {
5297 assert(cur_val < top, "All recorded addresses should be less");
5298 }
5299 }
5300 // At this point min_val and min_tid are respectively
5301 // the least address in _survivor_plab_array[j]->nth(_cursor[j])
5302 // and the thread (j) that witnesses that address.
5303 // We record this address in the _survivor_chunk_array[i]
5304 // and increment _cursor[min_tid] prior to the next round i.
5305 if (min_val == top) {
5306 break;
5307 }
5308 _survivor_chunk_array[i] = min_val;
5309 _cursor[min_tid]++;
5310 }
5311 // We are all done; record the size of the _survivor_chunk_array
5312 _survivor_chunk_index = i; // exclusive: [0, i)
5313 if (PrintCMSStatistics > 0) {
5314 gclog_or_tty->print(" (Survivor:" SIZE_FORMAT " chunks) ", i);
5315 }
5316 // Verify that we used up all the recorded entries
5317 #ifdef ASSERT
5318 size_t total = 0;
5319 for (uint j = 0; j < ParallelGCThreads; j++) {
5320 assert(_cursor[j] == _survivor_plab_array[j].end(), "Ctl pt invariant");
5321 total += _cursor[j];
5322 }
5323 assert(total == _survivor_chunk_index, "Ctl Pt Invariant");
5324 // Check that the merged array is in sorted order
5325 if (total > 0) {
5326 for (size_t i = 0; i < total - 1; i++) {
5327 if (PrintCMSStatistics > 0) {
5328 gclog_or_tty->print(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ",
5329 i, _survivor_chunk_array[i]);
5330 }
5331 assert(_survivor_chunk_array[i] < _survivor_chunk_array[i+1],
5332 "Not sorted");
5333 }
5334 }
5335 #endif // ASSERT
5336 }
5338 // Set up the space's par_seq_tasks structure for work claiming
5339 // for parallel rescan of young gen.
5340 // See ParRescanTask where this is currently used.
5341 void
5342 CMSCollector::
5343 initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
5344 assert(n_threads > 0, "Unexpected n_threads argument");
5345 DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
5347 // Eden space
5348 {
5349 SequentialSubTasksDone* pst = dng->eden()->par_seq_tasks();
5350 assert(!pst->valid(), "Clobbering existing data?");
5351 // Each valid entry in [0, _eden_chunk_index) represents a task.
5352 size_t n_tasks = _eden_chunk_index + 1;
5353 assert(n_tasks == 1 || _eden_chunk_array != NULL, "Error");
5354 pst->set_par_threads(n_threads);
5355 pst->set_n_tasks((int)n_tasks);
5356 }
5358 // Merge the survivor plab arrays into _survivor_chunk_array
5359 if (_survivor_plab_array != NULL) {
5360 merge_survivor_plab_arrays(dng->from());
5361 } else {
5362 assert(_survivor_chunk_index == 0, "Error");
5363 }
5365 // To space
5366 {
5367 SequentialSubTasksDone* pst = dng->to()->par_seq_tasks();
5368 assert(!pst->valid(), "Clobbering existing data?");
5369 pst->set_par_threads(n_threads);
5370 pst->set_n_tasks(1);
5371 assert(pst->valid(), "Error");
5372 }
5374 // From space
5375 {
5376 SequentialSubTasksDone* pst = dng->from()->par_seq_tasks();
5377 assert(!pst->valid(), "Clobbering existing data?");
5378 size_t n_tasks = _survivor_chunk_index + 1;
5379 assert(n_tasks == 1 || _survivor_chunk_array != NULL, "Error");
5380 pst->set_par_threads(n_threads);
5381 pst->set_n_tasks((int)n_tasks);
5382 assert(pst->valid(), "Error");
5383 }
5384 }
5386 // Parallel version of remark
5387 void CMSCollector::do_remark_parallel() {
5388 GenCollectedHeap* gch = GenCollectedHeap::heap();
5389 WorkGang* workers = gch->workers();
5390 assert(workers != NULL, "Need parallel worker threads.");
5391 int n_workers = workers->total_workers();
5392 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
5393 CompactibleFreeListSpace* perm_space = _permGen->cmsSpace();
5395 CMSParRemarkTask tsk(this,
5396 cms_space, perm_space,
5397 n_workers, workers, task_queues());
5399 // Set up for parallel process_strong_roots work.
5400 gch->set_par_threads(n_workers);
5401 gch->change_strong_roots_parity();
5402 // We won't be iterating over the cards in the card table updating
5403 // the younger_gen cards, so we shouldn't call the following else
5404 // the verification code as well as subsequent younger_refs_iterate
5405 // code would get confused. XXX
5406 // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
5408 // The young gen rescan work will not be done as part of
5409 // process_strong_roots (which currently doesn't know how to
5410 // parallelize such a scan), but rather will be broken up into
5411 // a set of parallel tasks (via the sampling that the [abortable]
5412 // preclean phase did of EdenSpace, plus the [two] tasks of
5413 // scanning the [two] survivor spaces). Further fine-grain
5414 // parallelization of the scanning of the survivor spaces
5415 // themselves, and of precleaning of the younger gen itself
5416 // is deferred to the future.
5417 initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
5419 // The dirty card rescan work is broken up into a "sequence"
5420 // of parallel tasks (per constituent space) that are dynamically
5421 // claimed by the parallel threads.
5422 cms_space->initialize_sequential_subtasks_for_rescan(n_workers);
5423 perm_space->initialize_sequential_subtasks_for_rescan(n_workers);
5425 // It turns out that even when we're using 1 thread, doing the work in a
5426 // separate thread causes wide variance in run times. We can't help this
5427 // in the multi-threaded case, but we special-case n=1 here to get
5428 // repeatable measurements of the 1-thread overhead of the parallel code.
5429 if (n_workers > 1) {
5430 // Make refs discovery MT-safe
5431 ReferenceProcessorMTMutator mt(ref_processor(), true);
5432 workers->run_task(&tsk);
5433 } else {
5434 tsk.work(0);
5435 }
5436 gch->set_par_threads(0); // 0 ==> non-parallel.
5437 // restore, single-threaded for now, any preserved marks
5438 // as a result of work_q overflow
5439 restore_preserved_marks_if_any();
5440 }
5442 // Non-parallel version of remark
5443 void CMSCollector::do_remark_non_parallel() {
5444 ResourceMark rm;
5445 HandleMark hm;
5446 GenCollectedHeap* gch = GenCollectedHeap::heap();
5447 MarkRefsIntoAndScanClosure
5448 mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
5449 &_markStack, &_revisitStack, this,
5450 false /* should_yield */, false /* not precleaning */);
5451 MarkFromDirtyCardsClosure
5452 markFromDirtyCardsClosure(this, _span,
5453 NULL, // space is set further below
5454 &_markBitMap, &_markStack, &_revisitStack,
5455 &mrias_cl);
5456 {
5457 TraceTime t("grey object rescan", PrintGCDetails, false, gclog_or_tty);
5458 // Iterate over the dirty cards, setting the corresponding bits in the
5459 // mod union table.
5460 {
5461 ModUnionClosure modUnionClosure(&_modUnionTable);
5462 _ct->ct_bs()->dirty_card_iterate(
5463 _cmsGen->used_region(),
5464 &modUnionClosure);
5465 _ct->ct_bs()->dirty_card_iterate(
5466 _permGen->used_region(),
5467 &modUnionClosure);
5468 }
5469 // Having transferred these marks into the modUnionTable, we just need
5470 // to rescan the marked objects on the dirty cards in the modUnionTable.
5471 // The initial marking may have been done during an asynchronous
5472 // collection so there may be dirty bits in the mod-union table.
5473 const int alignment =
5474 CardTableModRefBS::card_size * BitsPerWord;
5475 {
5476 // ... First handle dirty cards in CMS gen
5477 markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace());
5478 MemRegion ur = _cmsGen->used_region();
5479 HeapWord* lb = ur.start();
5480 HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
5481 MemRegion cms_span(lb, ub);
5482 _modUnionTable.dirty_range_iterate_clear(cms_span,
5483 &markFromDirtyCardsClosure);
5484 verify_work_stacks_empty();
5485 if (PrintCMSStatistics != 0) {
5486 gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in cms gen) ",
5487 markFromDirtyCardsClosure.num_dirty_cards());
5488 }
5489 }
5490 {
5491 // .. and then repeat for dirty cards in perm gen
5492 markFromDirtyCardsClosure.set_space(_permGen->cmsSpace());
5493 MemRegion ur = _permGen->used_region();
5494 HeapWord* lb = ur.start();
5495 HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
5496 MemRegion perm_span(lb, ub);
5497 _modUnionTable.dirty_range_iterate_clear(perm_span,
5498 &markFromDirtyCardsClosure);
5499 verify_work_stacks_empty();
5500 if (PrintCMSStatistics != 0) {
5501 gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in perm gen) ",
5502 markFromDirtyCardsClosure.num_dirty_cards());
5503 }
5504 }
5505 }
5506 if (VerifyDuringGC &&
5507 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5508 HandleMark hm; // Discard invalid handles created during verification
5509 Universe::verify(true);
5510 }
5511 {
5512 TraceTime t("root rescan", PrintGCDetails, false, gclog_or_tty);
5514 verify_work_stacks_empty();
5516 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
5517 gch->gen_process_strong_roots(_cmsGen->level(),
5518 true, // younger gens as roots
5519 true, // collecting perm gen
5520 SharedHeap::ScanningOption(roots_scanning_options()),
5521 NULL, &mrias_cl);
5522 }
5523 verify_work_stacks_empty();
5524 // Restore evacuated mark words, if any, used for overflow list links
5525 if (!CMSOverflowEarlyRestoration) {
5526 restore_preserved_marks_if_any();
5527 }
5528 verify_overflow_empty();
5529 }
5531 ////////////////////////////////////////////////////////
5532 // Parallel Reference Processing Task Proxy Class
5533 ////////////////////////////////////////////////////////
5534 class CMSRefProcTaskProxy: public AbstractGangTask {
5535 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
5536 CMSCollector* _collector;
5537 CMSBitMap* _mark_bit_map;
5538 const MemRegion _span;
5539 OopTaskQueueSet* _task_queues;
5540 ParallelTaskTerminator _term;
5541 ProcessTask& _task;
5543 public:
5544 CMSRefProcTaskProxy(ProcessTask& task,
5545 CMSCollector* collector,
5546 const MemRegion& span,
5547 CMSBitMap* mark_bit_map,
5548 int total_workers,
5549 OopTaskQueueSet* task_queues):
5550 AbstractGangTask("Process referents by policy in parallel"),
5551 _task(task),
5552 _collector(collector), _span(span), _mark_bit_map(mark_bit_map),
5553 _task_queues(task_queues),
5554 _term(total_workers, task_queues)
5555 {
5556 assert(_collector->_span.equals(_span) && !_span.is_empty(),
5557 "Inconsistency in _span");
5558 }
5560 OopTaskQueueSet* task_queues() { return _task_queues; }
5562 OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
5564 ParallelTaskTerminator* terminator() { return &_term; }
5566 void do_work_steal(int i,
5567 CMSParDrainMarkingStackClosure* drain,
5568 CMSParKeepAliveClosure* keep_alive,
5569 int* seed);
5571 virtual void work(int i);
5572 };
5574 void CMSRefProcTaskProxy::work(int i) {
5575 assert(_collector->_span.equals(_span), "Inconsistency in _span");
5576 CMSParKeepAliveClosure par_keep_alive(_collector, _span,
5577 _mark_bit_map, work_queue(i));
5578 CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span,
5579 _mark_bit_map, work_queue(i));
5580 CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map);
5581 _task.work(i, is_alive_closure, par_keep_alive, par_drain_stack);
5582 if (_task.marks_oops_alive()) {
5583 do_work_steal(i, &par_drain_stack, &par_keep_alive,
5584 _collector->hash_seed(i));
5585 }
5586 assert(work_queue(i)->size() == 0, "work_queue should be empty");
5587 assert(_collector->_overflow_list == NULL, "non-empty _overflow_list");
5588 }
5590 class CMSRefEnqueueTaskProxy: public AbstractGangTask {
5591 typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
5592 EnqueueTask& _task;
5594 public:
5595 CMSRefEnqueueTaskProxy(EnqueueTask& task)
5596 : AbstractGangTask("Enqueue reference objects in parallel"),
5597 _task(task)
5598 { }
5600 virtual void work(int i)
5601 {
5602 _task.work(i);
5603 }
5604 };
5606 CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector,
5607 MemRegion span, CMSBitMap* bit_map, OopTaskQueue* work_queue):
5608 _collector(collector),
5609 _span(span),
5610 _bit_map(bit_map),
5611 _work_queue(work_queue),
5612 _mark_and_push(collector, span, bit_map, work_queue),
5613 _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
5614 (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads)))
5615 { }
5617 // . see if we can share work_queues with ParNew? XXX
5618 void CMSRefProcTaskProxy::do_work_steal(int i,
5619 CMSParDrainMarkingStackClosure* drain,
5620 CMSParKeepAliveClosure* keep_alive,
5621 int* seed) {
5622 OopTaskQueue* work_q = work_queue(i);
5623 NOT_PRODUCT(int num_steals = 0;)
5624 oop obj_to_scan;
5625 size_t num_from_overflow_list =
5626 MIN2((size_t)work_q->max_elems()/4,
5627 (size_t)ParGCDesiredObjsFromOverflowList);
5629 while (true) {
5630 // Completely finish any left over work from (an) earlier round(s)
5631 drain->trim_queue(0);
5632 // Now check if there's any work in the overflow list
5633 if (_collector->par_take_from_overflow_list(num_from_overflow_list,
5634 work_q)) {
5635 // Found something in global overflow list;
5636 // not yet ready to go stealing work from others.
5637 // We'd like to assert(work_q->size() != 0, ...)
5638 // because we just took work from the overflow list,
5639 // but of course we can't, since all of that might have
5640 // been already stolen from us.
5641 continue;
5642 }
5643 // Verify that we have no work before we resort to stealing
5644 assert(work_q->size() == 0, "Have work, shouldn't steal");
5645 // Try to steal from other queues that have work
5646 if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
5647 NOT_PRODUCT(num_steals++;)
5648 assert(obj_to_scan->is_oop(), "Oops, not an oop!");
5649 assert(_mark_bit_map->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
5650 // Do scanning work
5651 obj_to_scan->oop_iterate(keep_alive);
5652 // Loop around, finish this work, and try to steal some more
5653 } else if (terminator()->offer_termination()) {
5654 break; // nirvana from the infinite cycle
5655 }
5656 }
5657 NOT_PRODUCT(
5658 if (PrintCMSStatistics != 0) {
5659 gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
5660 }
5661 )
5662 }
5664 void CMSRefProcTaskExecutor::execute(ProcessTask& task)
5665 {
5666 GenCollectedHeap* gch = GenCollectedHeap::heap();
5667 WorkGang* workers = gch->workers();
5668 assert(workers != NULL, "Need parallel worker threads.");
5669 int n_workers = workers->total_workers();
5670 CMSRefProcTaskProxy rp_task(task, &_collector,
5671 _collector.ref_processor()->span(),
5672 _collector.markBitMap(),
5673 n_workers, _collector.task_queues());
5674 workers->run_task(&rp_task);
5675 }
5677 void CMSRefProcTaskExecutor::execute(EnqueueTask& task)
5678 {
5680 GenCollectedHeap* gch = GenCollectedHeap::heap();
5681 WorkGang* workers = gch->workers();
5682 assert(workers != NULL, "Need parallel worker threads.");
5683 CMSRefEnqueueTaskProxy enq_task(task);
5684 workers->run_task(&enq_task);
5685 }
5687 void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
5689 ResourceMark rm;
5690 HandleMark hm;
5692 ReferenceProcessor* rp = ref_processor();
5693 assert(rp->span().equals(_span), "Spans should be equal");
5694 assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
5695 // Process weak references.
5696 rp->setup_policy(clear_all_soft_refs);
5697 verify_work_stacks_empty();
5699 CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
5700 &_markStack, false /* !preclean */);
5701 CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
5702 _span, &_markBitMap, &_markStack,
5703 &cmsKeepAliveClosure, false /* !preclean */);
5704 {
5705 TraceTime t("weak refs processing", PrintGCDetails, false, gclog_or_tty);
5706 if (rp->processing_is_mt()) {
5707 CMSRefProcTaskExecutor task_executor(*this);
5708 rp->process_discovered_references(&_is_alive_closure,
5709 &cmsKeepAliveClosure,
5710 &cmsDrainMarkingStackClosure,
5711 &task_executor);
5712 } else {
5713 rp->process_discovered_references(&_is_alive_closure,
5714 &cmsKeepAliveClosure,
5715 &cmsDrainMarkingStackClosure,
5716 NULL);
5717 }
5718 verify_work_stacks_empty();
5719 }
5721 if (should_unload_classes()) {
5722 {
5723 TraceTime t("class unloading", PrintGCDetails, false, gclog_or_tty);
5725 // Follow SystemDictionary roots and unload classes
5726 bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
5728 // Follow CodeCache roots and unload any methods marked for unloading
5729 CodeCache::do_unloading(&_is_alive_closure,
5730 &cmsKeepAliveClosure,
5731 purged_class);
5733 cmsDrainMarkingStackClosure.do_void();
5734 verify_work_stacks_empty();
5736 // Update subklass/sibling/implementor links in KlassKlass descendants
5737 assert(!_revisitStack.isEmpty(), "revisit stack should not be empty");
5738 oop k;
5739 while ((k = _revisitStack.pop()) != NULL) {
5740 ((Klass*)(oopDesc*)k)->follow_weak_klass_links(
5741 &_is_alive_closure,
5742 &cmsKeepAliveClosure);
5743 }
5744 assert(!ClassUnloading ||
5745 (_markStack.isEmpty() && overflow_list_is_empty()),
5746 "Should not have found new reachable objects");
5747 assert(_revisitStack.isEmpty(), "revisit stack should have been drained");
5748 cmsDrainMarkingStackClosure.do_void();
5749 verify_work_stacks_empty();
5750 }
5752 {
5753 TraceTime t("scrub symbol & string tables", PrintGCDetails, false, gclog_or_tty);
5754 // Now clean up stale oops in SymbolTable and StringTable
5755 SymbolTable::unlink(&_is_alive_closure);
5756 StringTable::unlink(&_is_alive_closure);
5757 }
5758 }
5760 verify_work_stacks_empty();
5761 // Restore any preserved marks as a result of mark stack or
5762 // work queue overflow
5763 restore_preserved_marks_if_any(); // done single-threaded for now
5765 rp->set_enqueuing_is_done(true);
5766 if (rp->processing_is_mt()) {
5767 CMSRefProcTaskExecutor task_executor(*this);
5768 rp->enqueue_discovered_references(&task_executor);
5769 } else {
5770 rp->enqueue_discovered_references(NULL);
5771 }
5772 rp->verify_no_references_recorded();
5773 assert(!rp->discovery_enabled(), "should have been disabled");
5775 // JVMTI object tagging is based on JNI weak refs. If any of these
5776 // refs were cleared then JVMTI needs to update its maps and
5777 // maybe post ObjectFrees to agents.
5778 JvmtiExport::cms_ref_processing_epilogue();
5779 }
5781 #ifndef PRODUCT
5782 void CMSCollector::check_correct_thread_executing() {
5783 Thread* t = Thread::current();
5784 // Only the VM thread or the CMS thread should be here.
5785 assert(t->is_ConcurrentGC_thread() || t->is_VM_thread(),
5786 "Unexpected thread type");
5787 // If this is the vm thread, the foreground process
5788 // should not be waiting. Note that _foregroundGCIsActive is
5789 // true while the foreground collector is waiting.
5790 if (_foregroundGCShouldWait) {
5791 // We cannot be the VM thread
5792 assert(t->is_ConcurrentGC_thread(),
5793 "Should be CMS thread");
5794 } else {
5795 // We can be the CMS thread only if we are in a stop-world
5796 // phase of CMS collection.
5797 if (t->is_ConcurrentGC_thread()) {
5798 assert(_collectorState == InitialMarking ||
5799 _collectorState == FinalMarking,
5800 "Should be a stop-world phase");
5801 // The CMS thread should be holding the CMS_token.
5802 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5803 "Potential interference with concurrently "
5804 "executing VM thread");
5805 }
5806 }
5807 }
5808 #endif
5810 void CMSCollector::sweep(bool asynch) {
5811 assert(_collectorState == Sweeping, "just checking");
5812 check_correct_thread_executing();
5813 verify_work_stacks_empty();
5814 verify_overflow_empty();
5815 incrementSweepCount();
5816 _sweep_timer.stop();
5817 _sweep_estimate.sample(_sweep_timer.seconds());
5818 size_policy()->avg_cms_free_at_sweep()->sample(_cmsGen->free());
5820 // PermGen verification support: If perm gen sweeping is disabled in
5821 // this cycle, we preserve the perm gen object "deadness" information
5822 // in the perm_gen_verify_bit_map. In order to do that we traverse
5823 // all blocks in perm gen and mark all dead objects.
5824 if (verifying() && !should_unload_classes()) {
5825 assert(perm_gen_verify_bit_map()->sizeInBits() != 0,
5826 "Should have already been allocated");
5827 MarkDeadObjectsClosure mdo(this, _permGen->cmsSpace(),
5828 markBitMap(), perm_gen_verify_bit_map());
5829 if (asynch) {
5830 CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
5831 bitMapLock());
5832 _permGen->cmsSpace()->blk_iterate(&mdo);
5833 } else {
5834 // In the case of synchronous sweep, we already have
5835 // the requisite locks/tokens.
5836 _permGen->cmsSpace()->blk_iterate(&mdo);
5837 }
5838 }
5840 if (asynch) {
5841 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
5842 CMSPhaseAccounting pa(this, "sweep", !PrintGCDetails);
5843 // First sweep the old gen, then the perm gen
5844 {
5845 CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
5846 bitMapLock());
5847 sweepWork(_cmsGen, asynch);
5848 }
5850 // Now repeat for perm gen
5851 if (should_unload_classes()) {
5852 CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
5853 bitMapLock());
5854 sweepWork(_permGen, asynch);
5855 }
5857 // Update Universe::_heap_*_at_gc figures.
5858 // We need all the free list locks to make the abstract state
5859 // transition from Sweeping to Resetting. See detailed note
5860 // further below.
5861 {
5862 CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
5863 _permGen->freelistLock());
5864 // Update heap occupancy information which is used as
5865 // input to soft ref clearing policy at the next gc.
5866 Universe::update_heap_info_at_gc();
5867 _collectorState = Resizing;
5868 }
5869 } else {
5870 // already have needed locks
5871 sweepWork(_cmsGen, asynch);
5873 if (should_unload_classes()) {
5874 sweepWork(_permGen, asynch);
5875 }
5876 // Update heap occupancy information which is used as
5877 // input to soft ref clearing policy at the next gc.
5878 Universe::update_heap_info_at_gc();
5879 _collectorState = Resizing;
5880 }
5881 verify_work_stacks_empty();
5882 verify_overflow_empty();
5884 _sweep_timer.reset();
5885 _sweep_timer.start();
5887 update_time_of_last_gc(os::javaTimeMillis());
5889 // NOTE on abstract state transitions:
5890 // Mutators allocate-live and/or mark the mod-union table dirty
5891 // based on the state of the collection. The former is done in
5892 // the interval [Marking, Sweeping] and the latter in the interval
5893 // [Marking, Sweeping). Thus the transitions into the Marking state
5894 // and out of the Sweeping state must be synchronously visible
5895 // globally to the mutators.
5896 // The transition into the Marking state happens with the world
5897 // stopped so the mutators will globally see it. Sweeping is
5898 // done asynchronously by the background collector so the transition
5899 // from the Sweeping state to the Resizing state must be done
5900 // under the freelistLock (as is the check for whether to
5901 // allocate-live and whether to dirty the mod-union table).
5902 assert(_collectorState == Resizing, "Change of collector state to"
5903 " Resizing must be done under the freelistLocks (plural)");
5905 // Now that sweeping has been completed, if the GCH's
5906 // incremental_collection_will_fail flag is set, clear it,
5907 // thus inviting a younger gen collection to promote into
5908 // this generation. If such a promotion may still fail,
5909 // the flag will be set again when a young collection is
5910 // attempted.
5911 // I think the incremental_collection_will_fail flag's use
5912 // is specific to a 2 generation collection policy, so I'll
5913 // assert that that's the configuration we are operating within.
5914 // The use of the flag can and should be generalized appropriately
5915 // in the future to deal with a general n-generation system.
5917 GenCollectedHeap* gch = GenCollectedHeap::heap();
5918 assert(gch->collector_policy()->is_two_generation_policy(),
5919 "Resetting of incremental_collection_will_fail flag"
5920 " may be incorrect otherwise");
5921 gch->clear_incremental_collection_will_fail();
5922 gch->update_full_collections_completed(_collection_count_start);
5923 }
5925 // FIX ME!!! Looks like this belongs in CFLSpace, with
5926 // CMSGen merely delegating to it.
5927 void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
5928 double nearLargestPercent = 0.999;
5929 HeapWord* minAddr = _cmsSpace->bottom();
5930 HeapWord* largestAddr =
5931 (HeapWord*) _cmsSpace->dictionary()->findLargestDict();
5932 if (largestAddr == 0) {
5933 // The dictionary appears to be empty. In this case
5934 // try to coalesce at the end of the heap.
5935 largestAddr = _cmsSpace->end();
5936 }
5937 size_t largestOffset = pointer_delta(largestAddr, minAddr);
5938 size_t nearLargestOffset =
5939 (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
5940 _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
5941 }
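// Worked example with hypothetical numbers: if minAddr is 0x0 and the
// largest chunk is found at largestOffset == 0x1000000 (words), then
// nearLargestOffset == (size_t)(0x1000000 * 0.999) - MinChunkSize, a
// point just short of the largest chunk; from there on the sweep
// coalesces eagerly so that a co-terminal free run can grow.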
5943 bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) {
5944 return addr >= _cmsSpace->nearLargestChunk();
5945 }
5947 FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
5948 return _cmsSpace->find_chunk_at_end();
5949 }
5951 void ConcurrentMarkSweepGeneration::update_gc_stats(int current_level,
5952 bool full) {
5953 // The next lower level has been collected. Gather any statistics
5954 // that are of interest at this point.
5955 if (!full && (current_level + 1) == level()) {
5956 // Gather statistics on the young generation collection.
5957 collector()->stats().record_gc0_end(used());
5958 }
5959 }
5961 CMSAdaptiveSizePolicy* ConcurrentMarkSweepGeneration::size_policy() {
5962 GenCollectedHeap* gch = GenCollectedHeap::heap();
5963 assert(gch->kind() == CollectedHeap::GenCollectedHeap,
5964 "Wrong type of heap");
5965 CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
5966 gch->gen_policy()->size_policy();
5967 assert(sp->is_gc_cms_adaptive_size_policy(),
5968 "Wrong type of size policy");
5969 return sp;
5970 }
5972 void ConcurrentMarkSweepGeneration::rotate_debug_collection_type() {
5973 if (PrintGCDetails && Verbose) {
5974 gclog_or_tty->print("Rotate from %d ", _debug_collection_type);
5975 }
5976 _debug_collection_type = (CollectionTypes) (_debug_collection_type + 1);
5977 _debug_collection_type =
5978 (CollectionTypes) (_debug_collection_type % Unknown_collection_type);
5979 if (PrintGCDetails && Verbose) {
5980 gclog_or_tty->print_cr("to %d ", _debug_collection_type);
5981 }
5982 }
5984 void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen,
5985 bool asynch) {
5986 // We iterate over the space(s) underlying this generation,
5987 // checking the mark bit map to see if the bits corresponding
5988 // to specific blocks are marked or not. Blocks that are
5989 // marked are live and are not swept up. All remaining blocks
5990 // are swept up, with coalescing on-the-fly as we sweep up
5991 // contiguous free and/or garbage blocks:
5992 // We need to ensure that the sweeper synchronizes with allocators
5993 // and stop-the-world collectors. In particular, the following
5994 // locks are used:
5995 // . CMS token: if this is held, a stop the world collection cannot occur
5996 // . freelistLock: if this is held, no allocation can occur from this
5997 // generation by another thread
5998 // . bitMapLock: if this is held, no other thread can access or update
5999 // the marking bit map
6001 // Note that we need to hold the freelistLock if we use
6002 // block iterate below; else the iterator might go awry if
6003 // a mutator (or promotion) causes block contents to change
6004 // (for instance if the allocator divvies up a block).
6005 // If we hold the free list lock, for all practical purposes
6006 // young generation GC's can't occur (they'll usually need to
6007 // promote), so we might as well prevent all young generation
6008 // GC's while we do a sweeping step. For the same reason, we might
6009 // as well take the bit map lock for the entire duration.
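// In sketch form, the asynchronous caller (see sweep() above) does:
//   CMSTokenSyncWithLocks ts(true, gen->freelistLock(), bitMapLock());
//   sweepWork(gen, true);
// so the CMS token and both locks are held across the call and dropped
// when ts goes out of scope.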
6011 // check that we hold the requisite locks
6012 assert(have_cms_token(), "Should hold cms token");
6013 assert( (asynch && ConcurrentMarkSweepThread::cms_thread_has_cms_token())
6014 || (!asynch && ConcurrentMarkSweepThread::vm_thread_has_cms_token()),
6015 "Should possess CMS token to sweep");
6016 assert_lock_strong(gen->freelistLock());
6017 assert_lock_strong(bitMapLock());
6019 assert(!_sweep_timer.is_active(), "Was switched off in an outer context");
6020 gen->cmsSpace()->beginSweepFLCensus((float)(_sweep_timer.seconds()),
6021 _sweep_estimate.padded_average());
6022 gen->setNearLargestChunk();
6024 {
6025 SweepClosure sweepClosure(this, gen, &_markBitMap,
6026 CMSYield && asynch);
6027 gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
6028 // We need to free up and coalesce any garbage blocks in a
6029 // co-terminal free run. This is done in the SweepClosure
6030 // destructor; so, do not remove this scope, else the
6031 // end-of-sweep-census below will be off by a little bit.
6032 }
6033 gen->cmsSpace()->sweep_completed();
6034 gen->cmsSpace()->endSweepFLCensus(sweepCount());
6035 if (should_unload_classes()) { // unloaded classes this cycle,
6036 _concurrent_cycles_since_last_unload = 0; // ... reset count
6037 } else { // did not unload classes,
6038 _concurrent_cycles_since_last_unload++; // ... increment count
6039 }
6040 }
6042 // Reset CMS data structures (for now just the marking bit map)
6043 // preparatory for the next cycle.
6044 void CMSCollector::reset(bool asynch) {
6045 GenCollectedHeap* gch = GenCollectedHeap::heap();
6046 CMSAdaptiveSizePolicy* sp = size_policy();
6047 AdaptiveSizePolicyOutput(sp, gch->total_collections());
6048 if (asynch) {
6049 CMSTokenSyncWithLocks ts(true, bitMapLock());
6051 // If the state is not "Resetting", the foreground thread
6052 // has already done a collection and the resetting.
6053 if (_collectorState != Resetting) {
6054 assert(_collectorState == Idling, "The state should only change"
6055 " because the foreground collector has finished the collection");
6056 return;
6057 }
6059 // Clear the mark bitmap (no grey objects to start with)
6060 // for the next cycle.
6061 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
6062 CMSPhaseAccounting cmspa(this, "reset", !PrintGCDetails);
6064 HeapWord* curAddr = _markBitMap.startWord();
6065 while (curAddr < _markBitMap.endWord()) {
6066 size_t remaining = pointer_delta(_markBitMap.endWord(), curAddr);
6067 MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
6068 _markBitMap.clear_large_range(chunk);
6069 if (ConcurrentMarkSweepThread::should_yield() &&
6070 !foregroundGCIsActive() &&
6071 CMSYield) {
6072 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6073 "CMS thread should hold CMS token");
6074 assert_lock_strong(bitMapLock());
6075 bitMapLock()->unlock();
6076 ConcurrentMarkSweepThread::desynchronize(true);
6077 ConcurrentMarkSweepThread::acknowledge_yield_request();
6078 stopTimer();
6079 if (PrintCMSStatistics != 0) {
6080 incrementYields();
6081 }
6082 icms_wait();
6084 // See the comment in coordinator_yield()
6085 for (unsigned i = 0; i < CMSYieldSleepCount &&
6086 ConcurrentMarkSweepThread::should_yield() &&
6087 !CMSCollector::foregroundGCIsActive(); ++i) {
6088 os::sleep(Thread::current(), 1, false);
6089 ConcurrentMarkSweepThread::acknowledge_yield_request();
6090 }
6092 ConcurrentMarkSweepThread::synchronize(true);
6093 bitMapLock()->lock_without_safepoint_check();
6094 startTimer();
6095 }
6096 curAddr = chunk.end();
6097 }
6098 _collectorState = Idling;
6099 } else {
6100 // already have the lock
6101 assert(_collectorState == Resetting, "just checking");
6102 assert_lock_strong(bitMapLock());
6103 _markBitMap.clear_all();
6104 _collectorState = Idling;
6105 }
6107 // Stop incremental mode after a cycle completes, so that any future cycles
6108 // are triggered by allocation.
6109 stop_icms();
6111 NOT_PRODUCT(
6112 if (RotateCMSCollectionTypes) {
6113 _cmsGen->rotate_debug_collection_type();
6114 }
6115 )
6116 }
6118 void CMSCollector::do_CMS_operation(CMS_op_type op) {
6119 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
6120 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
6121 TraceTime t("GC", PrintGC, !PrintGCDetails, gclog_or_tty);
6122 TraceCollectorStats tcs(counters());
6124 switch (op) {
6125 case CMS_op_checkpointRootsInitial: {
6126 checkpointRootsInitial(true); // asynch
6127 if (PrintGC) {
6128 _cmsGen->printOccupancy("initial-mark");
6129 }
6130 break;
6131 }
6132 case CMS_op_checkpointRootsFinal: {
6133 checkpointRootsFinal(true, // asynch
6134 false, // !clear_all_soft_refs
6135 false); // !init_mark_was_synchronous
6136 if (PrintGC) {
6137 _cmsGen->printOccupancy("remark");
6138 }
6139 break;
6140 }
6141 default:
6142 fatal("No such CMS_op");
6143 }
6144 }
6146 #ifndef PRODUCT
6147 size_t const CMSCollector::skip_header_HeapWords() {
6148 return FreeChunk::header_size();
6149 }
6151 // Try to collect here the conditions that should hold when
6152 // CMS thread is exiting. The idea is that the foreground GC
6153 // thread should not be blocked if it wants to terminate
6154 // the CMS thread and yet continue to run the VM for a while
6155 // after that.
6156 void CMSCollector::verify_ok_to_terminate() const {
6157 assert(Thread::current()->is_ConcurrentGC_thread(),
6158 "should be called by CMS thread");
6159 assert(!_foregroundGCShouldWait, "should be false");
6160 // We could check here that all the various low-level locks
6161 // are not held by the CMS thread, but that is overkill; see
6162 // also CMSThread::verify_ok_to_terminate() where the CGC_lock
6163 // is checked.
6164 }
6165 #endif
6167 size_t CMSCollector::block_size_using_printezis_bits(HeapWord* addr) const {
6168 assert(_markBitMap.isMarked(addr) && _markBitMap.isMarked(addr + 1),
6169 "missing Printezis mark?");
6170 HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
6171 size_t size = pointer_delta(nextOneAddr + 1, addr);
6172 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6173 "alignment problem");
6174 assert(size >= 3, "Necessary for Printezis marks to work");
6175 return size;
6176 }
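// Example of the encoding checked above: for an uninitialized block of
// size 5 at addr, the Printezis bit pair sits at [addr, addr+1] and the
// block's last word addr+4 is also marked, so
// getNextMarkedWordAddress(addr+2) returns addr+4 and
// size == pointer_delta(addr+5, addr) == 5. Requiring size >= 3 keeps
// the bit pair from colliding with that end mark.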
6178 // A variant of the above (block_size_using_printezis_bits()) except
6179 // that we return 0 if the P-bits are not yet set.
6180 size_t CMSCollector::block_size_if_printezis_bits(HeapWord* addr) const {
6181 if (_markBitMap.isMarked(addr)) {
6182 assert(_markBitMap.isMarked(addr + 1), "Missing Printezis bit?");
6183 HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
6184 size_t size = pointer_delta(nextOneAddr + 1, addr);
6185 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6186 "alignment problem");
6187 assert(size >= 3, "Necessary for Printezis marks to work");
6188 return size;
6189 } else {
6190 assert(!_markBitMap.isMarked(addr + 1), "Bit map inconsistency?");
6191 return 0;
6192 }
6193 }
6195 HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const {
6196 size_t sz = 0;
6197 oop p = (oop)addr;
6198 if (p->klass_or_null() != NULL && p->is_parsable()) {
6199 sz = CompactibleFreeListSpace::adjustObjectSize(p->size());
6200 } else {
6201 sz = block_size_using_printezis_bits(addr);
6202 }
6203 assert(sz > 0, "size must be nonzero");
6204 HeapWord* next_block = addr + sz;
6205 HeapWord* next_card = (HeapWord*)round_to((uintptr_t)next_block,
6206 CardTableModRefBS::card_size);
6207 assert(round_down((uintptr_t)addr, CardTableModRefBS::card_size) <
6208 round_down((uintptr_t)next_card, CardTableModRefBS::card_size),
6209 "must be different cards");
6210 return next_card;
6211 }
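// Hypothetical example, assuming 512-byte cards: with addr == 0x1000 and
// sz placing next_block at 0x10f8, round_to yields next_card == 0x1200,
// which round_down shows to be on a strictly later card than addr's
// (0x1200 vs 0x1000), satisfying the assert above.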
6214 // CMS Bit Map Wrapper /////////////////////////////////////////
6216 // Construct a CMS bit map infrastructure, but don't create the
6217 // bit vector itself. That is done by a separate call to CMSBitMap::allocate()
6218 // further below.
6219 CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name):
6220 _bm(),
6221 _shifter(shifter),
6222 _lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true) : NULL)
6223 {
6224 _bmStartWord = 0;
6225 _bmWordSize = 0;
6226 }
6228 bool CMSBitMap::allocate(MemRegion mr) {
6229 _bmStartWord = mr.start();
6230 _bmWordSize = mr.word_size();
6231 ReservedSpace brs(ReservedSpace::allocation_align_size_up(
6232 (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
6233 if (!brs.is_reserved()) {
6234 warning("CMS bit map allocation failure");
6235 return false;
6236 }
6237 // For now we'll just commit all of the bit map up front.
6238 // Later on we'll try to be more parsimonious with swap.
6239 if (!_virtual_space.initialize(brs, brs.size())) {
6240 warning("CMS bit map backing store failure");
6241 return false;
6242 }
6243 assert(_virtual_space.committed_size() == brs.size(),
6244 "didn't reserve backing store for all of CMS bit map?");
6245 _bm.set_map((BitMap::bm_word_t*)_virtual_space.low());
6246 assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
6247 _bmWordSize, "inconsistency in bit map sizing");
6248 _bm.set_size(_bmWordSize >> _shifter);
6250 // bm.clear(); // can we rely on getting zero'd memory? verify below
6251 assert(isAllClear(),
6252 "Expected zero'd memory from ReservedSpace constructor");
6253 assert(_bm.size() == heapWordDiffToOffsetDiff(sizeInWords()),
6254 "consistency check");
6255 return true;
6256 }
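// Sizing sketch with hypothetical numbers: with _shifter == 0 (one mark
// bit per heap word) and LogBitsPerByte == 3, covering a 2^30-word span
// reserves (2^30 >> 3) + 1 bytes, i.e. 128MB + 1, all of it committed
// up front as the comment above notes.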
6258 void CMSBitMap::dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl) {
6259 HeapWord *next_addr, *end_addr, *last_addr;
6260 assert_locked();
6261 assert(covers(mr), "out-of-range error");
6262 // XXX assert that start and end are appropriately aligned
6263 for (next_addr = mr.start(), end_addr = mr.end();
6264 next_addr < end_addr; next_addr = last_addr) {
6265 MemRegion dirty_region = getAndClearMarkedRegion(next_addr, end_addr);
6266 last_addr = dirty_region.end();
6267 if (!dirty_region.is_empty()) {
6268 cl->do_MemRegion(dirty_region);
6269 } else {
6270 assert(last_addr == end_addr, "program logic");
6271 return;
6272 }
6273 }
6274 }
6276 #ifndef PRODUCT
6277 void CMSBitMap::assert_locked() const {
6278 CMSLockVerifier::assert_locked(lock());
6279 }
6281 bool CMSBitMap::covers(MemRegion mr) const {
6282 // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
6283 assert((size_t)_bm.size() == (_bmWordSize >> _shifter),
6284 "size inconsistency");
6285 return (mr.start() >= _bmStartWord) &&
6286 (mr.end() <= endWord());
6287 }
6289 bool CMSBitMap::covers(HeapWord* start, size_t size) const {
6290 return (start >= _bmStartWord && (start + size) <= endWord());
6291 }
6293 void CMSBitMap::verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) {
6294 // verify that there are no 1 bits in the interval [left, right)
6295 FalseBitMapClosure falseBitMapClosure;
6296 iterate(&falseBitMapClosure, left, right);
6297 }
6299 void CMSBitMap::region_invariant(MemRegion mr)
6300 {
6301 assert_locked();
6302 // mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
6303 assert(!mr.is_empty(), "unexpected empty region");
6304 assert(covers(mr), "mr should be covered by bit map");
6305 // convert address range into offset range
6306 size_t start_ofs = heapWordToOffset(mr.start());
6307 // Make sure that end() is appropriately aligned
6308 assert(mr.end() == (HeapWord*)round_to((intptr_t)mr.end(),
6309 (1 << (_shifter+LogHeapWordSize))),
6310 "Misaligned mr.end()");
6311 size_t end_ofs = heapWordToOffset(mr.end());
6312 assert(end_ofs > start_ofs, "Should mark at least one bit");
6313 }
6315 #endif
6317 bool CMSMarkStack::allocate(size_t size) {
6318 // allocate a stack of the requisite depth
6319 ReservedSpace rs(ReservedSpace::allocation_align_size_up(
6320 size * sizeof(oop)));
6321 if (!rs.is_reserved()) {
6322 warning("CMSMarkStack allocation failure");
6323 return false;
6324 }
6325 if (!_virtual_space.initialize(rs, rs.size())) {
6326 warning("CMSMarkStack backing store failure");
6327 return false;
6328 }
6329 assert(_virtual_space.committed_size() == rs.size(),
6330 "didn't reserve backing store for all of CMS stack?");
6331 _base = (oop*)(_virtual_space.low());
6332 _index = 0;
6333 _capacity = size;
6334 NOT_PRODUCT(_max_depth = 0);
6335 return true;
6336 }
6338 // XXX FIX ME !!! In the MT case we come in here holding a
6339 // leaf lock. For printing we need to take a further lock
6340 // which has lower rank. We need to recalibrate the two
6341 // lock-ranks involved in order to be able to print the
6342 // messages below. (Or defer the printing to the caller.
6343 // For now we take the expedient path of just disabling the
6344 // messages for the problematic case.)
6345 void CMSMarkStack::expand() {
6346 assert(_capacity <= CMSMarkStackSizeMax, "stack bigger than permitted");
6347 if (_capacity == CMSMarkStackSizeMax) {
6348 if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
6349 // We print a warning message only once per CMS cycle.
6350 gclog_or_tty->print_cr(" (benign) Hit CMSMarkStack max size limit");
6351 }
6352 return;
6353 }
6354 // Double capacity if possible
6355 size_t new_capacity = MIN2(_capacity*2, CMSMarkStackSizeMax);
6356 // Do not give up existing stack until we have managed to
6357 // get the double capacity that we desired.
6358 ReservedSpace rs(ReservedSpace::allocation_align_size_up(
6359 new_capacity * sizeof(oop)));
6360 if (rs.is_reserved()) {
6361 // Release the backing store associated with old stack
6362 _virtual_space.release();
6363 // Reinitialize virtual space for new stack
6364 if (!_virtual_space.initialize(rs, rs.size())) {
6365 fatal("Not enough swap for expanded marking stack");
6366 }
6367 _base = (oop*)(_virtual_space.low());
6368 _index = 0;
6369 _capacity = new_capacity;
6370 } else if (_failed_double++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
6371 // Failed to double capacity, continue;
6372 // we print a detail message only once per CMS cycle.
6373 gclog_or_tty->print(" (benign) Failed to expand marking stack from "SIZE_FORMAT"K to "
6374 SIZE_FORMAT"K",
6375 _capacity / K, new_capacity / K);
6376 }
6377 }
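// E.g. (hypothetical values): with _capacity == 32K oops and
// CMSMarkStackSizeMax == 4M, the first overflow grows the stack to 64K,
// the next to 128K, and so on, until a reservation fails (the benign
// message above) or the hard limit is reached.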
6380 // Closures
6381 // XXX: there seems to be a lot of code duplication here;
6382 // should refactor and consolidate common code.
6384 // This closure is used to mark refs into the CMS generation in
6385 // the CMS bit map. Called at the first checkpoint. This closure
6386 // assumes that we do not need to re-mark dirty cards; if the CMS
6387 // generation on which this is used is not the oldest (modulo perm gen)
6388 // generation then this will lose younger_gen cards!
6390 MarkRefsIntoClosure::MarkRefsIntoClosure(
6391 MemRegion span, CMSBitMap* bitMap, bool should_do_nmethods):
6392 _span(span),
6393 _bitMap(bitMap),
6394 _should_do_nmethods(should_do_nmethods)
6395 {
6396 assert(_ref_processor == NULL, "deliberately left NULL");
6397 assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
6398 }
6400 void MarkRefsIntoClosure::do_oop(oop obj) {
6401 // if p points into _span, then mark corresponding bit in _markBitMap
6402 assert(obj->is_oop(), "expected an oop");
6403 HeapWord* addr = (HeapWord*)obj;
6404 if (_span.contains(addr)) {
6405 // this should be made more efficient
6406 _bitMap->mark(addr);
6407 }
6408 }
6410 void MarkRefsIntoClosure::do_oop(oop* p) { MarkRefsIntoClosure::do_oop_work(p); }
6411 void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
6413 // A variant of the above, used for CMS marking verification.
6414 MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
6415 MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm,
6416 bool should_do_nmethods):
6417 _span(span),
6418 _verification_bm(verification_bm),
6419 _cms_bm(cms_bm),
6420 _should_do_nmethods(should_do_nmethods) {
6421 assert(_ref_processor == NULL, "deliberately left NULL");
6422 assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
6423 }
6425 void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
6426 // if p points into _span, then mark corresponding bit in _markBitMap
6427 assert(obj->is_oop(), "expected an oop");
6428 HeapWord* addr = (HeapWord*)obj;
6429 if (_span.contains(addr)) {
6430 _verification_bm->mark(addr);
6431 if (!_cms_bm->isMarked(addr)) {
6432 oop(addr)->print();
6433 gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", addr);
6434 fatal("... aborting");
6435 }
6436 }
6437 }
6439 void MarkRefsIntoVerifyClosure::do_oop(oop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
6440 void MarkRefsIntoVerifyClosure::do_oop(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
6442 //////////////////////////////////////////////////
6443 // MarkRefsIntoAndScanClosure
6444 //////////////////////////////////////////////////
6446 MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
6447 ReferenceProcessor* rp,
6448 CMSBitMap* bit_map,
6449 CMSBitMap* mod_union_table,
6450 CMSMarkStack* mark_stack,
6451 CMSMarkStack* revisit_stack,
6452 CMSCollector* collector,
6453 bool should_yield,
6454 bool concurrent_precleaning):
6455 _collector(collector),
6456 _span(span),
6457 _bit_map(bit_map),
6458 _mark_stack(mark_stack),
6459 _pushAndMarkClosure(collector, span, rp, bit_map, mod_union_table,
6460 mark_stack, revisit_stack, concurrent_precleaning),
6461 _yield(should_yield),
6462 _concurrent_precleaning(concurrent_precleaning),
6463 _freelistLock(NULL)
6464 {
6465 _ref_processor = rp;
6466 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
6467 }
6469 // This closure is used to mark refs into the CMS generation at the
6470 // second (final) checkpoint, and to scan and transitively follow
6471 // the unmarked oops. It is also used during the concurrent precleaning
6472 // phase while scanning objects on dirty cards in the CMS generation.
6473 // The marks are made in the marking bit map and the marking stack is
6474 // used for keeping the (newly) grey objects during the scan.
6475 // The parallel version (Par_...) appears further below.
6476 void MarkRefsIntoAndScanClosure::do_oop(oop obj) {
6477 if (obj != NULL) {
6478 assert(obj->is_oop(), "expected an oop");
6479 HeapWord* addr = (HeapWord*)obj;
6480 assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
6481 assert(_collector->overflow_list_is_empty(),
6482 "overflow list should be empty");
6483 if (_span.contains(addr) &&
6484 !_bit_map->isMarked(addr)) {
6485 // mark bit map (object is now grey)
6486 _bit_map->mark(addr);
6487 // push on marking stack (stack should be empty), and drain the
6488 // stack by applying this closure to the oops in the oops popped
6489 // from the stack (i.e. blacken the grey objects)
6490 bool res = _mark_stack->push(obj);
6491 assert(res, "Should have space to push on empty stack");
6492 do {
6493 oop new_oop = _mark_stack->pop();
6494 assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
6495 assert(new_oop->is_parsable(), "Found unparsable oop");
6496 assert(_bit_map->isMarked((HeapWord*)new_oop),
6497 "only grey objects on this stack");
6498 // iterate over the oops in this oop, marking and pushing
6499 // the ones in CMS heap (i.e. in _span).
6500 new_oop->oop_iterate(&_pushAndMarkClosure);
6501 // check if it's time to yield
6502 do_yield_check();
6503 } while (!_mark_stack->isEmpty() ||
6504 (!_concurrent_precleaning && take_from_overflow_list()));
6505 // if marking stack is empty, and we are not doing this
6506 // during precleaning, then check the overflow list
6507 }
6508 assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
6509 assert(_collector->overflow_list_is_empty(),
6510 "overflow list was drained above");
6511 // We could restore evacuated mark words, if any, used for
6512 // overflow list links here because the overflow list is
6513 // provably empty here. That would reduce the maximum
6514 // size requirements for preserved_{oop,mark}_stack.
6515 // But we'll postpone it until we are all done,
6516 // so we can simply stream through.
6517 if (!_concurrent_precleaning && CMSOverflowEarlyRestoration) {
6518 _collector->restore_preserved_marks_if_any();
6519 assert(_collector->no_preserved_marks(), "No preserved marks");
6520 }
6521 assert(!CMSOverflowEarlyRestoration || _collector->no_preserved_marks(),
6522 "All preserved marks should have been restored above");
6523 }
6524 }
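// The grey/black discipline maintained above, in brief: setting an
// object's mark bit makes it grey; popping it and applying
// _pushAndMarkClosure to its interior oops blackens it. The stack thus
// holds only grey objects, and the pre-/post-condition asserts check
// that it is empty on entry and exit.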
6526 void MarkRefsIntoAndScanClosure::do_oop(oop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
6527 void MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
6529 void MarkRefsIntoAndScanClosure::do_yield_work() {
6530 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6531 "CMS thread should hold CMS token");
6532 assert_lock_strong(_freelistLock);
6533 assert_lock_strong(_bit_map->lock());
6534 // relinquish the free_list_lock and bitMaplock()
6535 _bit_map->lock()->unlock();
6536 _freelistLock->unlock();
6537 ConcurrentMarkSweepThread::desynchronize(true);
6538 ConcurrentMarkSweepThread::acknowledge_yield_request();
6539 _collector->stopTimer();
6540 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
6541 if (PrintCMSStatistics != 0) {
6542 _collector->incrementYields();
6543 }
6544 _collector->icms_wait();
6546 // See the comment in coordinator_yield()
6547 for (unsigned i = 0;
6548 i < CMSYieldSleepCount &&
6549 ConcurrentMarkSweepThread::should_yield() &&
6550 !CMSCollector::foregroundGCIsActive();
6551 ++i) {
6552 os::sleep(Thread::current(), 1, false);
6553 ConcurrentMarkSweepThread::acknowledge_yield_request();
6554 }
6556 ConcurrentMarkSweepThread::synchronize(true);
6557 _freelistLock->lock_without_safepoint_check();
6558 _bit_map->lock()->lock_without_safepoint_check();
6559 _collector->startTimer();
6560 }
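// The sequence above is the canonical CMS yield protocol: release the
// locks, give up the CMS token (desynchronize), sleep in ~1ms steps
// while a foreground collector may run, then retake the token and the
// locks before restarting the timer. The other do_yield_work() methods
// in this file are variations on the same theme.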
6562 ///////////////////////////////////////////////////////////
6563 // Par_MarkRefsIntoAndScanClosure: a parallel version of
6564 // MarkRefsIntoAndScanClosure
6565 ///////////////////////////////////////////////////////////
6566 Par_MarkRefsIntoAndScanClosure::Par_MarkRefsIntoAndScanClosure(
6567 CMSCollector* collector, MemRegion span, ReferenceProcessor* rp,
6568 CMSBitMap* bit_map, OopTaskQueue* work_queue, CMSMarkStack* revisit_stack):
6569 _span(span),
6570 _bit_map(bit_map),
6571 _work_queue(work_queue),
6572 _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
6573 (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads))),
6574 _par_pushAndMarkClosure(collector, span, rp, bit_map, work_queue,
6575 revisit_stack)
6576 {
6577 _ref_processor = rp;
6578 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
6579 }
6581 // This closure is used to mark refs into the CMS generation at the
6582 // second (final) checkpoint, and to scan and transitively follow
6583 // the unmarked oops. The marks are made in the marking bit map and
6584 // the work_queue is used for keeping the (newly) grey objects during
6585 // the scan phase whence they are also available for stealing by parallel
6586 // threads. Since the marking bit map is shared, updates are
6587 // synchronized (via CAS).
6588 void Par_MarkRefsIntoAndScanClosure::do_oop(oop obj) {
6589 if (obj != NULL) {
6590 // Ignore mark word because this could be an already marked oop
6591 // that may be chained at the end of the overflow list.
6592 assert(obj->is_oop(true), "expected an oop");
6593 HeapWord* addr = (HeapWord*)obj;
6594 if (_span.contains(addr) &&
6595 !_bit_map->isMarked(addr)) {
6596 // mark bit map (object will become grey):
6597 // It is possible for several threads to be
6598 // trying to "claim" this object concurrently;
6599 // the unique thread that succeeds in marking the
6600 // object first will do the subsequent push on
6601 // to the work queue (or overflow list).
6602 if (_bit_map->par_mark(addr)) {
6603 // push on work_queue (which may not be empty), and trim the
6604 // queue to an appropriate length by applying this closure to
6605 // the oops in the oops popped from the stack (i.e. blacken the
6606 // grey objects)
6607 bool res = _work_queue->push(obj);
6608 assert(res, "Low water mark should be less than capacity?");
6609 trim_queue(_low_water_mark);
6610 } // Else, another thread claimed the object
6611 }
6612 }
6613 }
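// Sketch of the claim protocol above: par_mark(addr) is an atomic
// test-and-set of the mark bit, so of several threads racing on the
// same object exactly one sees true; only that thread pushes obj and
// trims its queue, ensuring no object is scanned twice.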
6615 void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
6616 void Par_MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
6618 // This closure is used to rescan the marked objects on the dirty cards
6619 // in the mod union table and the card table proper.
6620 size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
6621 oop p, MemRegion mr) {
6623 size_t size = 0;
6624 HeapWord* addr = (HeapWord*)p;
6625 DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6626 assert(_span.contains(addr), "we are scanning the CMS generation");
6627 // check if it's time to yield
6628 if (do_yield_check()) {
6629 // We yielded for some foreground stop-world work,
6630 // and we have been asked to abort this ongoing preclean cycle.
6631 return 0;
6632 }
6633 if (_bitMap->isMarked(addr)) {
6634 // it's marked; is it potentially uninitialized?
6635 if (p->klass_or_null() != NULL) {
6636 // If is_conc_safe is false, the object may be undergoing
6637 // change by the VM outside a safepoint. Don't try to
6638 // scan it, but rather leave it for the remark phase.
6639 if (CMSPermGenPrecleaningEnabled &&
6640 (!p->is_conc_safe() || !p->is_parsable())) {
6641 // Signal precleaning to redirty the card since
6642 // the klass pointer is already installed.
6643 assert(size == 0, "Initial value");
6644 } else {
6645 assert(p->is_parsable(), "must be parsable.");
6646 // an initialized object; ignore mark word in verification below
6647 // since we are running concurrent with mutators
6648 assert(p->is_oop(true), "should be an oop");
6649 if (p->is_objArray()) {
6650 // objArrays are precisely marked; restrict scanning
6651 // to dirty cards only.
6652 size = CompactibleFreeListSpace::adjustObjectSize(
6653 p->oop_iterate(_scanningClosure, mr));
6654 } else {
6655 // A non-array may have been imprecisely marked; we need
6656 // to scan object in its entirety.
6657 size = CompactibleFreeListSpace::adjustObjectSize(
6658 p->oop_iterate(_scanningClosure));
6659 }
6660 #ifdef DEBUG
6661 size_t direct_size =
6662 CompactibleFreeListSpace::adjustObjectSize(p->size());
6663 assert(size == direct_size, "Inconsistency in size");
6664 assert(size >= 3, "Necessary for Printezis marks to work");
6665 if (!_bitMap->isMarked(addr+1)) {
6666 _bitMap->verifyNoOneBitsInRange(addr+2, addr+size);
6667 } else {
6668 _bitMap->verifyNoOneBitsInRange(addr+2, addr+size-1);
6669 assert(_bitMap->isMarked(addr+size-1),
6670 "inconsistent Printezis mark");
6671 }
6672 #endif // DEBUG
6673 }
6674 } else {
6675 // an uninitialized object
6676 assert(_bitMap->isMarked(addr+1), "missing Printezis mark?");
6677 HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
6678 size = pointer_delta(nextOneAddr + 1, addr);
6679 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6680 "alignment problem");
6681 // Note that pre-cleaning needn't redirty the card. OopDesc::set_klass()
6682 // will dirty the card when the klass pointer is installed in the
6683 // object (signalling the completion of initialization).
6684 }
6685 } else {
6686 // Either a not yet marked object or an uninitialized object
6687 if (p->klass_or_null() == NULL || !p->is_parsable()) {
6688 // An uninitialized object, skip to the next card, since
6689 // we may not be able to read its P-bits yet.
6690 assert(size == 0, "Initial value");
6691 } else {
6692 // An object not (yet) reached by marking: we merely need to
6693 // compute its size so as to go look at the next block.
6694 assert(p->is_oop(true), "should be an oop");
6695 size = CompactibleFreeListSpace::adjustObjectSize(p->size());
6696 }
6697 }
6698 DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6699 return size;
6700 }
6702 void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
6703 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6704 "CMS thread should hold CMS token");
6705 assert_lock_strong(_freelistLock);
6706 assert_lock_strong(_bitMap->lock());
6707 // relinquish the free_list_lock and bitMaplock()
6708 _bitMap->lock()->unlock();
6709 _freelistLock->unlock();
6710 ConcurrentMarkSweepThread::desynchronize(true);
6711 ConcurrentMarkSweepThread::acknowledge_yield_request();
6712 _collector->stopTimer();
6713 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
6714 if (PrintCMSStatistics != 0) {
6715 _collector->incrementYields();
6716 }
6717 _collector->icms_wait();
6719 // See the comment in coordinator_yield()
6720 for (unsigned i = 0; i < CMSYieldSleepCount &&
6721 ConcurrentMarkSweepThread::should_yield() &&
6722 !CMSCollector::foregroundGCIsActive(); ++i) {
6723 os::sleep(Thread::current(), 1, false);
6724 ConcurrentMarkSweepThread::acknowledge_yield_request();
6725 }
6727 ConcurrentMarkSweepThread::synchronize(true);
6728 _freelistLock->lock_without_safepoint_check();
6729 _bitMap->lock()->lock_without_safepoint_check();
6730 _collector->startTimer();
6731 }
6734 //////////////////////////////////////////////////////////////////
6735 // SurvivorSpacePrecleanClosure
6736 //////////////////////////////////////////////////////////////////
6737 // This (single-threaded) closure is used to preclean the oops in
6738 // the survivor spaces.
6739 size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {
6741 HeapWord* addr = (HeapWord*)p;
6742 DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6743 assert(!_span.contains(addr), "we are scanning the survivor spaces");
6744 assert(p->klass_or_null() != NULL, "object should be initialized");
6745 assert(p->is_parsable(), "must be parsable.");
6746 // an initialized object; ignore mark word in verification below
6747 // since we are running concurrent with mutators
6748 assert(p->is_oop(true), "should be an oop");
6749 // Note that we do not yield while we iterate over
6750 // the interior oops of p, pushing the relevant ones
6751 // on our marking stack.
6752 size_t size = p->oop_iterate(_scanning_closure);
6753 do_yield_check();
6754 // Observe that below, we do not abandon the preclean
6755 // phase as soon as we should; rather we empty the
6756 // marking stack before returning. This is to satisfy
6757 // some existing assertions. In general, it may be a
6758 // good idea to abort immediately and complete the marking
6759 // from the grey objects at a later time.
6760 while (!_mark_stack->isEmpty()) {
6761 oop new_oop = _mark_stack->pop();
6762 assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
6763 assert(new_oop->is_parsable(), "Found unparsable oop");
6764 assert(_bit_map->isMarked((HeapWord*)new_oop),
6765 "only grey objects on this stack");
6766 // iterate over the oops in this oop, marking and pushing
6767 // the ones in CMS heap (i.e. in _span).
6768 new_oop->oop_iterate(_scanning_closure);
6769 // check if it's time to yield
6770 do_yield_check();
6771 }
6772 unsigned int after_count =
6773 GenCollectedHeap::heap()->total_collections();
6774 bool abort = (_before_count != after_count) ||
6775 _collector->should_abort_preclean();
6776 return abort ? 0 : size;
6777 }
6779 void SurvivorSpacePrecleanClosure::do_yield_work() {
6780 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6781 "CMS thread should hold CMS token");
6782 assert_lock_strong(_bit_map->lock());
6783 // Relinquish the bit map lock
6784 _bit_map->lock()->unlock();
6785 ConcurrentMarkSweepThread::desynchronize(true);
6786 ConcurrentMarkSweepThread::acknowledge_yield_request();
6787 _collector->stopTimer();
6788 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
6789 if (PrintCMSStatistics != 0) {
6790 _collector->incrementYields();
6791 }
6792 _collector->icms_wait();
6794 // See the comment in coordinator_yield()
6795 for (unsigned i = 0; i < CMSYieldSleepCount &&
6796 ConcurrentMarkSweepThread::should_yield() &&
6797 !CMSCollector::foregroundGCIsActive(); ++i) {
6798 os::sleep(Thread::current(), 1, false);
6799 ConcurrentMarkSweepThread::acknowledge_yield_request();
6800 }
6802 ConcurrentMarkSweepThread::synchronize(true);
6803 _bit_map->lock()->lock_without_safepoint_check();
6804 _collector->startTimer();
6805 }
6807 // This closure is used to rescan the marked objects on the dirty cards
6808 // in the mod union table and the card table proper. In the parallel
6809 // case, although the bitMap is shared, we do a single read so the
6810 // isMarked() query is "safe".
6811 bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
6812 // Ignore mark word because we are running concurrent with mutators
6813 assert(p->is_oop_or_null(true), "expected an oop or null");
6814 HeapWord* addr = (HeapWord*)p;
6815 assert(_span.contains(addr), "we are scanning the CMS generation");
6816 bool is_obj_array = false;
6817 #ifdef DEBUG
6818 if (!_parallel) {
6819 assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
6820 assert(_collector->overflow_list_is_empty(),
6821 "overflow list should be empty");
6823 }
6824 #endif // DEBUG
6825 if (_bit_map->isMarked(addr)) {
6826 // Obj arrays are precisely marked, non-arrays are not;
6827 // so we scan objArrays precisely and non-arrays in their
6828 // entirety.
6829 if (p->is_objArray()) {
6830 is_obj_array = true;
6831 if (_parallel) {
6832 p->oop_iterate(_par_scan_closure, mr);
6833 } else {
6834 p->oop_iterate(_scan_closure, mr);
6835 }
6836 } else {
6837 if (_parallel) {
6838 p->oop_iterate(_par_scan_closure);
6839 } else {
6840 p->oop_iterate(_scan_closure);
6841 }
6842 }
6843 }
6844 #ifdef DEBUG
6845 if (!_parallel) {
6846 assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
6847 assert(_collector->overflow_list_is_empty(),
6848 "overflow list should be empty");
6850 }
6851 #endif // DEBUG
6852 return is_obj_array;
6853 }
6855 MarkFromRootsClosure::MarkFromRootsClosure(CMSCollector* collector,
6856 MemRegion span,
6857 CMSBitMap* bitMap, CMSMarkStack* markStack,
6858 CMSMarkStack* revisitStack,
6859 bool should_yield, bool verifying):
6860 _collector(collector),
6861 _span(span),
6862 _bitMap(bitMap),
6863 _mut(&collector->_modUnionTable),
6864 _markStack(markStack),
6865 _revisitStack(revisitStack),
6866 _yield(should_yield),
6867 _skipBits(0)
6868 {
6869 assert(_markStack->isEmpty(), "stack should be empty");
6870 _finger = _bitMap->startWord();
6871 _threshold = _finger;
6872 assert(_collector->_restart_addr == NULL, "Sanity check");
6873 assert(_span.contains(_finger), "Out of bounds _finger?");
6874 DEBUG_ONLY(_verifying = verifying;)
6875 }
6877 void MarkFromRootsClosure::reset(HeapWord* addr) {
6878 assert(_markStack->isEmpty(), "would cause duplicates on stack");
6879 assert(_span.contains(addr), "Out of bounds _finger?");
6880 _finger = addr;
6881 _threshold = (HeapWord*)round_to(
6882 (intptr_t)_finger, CardTableModRefBS::card_size);
6883 }
6885 // Should revisit to see if this should be restructured for
6886 // greater efficiency.
6887 bool MarkFromRootsClosure::do_bit(size_t offset) {
6888 if (_skipBits > 0) {
6889 _skipBits--;
6890 return true;
6891 }
6892 // convert offset into a HeapWord*
6893 HeapWord* addr = _bitMap->startWord() + offset;
6894 assert(_bitMap->endWord() && addr < _bitMap->endWord(),
6895 "address out of range");
6896 assert(_bitMap->isMarked(addr), "tautology");
6897 if (_bitMap->isMarked(addr+1)) {
6898 // this is an allocated but not yet initialized object
6899 assert(_skipBits == 0, "tautology");
6900 _skipBits = 2; // skip next two marked bits ("Printezis-marks")
6901 oop p = oop(addr);
6902 if (p->klass_or_null() == NULL || !p->is_parsable()) {
6903 DEBUG_ONLY(if (!_verifying) {)
6904 // We re-dirty the cards on which this object lies and increase
6905 // the _threshold so that we'll come back to scan this object
6906 // during the preclean or remark phase. (CMSCleanOnEnter)
6907 if (CMSCleanOnEnter) {
6908 size_t sz = _collector->block_size_using_printezis_bits(addr);
6909 HeapWord* end_card_addr = (HeapWord*)round_to(
6910 (intptr_t)(addr+sz), CardTableModRefBS::card_size);
6911 MemRegion redirty_range = MemRegion(addr, end_card_addr);
6912 assert(!redirty_range.is_empty(), "Arithmetical tautology");
6913 // Bump _threshold to end_card_addr; note that
6914 // _threshold cannot possibly exceed end_card_addr, anyhow.
6915 // This prevents future clearing of the card as the scan proceeds
6916 // to the right.
6917 assert(_threshold <= end_card_addr,
6918 "Because we are just scanning into this object");
6919 if (_threshold < end_card_addr) {
6920 _threshold = end_card_addr;
6921 }
6922 if (p->klass_or_null() != NULL) {
6923 // Redirty the range of cards...
6924 _mut->mark_range(redirty_range);
6925 } // ...else the setting of klass will dirty the card anyway.
6926 }
6927 DEBUG_ONLY(})
6928 return true;
6929 }
6930 }
6931 scanOopsInOop(addr);
6932 return true;
6933 }
6935 // We take a break if we've been at this for a while,
6936 // so as to avoid monopolizing the locks involved.
6937 void MarkFromRootsClosure::do_yield_work() {
6938 // First give up the locks, then yield, then re-lock
6939 // We should probably use a constructor/destructor idiom to
6940 // do this unlock/lock or modify the MutexUnlocker class to
6941 // serve our purpose. XXX
6942 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6943 "CMS thread should hold CMS token");
6944 assert_lock_strong(_bitMap->lock());
6945 _bitMap->lock()->unlock();
6946 ConcurrentMarkSweepThread::desynchronize(true);
6947 ConcurrentMarkSweepThread::acknowledge_yield_request();
6948 _collector->stopTimer();
6949 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
6950 if (PrintCMSStatistics != 0) {
6951 _collector->incrementYields();
6952 }
6953 _collector->icms_wait();
6955 // See the comment in coordinator_yield()
6956 for (unsigned i = 0; i < CMSYieldSleepCount &&
6957 ConcurrentMarkSweepThread::should_yield() &&
6958 !CMSCollector::foregroundGCIsActive(); ++i) {
6959 os::sleep(Thread::current(), 1, false);
6960 ConcurrentMarkSweepThread::acknowledge_yield_request();
6961 }
6963 ConcurrentMarkSweepThread::synchronize(true);
6964 _bitMap->lock()->lock_without_safepoint_check();
6965 _collector->startTimer();
6966 }
6968 void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
6969 assert(_bitMap->isMarked(ptr), "expected bit to be set");
6970 assert(_markStack->isEmpty(),
6971 "should drain stack to limit stack usage");
6972 // convert ptr to an oop preparatory to scanning
6973 oop obj = oop(ptr);
6974 // Ignore mark word in verification below, since we
6975 // may be running concurrent with mutators.
6976 assert(obj->is_oop(true), "should be an oop");
6977 assert(_finger <= ptr, "_finger runneth ahead");
6978 // advance the finger to right end of this object
6979 _finger = ptr + obj->size();
6980 assert(_finger > ptr, "we just incremented it above");
6981 // On large heaps, it may take us some time to get through
6982 // the marking phase (especially if running iCMS). During
6983 // this time it's possible that a lot of mutations have
6984 // accumulated in the card table and the mod union table --
6985 // these mutation records are redundant until we have
6986 // actually traced into the corresponding card.
6987 // Here, we check whether advancing the finger would make
6988 // us cross into a new card, and if so clear corresponding
6989 // cards in the MUT (preclean them in the card-table in the
6990 // future).
6992 DEBUG_ONLY(if (!_verifying) {)
6993 // The clean-on-enter optimization is disabled by default,
6994 // until we fix 6178663.
6995 if (CMSCleanOnEnter && (_finger > _threshold)) {
6996 // [_threshold, _finger) represents the interval
6997 // of cards to be cleared in MUT (or precleaned in card table).
6998 // The set of cards to be cleared is all those that overlap
6999 // with the interval [_threshold, _finger); note that
7000 // _threshold is always kept card-aligned but _finger isn't
7001 // always card-aligned.
7002 HeapWord* old_threshold = _threshold;
7003 assert(old_threshold == (HeapWord*)round_to(
7004 (intptr_t)old_threshold, CardTableModRefBS::card_size),
7005 "_threshold should always be card-aligned");
7006 _threshold = (HeapWord*)round_to(
7007 (intptr_t)_finger, CardTableModRefBS::card_size);
7008 MemRegion mr(old_threshold, _threshold);
7009 assert(!mr.is_empty(), "Control point invariant");
7010 assert(_span.contains(mr), "Should clear within span");
7011 // XXX When _finger crosses from old gen into perm gen
7012 // we may be doing unnecessary cleaning; do better in the
7013 // future by detecting that condition and clearing fewer
7014 // MUT/CT entries.
7015 _mut->clear_range(mr);
7016 }
7017 DEBUG_ONLY(})
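// Worked example (hypothetical addresses, 512-byte cards): if
// old_threshold == 0x1000 and the finger has advanced to 0x1238, the
// new _threshold is round_to(0x1238, 512) == 0x1400 and the MUT range
// [0x1000, 0x1400) is cleared; mutation records there are redundant
// because marking is now tracing into those very cards.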
7018 // Note: the finger doesn't advance while we drain
7019 // the stack below.
7020 PushOrMarkClosure pushOrMarkClosure(_collector,
7021 _span, _bitMap, _markStack,
7022 _revisitStack,
7023 _finger, this);
7024 bool res = _markStack->push(obj);
7025 assert(res, "Empty non-zero size stack should have space for single push");
7026 while (!_markStack->isEmpty()) {
7027 oop new_oop = _markStack->pop();
7028 // Skip verifying header mark word below because we are
7029 // running concurrent with mutators.
7030 assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
7031 // now scan this oop's oops
7032 new_oop->oop_iterate(&pushOrMarkClosure);
7033 do_yield_check();
7034 }
7035 assert(_markStack->isEmpty(), "tautology, emphasizing post-condition");
7036 }
7038 Par_MarkFromRootsClosure::Par_MarkFromRootsClosure(CMSConcMarkingTask* task,
7039 CMSCollector* collector, MemRegion span,
7040 CMSBitMap* bit_map,
7041 OopTaskQueue* work_queue,
7042 CMSMarkStack* overflow_stack,
7043 CMSMarkStack* revisit_stack,
7044 bool should_yield):
7045 _collector(collector),
7046 _whole_span(collector->_span),
7047 _span(span),
7048 _bit_map(bit_map),
7049 _mut(&collector->_modUnionTable),
7050 _work_queue(work_queue),
7051 _overflow_stack(overflow_stack),
7052 _revisit_stack(revisit_stack),
7053 _yield(should_yield),
7054 _skip_bits(0),
7055 _task(task)
7056 {
7057 assert(_work_queue->size() == 0, "work_queue should be empty");
7058 _finger = span.start();
7059 _threshold = _finger; // XXX Defer clear-on-enter optimization for now
7060 assert(_span.contains(_finger), "Out of bounds _finger?");
7061 }
7063 // Should revisit to see if this should be restructured for
7064 // greater efficiency.
7065 bool Par_MarkFromRootsClosure::do_bit(size_t offset) {
7066 if (_skip_bits > 0) {
7067 _skip_bits--;
7068 return true;
7069 }
7070 // convert offset into a HeapWord*
7071 HeapWord* addr = _bit_map->startWord() + offset;
7072 assert(_bit_map->endWord() && addr < _bit_map->endWord(),
7073 "address out of range");
7074 assert(_bit_map->isMarked(addr), "tautology");
7075 if (_bit_map->isMarked(addr+1)) {
7076 // this is an allocated object that might not yet be initialized
7077 assert(_skip_bits == 0, "tautology");
7078 _skip_bits = 2; // skip next two marked bits ("Printezis-marks")
7079 oop p = oop(addr);
7080 if (p->klass_or_null() == NULL || !p->is_parsable()) {
7081 // in the case of Clean-on-Enter optimization, redirty card
7082 // and avoid clearing card by increasing the threshold.
7083 return true;
7084 }
7085 }
7086 scan_oops_in_oop(addr);
7087 return true;
7088 }
7090 void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
7091 assert(_bit_map->isMarked(ptr), "expected bit to be set");
7092 // Should we assert that our work queue is empty or
7093 // below some drain limit?
7094 assert(_work_queue->size() == 0,
7095 "should drain stack to limit stack usage");
7096 // convert ptr to an oop preparatory to scanning
7097 oop obj = oop(ptr);
7098 // Ignore mark word in verification below, since we
7099 // may be running concurrent with mutators.
7100 assert(obj->is_oop(true), "should be an oop");
7101 assert(_finger <= ptr, "_finger runneth ahead");
7102 // advance the finger to right end of this object
7103 _finger = ptr + obj->size();
7104 assert(_finger > ptr, "we just incremented it above");
7105 // On large heaps, it may take us some time to get through
7106 // the marking phase (especially if running iCMS). During
7107 // this time it's possible that a lot of mutations have
7108 // accumulated in the card table and the mod union table --
7109 // these mutation records are redundant until we have
7110 // actually traced into the corresponding card.
7111 // Here, we check whether advancing the finger would make
7112 // us cross into a new card, and if so clear corresponding
7113 // cards in the MUT (preclean them in the card-table in the
7114 // future).
7116 // The clean-on-enter optimization is disabled by default,
7117 // until we fix 6178663.
7118 if (CMSCleanOnEnter && (_finger > _threshold)) {
7119 // [_threshold, _finger) represents the interval
7120 // of cards to be cleared in MUT (or precleaned in card table).
7121 // The set of cards to be cleared is all those that overlap
7122 // with the interval [_threshold, _finger); note that
7123 // _threshold is always kept card-aligned but _finger isn't
7124 // always card-aligned.
7125 HeapWord* old_threshold = _threshold;
7126 assert(old_threshold == (HeapWord*)round_to(
7127 (intptr_t)old_threshold, CardTableModRefBS::card_size),
7128 "_threshold should always be card-aligned");
7129 _threshold = (HeapWord*)round_to(
7130 (intptr_t)_finger, CardTableModRefBS::card_size);
7131 MemRegion mr(old_threshold, _threshold);
7132 assert(!mr.is_empty(), "Control point invariant");
7133 assert(_span.contains(mr), "Should clear within span"); // _whole_span ??
7134 // XXX When _finger crosses from old gen into perm gen
7135 // we may be doing unnecessary cleaning; do better in the
7136 // future by detecting that condition and clearing fewer
7137 // MUT/CT entries.
7138 _mut->clear_range(mr);
7139 }
7141 // Note: the local finger doesn't advance while we drain
7142 // the stack below, but the global finger sure can and will.
7143 HeapWord** gfa = _task->global_finger_addr();
7144 Par_PushOrMarkClosure pushOrMarkClosure(_collector,
7145 _span, _bit_map,
7146 _work_queue,
7147 _overflow_stack,
7148 _revisit_stack,
7149 _finger,
7150 gfa, this);
7151 bool res = _work_queue->push(obj); // overflow could occur here
7152 assert(res, "Will hold once we use workqueues");
7153 while (true) {
7154 oop new_oop;
7155 if (!_work_queue->pop_local(new_oop)) {
7156 // We emptied our work_queue; check if there's stuff that can
7157 // be gotten from the overflow stack.
7158 if (CMSConcMarkingTask::get_work_from_overflow_stack(
7159 _overflow_stack, _work_queue)) {
7160 do_yield_check();
7161 continue;
7162 } else { // done
7163 break;
7164 }
7165 }
7166 // Skip verifying header mark word below because we are
7167 // running concurrent with mutators.
7168 assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
7169 // now scan this oop's oops
7170 new_oop->oop_iterate(&pushOrMarkClosure);
7171 do_yield_check();
7172 }
7173 assert(_work_queue->size() == 0, "tautology, emphasizing post-condition");
7174 }
7176 // Yield in response to a request from VM Thread or
7177 // from mutators.
7178 void Par_MarkFromRootsClosure::do_yield_work() {
7179 assert(_task != NULL, "sanity");
7180 _task->yield();
7181 }
7183 // A variant of the above used for verifying CMS marking work.
7184 MarkFromRootsVerifyClosure::MarkFromRootsVerifyClosure(CMSCollector* collector,
7185 MemRegion span,
7186 CMSBitMap* verification_bm, CMSBitMap* cms_bm,
7187 CMSMarkStack* mark_stack):
7188 _collector(collector),
7189 _span(span),
7190 _verification_bm(verification_bm),
7191 _cms_bm(cms_bm),
7192 _mark_stack(mark_stack),
7193 _pam_verify_closure(collector, span, verification_bm, cms_bm,
7194 mark_stack)
7195 {
7196 assert(_mark_stack->isEmpty(), "stack should be empty");
7197 _finger = _verification_bm->startWord();
7198 assert(_collector->_restart_addr == NULL, "Sanity check");
7199 assert(_span.contains(_finger), "Out of bounds _finger?");
7200 }
7202 void MarkFromRootsVerifyClosure::reset(HeapWord* addr) {
7203 assert(_mark_stack->isEmpty(), "would cause duplicates on stack");
7204 assert(_span.contains(addr), "Out of bounds _finger?");
7205 _finger = addr;
7206 }
7208 // Should revisit to see if this should be restructured for
7209 // greater efficiency.
7210 bool MarkFromRootsVerifyClosure::do_bit(size_t offset) {
7211 // convert offset into a HeapWord*
7212 HeapWord* addr = _verification_bm->startWord() + offset;
7213 assert(_verification_bm->endWord() && addr < _verification_bm->endWord(),
7214 "address out of range");
7215 assert(_verification_bm->isMarked(addr), "tautology");
7216 assert(_cms_bm->isMarked(addr), "tautology");
7218 assert(_mark_stack->isEmpty(),
7219 "should drain stack to limit stack usage");
7220 // convert addr to an oop preparatory to scanning
7221 oop obj = oop(addr);
7222 assert(obj->is_oop(), "should be an oop");
7223 assert(_finger <= addr, "_finger runneth ahead");
7224 // advance the finger to right end of this object
7225 _finger = addr + obj->size();
7226 assert(_finger > addr, "we just incremented it above");
7227 // Note: the finger doesn't advance while we drain
7228 // the stack below.
7229 bool res = _mark_stack->push(obj);
7230 assert(res, "Empty non-zero size stack should have space for single push");
7231 while (!_mark_stack->isEmpty()) {
7232 oop new_oop = _mark_stack->pop();
7233 assert(new_oop->is_oop(), "Oops! expected to pop an oop");
7234 // now scan this oop's oops
7235 new_oop->oop_iterate(&_pam_verify_closure);
7236 }
7237 assert(_mark_stack->isEmpty(), "tautology, emphasizing post-condition");
7238 return true;
7239 }
7241 PushAndMarkVerifyClosure::PushAndMarkVerifyClosure(
7242 CMSCollector* collector, MemRegion span,
7243 CMSBitMap* verification_bm, CMSBitMap* cms_bm,
7244 CMSMarkStack* mark_stack):
7245 OopClosure(collector->ref_processor()),
7246 _collector(collector),
7247 _span(span),
7248 _verification_bm(verification_bm),
7249 _cms_bm(cms_bm),
7250 _mark_stack(mark_stack)
7251 { }
7253 void PushAndMarkVerifyClosure::do_oop(oop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
7254 void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
7256 // Upon stack overflow, we discard (part of) the stack,
7257 // remembering the least address amongst those discarded
7258 // in CMSCollector's _restart_addr.
7259 void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
7260 // Remember the least grey address discarded
7261 HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost);
7262 _collector->lower_restart_addr(ra);
7263 _mark_stack->reset(); // discard stack contents
7264 _mark_stack->expand(); // expand the stack if possible
7265 }
7267 void PushAndMarkVerifyClosure::do_oop(oop obj) {
7268 assert(obj->is_oop_or_null(), "expected an oop or NULL");
7269 HeapWord* addr = (HeapWord*)obj;
7270 if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
7271 // Oop lies in _span and isn't yet grey or black
7272 _verification_bm->mark(addr); // now grey
7273 if (!_cms_bm->isMarked(addr)) {
7274 oop(addr)->print();
7275 gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)",
7276 addr);
7277 fatal("... aborting");
7278 }
7280 if (!_mark_stack->push(obj)) { // stack overflow
7281 if (PrintCMSStatistics != 0) {
7282 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7283 SIZE_FORMAT, _mark_stack->capacity());
7284 }
7285 assert(_mark_stack->isFull(), "Else push should have succeeded");
7286 handle_stack_overflow(addr);
7287 }
7288 // anything including and to the right of _finger
7289 // will be scanned as we iterate over the remainder of the
7290 // bit map
7291 }
7292 }
7294 PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
7295 MemRegion span,
7296 CMSBitMap* bitMap, CMSMarkStack* markStack,
7297 CMSMarkStack* revisitStack,
7298 HeapWord* finger, MarkFromRootsClosure* parent) :
7299 OopClosure(collector->ref_processor()),
7300 _collector(collector),
7301 _span(span),
7302 _bitMap(bitMap),
7303 _markStack(markStack),
7304 _revisitStack(revisitStack),
7305 _finger(finger),
7306 _parent(parent),
7307 _should_remember_klasses(collector->should_unload_classes())
7308 { }
7310 Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
7311 MemRegion span,
7312 CMSBitMap* bit_map,
7313 OopTaskQueue* work_queue,
7314 CMSMarkStack* overflow_stack,
7315 CMSMarkStack* revisit_stack,
7316 HeapWord* finger,
7317 HeapWord** global_finger_addr,
7318 Par_MarkFromRootsClosure* parent) :
7319 OopClosure(collector->ref_processor()),
7320 _collector(collector),
7321 _whole_span(collector->_span),
7322 _span(span),
7323 _bit_map(bit_map),
7324 _work_queue(work_queue),
7325 _overflow_stack(overflow_stack),
7326 _revisit_stack(revisit_stack),
7327 _finger(finger),
7328 _global_finger_addr(global_finger_addr),
7329 _parent(parent),
7330 _should_remember_klasses(collector->should_unload_classes())
7331 { }
7333 // Assumes thread-safe access by callers, who are
7334 // responsible for mutual exclusion.
7335 void CMSCollector::lower_restart_addr(HeapWord* low) {
7336 assert(_span.contains(low), "Out of bounds addr");
7337 if (_restart_addr == NULL) {
7338 _restart_addr = low;
7339 } else {
7340 _restart_addr = MIN2(_restart_addr, low);
7341 }
7342 }
7344 // Upon stack overflow, we discard (part of) the stack,
7345 // remembering the least address amongst those discarded
7346 // in CMSCollector's _restart_addr.
7347 void PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
7348 // Remember the least grey address discarded
7349 HeapWord* ra = (HeapWord*)_markStack->least_value(lost);
7350 _collector->lower_restart_addr(ra);
7351 _markStack->reset(); // discard stack contents
7352 _markStack->expand(); // expand the stack if possible
7353 }
7355 // Upon stack overflow, we discard (part of) the stack,
7356 // remembering the least address amongst those discarded
7357 // in CMSCollector's _restart_addr.
7358 void Par_PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
7359 // We need to do this under a mutex to prevent other
7360 // workers from interfering with the work done below.
7361 MutexLockerEx ml(_overflow_stack->par_lock(),
7362 Mutex::_no_safepoint_check_flag);
7363 // Remember the least grey address discarded
7364 HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
7365 _collector->lower_restart_addr(ra);
7366 _overflow_stack->reset(); // discard stack contents
7367 _overflow_stack->expand(); // expand the stack if possible
7368 }
7370 void PushOrMarkClosure::do_oop(oop obj) {
7371 // Ignore mark word because we are running concurrently with mutators.
7372 assert(obj->is_oop_or_null(true), "expected an oop or NULL");
7373 HeapWord* addr = (HeapWord*)obj;
7374 if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
7375 // Oop lies in _span and isn't yet grey or black
7376 _bitMap->mark(addr); // now grey
7377 if (addr < _finger) {
7378 // the bit map iteration has already either passed, or
7379 // sampled, this bit in the bit map; we'll need to
7380 // use the marking stack to scan this oop's oops.
7381 bool simulate_overflow = false;
7382 NOT_PRODUCT(
7383 if (CMSMarkStackOverflowALot &&
7384 _collector->simulate_overflow()) {
7385 // simulate a stack overflow
7386 simulate_overflow = true;
7387 }
7388 )
7389 if (simulate_overflow || !_markStack->push(obj)) { // stack overflow
7390 if (PrintCMSStatistics != 0) {
7391 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7392 SIZE_FORMAT, _markStack->capacity());
7393 }
7394 assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded");
7395 handle_stack_overflow(addr);
7396 }
7397 }
7398 // anything including and to the right of _finger
7399 // will be scanned as we iterate over the remainder of the
7400 // bit map
7401 do_yield_check();
7402 }
7403 }
7405 void PushOrMarkClosure::do_oop(oop* p) { PushOrMarkClosure::do_oop_work(p); }
7406 void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
7408 void Par_PushOrMarkClosure::do_oop(oop obj) {
7409 // Ignore mark word because we are running concurrently with mutators.
7410 assert(obj->is_oop_or_null(true), "expected an oop or NULL");
7411 HeapWord* addr = (HeapWord*)obj;
7412 if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
7413 // Oop lies in _span and isn't yet grey or black
7414 // We read the global_finger (volatile read) strictly after marking oop
7415 bool res = _bit_map->par_mark(addr); // now grey
7416 volatile HeapWord** gfa = (volatile HeapWord**)_global_finger_addr;
7417 // Should we push this marked oop on our stack?
7418 // -- if someone else marked it, nothing to do
7419 // -- if target oop is above the global finger, nothing to do
7420 // -- if target oop is in this chunk and above the local finger,
7421 // then nothing to do
7422 // -- else push on work queue
7423 if ( !res // someone else marked it, they will deal with it
7424 || (addr >= *gfa) // will be scanned in a later task
7425 || (_span.contains(addr) && addr >= _finger)) { // later in this chunk
7426 return;
7427 }
7428 // the bit map iteration has already either passed, or
7429 // sampled, this bit in the bit map; we'll need to
7430 // use the marking stack to scan this oop's oops.
7431 bool simulate_overflow = false;
7432 NOT_PRODUCT(
7433 if (CMSMarkStackOverflowALot &&
7434 _collector->simulate_overflow()) {
7435 // simulate a stack overflow
7436 simulate_overflow = true;
7437 }
7438 )
7439 if (simulate_overflow ||
7440 !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
7441 // stack overflow
7442 if (PrintCMSStatistics != 0) {
7443 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7444 SIZE_FORMAT, _overflow_stack->capacity());
7445 }
7446 // We cannot assert that the overflow stack is full because
7447 // it may have been emptied since.
7448 assert(simulate_overflow ||
7449 _work_queue->size() == _work_queue->max_elems(),
7450 "Else push should have succeeded");
7451 handle_stack_overflow(addr);
7452 }
7453 do_yield_check();
7454 }
7455 }
7457 void Par_PushOrMarkClosure::do_oop(oop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
7458 void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
7460 PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
7461 MemRegion span,
7462 ReferenceProcessor* rp,
7463 CMSBitMap* bit_map,
7464 CMSBitMap* mod_union_table,
7465 CMSMarkStack* mark_stack,
7466 CMSMarkStack* revisit_stack,
7467 bool concurrent_precleaning):
7468 OopClosure(rp),
7469 _collector(collector),
7470 _span(span),
7471 _bit_map(bit_map),
7472 _mod_union_table(mod_union_table),
7473 _mark_stack(mark_stack),
7474 _revisit_stack(revisit_stack),
7475 _concurrent_precleaning(concurrent_precleaning),
7476 _should_remember_klasses(collector->should_unload_classes())
7477 {
7478 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7479 }
7481 // Grey object rescan during pre-cleaning and second checkpoint phases --
7482 // the non-parallel version (the parallel version appears further below).
7483 void PushAndMarkClosure::do_oop(oop obj) {
7484 // Ignore mark word verification. If during concurrent precleaning,
7485 // the object monitor may be locked. If during the checkpoint
7486 // phases, the object may already have been reached by a different
7487 // path and may be at the end of the global overflow list (so
7488 // the mark word may be NULL).
7489 assert(obj->is_oop_or_null(true /* ignore mark word */),
7490 "expected an oop or NULL");
7491 HeapWord* addr = (HeapWord*)obj;
7492 // Check if oop points into the CMS generation
7493 // and is not marked
7494 if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
7495 // a white object ...
7496 _bit_map->mark(addr); // ... now grey
7497 // push on the marking stack (grey set)
7498 bool simulate_overflow = false;
7499 NOT_PRODUCT(
7500 if (CMSMarkStackOverflowALot &&
7501 _collector->simulate_overflow()) {
7502 // simulate a stack overflow
7503 simulate_overflow = true;
7504 }
7505 )
7506 if (simulate_overflow || !_mark_stack->push(obj)) {
7507 if (_concurrent_precleaning) {
7508 // During precleaning we can just dirty the appropriate card(s)
7509 // in the mod union table, thus ensuring that the object remains
7510 // in the grey set, and continue. In the case of object arrays
7511 // we need to dirty all of the cards that the object spans,
7512 // since the rescan of object arrays will be limited to the
7513 // dirty cards.
7514 // Note that no one can be interfering with us in this action
7515 // of dirtying the mod union table, so no locking or atomics
7516 // are required.
7517 if (obj->is_objArray()) {
7518 size_t sz = obj->size();
7519 HeapWord* end_card_addr = (HeapWord*)round_to(
7520 (intptr_t)(addr+sz), CardTableModRefBS::card_size);
7521 MemRegion redirty_range = MemRegion(addr, end_card_addr);
7522 assert(!redirty_range.is_empty(), "Arithmetical tautology");
7523 _mod_union_table->mark_range(redirty_range);
7524 } else {
7525 _mod_union_table->mark(addr);
7526 }
7527 _collector->_ser_pmc_preclean_ovflw++;
7528 } else {
7529 // During the remark phase, we need to remember this oop
7530 // in the overflow list.
7531 _collector->push_on_overflow_list(obj);
7532 _collector->_ser_pmc_remark_ovflw++;
7533 }
7534 }
7535 }
7536 }
7538 Par_PushAndMarkClosure::Par_PushAndMarkClosure(CMSCollector* collector,
7539 MemRegion span,
7540 ReferenceProcessor* rp,
7541 CMSBitMap* bit_map,
7542 OopTaskQueue* work_queue,
7543 CMSMarkStack* revisit_stack):
7544 OopClosure(rp),
7545 _collector(collector),
7546 _span(span),
7547 _bit_map(bit_map),
7548 _work_queue(work_queue),
7549 _revisit_stack(revisit_stack),
7550 _should_remember_klasses(collector->should_unload_classes())
7551 {
7552 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7553 }
7555 void PushAndMarkClosure::do_oop(oop* p) { PushAndMarkClosure::do_oop_work(p); }
7556 void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
7558 // Grey object rescan during second checkpoint phase --
7559 // the parallel version.
7560 void Par_PushAndMarkClosure::do_oop(oop obj) {
7561 // In the assert below, we ignore the mark word because
7562 // this oop may point to an already visited object that is
7563 // on the overflow stack (in which case the mark word has
7564 // been hijacked for chaining into the overflow stack --
7565 // if this is the last object in the overflow stack then
7566 // its mark word will be NULL). Because this object may
7567 // have been subsequently popped off the global overflow
7568 // stack, and the mark word possibly restored to the prototypical
7569 // value, by the time we get to examine this failing assert in
7570 // the debugger, is_oop_or_null(false) may subsequently start
7571 // to hold.
7572 assert(obj->is_oop_or_null(true),
7573 "expected an oop or NULL");
7574 HeapWord* addr = (HeapWord*)obj;
7575 // Check if oop points into the CMS generation
7576 // and is not marked
7577 if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
7578 // a white object ...
7579 // If we manage to "claim" the object, by being the
7580 // first thread to mark it, then we push it on our
7581 // marking stack
7582 if (_bit_map->par_mark(addr)) { // ... now grey
7583 // push on work queue (grey set)
7584 bool simulate_overflow = false;
7585 NOT_PRODUCT(
7586 if (CMSMarkStackOverflowALot &&
7587 _collector->par_simulate_overflow()) {
7588 // simulate a stack overflow
7589 simulate_overflow = true;
7590 }
7591 )
7592 if (simulate_overflow || !_work_queue->push(obj)) {
7593 _collector->par_push_on_overflow_list(obj);
7594 _collector->_par_pmc_remark_ovflw++; // imprecise OK: no need to CAS
7595 }
7596 } // Else, some other thread got there first
7597 }
7598 }
7600 void Par_PushAndMarkClosure::do_oop(oop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
7601 void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
7603 void PushAndMarkClosure::remember_klass(Klass* k) {
7604 if (!_revisit_stack->push(oop(k))) {
7605 fatal("Revisit stack overflowed in PushAndMarkClosure");
7606 }
7607 }
7609 void Par_PushAndMarkClosure::remember_klass(Klass* k) {
7610 if (!_revisit_stack->par_push(oop(k))) {
7611 fatal("Revist stack overflowed in Par_PushAndMarkClosure");
7612 }
7613 }
7615 void CMSPrecleanRefsYieldClosure::do_yield_work() {
7616 Mutex* bml = _collector->bitMapLock();
7617 assert_lock_strong(bml);
7618 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7619 "CMS thread should hold CMS token");
7621 bml->unlock();
7622 ConcurrentMarkSweepThread::desynchronize(true);
7624 ConcurrentMarkSweepThread::acknowledge_yield_request();
7626 _collector->stopTimer();
7627 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
7628 if (PrintCMSStatistics != 0) {
7629 _collector->incrementYields();
7630 }
7631 _collector->icms_wait();
7633 // See the comment in coordinator_yield()
7634 for (unsigned i = 0; i < CMSYieldSleepCount &&
7635 ConcurrentMarkSweepThread::should_yield() &&
7636 !CMSCollector::foregroundGCIsActive(); ++i) {
7637 os::sleep(Thread::current(), 1, false);
7638 ConcurrentMarkSweepThread::acknowledge_yield_request();
7639 }
7641 ConcurrentMarkSweepThread::synchronize(true);
7642 bml->lock();
7644 _collector->startTimer();
7645 }
7647 bool CMSPrecleanRefsYieldClosure::should_return() {
7648 if (ConcurrentMarkSweepThread::should_yield()) {
7649 do_yield_work();
7650 }
7651 return _collector->foregroundGCIsActive();
7652 }
7654 void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) {
7655 assert(((size_t)mr.start())%CardTableModRefBS::card_size_in_words == 0,
7656 "mr should be aligned to start at a card boundary");
7657 // We'd like to assert:
7658 // assert(mr.word_size()%CardTableModRefBS::card_size_in_words == 0,
7659 // "mr should be a range of cards");
7660 // However, that would be too strong in one case -- the last
7661 // partition ends at _unallocated_block which, in general, can be
7662 // an arbitrary boundary, not necessarily card aligned.
7663 if (PrintCMSStatistics != 0) {
7664 _num_dirty_cards +=
7665 mr.word_size()/CardTableModRefBS::card_size_in_words;
7666 }
7667 _space->object_iterate_mem(mr, &_scan_cl);
7668 }
7670 SweepClosure::SweepClosure(CMSCollector* collector,
7671 ConcurrentMarkSweepGeneration* g,
7672 CMSBitMap* bitMap, bool should_yield) :
7673 _collector(collector),
7674 _g(g),
7675 _sp(g->cmsSpace()),
7676 _limit(_sp->sweep_limit()),
7677 _freelistLock(_sp->freelistLock()),
7678 _bitMap(bitMap),
7679 _yield(should_yield),
7680 _inFreeRange(false), // No free range at beginning of sweep
7681 _freeRangeInFreeLists(false), // No free range at beginning of sweep
7682 _lastFreeRangeCoalesced(false),
7683 _freeFinger(g->used_region().start())
7684 {
7685 NOT_PRODUCT(
7686 _numObjectsFreed = 0;
7687 _numWordsFreed = 0;
7688 _numObjectsLive = 0;
7689 _numWordsLive = 0;
7690 _numObjectsAlreadyFree = 0;
7691 _numWordsAlreadyFree = 0;
7692 _last_fc = NULL;
7694 _sp->initializeIndexedFreeListArrayReturnedBytes();
7695 _sp->dictionary()->initializeDictReturnedBytes();
7696 )
7697 assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7698 "sweep _limit out of bounds");
7699 if (CMSTraceSweeper) {
7700 gclog_or_tty->print("\n====================\nStarting new sweep\n");
7701 }
7702 }
7704 // We need this destructor to reclaim any space at the end
7705 // of the space, which do_blk below may not have added back to
7706 // the free lists. [basically dealing with the "fringe effect"]
7707 SweepClosure::~SweepClosure() {
7708 assert_lock_strong(_freelistLock);
7709 // this should be treated as the end of a free run if any
7710 // The current free range should be returned to the free lists
7711 // as one coalesced chunk.
7712 if (inFreeRange()) {
7713 flushCurFreeChunk(freeFinger(),
7714 pointer_delta(_limit, freeFinger()));
7715 assert(freeFinger() < _limit, "the finger pointeth off base");
7716 if (CMSTraceSweeper) {
7717 gclog_or_tty->print("destructor:");
7718 gclog_or_tty->print("Sweep:put_free_blk 0x%x ("SIZE_FORMAT") "
7719 "[coalesced:"SIZE_FORMAT"]\n",
7720 freeFinger(), pointer_delta(_limit, freeFinger()),
7721 lastFreeRangeCoalesced());
7722 }
7723 }
7724 NOT_PRODUCT(
7725 if (Verbose && PrintGC) {
7726 gclog_or_tty->print("Collected "SIZE_FORMAT" objects, "
7727 SIZE_FORMAT " bytes",
7728 _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
7729 gclog_or_tty->print_cr("\nLive "SIZE_FORMAT" objects, "
7730 SIZE_FORMAT" bytes "
7731 "Already free "SIZE_FORMAT" objects, "SIZE_FORMAT" bytes",
7732 _numObjectsLive, _numWordsLive*sizeof(HeapWord),
7733 _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
7734 size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree) *
7735 sizeof(HeapWord);
7736 gclog_or_tty->print_cr("Total sweep: "SIZE_FORMAT" bytes", totalBytes);
7738 if (PrintCMSStatistics && CMSVerifyReturnedBytes) {
7739 size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
7740 size_t dictReturnedBytes = _sp->dictionary()->sumDictReturnedBytes();
7741 size_t returnedBytes = indexListReturnedBytes + dictReturnedBytes;
7742 gclog_or_tty->print("Returned "SIZE_FORMAT" bytes", returnedBytes);
7743 gclog_or_tty->print(" Indexed List Returned "SIZE_FORMAT" bytes",
7744 indexListReturnedBytes);
7745 gclog_or_tty->print_cr(" Dictionary Returned "SIZE_FORMAT" bytes",
7746 dictReturnedBytes);
7747 }
7748 }
7749 )
7750 // Now, in debug mode, just null out the sweep_limit
7751 NOT_PRODUCT(_sp->clear_sweep_limit();)
7752 if (CMSTraceSweeper) {
7753 gclog_or_tty->print("end of sweep\n================\n");
7754 }
7755 }
7757 void SweepClosure::initialize_free_range(HeapWord* freeFinger,
7758 bool freeRangeInFreeLists) {
7759 if (CMSTraceSweeper) {
7760 gclog_or_tty->print("---- Start free range 0x%x with free block [%d] (%d)\n",
7761 freeFinger, _sp->block_size(freeFinger),
7762 freeRangeInFreeLists);
7763 }
7764 assert(!inFreeRange(), "Trampling existing free range");
7765 set_inFreeRange(true);
7766 set_lastFreeRangeCoalesced(false);
7768 set_freeFinger(freeFinger);
7769 set_freeRangeInFreeLists(freeRangeInFreeLists);
7770 if (CMSTestInFreeList) {
7771 if (freeRangeInFreeLists) {
7772 FreeChunk* fc = (FreeChunk*) freeFinger;
7773 assert(fc->isFree(), "A chunk on the free list should be free.");
7774 assert(fc->size() > 0, "Free range should have a size");
7775 assert(_sp->verifyChunkInFreeLists(fc), "Chunk is not in free lists");
7776 }
7777 }
7778 }
7780 // Note that the sweeper runs concurrently with mutators. Thus,
7781 // it is possible for direct allocation in this generation to happen
7782 // in the middle of the sweep. Note that the sweeper also coalesces
7783 // contiguous free blocks. Thus, unless the sweeper and the allocator
7784 // synchronize appropriately, freshly allocated blocks may get swept up.
7785 // This is accomplished by the sweeper locking the free lists while
7786 // it is sweeping. Thus blocks that are determined to be free are
7787 // indeed free. There is however one additional complication:
7788 // blocks that have been allocated since the final checkpoint and
7789 // mark, will not have been marked and so would be treated as
7790 // unreachable and swept up. To prevent this, the allocator marks
7791 // the bit map when allocating during the sweep phase. This leads,
7792 // however, to a further complication -- objects may have been allocated
7793 // but not yet initialized -- in the sense that the header isn't yet
7794 // installed. The sweeper cannot then determine the size of the block
7795 // in order to skip over it. To deal with this case, we use a technique
7796 // (due to Printezis) to encode such uninitialized block sizes in the
7797 // bit map. Since the bit map uses a bit per every HeapWord, but the
7798 // CMS generation has a minimum object size of 3 HeapWords, it follows
7799 // that "normal marks" won't be adjacent in the bit map (there will
7800 // always be at least two 0 bits between successive 1 bits). We make use
7801 // of these "unused" bits to represent uninitialized blocks -- the bit
7802 // corresponding to the start of the uninitialized object and the next
7803 // bit are both set. Finally, a 1 bit marks the end of the object that
7804 // started with the two consecutive 1 bits to indicate its potentially
7805 // uninitialized state.
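// [Editorial sketch -- not in the original source.] To make the Printezis
// encoding above concrete, the fragment below decodes a block size from a
// toy bit map (a plain bool array standing in for CMSBitMap; all names
// here are hypothetical). A lone 1 bit at 'start' is a normal mark, so the
// size comes from the object header; a pair of consecutive 1 bits means
// the block is uninitialized, and its size is recovered by scanning for
// the closing 1 bit that marks the block's last word.
#if 0  // illustrative only; not compiled
#include <cstddef>

// header_size(i) stands in for oop(addr)->size() on an initialized header.
size_t decode_block_size(const bool* bits, size_t start, size_t limit,
                         size_t (*header_size)(size_t)) {
  if (!bits[start + 1]) {
    return header_size(start);          // normal mark: header is parsable
  }
  // Printezis pair at [start, start+1]: find the closing 1 bit.
  for (size_t i = start + 2; i < limit; i++) {
    if (bits[i]) {
      return i - start + 1;             // closing bit is on the last word
    }
  }
  return limit - start;                 // defensive: ran off the bit map
}
#endif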
7807 size_t SweepClosure::do_blk_careful(HeapWord* addr) {
7808 FreeChunk* fc = (FreeChunk*)addr;
7809 size_t res;
7811 // check if we are done sweeping
7812 if (addr == _limit) { // we have swept up to the limit, do nothing more
7813 assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7814 "sweep _limit out of bounds");
7815 // help the closure application finish
7816 return pointer_delta(_sp->end(), _limit);
7817 }
7818 assert(addr <= _limit, "sweep invariant");
7820 // check if we should yield
7821 do_yield_check(addr);
7822 if (fc->isFree()) {
7823 // Chunk that is already free
7824 res = fc->size();
7825 doAlreadyFreeChunk(fc);
7826 debug_only(_sp->verifyFreeLists());
7827 assert(res == fc->size(), "Don't expect the size to change");
7828 NOT_PRODUCT(
7829 _numObjectsAlreadyFree++;
7830 _numWordsAlreadyFree += res;
7831 )
7832 NOT_PRODUCT(_last_fc = fc;)
7833 } else if (!_bitMap->isMarked(addr)) {
7834 // Chunk is fresh garbage
7835 res = doGarbageChunk(fc);
7836 debug_only(_sp->verifyFreeLists());
7837 NOT_PRODUCT(
7838 _numObjectsFreed++;
7839 _numWordsFreed += res;
7840 )
7841 } else {
7842 // Chunk that is alive.
7843 res = doLiveChunk(fc);
7844 debug_only(_sp->verifyFreeLists());
7845 NOT_PRODUCT(
7846 _numObjectsLive++;
7847 _numWordsLive += res;
7848 )
7849 }
7850 return res;
7851 }
7853 // For the smart allocation, record the following:
7854 // split death - a free chunk is removed from its free list because
7855 //   it is being split into two or more chunks.
7856 // split birth - a free chunk is being added to its free list because
7857 //   a larger free chunk has been split and resulted in this free chunk.
7858 // coal death - a free chunk is being removed from its free list because
7859 //   it is being coalesced into a larger free chunk.
7860 // coal birth - a free chunk is being added to its free list because
7861 //   it was created when two or more free chunks were coalesced into
7862 //   this free chunk.
7863 //
7864 // These statistics are used to determine the desired number of free
7865 // chunks of a given size. The desired number is chosen to be relative
7866 // to the end of a CMS sweep. The desired number at the end of a sweep
7867 // is the
7868 // count-at-end-of-previous-sweep (an amount that was enough)
7869 // - count-at-beginning-of-current-sweep (the excess)
7870 // + split-births (gains in this size during interval)
7871 // - split-deaths (demands on this size during interval)
7872 // where the interval is from the end of one sweep to the end of the
7873 // next.
7874 //
7875 // When sweeping, the sweeper maintains an accumulated chunk, which is
7876 // the chunk that is made up of chunks that have been coalesced. That
7877 // will be termed the left-hand chunk. A new chunk of garbage that
7878 // is being considered for coalescing will be referred to as the
7879 // right-hand chunk.
7880 //
7881 // When making a decision on whether to coalesce a right-hand chunk with
7882 // the current left-hand chunk, the current count vs. the desired count
7883 // of the left-hand chunk is considered. Also if the right-hand chunk
7884 // is near the large chunk at the end of the heap (see
7885 // ConcurrentMarkSweepGeneration::isNearLargestChunk()), then the
7886 // left-hand chunk is coalesced.
7887 //
7888 // When making a decision about whether to split a chunk, the desired count
7889 // vs. the current count of the candidate to be split is also considered.
7890 // If the candidate is underpopulated (currently fewer chunks than desired)
7891 // a chunk of an overpopulated (currently more chunks than desired) size may
7892 // be chosen. The "hint" associated with a free list, if non-null, points
7893 // to a free list which may be overpopulated.
7894 //
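// [Editorial sketch -- not in the original source.] A minimal model of the
// demand calculation described above, using hypothetical field names. The
// desired count for a given chunk size is the supply that sufficed last
// time, minus the leftover excess, adjusted by the gains and demands
// observed over the sweep-to-sweep interval.
#if 0  // illustrative only; not compiled
struct FreeListStatsSketch {
  long prev_sweep;    // count at end of previous sweep (enough)
  long before_sweep;  // count at beginning of current sweep (excess)
  long split_births;  // gains in this size during the interval
  long split_deaths;  // demands on this size during the interval

  long desired() const {
    long d = prev_sweep - before_sweep + split_births - split_deaths;
    return d > 0 ? d : 0;  // assumption: negative demand clamps to zero
  }
};
// Example: 40 chunks at the end of the last sweep, 10 left over at the
// start of this one, 25 split births and 50 split deaths in between gives
// desired = 40 - 10 + 25 - 50 = 5.
#endif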
7896 void SweepClosure::doAlreadyFreeChunk(FreeChunk* fc) {
7897 size_t size = fc->size();
7898 // Chunks that cannot be coalesced are not in the
7899 // free lists.
7900 if (CMSTestInFreeList && !fc->cantCoalesce()) {
7901 assert(_sp->verifyChunkInFreeLists(fc),
7902 "free chunk should be in free lists");
7903 }
7904 // A chunk that is already free should not have been
7905 // marked in the bit map
7906 HeapWord* addr = (HeapWord*) fc;
7907 assert(!_bitMap->isMarked(addr), "free chunk should be unmarked");
7908 // Verify that the bit map has no bits marked between
7909 // addr and purported end of this block.
7910 _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
7912 // Some chunks cannot be coalesced under any circumstances.
7913 // See the definition of cantCoalesce().
7914 if (!fc->cantCoalesce()) {
7915 // This chunk can potentially be coalesced.
7916 if (_sp->adaptive_freelists()) {
7917 // All the work is done in
7918 doPostIsFreeOrGarbageChunk(fc, size);
7919 } else { // Not adaptive free lists
7920 // this is a free chunk that can potentially be coalesced by the sweeper;
7921 if (!inFreeRange()) {
7922 // if the next chunk is a free block that can't be coalesced,
7923 // it doesn't make sense to remove this chunk from the free lists
7924 FreeChunk* nextChunk = (FreeChunk*)(addr + size);
7925 assert((HeapWord*)nextChunk <= _limit, "sweep invariant");
7926 if ((HeapWord*)nextChunk < _limit && // there's a next chunk...
7927 nextChunk->isFree() && // which is free...
7928 nextChunk->cantCoalesce()) { // ... but can't be coalesced
7929 // nothing to do
7930 } else {
7931 // Potentially the start of a new free range:
7932 // Don't eagerly remove it from the free lists.
7933 // No need to remove it if it will just be put
7934 // back again. (Also, from a pragmatic point of view,
7935 // if it is a free block in a region that is beyond
7936 // any allocated blocks, an assertion will fail.)
7937 // Remember the start of a free run.
7938 initialize_free_range(addr, true);
7939 // end - can coalesce with next chunk
7940 }
7941 } else {
7942 // In the midst of a free range; we are coalescing.
7943 debug_only(record_free_block_coalesced(fc);)
7944 if (CMSTraceSweeper) {
7945 gclog_or_tty->print(" -- pick up free block 0x%x (%d)\n", fc, size);
7946 }
7947 // remove it from the free lists
7948 _sp->removeFreeChunkFromFreeLists(fc);
7949 set_lastFreeRangeCoalesced(true);
7950 // If the chunk is being coalesced and the current free range is
7951 // in the free lists, remove the current free range so that it
7952 // will be returned to the free lists in its entirety - all
7953 // the coalesced pieces included.
7954 if (freeRangeInFreeLists()) {
7955 FreeChunk* ffc = (FreeChunk*) freeFinger();
7956 assert(ffc->size() == pointer_delta(addr, freeFinger()),
7957 "Size of free range is inconsistent with chunk size.");
7958 if (CMSTestInFreeList) {
7959 assert(_sp->verifyChunkInFreeLists(ffc),
7960 "free range is not in free lists");
7961 }
7962 _sp->removeFreeChunkFromFreeLists(ffc);
7963 set_freeRangeInFreeLists(false);
7964 }
7965 }
7966 }
7967 } else {
7968 // Code path common to both original and adaptive free lists.
7970 // Can't coalesce with the previous block; this should be treated
7971 // as the end of a free run, if any.
7972 if (inFreeRange()) {
7973 // we kicked some butt; time to pick up the garbage
7974 assert(freeFinger() < addr, "the finger pointeth off base");
7975 flushCurFreeChunk(freeFinger(), pointer_delta(addr, freeFinger()));
7976 }
7977 // else, nothing to do, just continue
7978 }
7979 }
7981 size_t SweepClosure::doGarbageChunk(FreeChunk* fc) {
7982 // This is a chunk of garbage. It is not in any free list.
7983 // Add it to a free list or let it possibly be coalesced into
7984 // a larger chunk.
7985 HeapWord* addr = (HeapWord*) fc;
7986 size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
7988 if (_sp->adaptive_freelists()) {
7989 // Verify that the bit map has no bits marked between
7990 // addr and purported end of just dead object.
7991 _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
7993 doPostIsFreeOrGarbageChunk(fc, size);
7994 } else {
7995 if (!inFreeRange()) {
7996 // start of a new free range
7997 assert(size > 0, "A free range should have a size");
7998 initialize_free_range(addr, false);
8000 } else {
8001 // this will be swept up when we hit the end of the
8002 // free range
8003 if (CMSTraceSweeper) {
8004 gclog_or_tty->print(" -- pick up garbage 0x%x (%d) \n", fc, size);
8005 }
8006 // If the chunk is being coalesced and the current free range is
8007 // in the free lists, remove the current free range so that it
8008 // will be returned to the free lists in its entirety - all
8009 // the coalesced pieces included.
8010 if (freeRangeInFreeLists()) {
8011 FreeChunk* ffc = (FreeChunk*)freeFinger();
8012 assert(ffc->size() == pointer_delta(addr, freeFinger()),
8013 "Size of free range is inconsistent with chunk size.");
8014 if (CMSTestInFreeList) {
8015 assert(_sp->verifyChunkInFreeLists(ffc),
8016 "free range is not in free lists");
8017 }
8018 _sp->removeFreeChunkFromFreeLists(ffc);
8019 set_freeRangeInFreeLists(false);
8020 }
8021 set_lastFreeRangeCoalesced(true);
8022 }
8025 // Verify that the bit map has no bits marked between
8026 // addr and purported end of just dead object.
8027 _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
8028 }
8029 return size;
8030 }
8032 size_t SweepClosure::doLiveChunk(FreeChunk* fc) {
8033 HeapWord* addr = (HeapWord*) fc;
8034 // The sweeper has just found a live object. Return any accumulated
8035 // left hand chunk to the free lists.
8036 if (inFreeRange()) {
8037 if (_sp->adaptive_freelists()) {
8038 flushCurFreeChunk(freeFinger(),
8039 pointer_delta(addr, freeFinger()));
8040 } else { // not adaptive freelists
8041 set_inFreeRange(false);
8042 // Add the free range back to the free list if it is not already
8043 // there.
8044 if (!freeRangeInFreeLists()) {
8045 assert(freeFinger() < addr, "the finger pointeth off base");
8046 if (CMSTraceSweeper) {
8047 gclog_or_tty->print("Sweep:put_free_blk 0x%x (%d) "
8048 "[coalesced:%d]\n",
8049 freeFinger(), pointer_delta(addr, freeFinger()),
8050 lastFreeRangeCoalesced());
8051 }
8052 _sp->addChunkAndRepairOffsetTable(freeFinger(),
8053 pointer_delta(addr, freeFinger()), lastFreeRangeCoalesced());
8054 }
8055 }
8056 }
8058 // Common code path for original and adaptive free lists.
8060 // this object is live: we'd normally expect this to be
8061 // an oop, and would like to assert the following:
8062 // assert(oop(addr)->is_oop(), "live block should be an oop");
8063 // However, as we commented above, this may be an object whose
8064 // header hasn't yet been initialized.
8065 size_t size;
8066 assert(_bitMap->isMarked(addr), "Tautology for this control point");
8067 if (_bitMap->isMarked(addr + 1)) {
8068 // Determine the size from the bit map, rather than trying to
8069 // compute it from the object header.
8070 HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
8071 size = pointer_delta(nextOneAddr + 1, addr);
8072 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
8073 "alignment problem");
8075 #ifdef DEBUG
8076 if (oop(addr)->klass_or_null() != NULL &&
8077 ( !_collector->should_unload_classes()
8078 || (oop(addr)->is_parsable()) &&
8079 oop(addr)->is_conc_safe())) {
8080 // Ignore mark word because we are running concurrently with mutators
8081 assert(oop(addr)->is_oop(true), "live block should be an oop");
8082 // is_conc_safe is checked before performing this assertion
8083 // because an object that is not is_conc_safe may nevertheless
8084 // return the correct size().
8085 assert(size ==
8086 CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
8087 "P-mark and computed size do not agree");
8088 }
8089 #endif
8091 } else {
8092 // This should be an initialized object that's alive.
8093 assert(oop(addr)->klass_or_null() != NULL &&
8094 (!_collector->should_unload_classes()
8095 || oop(addr)->is_parsable()),
8096 "Should be an initialized object");
8097 // Note that there are objects used during class redefinition
8098 // (e.g., merge_cp in VM_RedefineClasses::merge_cp_and_rewrite())
8099 // which are discarded with their is_conc_safe state still
8100 // false. These objects may be floating garbage, so they may be
8101 // seen here. If they are floating garbage, their size
8102 // should be attainable from their klass. Do not assume that
8103 // is_conc_safe() is true for oop(addr).
8104 // Ignore mark word because we are running concurrently with mutators
8105 assert(oop(addr)->is_oop(true), "live block should be an oop");
8106 // Verify that the bit map has no bits marked between
8107 // addr and purported end of this block.
8108 size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
8109 assert(size >= 3, "Necessary for Printezis marks to work");
8110 assert(!_bitMap->isMarked(addr+1), "Tautology for this control point");
8111 DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);)
8112 }
8113 return size;
8114 }
8116 void SweepClosure::doPostIsFreeOrGarbageChunk(FreeChunk* fc,
8117 size_t chunkSize) {
8118 // doPostIsFreeOrGarbageChunk() should only be called in the smart allocation
8119 // scheme.
8120 bool fcInFreeLists = fc->isFree();
8121 assert(_sp->adaptive_freelists(), "Should only be used in this case.");
8122 assert((HeapWord*)fc <= _limit, "sweep invariant");
8123 if (CMSTestInFreeList && fcInFreeLists) {
8124 assert(_sp->verifyChunkInFreeLists(fc),
8125 "free chunk is not in free lists");
8126 }
8129 if (CMSTraceSweeper) {
8130 gclog_or_tty->print_cr(" -- pick up another chunk at 0x%x (%d)", fc, chunkSize);
8131 }
8133 HeapWord* addr = (HeapWord*) fc;
8135 bool coalesce;
8136 size_t left = pointer_delta(addr, freeFinger());
8137 size_t right = chunkSize;
8138 switch (FLSCoalescePolicy) {
8139 // numeric value forms a coalescing aggressiveness metric
8140 case 0: { // never coalesce
8141 coalesce = false;
8142 break;
8143 }
8144 case 1: { // coalesce if left & right chunks on overpopulated lists
8145 coalesce = _sp->coalOverPopulated(left) &&
8146 _sp->coalOverPopulated(right);
8147 break;
8148 }
8149 case 2: { // coalesce if left chunk on overpopulated list (default)
8150 coalesce = _sp->coalOverPopulated(left);
8151 break;
8152 }
8153 case 3: { // coalesce if left OR right chunk on overpopulated list
8154 coalesce = _sp->coalOverPopulated(left) ||
8155 _sp->coalOverPopulated(right);
8156 break;
8157 }
8158 case 4: { // always coalesce
8159 coalesce = true;
8160 break;
8161 }
8162 default:
8163 ShouldNotReachHere();
8164 }
8166 // Should the current free range be coalesced?
8167 // If the chunk is in a free range and either we decided to coalesce above
8168 // or the chunk is near the large block at the end of the heap
8169 // (isNearLargestChunk() returns true), then coalesce this chunk.
8170 bool doCoalesce = inFreeRange() &&
8171 (coalesce || _g->isNearLargestChunk((HeapWord*)fc));
8172 if (doCoalesce) {
8173 // Coalesce the current free range on the left with the new
8174 // chunk on the right. If either is on a free list,
8175 // it must be removed from the list and stashed in the closure.
8176 if (freeRangeInFreeLists()) {
8177 FreeChunk* ffc = (FreeChunk*)freeFinger();
8178 assert(ffc->size() == pointer_delta(addr, freeFinger()),
8179 "Size of free range is inconsistent with chunk size.");
8180 if (CMSTestInFreeList) {
8181 assert(_sp->verifyChunkInFreeLists(ffc),
8182 "Chunk is not in free lists");
8183 }
8184 _sp->coalDeath(ffc->size());
8185 _sp->removeFreeChunkFromFreeLists(ffc);
8186 set_freeRangeInFreeLists(false);
8187 }
8188 if (fcInFreeLists) {
8189 _sp->coalDeath(chunkSize);
8190 assert(fc->size() == chunkSize,
8191 "The chunk has the wrong size or is not in the free lists");
8192 _sp->removeFreeChunkFromFreeLists(fc);
8193 }
8194 set_lastFreeRangeCoalesced(true);
8195 } else { // not in a free range and/or should not coalesce
8196 // Return the current free range and start a new one.
8197 if (inFreeRange()) {
8198 // In a free range but cannot coalesce with the right hand chunk.
8199 // Put the current free range into the free lists.
8200 flushCurFreeChunk(freeFinger(),
8201 pointer_delta(addr, freeFinger()));
8202 }
8203 // Set up for new free range. Pass along whether the right hand
8204 // chunk is in the free lists.
8205 initialize_free_range((HeapWord*)fc, fcInFreeLists);
8206 }
8207 }
8208 void SweepClosure::flushCurFreeChunk(HeapWord* chunk, size_t size) {
8209 assert(inFreeRange(), "Should only be called if currently in a free range.");
8210 assert(size > 0,
8211 "A zero sized chunk cannot be added to the free lists.");
8212 if (!freeRangeInFreeLists()) {
8213 if (CMSTestInFreeList) {
8214 FreeChunk* fc = (FreeChunk*) chunk;
8215 fc->setSize(size);
8216 assert(!_sp->verifyChunkInFreeLists(fc),
8217 "chunk should not be in free lists yet");
8218 }
8219 if (CMSTraceSweeper) {
8220 gclog_or_tty->print_cr(" -- add free block 0x%x (%d) to free lists",
8221 chunk, size);
8222 }
8223 // A new free range is about to start. The current
8224 // free range has not been added to the free lists yet, or
8225 // was removed, so add it back.
8226 // If the current free range was coalesced, then the death
8227 // of the free range was recorded. Record a birth now.
8228 if (lastFreeRangeCoalesced()) {
8229 _sp->coalBirth(size);
8230 }
8231 _sp->addChunkAndRepairOffsetTable(chunk, size,
8232 lastFreeRangeCoalesced());
8233 }
8234 set_inFreeRange(false);
8235 set_freeRangeInFreeLists(false);
8236 }
8238 // We take a break if we've been at this for a while,
8239 // so as to avoid monopolizing the locks involved.
8240 void SweepClosure::do_yield_work(HeapWord* addr) {
8241 // Return current free chunk being used for coalescing (if any)
8242 // to the appropriate freelist. After yielding, the next
8243 // free block encountered will start a coalescing range of
8244 // free blocks. If the next free block is adjacent to the
8245 // chunk just flushed, they will need to wait for the next
8246 // sweep to be coalesced.
8247 if (inFreeRange()) {
8248 flushCurFreeChunk(freeFinger(), pointer_delta(addr, freeFinger()));
8249 }
8251 // First give up the locks, then yield, then re-lock.
8252 // We should probably use a constructor/destructor idiom to
8253 // do this unlock/lock or modify the MutexUnlocker class to
8254 // serve our purpose. XXX
8255 assert_lock_strong(_bitMap->lock());
8256 assert_lock_strong(_freelistLock);
8257 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
8258 "CMS thread should hold CMS token");
8259 _bitMap->lock()->unlock();
8260 _freelistLock->unlock();
8261 ConcurrentMarkSweepThread::desynchronize(true);
8262 ConcurrentMarkSweepThread::acknowledge_yield_request();
8263 _collector->stopTimer();
8264 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
8265 if (PrintCMSStatistics != 0) {
8266 _collector->incrementYields();
8267 }
8268 _collector->icms_wait();
8270 // See the comment in coordinator_yield()
8271 for (unsigned i = 0; i < CMSYieldSleepCount &&
8272 ConcurrentMarkSweepThread::should_yield() &&
8273 !CMSCollector::foregroundGCIsActive(); ++i) {
8274 os::sleep(Thread::current(), 1, false);
8275 ConcurrentMarkSweepThread::acknowledge_yield_request();
8276 }
8278 ConcurrentMarkSweepThread::synchronize(true);
8279 _freelistLock->lock();
8280 _bitMap->lock()->lock_without_safepoint_check();
8281 _collector->startTimer();
8282 }
8284 #ifndef PRODUCT
8285 // This is actually very useful in a product build if it can
8286 // be called from the debugger. Compile it into the product
8287 // as needed.
8288 bool debug_verifyChunkInFreeLists(FreeChunk* fc) {
8289 return debug_cms_space->verifyChunkInFreeLists(fc);
8290 }
8292 void SweepClosure::record_free_block_coalesced(FreeChunk* fc) const {
8293 if (CMSTraceSweeper) {
8294 gclog_or_tty->print("Sweep:coal_free_blk 0x%x (%d)\n", fc, fc->size());
8295 }
8296 }
8297 #endif
8299 // CMSIsAliveClosure
8300 bool CMSIsAliveClosure::do_object_b(oop obj) {
8301 HeapWord* addr = (HeapWord*)obj;
8302 return addr != NULL &&
8303 (!_span.contains(addr) || _bit_map->isMarked(addr));
8304 }
8306 // CMSKeepAliveClosure: the serial version
8307 void CMSKeepAliveClosure::do_oop(oop obj) {
8308 HeapWord* addr = (HeapWord*)obj;
8309 if (_span.contains(addr) &&
8310 !_bit_map->isMarked(addr)) {
8311 _bit_map->mark(addr);
8312 bool simulate_overflow = false;
8313 NOT_PRODUCT(
8314 if (CMSMarkStackOverflowALot &&
8315 _collector->simulate_overflow()) {
8316 // simulate a stack overflow
8317 simulate_overflow = true;
8318 }
8319 )
8320 if (simulate_overflow || !_mark_stack->push(obj)) {
8321 if (_concurrent_precleaning) {
8322 // We dirty the overflowed object and let the remark
8323 // phase deal with it.
8324 assert(_collector->overflow_list_is_empty(), "Error");
8325 // In the case of object arrays, we need to dirty all of
8326 // the cards that the object spans. No locking or atomics
8327 // are needed since no one else can be mutating the mod union
8328 // table.
8329 if (obj->is_objArray()) {
8330 size_t sz = obj->size();
8331 HeapWord* end_card_addr =
8332 (HeapWord*)round_to((intptr_t)(addr+sz), CardTableModRefBS::card_size);
8333 MemRegion redirty_range = MemRegion(addr, end_card_addr);
8334 assert(!redirty_range.is_empty(), "Arithmetical tautology");
8335 _collector->_modUnionTable.mark_range(redirty_range);
8336 } else {
8337 _collector->_modUnionTable.mark(addr);
8338 }
8339 _collector->_ser_kac_preclean_ovflw++;
8340 } else {
8341 _collector->push_on_overflow_list(obj);
8342 _collector->_ser_kac_ovflw++;
8343 }
8344 }
8345 }
8346 }
8348 void CMSKeepAliveClosure::do_oop(oop* p) { CMSKeepAliveClosure::do_oop_work(p); }
8349 void CMSKeepAliveClosure::do_oop(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
8351 // CMSParKeepAliveClosure: a parallel version of the above.
8352 // The work queues are private to each closure (thread),
8353 // but (may be) available for stealing by other threads.
8354 void CMSParKeepAliveClosure::do_oop(oop obj) {
8355 HeapWord* addr = (HeapWord*)obj;
8356 if (_span.contains(addr) &&
8357 !_bit_map->isMarked(addr)) {
8358 // In general, during recursive tracing, several threads
8359 // may be concurrently getting here; the first one to
8360 // "tag" it, claims it.
8361 if (_bit_map->par_mark(addr)) {
8362 bool res = _work_queue->push(obj);
8363 assert(res, "Low water mark should be much less than capacity");
8364 // Do a recursive trim in the hope that this will keep
8365 // stack usage lower, but leave some oops for potential stealers
8366 trim_queue(_low_water_mark);
8367 } // Else, another thread got there first
8368 }
8369 }
8371 void CMSParKeepAliveClosure::do_oop(oop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
8372 void CMSParKeepAliveClosure::do_oop(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
8374 void CMSParKeepAliveClosure::trim_queue(uint max) {
8375 while (_work_queue->size() > max) {
8376 oop new_oop;
8377 if (_work_queue->pop_local(new_oop)) {
8378 assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
8379 assert(_bit_map->isMarked((HeapWord*)new_oop),
8380 "no white objects on this stack!");
8381 assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
8382 // iterate over the oops in this oop, marking and pushing
8383 // the ones in CMS heap (i.e. in _span).
8384 new_oop->oop_iterate(&_mark_and_push);
8385 }
8386 }
8387 }
8389 void CMSInnerParMarkAndPushClosure::do_oop(oop obj) {
8390 HeapWord* addr = (HeapWord*)obj;
8391 if (_span.contains(addr) &&
8392 !_bit_map->isMarked(addr)) {
8393 if (_bit_map->par_mark(addr)) {
8394 bool simulate_overflow = false;
8395 NOT_PRODUCT(
8396 if (CMSMarkStackOverflowALot &&
8397 _collector->par_simulate_overflow()) {
8398 // simulate a stack overflow
8399 simulate_overflow = true;
8400 }
8401 )
8402 if (simulate_overflow || !_work_queue->push(obj)) {
8403 _collector->par_push_on_overflow_list(obj);
8404 _collector->_par_kac_ovflw++;
8405 }
8406 } // Else another thread got there already
8407 }
8408 }
8410 void CMSInnerParMarkAndPushClosure::do_oop(oop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
8411 void CMSInnerParMarkAndPushClosure::do_oop(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
8413 //////////////////////////////////////////////////////////////////
8414 // CMSExpansionCause /////////////////////////////
8415 //////////////////////////////////////////////////////////////////
8416 const char* CMSExpansionCause::to_string(CMSExpansionCause::Cause cause) {
8417 switch (cause) {
8418 case _no_expansion:
8419 return "No expansion";
8420 case _satisfy_free_ratio:
8421 return "Free ratio";
8422 case _satisfy_promotion:
8423 return "Satisfy promotion";
8424 case _satisfy_allocation:
8425 return "allocation";
8426 case _allocate_par_lab:
8427 return "Par LAB";
8428 case _allocate_par_spooling_space:
8429 return "Par Spooling Space";
8430 case _adaptive_size_policy:
8431 return "Ergonomics";
8432 default:
8433 return "unknown";
8434 }
8435 }
8437 void CMSDrainMarkingStackClosure::do_void() {
8438 // the max number to take from overflow list at a time
8439 const size_t num = _mark_stack->capacity()/4;
8440 assert(!_concurrent_precleaning || _collector->overflow_list_is_empty(),
8441 "Overflow list should be NULL during concurrent phases");
8442 while (!_mark_stack->isEmpty() ||
8443 // if stack is empty, check the overflow list
8444 _collector->take_from_overflow_list(num, _mark_stack)) {
8445 oop obj = _mark_stack->pop();
8446 HeapWord* addr = (HeapWord*)obj;
8447 assert(_span.contains(addr), "Should be within span");
8448 assert(_bit_map->isMarked(addr), "Should be marked");
8449 assert(obj->is_oop(), "Should be an oop");
8450 obj->oop_iterate(_keep_alive);
8451 }
8452 }
8454 void CMSParDrainMarkingStackClosure::do_void() {
8455 // drain queue
8456 trim_queue(0);
8457 }
8459 // Trim our work_queue so its length is below max at return
8460 void CMSParDrainMarkingStackClosure::trim_queue(uint max) {
8461 while (_work_queue->size() > max) {
8462 oop new_oop;
8463 if (_work_queue->pop_local(new_oop)) {
8464 assert(new_oop->is_oop(), "Expected an oop");
8465 assert(_bit_map->isMarked((HeapWord*)new_oop),
8466 "no white objects on this stack!");
8467 assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
8468 // iterate over the oops in this oop, marking and pushing
8469 // the ones in CMS heap (i.e. in _span).
8470 new_oop->oop_iterate(&_mark_and_push);
8471 }
8472 }
8473 }
8475 ////////////////////////////////////////////////////////////////////
8476 // Support for Marking Stack Overflow list handling and related code
8477 ////////////////////////////////////////////////////////////////////
8478 // Much of the following code is similar in shape and spirit to the
8479 // code used in ParNewGC. We should try to share that code
8480 // as much as possible in the future.
8482 #ifndef PRODUCT
8483 // Debugging support for CMSStackOverflowALot
8485 // It's OK to call this multi-threaded; the worst thing
8486 // that can happen is that we'll get a bunch of closely
8487 // spaced simulated overflows, but that's OK, in fact
8488 // probably good as it would exercise the overflow code
8489 // under contention.
8490 bool CMSCollector::simulate_overflow() {
8491 if (_overflow_counter-- <= 0) { // just being defensive
8492 _overflow_counter = CMSMarkStackOverflowInterval;
8493 return true;
8494 } else {
8495 return false;
8496 }
8497 }
8499 bool CMSCollector::par_simulate_overflow() {
8500 return simulate_overflow();
8501 }
8502 #endif
8504 // Single-threaded
8505 bool CMSCollector::take_from_overflow_list(size_t num, CMSMarkStack* stack) {
8506 assert(stack->isEmpty(), "Expected precondition");
8507 assert(stack->capacity() > num, "Shouldn't bite more than we can chew");
8508 size_t i = num;
8509 oop cur = _overflow_list;
8510 const markOop proto = markOopDesc::prototype();
8511 NOT_PRODUCT(ssize_t n = 0;)
8512 for (oop next; i > 0 && cur != NULL; cur = next, i--) {
8513 next = oop(cur->mark());
8514 cur->set_mark(proto); // until proven otherwise
8515 assert(cur->is_oop(), "Should be an oop");
8516 bool res = stack->push(cur);
8517 assert(res, "Bit off more than we can chew?");
8518 NOT_PRODUCT(n++;)
8519 }
8520 _overflow_list = cur;
8521 #ifndef PRODUCT
8522 assert(_num_par_pushes >= n, "Too many pops?");
8523 _num_par_pushes -= n;
8524 #endif
8525 return !stack->isEmpty();
8526 }
8528 #define BUSY (oop(0x1aff1aff))
8529 // (MT-safe) Get a prefix of at most "num" from the list.
8530 // The overflow list is chained through the mark word of
8531 // each object in the list. We fetch the entire list,
8532 // break off a prefix of the right size and return the
8533 // remainder. If other threads try to take objects from
8534 // the overflow list at that time, they will wait for
8535 // some time to see if data becomes available. If (and
8536 // only if) another thread places one or more object(s)
8537 // on the global list before we have returned the suffix
8538 // to the global list, we will walk down our local list
8539 // to find its end and append the global list to
8540 // our suffix before returning it. This suffix walk can
8541 // prove to be expensive (quadratic in the amount of traffic)
8542 // when there are many objects in the overflow list and
8543 // there is much producer-consumer contention on the list.
8544 // *NOTE*: The overflow list manipulation code here and
8545 // in ParNewGeneration:: are very similar in shape,
8546 // except that in the ParNew case we use the old (from/eden)
8547 // copy of the object to thread the list via its klass word.
8548 // Because of the common code, if you make any changes in
8549 // the code below, please check the ParNew version to see if
8550 // similar changes might be needed.
8551 // CR 6797058 has been filed to consolidate the common code.
8552 bool CMSCollector::par_take_from_overflow_list(size_t num,
8553 OopTaskQueue* work_q) {
8554 assert(work_q->size() == 0, "First empty local work queue");
8555 assert(num < work_q->max_elems(), "Can't bite more than we can chew");
8556 if (_overflow_list == NULL) {
8557 return false;
8558 }
8559 // Grab the entire list; we'll put back a suffix
8560 oop prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
8561 Thread* tid = Thread::current();
8562 size_t CMSOverflowSpinCount = (size_t)ParallelGCThreads;
8563 size_t sleep_time_millis = MAX2((size_t)1, num/100);
8564 // If the list is busy, we spin for a short while,
8565 // sleeping between attempts to get the list.
8566 for (size_t spin = 0; prefix == BUSY && spin < CMSOverflowSpinCount; spin++) {
8567 os::sleep(tid, sleep_time_millis, false);
8568 if (_overflow_list == NULL) {
8569 // Nothing left to take
8570 return false;
8571 } else if (_overflow_list != BUSY) {
8572 // Try and grab the prefix
8573 prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
8574 }
8575 }
8576 // If the list was found to be empty, or we spun long
8577 // enough, we give up and return empty-handed. If we leave
8578 // the list in the BUSY state below, it must be the case that
8579 // some other thread holds the overflow list and will set it
8580 // to a non-BUSY state in the future.
8581 if (prefix == NULL || prefix == BUSY) {
8582 // Nothing to take or waited long enough
8583 if (prefix == NULL) {
8584 // Write back the NULL in case we overwrote it with BUSY above
8585 // and it is still the same value.
8586 (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
8587 }
8588 return false;
8589 }
8590 assert(prefix != NULL && prefix != BUSY, "Error");
8591 size_t i = num;
8592 oop cur = prefix;
8593 // Walk down the first "num" objects, unless we reach the end.
8594 for (; i > 1 && cur->mark() != NULL; cur = oop(cur->mark()), i--);
8595 if (cur->mark() == NULL) {
8596 // We have "num" or fewer elements in the list, so there
8597 // is nothing to return to the global list.
8598 // Write back the NULL in lieu of the BUSY we wrote
8599 // above, if it is still the same value.
8600 if (_overflow_list == BUSY) {
8601 (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
8602 }
8603 } else {
8604 // Chop off the suffix and return it to the global list.
8605 assert(cur->mark() != BUSY, "Error");
8606 oop suffix_head = cur->mark(); // suffix will be put back on global list
8607 cur->set_mark(NULL); // break off suffix
8608 // It's possible that the list is still in the empty (busy) state
8609 // we left it in a short while ago; in that case we may be
8610 // able to place back the suffix without incurring the cost
8611 // of a walk down the list.
8612 oop observed_overflow_list = _overflow_list;
8613 oop cur_overflow_list = observed_overflow_list;
8614 bool attached = false;
8615 while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
8616 observed_overflow_list =
8617 (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
8618 if (cur_overflow_list == observed_overflow_list) {
8619 attached = true;
8620 break;
8621 } else cur_overflow_list = observed_overflow_list;
8622 }
8623 if (!attached) {
8624 // Too bad, someone else sneaked in (at least) an element; we'll need
8625 // to do a splice. Find tail of suffix so we can prepend suffix to global
8626 // list.
8627 for (cur = suffix_head; cur->mark() != NULL; cur = (oop)(cur->mark()));
8628 oop suffix_tail = cur;
8629 assert(suffix_tail != NULL && suffix_tail->mark() == NULL,
8630 "Tautology");
8631 observed_overflow_list = _overflow_list;
8632 do {
8633 cur_overflow_list = observed_overflow_list;
8634 if (cur_overflow_list != BUSY) {
8635 // Do the splice ...
8636 suffix_tail->set_mark(markOop(cur_overflow_list));
8637 } else { // cur_overflow_list == BUSY
8638 suffix_tail->set_mark(NULL);
8639 }
8640 // ... and try to place spliced list back on overflow_list ...
8641 observed_overflow_list =
8642 (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
8643 } while (cur_overflow_list != observed_overflow_list);
8644 // ... until we have succeeded in doing so.
8645 }
8646 }
8648 // Push the prefix elements on work_q
8649 assert(prefix != NULL, "control point invariant");
8650 const markOop proto = markOopDesc::prototype();
8651 oop next;
8652 NOT_PRODUCT(ssize_t n = 0;)
8653 for (cur = prefix; cur != NULL; cur = next) {
8654 next = oop(cur->mark());
8655 cur->set_mark(proto); // until proven otherwise
8656 assert(cur->is_oop(), "Should be an oop");
8657 bool res = work_q->push(cur);
8658 assert(res, "Bit off more than we can chew?");
8659 NOT_PRODUCT(n++;)
8660 }
8661 #ifndef PRODUCT
8662 assert(_num_par_pushes >= n, "Too many pops?");
8663 Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
8664 #endif
8665 return true;
8666 }
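
// Illustrative sketch (not part of the original file): the take-protocol
// above is easier to see in miniature. Below, std::atomic stands in for
// Atomic::xchg_ptr/cmpxchg_ptr and a plain linked list stands in for oops
// threaded through their mark words. All names (Node, g_list, kBusy,
// take_prefix) are hypothetical.
#include <atomic>
#include <cstdint>
#include <cstddef>

struct Node { Node* next; };
static Node* const kBusy = reinterpret_cast<Node*>(std::uintptr_t(1));
static std::atomic<Node*> g_list(nullptr);

// Detach the whole list, keep the first num nodes, and try to CAS the
// remainder back -- the prefix/suffix split performed above.
static Node* take_prefix(std::size_t num) {
  Node* prefix = g_list.exchange(kBusy);          // list now reads BUSY
  if (prefix == nullptr) {
    Node* e = kBusy;                              // restore the NULL we
    g_list.compare_exchange_strong(e, nullptr);   // clobbered with BUSY
    return nullptr;
  }
  if (prefix == kBusy) return nullptr;            // another taker holds it;
                                                  // the real code spins and
                                                  // sleeps before giving up
  Node* cur = prefix;
  for (std::size_t i = num; i > 1 && cur->next != nullptr; i--)
    cur = cur->next;
  Node* suffix = cur->next;
  cur->next = nullptr;                            // break off the suffix
  Node* expected = kBusy;
  if (!g_list.compare_exchange_strong(expected, suffix)) {
    // A push slipped in while we held BUSY; the real code then walks the
    // suffix to its tail and splices it onto the global list, as in the
    // loop above -- the "quadratic traffic" cost the comment warns of.
  }
  return prefix;
}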
8668 // Single-threaded
8669 void CMSCollector::push_on_overflow_list(oop p) {
8670 NOT_PRODUCT(_num_par_pushes++;)
8671 assert(p->is_oop(), "Not an oop");
8672 preserve_mark_if_necessary(p);
8673 p->set_mark((markOop)_overflow_list);
8674 _overflow_list = p;
8675 }
8677 // Multi-threaded; use CAS to prepend to overflow list
8678 void CMSCollector::par_push_on_overflow_list(oop p) {
8679 NOT_PRODUCT(Atomic::inc_ptr(&_num_par_pushes);)
8680 assert(p->is_oop(), "Not an oop");
8681 par_preserve_mark_if_necessary(p);
8682 oop observed_overflow_list = _overflow_list;
8683 oop cur_overflow_list;
8684 do {
8685 cur_overflow_list = observed_overflow_list;
8686 if (cur_overflow_list != BUSY) {
8687 p->set_mark(markOop(cur_overflow_list));
8688 } else {
8689 p->set_mark(NULL);
8690 }
8691 observed_overflow_list =
8692 (oop) Atomic::cmpxchg_ptr(p, &_overflow_list, cur_overflow_list);
8693 } while (cur_overflow_list != observed_overflow_list);
8694 }
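
// Illustrative sketch (not part of the original file): the push above is
// the classic lock-free prepend. Reusing the hypothetical Node/g_list/kBusy
// declarations from the earlier sketch, it reduces to:
static void par_push(Node* p) {
  Node* observed = g_list.load();
  Node* cur;
  do {
    cur = observed;
    // Never link through the BUSY marker; a node pushed onto a BUSY list
    // starts a fresh list, and the taker splices later.
    p->next = (cur == kBusy) ? nullptr : cur;
  } while (!g_list.compare_exchange_weak(observed, p));
  // compare_exchange_weak refreshes `observed` on failure, so each retry
  // sees the new head, just as the cmpxchg_ptr loop above re-reads the
  // returned value.
}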
8695 #undef BUSY
8697 // Single threaded
8698 // General Note on GrowableArray: pushes may silently fail
8699 // because we are (temporarily) out of C-heap for expanding
8700 // the stack. The problem is quite ubiquitous and affects
8701 // a lot of code in the JVM. The prudent thing for GrowableArray
8702 // to do (for now) is to exit with an error. However, that may
8703 // be too draconian in some cases because the caller may be
8704 // able to recover without much harm. For such cases, we
8705 // should probably introduce a "soft_push" method which returns
8706 // an indication of success or failure with the assumption that
8707 // the caller may be able to recover from a failure; code in
8708 // the VM can then be changed to deal with such
8709 // failures where possible, thus incrementally hardening the VM
8710 // in such low-resource situations.
8711 void CMSCollector::preserve_mark_work(oop p, markOop m) {
8712 if (_preserved_oop_stack == NULL) {
8713 assert(_preserved_mark_stack == NULL,
8714 "bijection with preserved_oop_stack");
8715 // Allocate the stacks
8716 _preserved_oop_stack = new (ResourceObj::C_HEAP)
8717 GrowableArray<oop>(PreserveMarkStackSize, true);
8718 _preserved_mark_stack = new (ResourceObj::C_HEAP)
8719 GrowableArray<markOop>(PreserveMarkStackSize, true);
8720 if (_preserved_oop_stack == NULL || _preserved_mark_stack == NULL) {
8721       vm_exit_out_of_memory(2 * PreserveMarkStackSize * sizeof(oop) /* punt */,
8722 "Preserved Mark/Oop Stack for CMS (C-heap)");
8723 }
8724 }
8725 _preserved_oop_stack->push(p);
8726 _preserved_mark_stack->push(m);
8727 assert(m == p->mark(), "Mark word changed");
8728 assert(_preserved_oop_stack->length() == _preserved_mark_stack->length(),
8729 "bijection");
8730 }
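
// Illustrative sketch (not part of the original file): the "soft_push"
// suggested in the GrowableArray note above might look like the fragment
// below, written against GrowableArray's _len/_max/_data fields. Entirely
// hypothetical: no such method exists, and a real version would need a
// non-fatal try_grow() inside GrowableArray, since today's grow path exits
// the VM on C-heap exhaustion rather than reporting failure.
//
//   bool GrowableArray<E>::soft_push(const E& elem) {
//     if (_len == _max && !try_grow()) {
//       return false;              // expansion failed; caller may recover
//     }
//     _data[_len++] = elem;
//     return true;
//   }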
8732 // Single threaded
8733 void CMSCollector::preserve_mark_if_necessary(oop p) {
8734 markOop m = p->mark();
8735 if (m->must_be_preserved(p)) {
8736 preserve_mark_work(p, m);
8737 }
8738 }
8740 void CMSCollector::par_preserve_mark_if_necessary(oop p) {
8741 markOop m = p->mark();
8742 if (m->must_be_preserved(p)) {
8743 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
8744 // Even though we read the mark word without holding
8745 // the lock, we are assured that it will not change
8746 // because we "own" this oop, so no other thread can
8747 // be trying to push it on the overflow list; see
8748 // the assertion in preserve_mark_work() that checks
8749 // that m == p->mark().
8750 preserve_mark_work(p, m);
8751 }
8752 }
8754 // We should be able to do this multi-threaded,
8755 // a chunk of stack being a task (this is
8756 // correct because each oop only ever appears
8757 // once in the overflow list). However, it's
8758 // not very easy to completely overlap this with
8759 // other operations, so it will generally not be done
8760 // until all work's been completed. Because we
8761 // expect the preserved oop stack (set) to be small,
8762 // it's probably fine to do this single-threaded.
8763 // We can explore cleverer concurrent/overlapped/parallel
8764 // processing of preserved marks if we feel the
8765 // need for this in the future. Stack overflow should
8766 // be so rare in practice and, when it happens, its
8767 // effect on performance so great that this will
8768 // likely just be in the noise anyway.
8769 void CMSCollector::restore_preserved_marks_if_any() {
8770 if (_preserved_oop_stack == NULL) {
8771 assert(_preserved_mark_stack == NULL,
8772 "bijection with preserved_oop_stack");
8773 return;
8774 }
8776 assert(SafepointSynchronize::is_at_safepoint(),
8777 "world should be stopped");
8778 assert(Thread::current()->is_ConcurrentGC_thread() ||
8779 Thread::current()->is_VM_thread(),
8780 "should be single-threaded");
8782 int length = _preserved_oop_stack->length();
8783 assert(_preserved_mark_stack->length() == length, "bijection");
8784 for (int i = 0; i < length; i++) {
8785 oop p = _preserved_oop_stack->at(i);
8786 assert(p->is_oop(), "Should be an oop");
8787 assert(_span.contains(p), "oop should be in _span");
8788 assert(p->mark() == markOopDesc::prototype(),
8789 "Set when taken from overflow list");
8790 markOop m = _preserved_mark_stack->at(i);
8791 p->set_mark(m);
8792 }
8793 _preserved_mark_stack->clear();
8794 _preserved_oop_stack->clear();
8795 assert(_preserved_mark_stack->is_empty() &&
8796 _preserved_oop_stack->is_empty(),
8797 "stacks were cleared above");
8798 }
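
// Illustrative sketch (not part of the original file): if the parallel
// restoration contemplated above were ever wanted, the natural task is one
// contiguous slice of the two stacks -- each (oop, mark) pair is touched
// exactly once, so slices need no synchronization beyond claiming an index
// range. A hypothetical per-task body, using the same types as the loop
// above:
static void restore_slice(GrowableArray<oop>* oops,
                          GrowableArray<markOop>* marks,
                          int from, int to) {
  for (int i = from; i < to; i++) {
    oops->at(i)->set_mark(marks->at(i));  // pairs are independent
  }
}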
8800 #ifndef PRODUCT
8801 bool CMSCollector::no_preserved_marks() const {
8802 return ( ( _preserved_mark_stack == NULL
8803 && _preserved_oop_stack == NULL)
8804 || ( _preserved_mark_stack->is_empty()
8805 && _preserved_oop_stack->is_empty()));
8806 }
8807 #endif
8809 CMSAdaptiveSizePolicy* ASConcurrentMarkSweepGeneration::cms_size_policy() const
8810 {
8811 GenCollectedHeap* gch = (GenCollectedHeap*) GenCollectedHeap::heap();
8812 CMSAdaptiveSizePolicy* size_policy =
8813 (CMSAdaptiveSizePolicy*) gch->gen_policy()->size_policy();
8814 assert(size_policy->is_gc_cms_adaptive_size_policy(),
8815 "Wrong type for size policy");
8816 return size_policy;
8817 }
8819 void ASConcurrentMarkSweepGeneration::resize(size_t cur_promo_size,
8820 size_t desired_promo_size) {
8821 if (cur_promo_size < desired_promo_size) {
8822 size_t expand_bytes = desired_promo_size - cur_promo_size;
8823 if (PrintAdaptiveSizePolicy && Verbose) {
8824 gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
8825 "Expanding tenured generation by " SIZE_FORMAT " (bytes)",
8826 expand_bytes);
8827 }
8828 expand(expand_bytes,
8829 MinHeapDeltaBytes,
8830 CMSExpansionCause::_adaptive_size_policy);
8831 } else if (desired_promo_size < cur_promo_size) {
8832 size_t shrink_bytes = cur_promo_size - desired_promo_size;
8833 if (PrintAdaptiveSizePolicy && Verbose) {
8834 gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
8835 "Shrinking tenured generation by " SIZE_FORMAT " (bytes)",
8836 shrink_bytes);
8837 }
8838 shrink(shrink_bytes);
8839 }
8840 }
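
// Worked example (hypothetical numbers, not part of the original file):
// if cur_promo_size is 100M and desired_promo_size is 120M, the generation
// expands by 20M, with MinHeapDeltaBytes passed as the minimum expansion
// delta; if desired were 80M instead, it would attempt to shrink by 20M --
// best-effort only, as shrink_by() below shows.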
8842 CMSGCAdaptivePolicyCounters* ASConcurrentMarkSweepGeneration::gc_adaptive_policy_counters() {
8843 GenCollectedHeap* gch = GenCollectedHeap::heap();
8844 CMSGCAdaptivePolicyCounters* counters =
8845 (CMSGCAdaptivePolicyCounters*) gch->collector_policy()->counters();
8846 assert(counters->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
8847 "Wrong kind of counters");
8848 return counters;
8849 }
8852 void ASConcurrentMarkSweepGeneration::update_counters() {
8853 if (UsePerfData) {
8854 _space_counters->update_all();
8855 _gen_counters->update_all();
8856 CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
8857 GenCollectedHeap* gch = GenCollectedHeap::heap();
8858 CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
8859 assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
8860 "Wrong gc statistics type");
8861 counters->update_counters(gc_stats_l);
8862 }
8863 }
8865 void ASConcurrentMarkSweepGeneration::update_counters(size_t used) {
8866 if (UsePerfData) {
8867 _space_counters->update_used(used);
8868 _space_counters->update_capacity();
8869 _gen_counters->update_all();
8871 CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
8872 GenCollectedHeap* gch = GenCollectedHeap::heap();
8873 CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
8874 assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
8875 "Wrong gc statistics type");
8876 counters->update_counters(gc_stats_l);
8877 }
8878 }
8880 // The desired expansion delta is computed so that:
8881 // . at least the desired free percentage is available
8882 void ASConcurrentMarkSweepGeneration::compute_new_size() {
8883 assert_locked_or_safepoint(Heap_lock);
8885 GenCollectedHeap* gch = (GenCollectedHeap*) GenCollectedHeap::heap();
8887 // If incremental collection failed, we just want to expand
8888 // to the limit.
8889 if (incremental_collection_failed()) {
8890 clear_incremental_collection_failed();
8891 grow_to_reserved();
8892 return;
8893 }
8895 assert(UseAdaptiveSizePolicy, "Should be using adaptive sizing");
8897 assert(gch->kind() == CollectedHeap::GenCollectedHeap,
8898 "Wrong type of heap");
8899 int prev_level = level() - 1;
8900 assert(prev_level >= 0, "The cms generation is the lowest generation");
8901 Generation* prev_gen = gch->get_gen(prev_level);
8902 assert(prev_gen->kind() == Generation::ASParNew,
8903 "Wrong type of young generation");
8904 ParNewGeneration* younger_gen = (ParNewGeneration*) prev_gen;
8905 size_t cur_eden = younger_gen->eden()->capacity();
8906 CMSAdaptiveSizePolicy* size_policy = cms_size_policy();
8907 size_t cur_promo = free();
8908 size_policy->compute_tenured_generation_free_space(cur_promo,
8909 max_available(),
8910 cur_eden);
8911 resize(cur_promo, size_policy->promo_size());
8913 // Record the new size of the space in the cms generation
8914 // that is available for promotions. This is temporary.
8915 // It should be the desired promo size.
8916 size_policy->avg_cms_promo()->sample(free());
8917 size_policy->avg_old_live()->sample(used());
8919 if (UsePerfData) {
8920 CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
8921 counters->update_cms_capacity_counter(capacity());
8922 }
8923 }
8925 void ASConcurrentMarkSweepGeneration::shrink_by(size_t desired_bytes) {
8926 assert_locked_or_safepoint(Heap_lock);
8927 assert_lock_strong(freelistLock());
8928 HeapWord* old_end = _cmsSpace->end();
8929 HeapWord* unallocated_start = _cmsSpace->unallocated_block();
8930 assert(old_end >= unallocated_start, "Miscalculation of unallocated_start");
8931 FreeChunk* chunk_at_end = find_chunk_at_end();
8932 if (chunk_at_end == NULL) {
8933 // No room to shrink
8934 if (PrintGCDetails && Verbose) {
8935 gclog_or_tty->print_cr("No room to shrink: old_end "
8936 PTR_FORMAT " unallocated_start " PTR_FORMAT
8937 " chunk_at_end " PTR_FORMAT,
8938 old_end, unallocated_start, chunk_at_end);
8939 }
8940 return;
8941 } else {
8943 // Find the chunk at the end of the space and determine
8944 // how much it can be shrunk.
8945 size_t shrinkable_size_in_bytes = chunk_at_end->size();
8946 size_t aligned_shrinkable_size_in_bytes =
8947 align_size_down(shrinkable_size_in_bytes, os::vm_page_size());
8948 assert(unallocated_start <= chunk_at_end->end(),
8949 "Inconsistent chunk at end of space");
8950 size_t bytes = MIN2(desired_bytes, aligned_shrinkable_size_in_bytes);
8951 size_t word_size_before = heap_word_size(_virtual_space.committed_size());
8953 // Shrink the underlying space
8954 _virtual_space.shrink_by(bytes);
8955 if (PrintGCDetails && Verbose) {
8956 gclog_or_tty->print_cr("ConcurrentMarkSweepGeneration::shrink_by:"
8957 " desired_bytes " SIZE_FORMAT
8958 " shrinkable_size_in_bytes " SIZE_FORMAT
8959 " aligned_shrinkable_size_in_bytes " SIZE_FORMAT
8960 " bytes " SIZE_FORMAT,
8961 desired_bytes, shrinkable_size_in_bytes,
8962 aligned_shrinkable_size_in_bytes, bytes);
8963       gclog_or_tty->print_cr("  old_end  " PTR_FORMAT
8964         "  unallocated_start  " PTR_FORMAT,
8965         old_end, unallocated_start);
8966 }
8968 // If the space did shrink (shrinking is not guaranteed),
8969 // shrink the chunk at the end by the appropriate amount.
8970 if (((HeapWord*)_virtual_space.high()) < old_end) {
8971 size_t new_word_size =
8972 heap_word_size(_virtual_space.committed_size());
8974 // Have to remove the chunk from the dictionary because it is changing
8975       // size and might be somewhere else in the dictionary.
8977 // Get the chunk at end, shrink it, and put it
8978 // back.
8979 _cmsSpace->removeChunkFromDictionary(chunk_at_end);
8980 size_t word_size_change = word_size_before - new_word_size;
8981 size_t chunk_at_end_old_size = chunk_at_end->size();
8982 assert(chunk_at_end_old_size >= word_size_change,
8983 "Shrink is too large");
8984 chunk_at_end->setSize(chunk_at_end_old_size -
8985 word_size_change);
8986 _cmsSpace->freed((HeapWord*) chunk_at_end->end(),
8987 word_size_change);
8989 _cmsSpace->returnChunkToDictionary(chunk_at_end);
8991 MemRegion mr(_cmsSpace->bottom(), new_word_size);
8992 _bts->resize(new_word_size); // resize the block offset shared array
8993 Universe::heap()->barrier_set()->resize_covered_region(mr);
8994 _cmsSpace->assert_locked();
8995 _cmsSpace->set_end((HeapWord*)_virtual_space.high());
8997 NOT_PRODUCT(_cmsSpace->dictionary()->verify());
8999 // update the space and generation capacity counters
9000 if (UsePerfData) {
9001 _space_counters->update_capacity();
9002 _gen_counters->update_all();
9003 }
9005 if (Verbose && PrintGCDetails) {
9006 size_t new_mem_size = _virtual_space.committed_size();
9007 size_t old_mem_size = new_mem_size + bytes;
9008         gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
9009           name(), old_mem_size/K, bytes/K, new_mem_size/K);
9010 }
9011 }
9013 assert(_cmsSpace->unallocated_block() <= _cmsSpace->end(),
9014 "Inconsistency at end of space");
9015 assert(chunk_at_end->end() == _cmsSpace->end(),
9016 "Shrinking is inconsistent");
9017 return;
9018 }
9019 }
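
// Illustrative sketch (not part of the original file): the page alignment
// in shrink_by() is the usual power-of-two round-down; align_size_down(x, a)
// behaves like the standalone model below (the name align_down is
// hypothetical).
#include <cassert>

static std::size_t align_down(std::size_t size, std::size_t alignment) {
  assert(alignment != 0 && (alignment & (alignment - 1)) == 0);  // power of 2
  return size & ~(alignment - 1);  // clear the low bits
}
// e.g. align_down(10000, 4096) == 8192: only whole pages at the end of the
// chunk can be uncommitted; the remainder stays in the chunk.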
9021 // Transfer some number of overflowed objects to the usual marking
9022 // stack. Return true if some objects were transferred.
9023 bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
9024 size_t num = MIN2((size_t)_mark_stack->capacity()/4,
9025 (size_t)ParGCDesiredObjsFromOverflowList);
9027 bool res = _collector->take_from_overflow_list(num, _mark_stack);
9028 assert(_collector->overflow_list_is_empty() || res,
9029 "If list is not empty, we should have taken something");
9030 assert(!res || !_mark_stack->isEmpty(),
9031 "If we took something, it should now be on our stack");
9032 return res;
9033 }
9035 size_t MarkDeadObjectsClosure::do_blk(HeapWord* addr) {
9036 size_t res = _sp->block_size_no_stall(addr, _collector);
9037 assert(res != 0, "Should always be able to compute a size");
9038 if (_sp->block_is_obj(addr)) {
9039 if (_live_bit_map->isMarked(addr)) {
9040 // It can't have been dead in a previous cycle
9041 guarantee(!_dead_bit_map->isMarked(addr), "No resurrection!");
9042 } else {
9043 _dead_bit_map->mark(addr); // mark the dead object
9044 }
9045 }
9046 return res;
9047 }