Thu, 05 Jun 2008 15:57:56 -0700
6711316: Open source the Garbage-First garbage collector
Summary: First mercurial integration of the code for the Garbage-First garbage collector.
Reviewed-by: apetrusenko, iveresov, jmasa, sgoldman, tonyp, ysr
1 /*
2 * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
25 # include "incls/_precompiled.incl"
26 # include "incls/_concurrentMarkSweepGeneration.cpp.incl"
28 // statics
29 CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
30 bool CMSCollector::_full_gc_requested = false;
32 //////////////////////////////////////////////////////////////////
33 // In support of CMS/VM thread synchronization
34 //////////////////////////////////////////////////////////////////
35 // We split use of the CGC_lock into 2 "levels".
36 // The low-level locking is of the usual CGC_lock monitor. We introduce
37 // a higher level "token" (hereafter "CMS token") built on top of the
38 // low level monitor (hereafter "CGC lock").
39 // The token-passing protocol gives priority to the VM thread. The
40 // CMS-lock doesn't provide any fairness guarantees, but clients
41 // should ensure that it is only held for very short, bounded
42 // durations.
43 //
44 // When either the CMS thread or the VM thread is involved in
45 // collection operations during which it does not want the other
46 // thread to interfere, it obtains the CMS token.
47 //
48 // If either thread tries to get the token while the other has
49 // it, that thread waits. However, if the VM thread and CMS thread
50 // both want the token, then the VM thread gets priority while the
51 // CMS thread waits. This ensures, for instance, that the "concurrent"
52 // phases of the CMS thread's work do not block out the VM thread
53 // for long periods of time as the CMS thread continues to hog
54 // the token. (See bug 4616232).
55 //
56 // The baton-passing functions are, however, controlled by the
57 // flags _foregroundGCShouldWait and _foregroundGCIsActive,
58 // and here the low-level CMS lock, not the high level token,
59 // ensures mutual exclusion.
60 //
61 // Two important conditions that we have to satisfy:
62 // 1. if a thread does a low-level wait on the CMS lock, then it
63 // relinquishes the CMS token if it was holding that token
64 // when it acquired the low-level CMS lock.
65 // 2. any low-level notifications on the low-level lock
66 // should only be sent when a thread has relinquished the token.
67 //
68 // In the absence of either property, we'd have potential deadlock.
69 //
70 // We protect each of the CMS (concurrent and sequential) phases
71 // with the CMS _token_, not the CMS _lock_.
72 //
73 // The only code protected by the CMS lock is the token acquisition code
74 // itself, see ConcurrentMarkSweepThread::[de]synchronize(), and the
75 // baton-passing code.
76 //
77 // Unfortunately, I couldn't come up with a good abstraction to factor and
78 // hide the naked CGC_lock manipulation in the baton-passing code
79 // further below. That's something we should try to do. Also, the proof
80 // of correctness of this 2-level locking scheme is far from obvious,
81 // and potentially quite slippery. We have an uneasy suspicion, for instance,
82 // that there may be a theoretical possibility of delay/starvation in the
83 // low-level lock/wait/notify scheme used for the baton-passing because of
84 // potential interference with the priority scheme embodied in the
85 // CMS-token-passing protocol. See related comments at a CGC_lock->wait()
86 // invocation further below and marked with "XXX 20011219YSR".
87 // Indeed, as we note elsewhere, this may become yet more slippery
88 // in the presence of multiple CMS and/or multiple VM threads. XXX
90 class CMSTokenSync: public StackObj {
91 private:
92 bool _is_cms_thread;
93 public:
94 CMSTokenSync(bool is_cms_thread):
95 _is_cms_thread(is_cms_thread) {
96 assert(is_cms_thread == Thread::current()->is_ConcurrentGC_thread(),
97 "Incorrect argument to constructor");
98 ConcurrentMarkSweepThread::synchronize(_is_cms_thread);
99 }
101 ~CMSTokenSync() {
102 assert(_is_cms_thread ?
103 ConcurrentMarkSweepThread::cms_thread_has_cms_token() :
104 ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
105 "Incorrect state");
106 ConcurrentMarkSweepThread::desynchronize(_is_cms_thread);
107 }
108 };
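// Illustrative usage sketch (hypothetical call site): a CMS-side phase
// that must exclude the VM thread would typically be bracketed in a
// scoped CMSTokenSync, e.g.
//   {
//     CMSTokenSync ts(true /* is_cms_thread */);
//     // ... work requiring the CMS token ...
//   }   // token relinquished when ts goes out of scope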
110 // Convenience class that does a CMSTokenSync, and then acquires
111 // up to three locks.
112 class CMSTokenSyncWithLocks: public CMSTokenSync {
113 private:
114 // Note: locks are acquired in textual declaration order
115 // and released in the opposite order
116 MutexLockerEx _locker1, _locker2, _locker3;
117 public:
118 CMSTokenSyncWithLocks(bool is_cms_thread, Mutex* mutex1,
119 Mutex* mutex2 = NULL, Mutex* mutex3 = NULL):
120 CMSTokenSync(is_cms_thread),
121 _locker1(mutex1, Mutex::_no_safepoint_check_flag),
122 _locker2(mutex2, Mutex::_no_safepoint_check_flag),
123 _locker3(mutex3, Mutex::_no_safepoint_check_flag)
124 { }
125 };
128 // Wrapper class to temporarily disable icms during a foreground cms collection.
129 class ICMSDisabler: public StackObj {
130 public:
131 // The ctor disables icms and wakes up the thread so it notices the change;
132 // the dtor re-enables icms. Note that the CMSCollector methods will check
133 // CMSIncrementalMode.
134 ICMSDisabler() { CMSCollector::disable_icms(); CMSCollector::start_icms(); }
135 ~ICMSDisabler() { CMSCollector::enable_icms(); }
136 };
138 //////////////////////////////////////////////////////////////////
139 // Concurrent Mark-Sweep Generation /////////////////////////////
140 //////////////////////////////////////////////////////////////////
142 NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;)
144 // This struct contains per-thread things necessary to support parallel
145 // young-gen collection.
146 class CMSParGCThreadState: public CHeapObj {
147 public:
148 CFLS_LAB lab;
149 PromotionInfo promo;
151 // Constructor.
152 CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
153 promo.setSpace(cfls);
154 }
155 };
157 ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
158 ReservedSpace rs, size_t initial_byte_size, int level,
159 CardTableRS* ct, bool use_adaptive_freelists,
160 FreeBlockDictionary::DictionaryChoice dictionaryChoice) :
161 CardGeneration(rs, initial_byte_size, level, ct),
162 _dilatation_factor(((double)MinChunkSize)/((double)(oopDesc::header_size()))),
163 _debug_collection_type(Concurrent_collection_type)
164 {
165 HeapWord* bottom = (HeapWord*) _virtual_space.low();
166 HeapWord* end = (HeapWord*) _virtual_space.high();
168 _direct_allocated_words = 0;
169 NOT_PRODUCT(
170 _numObjectsPromoted = 0;
171 _numWordsPromoted = 0;
172 _numObjectsAllocated = 0;
173 _numWordsAllocated = 0;
174 )
176 _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end),
177 use_adaptive_freelists,
178 dictionaryChoice);
179 NOT_PRODUCT(debug_cms_space = _cmsSpace;)
180 if (_cmsSpace == NULL) {
181 vm_exit_during_initialization(
182 "CompactibleFreeListSpace allocation failure");
183 }
184 _cmsSpace->_gen = this;
186 _gc_stats = new CMSGCStats();
188 // Verify the assumption that FreeChunk::_prev and OopDesc::_klass
189 // offsets match. The ability to tell free chunks from objects
190 // depends on this property.
191 debug_only(
192 FreeChunk* junk = NULL;
193 assert(junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
194 "Offset of FreeChunk::_prev within FreeChunk must match"
195 " that of OopDesc::_klass within OopDesc");
196 )
197 if (ParallelGCThreads > 0) {
198 typedef CMSParGCThreadState* CMSParGCThreadStatePtr;
199 _par_gc_thread_states =
200 NEW_C_HEAP_ARRAY(CMSParGCThreadStatePtr, ParallelGCThreads);
201 if (_par_gc_thread_states == NULL) {
202 vm_exit_during_initialization("Could not allocate par gc structs");
203 }
204 for (uint i = 0; i < ParallelGCThreads; i++) {
205 _par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace());
206 if (_par_gc_thread_states[i] == NULL) {
207 vm_exit_during_initialization("Could not allocate par gc structs");
208 }
209 }
210 } else {
211 _par_gc_thread_states = NULL;
212 }
213 _incremental_collection_failed = false;
214 // The "dilatation_factor" is the expansion that can occur on
215 // account of the fact that the minimum object size in the CMS
216 // generation may be larger than that in, say, a contiguous young
217 // generation.
218 // Ideally, in the calculation below, we'd compute the dilatation
219 // factor as: MinChunkSize/(promoting_gen's min object size).
220 // Since we do not have such a general query interface for the
221 // promoting generation, we'll instead just use the minimum
222 // object size (which today is a header's worth of space);
223 // note that all arithmetic is in units of HeapWords.
224 assert(MinChunkSize >= oopDesc::header_size(), "just checking");
225 assert(_dilatation_factor >= 1.0, "from previous assert");
226 }
229 // The field "_initiating_occupancy" represents the occupancy percentage
230 // at which we trigger a new collection cycle. Unless explicitly specified
231 // via CMSInitiating[Perm]OccupancyFraction (argument "io" below), it
232 // is calculated by:
233 //
234 // Let "f" be MinHeapFreeRatio in
235 //
236 // _initiating_occupancy = 100-f +
237 // f * (CMSTrigger[Perm]Ratio/100)
238 // where CMSTrigger[Perm]Ratio is the argument "tr" below.
239 //
240 // That is, if we assume the heap is at its desired maximum occupancy at the
241 // end of a collection, we let CMSTrigger[Perm]Ratio of the (purported) free
242 // space be allocated before initiating a new collection cycle.
243 //
244 void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, intx tr) {
245 assert(io <= 100 && tr >= 0 && tr <= 100, "Check the arguments");
246 if (io >= 0) {
247 _initiating_occupancy = (double)io / 100.0;
248 } else {
249 _initiating_occupancy = ((100 - MinHeapFreeRatio) +
250 (double)(tr * MinHeapFreeRatio) / 100.0)
251 / 100.0;
252 }
253 }
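// Worked example (illustrative values): with MinHeapFreeRatio = 40,
// CMSTriggerRatio = 80 and no explicit CMSInitiatingOccupancyFraction
// (io < 0), the else-branch above computes
//   _initiating_occupancy = ((100 - 40) + (80 * 40) / 100) / 100
//                         = (60 + 32) / 100 = 0.92
// i.e. a new cycle is initiated once the generation is about 92% occupied.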
256 void ConcurrentMarkSweepGeneration::ref_processor_init() {
257 assert(collector() != NULL, "no collector");
258 collector()->ref_processor_init();
259 }
261 void CMSCollector::ref_processor_init() {
262 if (_ref_processor == NULL) {
263 // Allocate and initialize a reference processor
264 _ref_processor = ReferenceProcessor::create_ref_processor(
265 _span, // span
266 _cmsGen->refs_discovery_is_atomic(), // atomic_discovery
267 _cmsGen->refs_discovery_is_mt(), // mt_discovery
268 &_is_alive_closure,
269 ParallelGCThreads,
270 ParallelRefProcEnabled);
271 // Initialize the _ref_processor field of CMSGen
272 _cmsGen->set_ref_processor(_ref_processor);
274 // Allocate a dummy ref processor for perm gen.
275 ReferenceProcessor* rp2 = new ReferenceProcessor();
276 if (rp2 == NULL) {
277 vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
278 }
279 _permGen->set_ref_processor(rp2);
280 }
281 }
283 CMSAdaptiveSizePolicy* CMSCollector::size_policy() {
284 GenCollectedHeap* gch = GenCollectedHeap::heap();
285 assert(gch->kind() == CollectedHeap::GenCollectedHeap,
286 "Wrong type of heap");
287 CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
288 gch->gen_policy()->size_policy();
289 assert(sp->is_gc_cms_adaptive_size_policy(),
290 "Wrong type of size policy");
291 return sp;
292 }
294 CMSGCAdaptivePolicyCounters* CMSCollector::gc_adaptive_policy_counters() {
295 CMSGCAdaptivePolicyCounters* results =
296 (CMSGCAdaptivePolicyCounters*) collector_policy()->counters();
297 assert(
298 results->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
299 "Wrong gc policy counter kind");
300 return results;
301 }
304 void ConcurrentMarkSweepGeneration::initialize_performance_counters() {
306 const char* gen_name = "old";
308 // Generation Counters - generation 1, 1 subspace
309 _gen_counters = new GenerationCounters(gen_name, 1, 1, &_virtual_space);
311 _space_counters = new GSpaceCounters(gen_name, 0,
312 _virtual_space.reserved_size(),
313 this, _gen_counters);
314 }
316 CMSStats::CMSStats(ConcurrentMarkSweepGeneration* cms_gen, unsigned int alpha):
317 _cms_gen(cms_gen)
318 {
319 assert(alpha <= 100, "bad value");
320 _saved_alpha = alpha;
322 // Initialize the alphas to the bootstrap value of 100.
323 _gc0_alpha = _cms_alpha = 100;
325 _cms_begin_time.update();
326 _cms_end_time.update();
328 _gc0_duration = 0.0;
329 _gc0_period = 0.0;
330 _gc0_promoted = 0;
332 _cms_duration = 0.0;
333 _cms_period = 0.0;
334 _cms_allocated = 0;
336 _cms_used_at_gc0_begin = 0;
337 _cms_used_at_gc0_end = 0;
338 _allow_duty_cycle_reduction = false;
339 _valid_bits = 0;
340 _icms_duty_cycle = CMSIncrementalDutyCycle;
341 }
343 // If promotion failure handling is on, use
344 // the padded average size of the promotion for each
345 // young generation collection.
346 double CMSStats::time_until_cms_gen_full() const {
347 size_t cms_free = _cms_gen->cmsSpace()->free();
348 GenCollectedHeap* gch = GenCollectedHeap::heap();
349 size_t expected_promotion = gch->get_gen(0)->capacity();
350 if (HandlePromotionFailure) {
351 expected_promotion = MIN2(
352 (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average(),
353 expected_promotion);
354 }
355 if (cms_free > expected_promotion) {
356 // Start a cms collection if there isn't enough space to promote
357 // for the next minor collection. Use the padded average as
358 // a safety factor.
359 cms_free -= expected_promotion;
361 // Adjust by the safety factor.
362 double cms_free_dbl = (double)cms_free;
363 cms_free_dbl = cms_free_dbl * (100.0 - CMSIncrementalSafetyFactor) / 100.0;
365 if (PrintGCDetails && Verbose) {
366 gclog_or_tty->print_cr("CMSStats::time_until_cms_gen_full: cms_free "
367 SIZE_FORMAT " expected_promotion " SIZE_FORMAT,
368 cms_free, expected_promotion);
369 gclog_or_tty->print_cr(" cms_free_dbl %f cms_consumption_rate %f",
370 cms_free_dbl, cms_consumption_rate() + 1.0);
371 }
372 // Add 1 in case the consumption rate goes to zero.
373 return cms_free_dbl / (cms_consumption_rate() + 1.0);
374 }
375 return 0.0;
376 }
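// Worked example (illustrative values): with 200 MB free in the cms gen,
// an expected promotion of 50 MB, CMSIncrementalSafetyFactor = 10 and a
// cms_consumption_rate() of 9 MB/s, the estimate above is
//   (200 - 50) * (100 - 10) / 100 / (9 + 1) = 13.5 seconds.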
378 // Compare the duration of the cms collection to the
379 // time remaining before the cms generation is empty.
380 // Note that the time from the start of the cms collection
381 // to the start of the cms sweep (less than the total
382 // duration of the cms collection) could be used instead. This
383 // was tried, but some applications then experienced
384 // promotion failures early in execution, possibly
385 // because the averages were not yet accurate
386 // enough at the beginning.
387 double CMSStats::time_until_cms_start() const {
388 // We add "gc0_period" to the "work" calculation
389 // below because this query is done (mostly) at the
390 // end of a scavenge, so we need to conservatively
391 // account for that much possible delay
392 // in the query so as to avoid concurrent mode failures
393 // due to starting the collection just a wee bit too
394 // late.
395 double work = cms_duration() + gc0_period();
396 double deadline = time_until_cms_gen_full();
397 if (work > deadline) {
398 if (Verbose && PrintGCDetails) {
399 gclog_or_tty->print(
400 " CMSCollector: collect because of anticipated promotion "
401 "before full %3.7f + %3.7f > %3.7f ", cms_duration(),
402 gc0_period(), time_until_cms_gen_full());
403 }
404 return 0.0;
405 }
406 return deadline - work;
407 }
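// Worked example (illustrative values): if a cms cycle takes about 6 s,
// scavenges occur roughly every 2 s and the generation is expected to
// fill in 20 s, then work = 6 + 2 = 8 s < deadline = 20 s, leaving about
// 12 s of slack before a collection must be started.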
409 // Return a duty cycle based on old_duty_cycle and new_duty_cycle, limiting the
410 // amount of change to prevent wild oscillation.
411 unsigned int CMSStats::icms_damped_duty_cycle(unsigned int old_duty_cycle,
412 unsigned int new_duty_cycle) {
413 assert(old_duty_cycle <= 100, "bad input value");
414 assert(new_duty_cycle <= 100, "bad input value");
416 // Note: use subtraction with caution since it may underflow (values are
417 // unsigned). Addition is safe since we're in the range 0-100.
418 unsigned int damped_duty_cycle = new_duty_cycle;
419 if (new_duty_cycle < old_duty_cycle) {
420 const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 5U);
421 if (new_duty_cycle + largest_delta < old_duty_cycle) {
422 damped_duty_cycle = old_duty_cycle - largest_delta;
423 }
424 } else if (new_duty_cycle > old_duty_cycle) {
425 const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 15U);
426 if (new_duty_cycle > old_duty_cycle + largest_delta) {
427 damped_duty_cycle = MIN2(old_duty_cycle + largest_delta, 100U);
428 }
429 }
430 assert(damped_duty_cycle <= 100, "invalid duty cycle computed");
432 if (CMSTraceIncrementalPacing) {
433 gclog_or_tty->print(" [icms_damped_duty_cycle(%d,%d) = %d] ",
434 old_duty_cycle, new_duty_cycle, damped_duty_cycle);
435 }
436 return damped_duty_cycle;
437 }
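// Worked example (illustrative values): for old_duty_cycle = 40 and
// new_duty_cycle = 10, largest_delta = MAX2(40 / 4, 5U) = 10; since
// 10 + 10 < 40, the damped result is 40 - 10 = 30, i.e. the duty cycle
// steps down gradually rather than dropping straight to 10.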
439 unsigned int CMSStats::icms_update_duty_cycle_impl() {
440 assert(CMSIncrementalPacing && valid(),
441 "should be handled in icms_update_duty_cycle()");
443 double cms_time_so_far = cms_timer().seconds();
444 double scaled_duration = cms_duration_per_mb() * _cms_used_at_gc0_end / M;
445 double scaled_duration_remaining = fabsd(scaled_duration - cms_time_so_far);
447 // Avoid division by 0.
448 double time_until_full = MAX2(time_until_cms_gen_full(), 0.01);
449 double duty_cycle_dbl = 100.0 * scaled_duration_remaining / time_until_full;
451 unsigned int new_duty_cycle = MIN2((unsigned int)duty_cycle_dbl, 100U);
452 if (new_duty_cycle > _icms_duty_cycle) {
453 // Avoid very small duty cycles (1 or 2); 0 is allowed.
454 if (new_duty_cycle > 2) {
455 _icms_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle,
456 new_duty_cycle);
457 }
458 } else if (_allow_duty_cycle_reduction) {
459 // The duty cycle is reduced only once per cms cycle (see record_cms_end()).
460 new_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle, new_duty_cycle);
461 // Respect the minimum duty cycle.
462 unsigned int min_duty_cycle = (unsigned int)CMSIncrementalDutyCycleMin;
463 _icms_duty_cycle = MAX2(new_duty_cycle, min_duty_cycle);
464 }
466 if (PrintGCDetails || CMSTraceIncrementalPacing) {
467 gclog_or_tty->print(" icms_dc=%d ", _icms_duty_cycle);
468 }
470 _allow_duty_cycle_reduction = false;
471 return _icms_duty_cycle;
472 }
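// Worked example (illustrative values): if the scaled duration of the
// remaining cms work is 2.0 s and time_until_cms_gen_full() is 8.0 s,
// then duty_cycle_dbl = 100.0 * 2.0 / 8.0 = 25.0, so a 25% duty cycle
// is requested (subject to the damping and minimum-duty-cycle rules
// applied above).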
474 #ifndef PRODUCT
475 void CMSStats::print_on(outputStream *st) const {
476 st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
477 st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
478 gc0_duration(), gc0_period(), gc0_promoted());
479 st->print(",cms_dur=%g,cms_dur_per_mb=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
480 cms_duration(), cms_duration_per_mb(),
481 cms_period(), cms_allocated());
482 st->print(",cms_since_beg=%g,cms_since_end=%g",
483 cms_time_since_begin(), cms_time_since_end());
484 st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
485 _cms_used_at_gc0_begin, _cms_used_at_gc0_end);
486 if (CMSIncrementalMode) {
487 st->print(",dc=%d", icms_duty_cycle());
488 }
490 if (valid()) {
491 st->print(",promo_rate=%g,cms_alloc_rate=%g",
492 promotion_rate(), cms_allocation_rate());
493 st->print(",cms_consumption_rate=%g,time_until_full=%g",
494 cms_consumption_rate(), time_until_cms_gen_full());
495 }
496 st->print(" ");
497 }
498 #endif // #ifndef PRODUCT
500 CMSCollector::CollectorState CMSCollector::_collectorState =
501 CMSCollector::Idling;
502 bool CMSCollector::_foregroundGCIsActive = false;
503 bool CMSCollector::_foregroundGCShouldWait = false;
505 CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
506 ConcurrentMarkSweepGeneration* permGen,
507 CardTableRS* ct,
508 ConcurrentMarkSweepPolicy* cp):
509 _cmsGen(cmsGen),
510 _permGen(permGen),
511 _ct(ct),
512 _ref_processor(NULL), // will be set later
513 _conc_workers(NULL), // may be set later
514 _abort_preclean(false),
515 _start_sampling(false),
516 _between_prologue_and_epilogue(false),
517 _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
518 _perm_gen_verify_bit_map(0, -1 /* no mutex */, "No_lock"),
519 _modUnionTable((CardTableModRefBS::card_shift - LogHeapWordSize),
520 -1 /* lock-free */, "No_lock" /* dummy */),
521 _modUnionClosure(&_modUnionTable),
522 _modUnionClosurePar(&_modUnionTable),
523 // Adjust my span to cover old (cms) gen and perm gen
524 _span(cmsGen->reserved()._union(permGen->reserved())),
525 // Construct the is_alive_closure with _span & markBitMap
526 _is_alive_closure(_span, &_markBitMap),
527 _restart_addr(NULL),
528 _overflow_list(NULL),
529 _preserved_oop_stack(NULL),
530 _preserved_mark_stack(NULL),
531 _stats(cmsGen),
532 _eden_chunk_array(NULL), // may be set in ctor body
533 _eden_chunk_capacity(0), // -- ditto --
534 _eden_chunk_index(0), // -- ditto --
535 _survivor_plab_array(NULL), // -- ditto --
536 _survivor_chunk_array(NULL), // -- ditto --
537 _survivor_chunk_capacity(0), // -- ditto --
538 _survivor_chunk_index(0), // -- ditto --
539 _ser_pmc_preclean_ovflw(0),
540 _ser_pmc_remark_ovflw(0),
541 _par_pmc_remark_ovflw(0),
542 _ser_kac_ovflw(0),
543 _par_kac_ovflw(0),
544 #ifndef PRODUCT
545 _num_par_pushes(0),
546 #endif
547 _collection_count_start(0),
548 _verifying(false),
549 _icms_start_limit(NULL),
550 _icms_stop_limit(NULL),
551 _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
552 _completed_initialization(false),
553 _collector_policy(cp),
554 _should_unload_classes(false),
555 _concurrent_cycles_since_last_unload(0),
556 _sweep_estimate(CMS_SweepWeight, CMS_SweepPadding)
557 {
558 if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
559 ExplicitGCInvokesConcurrent = true;
560 }
561 // Now expand the span and allocate the collection support structures
562 // (MUT, marking bit map etc.) to cover both generations subject to
563 // collection.
565 // First check that _permGen is adjacent to _cmsGen and above it.
566 assert( _cmsGen->reserved().word_size() > 0
567 && _permGen->reserved().word_size() > 0,
568 "generations should not be of zero size");
569 assert(_cmsGen->reserved().intersection(_permGen->reserved()).is_empty(),
570 "_cmsGen and _permGen should not overlap");
571 assert(_cmsGen->reserved().end() == _permGen->reserved().start(),
572 "_cmsGen->end() different from _permGen->start()");
574 // For use by dirty card to oop closures.
575 _cmsGen->cmsSpace()->set_collector(this);
576 _permGen->cmsSpace()->set_collector(this);
578 // Allocate MUT and marking bit map
579 {
580 MutexLockerEx x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag);
581 if (!_markBitMap.allocate(_span)) {
582 warning("Failed to allocate CMS Bit Map");
583 return;
584 }
585 assert(_markBitMap.covers(_span), "_markBitMap inconsistency?");
586 }
587 {
588 _modUnionTable.allocate(_span);
589 assert(_modUnionTable.covers(_span), "_modUnionTable inconsistency?");
590 }
592 if (!_markStack.allocate(CMSMarkStackSize)) {
593 warning("Failed to allocate CMS Marking Stack");
594 return;
595 }
596 if (!_revisitStack.allocate(CMSRevisitStackSize)) {
597 warning("Failed to allocate CMS Revisit Stack");
598 return;
599 }
601 // Support for multi-threaded concurrent phases
602 if (ParallelGCThreads > 0 && CMSConcurrentMTEnabled) {
603 if (FLAG_IS_DEFAULT(ParallelCMSThreads)) {
604 // just for now
605 FLAG_SET_DEFAULT(ParallelCMSThreads, (ParallelGCThreads + 3)/4);
606 }
607 if (ParallelCMSThreads > 1) {
608 _conc_workers = new YieldingFlexibleWorkGang("Parallel CMS Threads",
609 ParallelCMSThreads, true);
610 if (_conc_workers == NULL) {
611 warning("GC/CMS: _conc_workers allocation failure: "
612 "forcing -CMSConcurrentMTEnabled");
613 CMSConcurrentMTEnabled = false;
614 }
615 } else {
616 CMSConcurrentMTEnabled = false;
617 }
618 }
619 if (!CMSConcurrentMTEnabled) {
620 ParallelCMSThreads = 0;
621 } else {
622 // Turn off CMSCleanOnEnter optimization temporarily for
623 // the MT case where it's not fixed yet; see 6178663.
624 CMSCleanOnEnter = false;
625 }
626 assert((_conc_workers != NULL) == (ParallelCMSThreads > 1),
627 "Inconsistency");
629 // Parallel task queues; these are shared for the
630 // concurrent and stop-world phases of CMS, but
631 // are not shared with parallel scavenge (ParNew).
632 {
633 uint i;
634 uint num_queues = (uint) MAX2(ParallelGCThreads, ParallelCMSThreads);
636 if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled
637 || ParallelRefProcEnabled)
638 && num_queues > 0) {
639 _task_queues = new OopTaskQueueSet(num_queues);
640 if (_task_queues == NULL) {
641 warning("task_queues allocation failure.");
642 return;
643 }
644 _hash_seed = NEW_C_HEAP_ARRAY(int, num_queues);
645 if (_hash_seed == NULL) {
646 warning("_hash_seed array allocation failure");
647 return;
648 }
650 // XXX use a global constant instead of 64!
651 typedef struct OopTaskQueuePadded {
652 OopTaskQueue work_queue;
653 char pad[64 - sizeof(OopTaskQueue)]; // prevent false sharing
654 } OopTaskQueuePadded;
656 for (i = 0; i < num_queues; i++) {
657 OopTaskQueuePadded *q_padded = new OopTaskQueuePadded();
658 if (q_padded == NULL) {
659 warning("work_queue allocation failure.");
660 return;
661 }
662 _task_queues->register_queue(i, &q_padded->work_queue);
663 }
664 for (i = 0; i < num_queues; i++) {
665 _task_queues->queue(i)->initialize();
666 _hash_seed[i] = 17; // copied from ParNew
667 }
668 }
669 }
671 _cmsGen ->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);
672 _permGen->init_initiating_occupancy(CMSInitiatingPermOccupancyFraction, CMSTriggerPermRatio);
674 // Clip CMSBootstrapOccupancy between 0 and 100.
675 _bootstrap_occupancy = ((double)MIN2((uintx)100, MAX2((uintx)0, CMSBootstrapOccupancy)))
676 /(double)100;
678 _full_gcs_since_conc_gc = 0;
680 // Now tell CMS generations the identity of their collector
681 ConcurrentMarkSweepGeneration::set_collector(this);
683 // Create & start a CMS thread for this CMS collector
684 _cmsThread = ConcurrentMarkSweepThread::start(this);
685 assert(cmsThread() != NULL, "CMS Thread should have been created");
686 assert(cmsThread()->collector() == this,
687 "CMS Thread should refer to this gen");
688 assert(CGC_lock != NULL, "Where's the CGC_lock?");
690 // Support for parallelizing young gen rescan
691 GenCollectedHeap* gch = GenCollectedHeap::heap();
692 _young_gen = gch->prev_gen(_cmsGen);
693 if (gch->supports_inline_contig_alloc()) {
694 _top_addr = gch->top_addr();
695 _end_addr = gch->end_addr();
696 assert(_young_gen != NULL, "no _young_gen");
697 _eden_chunk_index = 0;
698 _eden_chunk_capacity = (_young_gen->max_capacity()+CMSSamplingGrain)/CMSSamplingGrain;
699 _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity);
700 if (_eden_chunk_array == NULL) {
701 _eden_chunk_capacity = 0;
702 warning("GC/CMS: _eden_chunk_array allocation failure");
703 }
704 }
705 assert(_eden_chunk_array != NULL || _eden_chunk_capacity == 0, "Error");
707 // Support for parallelizing survivor space rescan
708 if (CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) {
709 size_t max_plab_samples = MaxNewSize/((SurvivorRatio+2)*MinTLABSize);
710 _survivor_plab_array = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads);
711 _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, 2*max_plab_samples);
712 _cursor = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads);
713 if (_survivor_plab_array == NULL || _survivor_chunk_array == NULL
714 || _cursor == NULL) {
715 warning("Failed to allocate survivor plab/chunk array");
716 if (_survivor_plab_array != NULL) {
717 FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array);
718 _survivor_plab_array = NULL;
719 }
720 if (_survivor_chunk_array != NULL) {
721 FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array);
722 _survivor_chunk_array = NULL;
723 }
724 if (_cursor != NULL) {
725 FREE_C_HEAP_ARRAY(size_t, _cursor);
726 _cursor = NULL;
727 }
728 } else {
729 _survivor_chunk_capacity = 2*max_plab_samples;
730 for (uint i = 0; i < ParallelGCThreads; i++) {
731 HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples);
732 if (vec == NULL) {
733 warning("Failed to allocate survivor plab array");
734 for (int j = i; j > 0; j--) {
735 FREE_C_HEAP_ARRAY(HeapWord*, _survivor_plab_array[j-1].array());
736 }
737 FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array);
738 FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array);
739 _survivor_plab_array = NULL;
740 _survivor_chunk_array = NULL;
741 _survivor_chunk_capacity = 0;
742 break;
743 } else {
744 ChunkArray* cur =
745 ::new (&_survivor_plab_array[i]) ChunkArray(vec,
746 max_plab_samples);
747 assert(cur->end() == 0, "Should be 0");
748 assert(cur->array() == vec, "Should be vec");
749 assert(cur->capacity() == max_plab_samples, "Error");
750 }
751 }
752 }
753 }
754 assert( ( _survivor_plab_array != NULL
755 && _survivor_chunk_array != NULL)
756 || ( _survivor_chunk_capacity == 0
757 && _survivor_chunk_index == 0),
758 "Error");
760 // Choose what strong roots should be scanned depending on verification options
761 // and perm gen collection mode.
762 if (!CMSClassUnloadingEnabled) {
763 // If class unloading is disabled we want to include all classes into the root set.
764 add_root_scanning_option(SharedHeap::SO_AllClasses);
765 } else {
766 add_root_scanning_option(SharedHeap::SO_SystemClasses);
767 }
769 NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
770 _gc_counters = new CollectorCounters("CMS", 1);
771 _completed_initialization = true;
772 _sweep_timer.start(); // start of time
773 }
775 const char* ConcurrentMarkSweepGeneration::name() const {
776 return "concurrent mark-sweep generation";
777 }
778 void ConcurrentMarkSweepGeneration::update_counters() {
779 if (UsePerfData) {
780 _space_counters->update_all();
781 _gen_counters->update_all();
782 }
783 }
785 // This is an optimized version of update_counters(). It takes the
786 // used value as a parameter rather than computing it.
787 //
788 void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
789 if (UsePerfData) {
790 _space_counters->update_used(used);
791 _space_counters->update_capacity();
792 _gen_counters->update_all();
793 }
794 }
796 void ConcurrentMarkSweepGeneration::print() const {
797 Generation::print();
798 cmsSpace()->print();
799 }
801 #ifndef PRODUCT
802 void ConcurrentMarkSweepGeneration::print_statistics() {
803 cmsSpace()->printFLCensus(0);
804 }
805 #endif
807 void ConcurrentMarkSweepGeneration::printOccupancy(const char *s) {
808 GenCollectedHeap* gch = GenCollectedHeap::heap();
809 if (PrintGCDetails) {
810 if (Verbose) {
811 gclog_or_tty->print(" [%d %s-%s: "SIZE_FORMAT"("SIZE_FORMAT")]",
812 level(), short_name(), s, used(), capacity());
813 } else {
814 gclog_or_tty->print(" [%d %s-%s: "SIZE_FORMAT"K("SIZE_FORMAT"K)]",
815 level(), short_name(), s, used() / K, capacity() / K);
816 }
817 }
818 if (Verbose) {
819 gclog_or_tty->print(" "SIZE_FORMAT"("SIZE_FORMAT")",
820 gch->used(), gch->capacity());
821 } else {
822 gclog_or_tty->print(" "SIZE_FORMAT"K("SIZE_FORMAT"K)",
823 gch->used() / K, gch->capacity() / K);
824 }
825 }
827 size_t
828 ConcurrentMarkSweepGeneration::contiguous_available() const {
829 // dld proposes an improvement in precision here. If the committed
830 // part of the space ends in a free block we should add that to
831 // uncommitted size in the calculation below. Will make this
832 // change later, staying with the approximation below for the
833 // time being. -- ysr.
834 return MAX2(_virtual_space.uncommitted_size(), unsafe_max_alloc_nogc());
835 }
837 size_t
838 ConcurrentMarkSweepGeneration::unsafe_max_alloc_nogc() const {
839 return _cmsSpace->max_alloc_in_words() * HeapWordSize;
840 }
842 size_t ConcurrentMarkSweepGeneration::max_available() const {
843 return free() + _virtual_space.uncommitted_size();
844 }
846 bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(
847 size_t max_promotion_in_bytes,
848 bool younger_handles_promotion_failure) const {
850 // This is the most conservative test. Full promotion is
851 // guaranteed if this is used. The multiplicative factor is to
852 // account for the worst case "dilatation".
853 double adjusted_max_promo_bytes = _dilatation_factor * max_promotion_in_bytes;
854 if (adjusted_max_promo_bytes > (double)max_uintx) { // larger than size_t
855 adjusted_max_promo_bytes = (double)max_uintx;
856 }
857 bool result = (max_contiguous_available() >= (size_t)adjusted_max_promo_bytes);
859 if (younger_handles_promotion_failure && !result) {
860 // Full promotion is not guaranteed because fragmentation
861 // of the cms generation can prevent the full promotion.
862 result = (max_available() >= (size_t)adjusted_max_promo_bytes);
864 if (!result) {
865 // With promotion failure handling the test for the ability
866 // to support the promotion does not have to be guaranteed.
867 // Use an average of the amount promoted.
868 result = max_available() >= (size_t)
869 gc_stats()->avg_promoted()->padded_average();
870 if (PrintGC && Verbose && result) {
871 gclog_or_tty->print_cr(
872 "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe"
873 " max_available: " SIZE_FORMAT
874 " avg_promoted: " SIZE_FORMAT,
875 max_available(), (size_t)
876 gc_stats()->avg_promoted()->padded_average());
877 }
878 } else {
879 if (PrintGC && Verbose) {
880 gclog_or_tty->print_cr(
881 "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe"
882 " max_available: " SIZE_FORMAT
883 " adj_max_promo_bytes: " SIZE_FORMAT,
884 max_available(), (size_t)adjusted_max_promo_bytes);
885 }
886 }
887 } else {
888 if (PrintGC && Verbose) {
889 gclog_or_tty->print_cr(
890 "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe"
891 " contiguous_available: " SIZE_FORMAT
892 " adj_max_promo_bytes: " SIZE_FORMAT,
893 max_contiguous_available(), (size_t)adjusted_max_promo_bytes);
894 }
895 }
896 return result;
897 }
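// Worked example (illustrative values): with a dilatation factor of 1.25
// and max_promotion_in_bytes = 64 MB, adjusted_max_promo_bytes is 80 MB.
// Promotion is considered safe if 80 MB of contiguous space is available;
// when the younger generation handles promotion failure, 80 MB of total
// free space, or free space covering the padded average promotion, also
// suffices.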
899 CompactibleSpace*
900 ConcurrentMarkSweepGeneration::first_compaction_space() const {
901 return _cmsSpace;
902 }
904 void ConcurrentMarkSweepGeneration::reset_after_compaction() {
905 // Clear the promotion information. These pointers can be adjusted
906 // along with all the other pointers into the heap, but
907 // compaction is expected to be a rare event with
908 // a heap using cms, so don't do it without seeing the need.
909 if (ParallelGCThreads > 0) {
910 for (uint i = 0; i < ParallelGCThreads; i++) {
911 _par_gc_thread_states[i]->promo.reset();
912 }
913 }
914 }
916 void ConcurrentMarkSweepGeneration::space_iterate(SpaceClosure* blk, bool usedOnly) {
917 blk->do_space(_cmsSpace);
918 }
920 void ConcurrentMarkSweepGeneration::compute_new_size() {
921 assert_locked_or_safepoint(Heap_lock);
923 // If incremental collection failed, we just want to expand
924 // to the limit.
925 if (incremental_collection_failed()) {
926 clear_incremental_collection_failed();
927 grow_to_reserved();
928 return;
929 }
931 size_t expand_bytes = 0;
932 double free_percentage = ((double) free()) / capacity();
933 double desired_free_percentage = (double) MinHeapFreeRatio / 100;
934 double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;
936 // compute expansion delta needed for reaching desired free percentage
937 if (free_percentage < desired_free_percentage) {
938 size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
939 assert(desired_capacity >= capacity(), "invalid expansion size");
940 expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
941 }
942 if (expand_bytes > 0) {
943 if (PrintGCDetails && Verbose) {
944 size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
945 gclog_or_tty->print_cr("\nFrom compute_new_size: ");
946 gclog_or_tty->print_cr(" Free fraction %f", free_percentage);
947 gclog_or_tty->print_cr(" Desired free fraction %f",
948 desired_free_percentage);
949 gclog_or_tty->print_cr(" Maximum free fraction %f",
950 maximum_free_percentage);
951 gclog_or_tty->print_cr(" Capacity "SIZE_FORMAT, capacity()/1000);
952 gclog_or_tty->print_cr(" Desired capacity "SIZE_FORMAT,
953 desired_capacity/1000);
954 int prev_level = level() - 1;
955 if (prev_level >= 0) {
956 size_t prev_size = 0;
957 GenCollectedHeap* gch = GenCollectedHeap::heap();
958 Generation* prev_gen = gch->_gens[prev_level];
959 prev_size = prev_gen->capacity();
960 gclog_or_tty->print_cr(" Younger gen size "SIZE_FORMAT,
961 prev_size/1000);
962 }
963 gclog_or_tty->print_cr(" unsafe_max_alloc_nogc "SIZE_FORMAT,
964 unsafe_max_alloc_nogc()/1000);
965 gclog_or_tty->print_cr(" contiguous available "SIZE_FORMAT,
966 contiguous_available()/1000);
967 gclog_or_tty->print_cr(" Expand by "SIZE_FORMAT" (bytes)",
968 expand_bytes);
969 }
970 // safe if expansion fails
971 expand(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
972 if (PrintGCDetails && Verbose) {
973 gclog_or_tty->print_cr(" Expanded free fraction %f",
974 ((double) free()) / capacity());
975 }
976 }
977 }
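// Worked example (illustrative values): with MinHeapFreeRatio = 40,
// used() = 600 MB and capacity() = 800 MB, free_percentage is 0.25,
// below the desired 0.40, so desired_capacity = 600 / (1 - 0.40) =
// 1000 MB and the generation is expanded by
// MAX2(1000 MB - 800 MB, MinHeapDeltaBytes), i.e. about 200 MB.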
979 Mutex* ConcurrentMarkSweepGeneration::freelistLock() const {
980 return cmsSpace()->freelistLock();
981 }
983 HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size,
984 bool tlab) {
985 CMSSynchronousYieldRequest yr;
986 MutexLockerEx x(freelistLock(),
987 Mutex::_no_safepoint_check_flag);
988 return have_lock_and_allocate(size, tlab);
989 }
991 HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
992 bool tlab) {
993 assert_lock_strong(freelistLock());
994 size_t adjustedSize = CompactibleFreeListSpace::adjustObjectSize(size);
995 HeapWord* res = cmsSpace()->allocate(adjustedSize);
996 // Allocate the object live (grey) if the background collector has
997 // started marking. This is necessary because the marker may
998 // have passed this address and consequently this object will
999 // not otherwise be greyed and would be incorrectly swept up.
1000 // Note that if this object contains references, the writing
1001 // of those references will dirty the card containing this object
1002 // allowing the object to be blackened (and its references scanned)
1003 // either during a preclean phase or at the final checkpoint.
1004 if (res != NULL) {
1005 collector()->direct_allocated(res, adjustedSize);
1006 _direct_allocated_words += adjustedSize;
1007 // allocation counters
1008 NOT_PRODUCT(
1009 _numObjectsAllocated++;
1010 _numWordsAllocated += (int)adjustedSize;
1011 )
1012 }
1013 return res;
1014 }
1016 // In the case of direct allocation by mutators in a generation that
1017 // is being concurrently collected, the object must be allocated
1018 // live (grey) if the background collector has started marking.
1019 // This is necessary because the marker may
1020 // have passed this address and consequently this object will
1021 // not otherwise be greyed and would be incorrectly swept up.
1022 // Note that if this object contains references, the writing
1023 // of those references will dirty the card containing this object
1024 // allowing the object to be blackened (and its references scanned)
1025 // either during a preclean phase or at the final checkpoint.
1026 void CMSCollector::direct_allocated(HeapWord* start, size_t size) {
1027 assert(_markBitMap.covers(start, size), "Out of bounds");
1028 if (_collectorState >= Marking) {
1029 MutexLockerEx y(_markBitMap.lock(),
1030 Mutex::_no_safepoint_check_flag);
1031 // [see comments preceding SweepClosure::do_blk() below for details]
1032 // 1. need to mark the object as live so it isn't collected
1033 // 2. need to mark the 2nd bit to indicate the object may be uninitialized
1034 // 3. need to mark the end of the object so sweeper can skip over it
1035 // if it's uninitialized when the sweeper reaches it.
1036 _markBitMap.mark(start); // object is live
1037 _markBitMap.mark(start + 1); // object is potentially uninitialized?
1038 _markBitMap.mark(start + size - 1);
1039 // mark end of object
1040 }
1041 // check that oop looks uninitialized
1042 assert(oop(start)->klass() == NULL, "_klass should be NULL");
1043 }
1045 void CMSCollector::promoted(bool par, HeapWord* start,
1046 bool is_obj_array, size_t obj_size) {
1047 assert(_markBitMap.covers(start), "Out of bounds");
1048 // See comment in direct_allocated() about when objects should
1049 // be allocated live.
1050 if (_collectorState >= Marking) {
1051 // we already hold the marking bit map lock, taken in
1052 // the prologue
1053 if (par) {
1054 _markBitMap.par_mark(start);
1055 } else {
1056 _markBitMap.mark(start);
1057 }
1058 // We don't need to mark the object as uninitialized (as
1059 // in direct_allocated above) because this is being done with the
1060 // world stopped and the object will be initialized by the
1061 // time the sweeper gets to look at it.
1062 assert(SafepointSynchronize::is_at_safepoint(),
1063 "expect promotion only at safepoints");
1065 if (_collectorState < Sweeping) {
1066 // Mark the appropriate cards in the modUnionTable, so that
1067 // this object gets scanned before the sweep. If this is
1068 // not done, CMS generation references in the object might
1069 // not get marked.
1070 // For the case of arrays, which are otherwise precisely
1071 // marked, we need to dirty the entire array, not just its head.
1072 if (is_obj_array) {
1073 // The [par_]mark_range() method expects mr.end() below to
1074 // be aligned to the granularity of a bit's representation
1075 // in the heap. In the case of the MUT below, that's a
1076 // card size.
1077 MemRegion mr(start,
1078 (HeapWord*)round_to((intptr_t)(start + obj_size),
1079 CardTableModRefBS::card_size /* bytes */));
1080 if (par) {
1081 _modUnionTable.par_mark_range(mr);
1082 } else {
1083 _modUnionTable.mark_range(mr);
1084 }
1085 } else { // not an obj array; we can just mark the head
1086 if (par) {
1087 _modUnionTable.par_mark(start);
1088 } else {
1089 _modUnionTable.mark(start);
1090 }
1091 }
1092 }
1093 }
1094 }
1096 static inline size_t percent_of_space(Space* space, HeapWord* addr)
1097 {
1098 size_t delta = pointer_delta(addr, space->bottom());
1099 return (size_t)(delta * 100.0 / (space->capacity() / HeapWordSize));
1100 }
1102 void CMSCollector::icms_update_allocation_limits()
1103 {
1104 Generation* gen0 = GenCollectedHeap::heap()->get_gen(0);
1105 EdenSpace* eden = gen0->as_DefNewGeneration()->eden();
1107 const unsigned int duty_cycle = stats().icms_update_duty_cycle();
1108 if (CMSTraceIncrementalPacing) {
1109 stats().print();
1110 }
1112 assert(duty_cycle <= 100, "invalid duty cycle");
1113 if (duty_cycle != 0) {
1114 // The duty_cycle is a percentage between 0 and 100; convert to words and
1115 // then compute the offset from the endpoints of the space.
1116 size_t free_words = eden->free() / HeapWordSize;
1117 double free_words_dbl = (double)free_words;
1118 size_t duty_cycle_words = (size_t)(free_words_dbl * duty_cycle / 100.0);
1119 size_t offset_words = (free_words - duty_cycle_words) / 2;
1121 _icms_start_limit = eden->top() + offset_words;
1122 _icms_stop_limit = eden->end() - offset_words;
1124 // The limits may be adjusted (shifted to the right) by
1125 // CMSIncrementalOffset, to allow the application more mutator time after a
1126 // young gen gc (when all mutators were stopped) and before CMS starts and
1127 // takes away one or more cpus.
1128 if (CMSIncrementalOffset != 0) {
1129 double adjustment_dbl = free_words_dbl * CMSIncrementalOffset / 100.0;
1130 size_t adjustment = (size_t)adjustment_dbl;
1131 HeapWord* tmp_stop = _icms_stop_limit + adjustment;
1132 if (tmp_stop > _icms_stop_limit && tmp_stop < eden->end()) {
1133 _icms_start_limit += adjustment;
1134 _icms_stop_limit = tmp_stop;
1135 }
1136 }
1137 }
1138 if (duty_cycle == 0 || (_icms_start_limit == _icms_stop_limit)) {
1139 _icms_start_limit = _icms_stop_limit = eden->end();
1140 }
1142 // Install the new start limit.
1143 eden->set_soft_end(_icms_start_limit);
1145 if (CMSTraceIncrementalMode) {
1146 gclog_or_tty->print(" icms alloc limits: "
1147 PTR_FORMAT "," PTR_FORMAT
1148 " (" SIZE_FORMAT "%%," SIZE_FORMAT "%%) ",
1149 _icms_start_limit, _icms_stop_limit,
1150 percent_of_space(eden, _icms_start_limit),
1151 percent_of_space(eden, _icms_stop_limit));
1152 if (Verbose) {
1153 gclog_or_tty->print("eden: ");
1154 eden->print_on(gclog_or_tty);
1155 }
1156 }
1157 }
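// Worked example (illustrative values): with 80M words free in eden and
// a 25% duty cycle, duty_cycle_words = 20M and offset_words =
// (80M - 20M) / 2 = 30M, so (before any CMSIncrementalOffset adjustment)
// the start limit is top() + 30M words and the stop limit is
// end() - 30M words, giving icms a 20M-word window in the middle of the
// free space.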
1159 // Any changes here should try to maintain the invariant
1160 // that if this method is called with _icms_start_limit
1161 // and _icms_stop_limit both NULL, then it should return NULL
1162 // and not notify the icms thread.
1163 HeapWord*
1164 CMSCollector::allocation_limit_reached(Space* space, HeapWord* top,
1165 size_t word_size)
1166 {
1167 // A start_limit equal to end() means the duty cycle is 0, so treat that as a
1168 // nop.
1169 if (CMSIncrementalMode && _icms_start_limit != space->end()) {
1170 if (top <= _icms_start_limit) {
1171 if (CMSTraceIncrementalMode) {
1172 space->print_on(gclog_or_tty);
1173 gclog_or_tty->stamp();
1174 gclog_or_tty->print_cr(" start limit top=" PTR_FORMAT
1175 ", new limit=" PTR_FORMAT
1176 " (" SIZE_FORMAT "%%)",
1177 top, _icms_stop_limit,
1178 percent_of_space(space, _icms_stop_limit));
1179 }
1180 ConcurrentMarkSweepThread::start_icms();
1181 assert(top < _icms_stop_limit, "Tautology");
1182 if (word_size < pointer_delta(_icms_stop_limit, top)) {
1183 return _icms_stop_limit;
1184 }
1186 // The allocation will cross both the _start and _stop limits, so do the
1187 // stop notification also and return end().
1188 if (CMSTraceIncrementalMode) {
1189 space->print_on(gclog_or_tty);
1190 gclog_or_tty->stamp();
1191 gclog_or_tty->print_cr(" +stop limit top=" PTR_FORMAT
1192 ", new limit=" PTR_FORMAT
1193 " (" SIZE_FORMAT "%%)",
1194 top, space->end(),
1195 percent_of_space(space, space->end()));
1196 }
1197 ConcurrentMarkSweepThread::stop_icms();
1198 return space->end();
1199 }
1201 if (top <= _icms_stop_limit) {
1202 if (CMSTraceIncrementalMode) {
1203 space->print_on(gclog_or_tty);
1204 gclog_or_tty->stamp();
1205 gclog_or_tty->print_cr(" stop limit top=" PTR_FORMAT
1206 ", new limit=" PTR_FORMAT
1207 " (" SIZE_FORMAT "%%)",
1208 top, space->end(),
1209 percent_of_space(space, space->end()));
1210 }
1211 ConcurrentMarkSweepThread::stop_icms();
1212 return space->end();
1213 }
1215 if (CMSTraceIncrementalMode) {
1216 space->print_on(gclog_or_tty);
1217 gclog_or_tty->stamp();
1218 gclog_or_tty->print_cr(" end limit top=" PTR_FORMAT
1219 ", new limit=" PTR_FORMAT,
1220 top, NULL);
1221 }
1222 }
1224 return NULL;
1225 }
1227 oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
1228 assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1229 // allocate, copy and if necessary update promoinfo --
1230 // delegate to underlying space.
1231 assert_lock_strong(freelistLock());
1233 #ifndef PRODUCT
1234 if (Universe::heap()->promotion_should_fail()) {
1235 return NULL;
1236 }
1237 #endif // #ifndef PRODUCT
1239 oop res = _cmsSpace->promote(obj, obj_size);
1240 if (res == NULL) {
1241 // expand and retry
1242 size_t s = _cmsSpace->expansionSpaceRequired(obj_size); // HeapWords
1243 expand(s*HeapWordSize, MinHeapDeltaBytes,
1244 CMSExpansionCause::_satisfy_promotion);
1245 // Since there's currently no next generation, we don't try to promote
1246 // into a more senior generation.
1247 assert(next_gen() == NULL, "assumption, based upon which no attempt "
1248 "is made to pass on a possibly failing "
1249 "promotion to next generation");
1250 res = _cmsSpace->promote(obj, obj_size);
1251 }
1252 if (res != NULL) {
1253 // See comment in allocate() about when objects should
1254 // be allocated live.
1255 assert(obj->is_oop(), "Will dereference klass pointer below");
1256 collector()->promoted(false, // Not parallel
1257 (HeapWord*)res, obj->is_objArray(), obj_size);
1258 // promotion counters
1259 NOT_PRODUCT(
1260 _numObjectsPromoted++;
1261 _numWordsPromoted +=
1262 (int)(CompactibleFreeListSpace::adjustObjectSize(obj->size()));
1263 )
1264 }
1265 return res;
1266 }
1269 HeapWord*
1270 ConcurrentMarkSweepGeneration::allocation_limit_reached(Space* space,
1271 HeapWord* top,
1272 size_t word_sz)
1273 {
1274 return collector()->allocation_limit_reached(space, top, word_sz);
1275 }
1277 // Things to support parallel young-gen collection.
1278 oop
1279 ConcurrentMarkSweepGeneration::par_promote(int thread_num,
1280 oop old, markOop m,
1281 size_t word_sz) {
1282 #ifndef PRODUCT
1283 if (Universe::heap()->promotion_should_fail()) {
1284 return NULL;
1285 }
1286 #endif // #ifndef PRODUCT
1288 CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1289 PromotionInfo* promoInfo = &ps->promo;
1290 // if we are tracking promotions, then first ensure space for
1291 // promotion (including spooling space for saving header if necessary).
1292 // then allocate and copy, then track promoted info if needed.
1293 // When tracking (see PromotionInfo::track()), the mark word may
1294 // be displaced and in this case restoration of the mark word
1295 // occurs in the (oop_since_save_marks_)iterate phase.
1296 if (promoInfo->tracking() && !promoInfo->ensure_spooling_space()) {
1297 // Out of space for allocating spooling buffers;
1298 // try expanding and allocating spooling buffers.
1299 if (!expand_and_ensure_spooling_space(promoInfo)) {
1300 return NULL;
1301 }
1302 }
1303 assert(promoInfo->has_spooling_space(), "Control point invariant");
1304 HeapWord* obj_ptr = ps->lab.alloc(word_sz);
1305 if (obj_ptr == NULL) {
1306 obj_ptr = expand_and_par_lab_allocate(ps, word_sz);
1307 if (obj_ptr == NULL) {
1308 return NULL;
1309 }
1310 }
1311 oop obj = oop(obj_ptr);
1312 assert(obj->klass() == NULL, "Object should be uninitialized here.");
1313 // Otherwise, copy the object. Here we must be careful to insert the
1314 // klass pointer last, since this marks the block as an allocated object.
1315 HeapWord* old_ptr = (HeapWord*)old;
1316 if (word_sz > (size_t)oopDesc::header_size()) {
1317 Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(),
1318 obj_ptr + oopDesc::header_size(),
1319 word_sz - oopDesc::header_size());
1320 }
1321 // Restore the mark word copied above.
1322 obj->set_mark(m);
1323 // Now we can track the promoted object, if necessary. We take care
1324 // to delay the transition from uninitialized to full object
1325 // (i.e., insertion of klass pointer) until after, so that it
1326 // atomically becomes a promoted object.
1327 if (promoInfo->tracking()) {
1328 promoInfo->track((PromotedObject*)obj, old->klass());
1329 }
1330 // Finally, install the klass pointer.
1331 obj->set_klass(old->klass());
1333 assert(old->is_oop(), "Will dereference klass ptr below");
1334 collector()->promoted(true, // parallel
1335 obj_ptr, old->is_objArray(), word_sz);
1337 NOT_PRODUCT(
1338 Atomic::inc(&_numObjectsPromoted);
1339 Atomic::add((jint)CompactibleFreeListSpace::adjustObjectSize(obj->size()),
1340 &_numWordsPromoted);
1341 )
1343 return obj;
1344 }
1346 void
1347 ConcurrentMarkSweepGeneration::
1348 par_promote_alloc_undo(int thread_num,
1349 HeapWord* obj, size_t word_sz) {
1350 // CMS does not support promotion undo.
1351 ShouldNotReachHere();
1352 }
1354 void
1355 ConcurrentMarkSweepGeneration::
1356 par_promote_alloc_done(int thread_num) {
1357 CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1358 ps->lab.retire();
1359 #if CFLS_LAB_REFILL_STATS
1360 if (thread_num == 0) {
1361 _cmsSpace->print_par_alloc_stats();
1362 }
1363 #endif
1364 }
1366 void
1367 ConcurrentMarkSweepGeneration::
1368 par_oop_since_save_marks_iterate_done(int thread_num) {
1369 CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1370 ParScanWithoutBarrierClosure* dummy_cl = NULL;
1371 ps->promo.promoted_oops_iterate_nv(dummy_cl);
1372 }
1374 // XXXPERM
1375 bool ConcurrentMarkSweepGeneration::should_collect(bool full,
1376 size_t size,
1377 bool tlab)
1378 {
1379 // We allow a STW collection only if a full
1380 // collection was requested.
1381 return full || should_allocate(size, tlab); // FIX ME !!!
1382 // This and promotion failure handling are connected at the
1383 // hip and should be fixed by untying them.
1384 }
1386 bool CMSCollector::shouldConcurrentCollect() {
1387 if (_full_gc_requested) {
1388 assert(ExplicitGCInvokesConcurrent, "Unexpected state");
1389 if (Verbose && PrintGCDetails) {
1390 gclog_or_tty->print_cr("CMSCollector: collect because of explicit "
1391 " gc request");
1392 }
1393 return true;
1394 }
1396 // For debugging purposes, change the type of collection.
1397 // If the rotation is not on the concurrent collection
1398 // type, don't start a concurrent collection.
1399 NOT_PRODUCT(
1400 if (RotateCMSCollectionTypes &&
1401 (_cmsGen->debug_collection_type() !=
1402 ConcurrentMarkSweepGeneration::Concurrent_collection_type)) {
1403 assert(_cmsGen->debug_collection_type() !=
1404 ConcurrentMarkSweepGeneration::Unknown_collection_type,
1405 "Bad cms collection type");
1406 return false;
1407 }
1408 )
1410 FreelistLocker x(this);
1411 // ------------------------------------------------------------------
1412 // Print out lots of information which affects the initiation of
1413 // a collection.
1414 if (PrintCMSInitiationStatistics && stats().valid()) {
1415 gclog_or_tty->print("CMSCollector shouldConcurrentCollect: ");
1416 gclog_or_tty->stamp();
1417 gclog_or_tty->print_cr("");
1418 stats().print_on(gclog_or_tty);
1419 gclog_or_tty->print_cr("time_until_cms_gen_full %3.7f",
1420 stats().time_until_cms_gen_full());
1421 gclog_or_tty->print_cr("free="SIZE_FORMAT, _cmsGen->free());
1422 gclog_or_tty->print_cr("contiguous_available="SIZE_FORMAT,
1423 _cmsGen->contiguous_available());
1424 gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate());
1425 gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate());
1426 gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy());
1427 gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
1428 gclog_or_tty->print_cr("initiatingPermOccupancy=%3.7f", _permGen->initiating_occupancy());
1429 }
1430 // ------------------------------------------------------------------
1432 // If the estimated time to complete a cms collection (cms_duration())
1433 // is less than the estimated time remaining until the cms generation
1434 // is full, start a collection.
1435 if (!UseCMSInitiatingOccupancyOnly) {
1436 if (stats().valid()) {
1437 if (stats().time_until_cms_start() == 0.0) {
1438 return true;
1439 }
1440 } else {
1441 // We want to conservatively collect somewhat early in order
1442 // to try and "bootstrap" our CMS/promotion statistics;
1443 // this branch will not fire after the first successful CMS
1444 // collection because the stats should then be valid.
1445 if (_cmsGen->occupancy() >= _bootstrap_occupancy) {
1446 if (Verbose && PrintGCDetails) {
1447 gclog_or_tty->print_cr(
1448 " CMSCollector: collect for bootstrapping statistics:"
1449 " occupancy = %f, boot occupancy = %f", _cmsGen->occupancy(),
1450 _bootstrap_occupancy);
1451 }
1452 return true;
1453 }
1454 }
1455 }
1457 // Otherwise, we start a collection cycle if either the perm gen or
1458 // old gen wants a collection cycle started. Each may use
1459 // an appropriate criterion for making this decision.
1460 // XXX We need to make sure that the gen expansion
1461 // criterion dovetails well with this. XXX NEED TO FIX THIS
1462 if (_cmsGen->should_concurrent_collect()) {
1463 if (Verbose && PrintGCDetails) {
1464 gclog_or_tty->print_cr("CMS old gen initiated");
1465 }
1466 return true;
1467 }
1469 // We start a collection if we believe an incremental collection may fail;
1470 // this is not likely to be productive in practice because it's probably too
1471 // late anyway.
1472 GenCollectedHeap* gch = GenCollectedHeap::heap();
1473 assert(gch->collector_policy()->is_two_generation_policy(),
1474 "You may want to check the correctness of the following");
1475 if (gch->incremental_collection_will_fail()) {
1476 if (PrintGCDetails && Verbose) {
1477 gclog_or_tty->print("CMSCollector: collect because incremental collection will fail ");
1478 }
1479 return true;
1480 }
1482 if (CMSClassUnloadingEnabled && _permGen->should_concurrent_collect()) {
1483 bool res = update_should_unload_classes();
1484 if (res) {
1485 if (Verbose && PrintGCDetails) {
1486 gclog_or_tty->print_cr("CMS perm gen initiated");
1487 }
1488 return true;
1489 }
1490 }
1491 return false;
1492 }
1494 // Clear _expansion_cause fields of constituent generations
1495 void CMSCollector::clear_expansion_cause() {
1496 _cmsGen->clear_expansion_cause();
1497 _permGen->clear_expansion_cause();
1498 }
1500 // We should be conservative in starting a collection cycle. Starting
1501 // too eagerly runs the risk of collecting far too often in the
1502 // extreme; collecting too rarely falls back on full collections,
1503 // which works, even if not optimal in terms of concurrent work.
1504 // As a workaround for collecting too eagerly, use the flag
1505 // UseCMSInitiatingOccupancyOnly. This also has the advantage of
1506 // giving the user an easily understandable way of controlling the
1507 // collections.
1508 // We want to start a new collection cycle if any of the following
1509 // conditions hold:
1510 // . our current occupancy exceeds the configured initiating occupancy
1511 // for this generation, or
1512 // . we recently needed to expand this space and have not, since that
1513 // expansion, done a collection of this generation, or
1514 // . the underlying space believes that it may be a good idea to initiate
1515 // a concurrent collection (this may be based on criteria such as the
1516 // following: the space uses linear allocation and linear allocation is
1517 // going to fail, or there is believed to be excessive fragmentation in
1518 // the generation, etc.), or ...
1519 // [.(currently done by CMSCollector::shouldConcurrentCollect() only for
1520 // the case of the old generation, not the perm generation; see CR 6543076):
1521 // we may be approaching a point at which allocation requests may fail because
1522 // we will be out of sufficient free space given allocation rate estimates.]
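//
// Illustrative note (not part of the collector itself): the occupancy
// criterion above is what the initiating-occupancy command-line flags
// control. A minimal sketch, assuming the conventional meaning of these
// product flags:
//
//   -XX:+UseCMSInitiatingOccupancyOnly      // initiate on occupancy alone
//   -XX:CMSInitiatingOccupancyFraction=70   // so initiating_occupancy()
//                                           // reflects roughly 70% occupancy
//
// With UseCMSInitiatingOccupancyOnly off, the additional heuristics below
// (recent expansion for allocation, the space's own feedback) may also
// trigger a cycle.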
1523 bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const {
1525 assert_lock_strong(freelistLock());
1526 if (occupancy() > initiating_occupancy()) {
1527 if (PrintGCDetails && Verbose) {
1528 gclog_or_tty->print(" %s: collect because of occupancy %f / %f ",
1529 short_name(), occupancy(), initiating_occupancy());
1530 }
1531 return true;
1532 }
1533 if (UseCMSInitiatingOccupancyOnly) {
1534 return false;
1535 }
1536 if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
1537 if (PrintGCDetails && Verbose) {
1538 gclog_or_tty->print(" %s: collect because expanded for allocation ",
1539 short_name());
1540 }
1541 return true;
1542 }
1543 if (_cmsSpace->should_concurrent_collect()) {
1544 if (PrintGCDetails && Verbose) {
1545 gclog_or_tty->print(" %s: collect because cmsSpace says so ",
1546 short_name());
1547 }
1548 return true;
1549 }
1550 return false;
1551 }
1553 void ConcurrentMarkSweepGeneration::collect(bool full,
1554 bool clear_all_soft_refs,
1555 size_t size,
1556 bool tlab)
1557 {
1558 collector()->collect(full, clear_all_soft_refs, size, tlab);
1559 }
1561 void CMSCollector::collect(bool full,
1562 bool clear_all_soft_refs,
1563 size_t size,
1564 bool tlab)
1565 {
1566 if (!UseCMSCollectionPassing && _collectorState > Idling) {
1567 // For debugging purposes skip the collection if the state
1568 // is not currently idle
1569 if (TraceCMSState) {
1570 gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " skipped full:%d CMS state %d",
1571 Thread::current(), full, _collectorState);
1572 }
1573 return;
1574 }
1576 // The following "if" branch is present for defensive reasons.
1577 // In the current uses of this interface, it can be replaced with:
1578 //    assert(!GC_locker::is_active(), "Can't be called otherwise");
1579 // But I am not placing that assert here to allow future
1580 // generality in invoking this interface.
1581 if (GC_locker::is_active()) {
1582 // A consistency test for GC_locker
1583 assert(GC_locker::needs_gc(), "Should have been set already");
1584 // Skip this foreground collection, instead
1585 // expanding the heap if necessary.
1586 // Need the free list locks for the call to free() in compute_new_size()
1587 compute_new_size();
1588 return;
1589 }
1590 acquire_control_and_collect(full, clear_all_soft_refs);
1591 _full_gcs_since_conc_gc++;
1593 }
1595 void CMSCollector::request_full_gc(unsigned int full_gc_count) {
1596 GenCollectedHeap* gch = GenCollectedHeap::heap();
1597 unsigned int gc_count = gch->total_full_collections();
1598 if (gc_count == full_gc_count) {
1599 MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
1600 _full_gc_requested = true;
1601 CGC_lock->notify(); // nudge CMS thread
1602 }
1603 }
1606 // The foreground and background collectors need to coordinate in order
1607 // to make sure that they do not mutually interfere with CMS collections.
1608 // When a background collection is active,
1609 // the foreground collector may need to take over (preempt) and
1610 // synchronously complete an ongoing collection. Depending on the
1611 // frequency of the background collections and the heap usage
1612 // of the application, this preemption can be rare or frequent.
1613 // There are only certain points in the background collection at
1614 // which the "collection-baton" can be passed to the foreground
1615 // collector.
1616 //
1617 // The foreground collector will wait for the baton before
1618 // starting any part of the collection. The foreground collector
1619 // will only wait at one location.
1620 //
1621 // The background collector will yield the baton before starting a new
1622 // phase of the collection (e.g., before initial marking, marking from roots,
1623 // precleaning, final re-mark, sweep, etc.). This is normally done at the head
1624 // of the loop which switches the phases. The background collector does some
1625 // of the phases (initial mark, final re-mark) with the world stopped.
1626 // Because of locking involved in stopping the world,
1627 // the foreground collector should not block waiting for the background
1628 // collector when it is doing a stop-the-world phase. The background
1629 // collector will yield the baton at an additional point just before
1630 // it enters a stop-the-world phase. Once the world is stopped, the
1631 // background collector checks the phase of the collection. If the
1632 // phase has not changed, it proceeds with the collection. If the
1633 // phase has changed, it skips that phase of the collection. See
1634 // the comments on the use of the Heap_lock in collect_in_background().
1635 //
1636 // Variables used in baton passing.
1637 //   _foregroundGCIsActive - Set to true by the foreground collector when
1638 //      it wants the baton. The foreground collector clears it when it has
1639 //      finished the collection.
1640 // _foregroundGCShouldWait - Set to true by the background collector
1641 // when it is running. The foreground collector waits while
1642 // _foregroundGCShouldWait is true.
1643 // CGC_lock - monitor used to protect access to the above variables
1644 // and to notify the foreground and background collectors.
1645 // _collectorState - current state of the CMS collection.
1646 //
1647 // The foreground collector
1648 // acquires the CGC_lock
1649 // sets _foregroundGCIsActive
1650 // waits on the CGC_lock for _foregroundGCShouldWait to be false
1651 // various locks acquired in preparation for the collection
1652 // are released so as not to block the background collector
1653 // that is in the midst of a collection
1654 // proceeds with the collection
1655 // clears _foregroundGCIsActive
1656 // returns
1657 //
1658 // The background collector in a loop iterating on the phases of the
1659 // collection
1660 // acquires the CGC_lock
1661 // sets _foregroundGCShouldWait
1662 // if _foregroundGCIsActive is set
1663 //       clears _foregroundGCShouldWait, notifies CGC_lock
1664 //       waits on CGC_lock for _foregroundGCIsActive to become false
1665 // and exits the loop.
1666 // otherwise
1667 // proceed with that phase of the collection
1668 // if the phase is a stop-the-world phase,
1669 // yield the baton once more just before enqueueing
1670 // the stop-world CMS operation (executed by the VM thread).
1671 // returns after all phases of the collection are done
1672 //
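//
// Illustrative sketch only (not compiled into the collector): the
// foreground side of the handshake above reduces to a classic monitor
// wait loop on CGC_lock. The names below mirror the fields used in
// acquire_control_and_collect() and waitForForegroundGC(); treat this as
// a simplified restatement of that code, not additional behavior.
//
//   // Foreground (VM thread), at a safepoint:
//   _foregroundGCIsActive = true;
//   {
//     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
//     CGC_lock->notify();                     // wake a blocked CMS thread
//     while (_foregroundGCShouldWait) {
//       CGC_lock->wait(Mutex::_no_safepoint_check_flag);
//     }
//   }
//   // ... perform the foreground collection ...
//   _foregroundGCIsActive = false;
//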
1674 void CMSCollector::acquire_control_and_collect(bool full,
1675 bool clear_all_soft_refs) {
1676 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
1677 assert(!Thread::current()->is_ConcurrentGC_thread(),
1678 "shouldn't try to acquire control from self!");
1680 // Start the protocol for acquiring control of the
1681 // collection from the background collector (aka CMS thread).
1682 assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1683 "VM thread should have CMS token");
1684 // Remember the possibly interrupted state of an ongoing
1685 // concurrent collection
1686 CollectorState first_state = _collectorState;
1688 // Signal to a possibly ongoing concurrent collection that
1689 // we want to do a foreground collection.
1690 _foregroundGCIsActive = true;
1692 // Disable incremental mode during a foreground collection.
1693 ICMSDisabler icms_disabler;
1695 // release locks and wait for a notify from the background collector
1696 // releasing the locks is only necessary for phases which
1697 // do yields to improve the granularity of the collection.
1698 assert_lock_strong(bitMapLock());
1699 // We need to lock the Free list lock for the space that we are
1700 // currently collecting.
1701 assert(haveFreelistLocks(), "Must be holding free list locks");
1702 bitMapLock()->unlock();
1703 releaseFreelistLocks();
1704 {
1705 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1706 if (_foregroundGCShouldWait) {
1707 // We are going to be waiting for action from the CMS thread;
1708 // it had better not be gone (for instance at shutdown)!
1709 assert(ConcurrentMarkSweepThread::cmst() != NULL,
1710 "CMS thread must be running");
1711 // Wait here until the background collector gives us the go-ahead
1712 ConcurrentMarkSweepThread::clear_CMS_flag(
1713 ConcurrentMarkSweepThread::CMS_vm_has_token); // release token
1714 // Get a possibly blocked CMS thread going:
1715 // Note that we set _foregroundGCIsActive true above,
1716 // without protection of the CGC_lock.
1717 CGC_lock->notify();
1718 assert(!ConcurrentMarkSweepThread::vm_thread_wants_cms_token(),
1719 "Possible deadlock");
1720 while (_foregroundGCShouldWait) {
1721 // wait for notification
1722 CGC_lock->wait(Mutex::_no_safepoint_check_flag);
1723 // Possibility of delay/starvation here, since CMS token does
1724 // not know to give priority to VM thread? Actually, I think
1725 // there wouldn't be any delay/starvation, but the proof of
1726 // that "fact" (?) appears non-trivial. XXX 20011219YSR
1727 }
1728 ConcurrentMarkSweepThread::set_CMS_flag(
1729 ConcurrentMarkSweepThread::CMS_vm_has_token);
1730 }
1731 }
1732 // The CMS_token is already held. Get back the other locks.
1733 assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1734 "VM thread should have CMS token");
1735 getFreelistLocks();
1736 bitMapLock()->lock_without_safepoint_check();
1737 if (TraceCMSState) {
1738 gclog_or_tty->print_cr("CMS foreground collector has asked for control "
1739 INTPTR_FORMAT " with first state %d", Thread::current(), first_state);
1740 gclog_or_tty->print_cr(" gets control with state %d", _collectorState);
1741 }
1743 // Check if we need to do a compaction, or if not, whether
1744 // we need to start the mark-sweep from scratch.
1745 bool should_compact = false;
1746 bool should_start_over = false;
1747 decide_foreground_collection_type(clear_all_soft_refs,
1748 &should_compact, &should_start_over);
1750 NOT_PRODUCT(
1751 if (RotateCMSCollectionTypes) {
1752 if (_cmsGen->debug_collection_type() ==
1753 ConcurrentMarkSweepGeneration::MSC_foreground_collection_type) {
1754 should_compact = true;
1755 } else if (_cmsGen->debug_collection_type() ==
1756 ConcurrentMarkSweepGeneration::MS_foreground_collection_type) {
1757 should_compact = false;
1758 }
1759 }
1760 )
1762 if (PrintGCDetails && first_state > Idling) {
1763 GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause();
1764 if (GCCause::is_user_requested_gc(cause) ||
1765 GCCause::is_serviceability_requested_gc(cause)) {
1766 gclog_or_tty->print(" (concurrent mode interrupted)");
1767 } else {
1768 gclog_or_tty->print(" (concurrent mode failure)");
1769 }
1770 }
1772 if (should_compact) {
1773 // If the collection is being acquired from the background
1774 // collector, there may be references on the discovered
1775 // references lists that have NULL referents (being those
1776 // that were concurrently cleared by a mutator) or
1777 // that are no longer active (having been enqueued concurrently
1778 // by the mutator).
1779 // Scrub the list of those references because Mark-Sweep-Compact
1780 // code assumes referents are not NULL and that all discovered
1781 // Reference objects are active.
1782 ref_processor()->clean_up_discovered_references();
1784 do_compaction_work(clear_all_soft_refs);
1786 // Has the GC time limit been exceeded?
1787 check_gc_time_limit();
1789 } else {
1790 do_mark_sweep_work(clear_all_soft_refs, first_state,
1791 should_start_over);
1792 }
1793 // Reset the expansion cause, now that we just completed
1794 // a collection cycle.
1795 clear_expansion_cause();
1796 _foregroundGCIsActive = false;
1797 return;
1798 }
1800 void CMSCollector::check_gc_time_limit() {
1802 // Ignore explicit GC's. Exiting here does not set the flag and
1803 // does not reset the count. Updating of the averages for system
1804 // GC's is still controlled by UseAdaptiveSizePolicyWithSystemGC.
1805 GCCause::Cause gc_cause = GenCollectedHeap::heap()->gc_cause();
1806 if (GCCause::is_user_requested_gc(gc_cause) ||
1807 GCCause::is_serviceability_requested_gc(gc_cause)) {
1808 return;
1809 }
1811 // Calculate the fraction of the CMS generation that was freed during
1812 // the last collection.
1813 // Only consider the STW compacting cost for now.
1814 //
1815 // Note that the gc time limit test only works for the collections
1816 // of the young gen + tenured gen and not for collections of the
1817 // permanent gen. That is because the calculation of the space
1818 // freed by the collection is the free space in the young gen +
1819 // tenured gen.
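  // Worked example (illustrative numbers only, assuming the usual defaults
  // GCTimeLimit=98 and GCHeapFreeLimit=2): a compacting_gc_cost() of 0.99
  // (99% of recent time spent in GC) together with fraction_free = 0.01
  // (1% of the CMS generation free) satisfies both clauses of the test
  // below, so the gc_time_limit_count is incremented; only after that count
  // exceeds AdaptiveSizePolicyGCTimeLimitThreshold (and with
  // UseGCOverheadLimit set) is the limit declared exceeded.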
1821 double fraction_free =
1822 ((double)_cmsGen->free())/((double)_cmsGen->max_capacity());
1823 if ((100.0 * size_policy()->compacting_gc_cost()) >
1824 ((double) GCTimeLimit) &&
1825 ((fraction_free * 100) < GCHeapFreeLimit)) {
1826 size_policy()->inc_gc_time_limit_count();
1827 if (UseGCOverheadLimit &&
1828 (size_policy()->gc_time_limit_count() >
1829 AdaptiveSizePolicyGCTimeLimitThreshold)) {
1830 size_policy()->set_gc_time_limit_exceeded(true);
1831 // Avoid consecutive OOM due to the gc time limit by resetting
1832 // the counter.
1833 size_policy()->reset_gc_time_limit_count();
1834 if (PrintGCDetails) {
1835 gclog_or_tty->print_cr(" GC is exceeding overhead limit "
1836 "of %d%%", GCTimeLimit);
1837 }
1838 } else {
1839 if (PrintGCDetails) {
1840 gclog_or_tty->print_cr(" GC would exceed overhead limit "
1841 "of %d%%", GCTimeLimit);
1842 }
1843 }
1844 } else {
1845 size_policy()->reset_gc_time_limit_count();
1846 }
1847 }
1849 // Resize the perm generation and the tenured generation
1850 // after obtaining the free list locks for the
1851 // two generations.
1852 void CMSCollector::compute_new_size() {
1853 assert_locked_or_safepoint(Heap_lock);
1854 FreelistLocker z(this);
1855 _permGen->compute_new_size();
1856 _cmsGen->compute_new_size();
1857 }
1859 // A work method used by the foreground collector to determine
1860 // what type of collection (compacting or not, continuing or fresh)
1861 // it should do.
1862 // NOTE: the intent is to make UseCMSCompactAtFullCollection
1863 // and CMSCompactWhenClearAllSoftRefs the default in the future
1864 // and do away with the flags after a suitable period.
1865 void CMSCollector::decide_foreground_collection_type(
1866 bool clear_all_soft_refs, bool* should_compact,
1867 bool* should_start_over) {
1868 // Normally, we'll compact only if the UseCMSCompactAtFullCollection
1869 // flag is set, and either a System.gc() has been requested, or the
1870 // number of full gc's since the last concurrent cycle has exceeded the
1871 // threshold set by CMSFullGCsBeforeCompaction, or an incremental
1872 // collection has failed. (A summary sketch follows this method.)
1873 GenCollectedHeap* gch = GenCollectedHeap::heap();
1874 assert(gch->collector_policy()->is_two_generation_policy(),
1875 "You may want to check the correctness of the following");
1876 // Inform cms gen if this was due to partial collection failing.
1877 // The CMS gen may use this fact to determine its expansion policy.
1878 if (gch->incremental_collection_will_fail()) {
1879 assert(!_cmsGen->incremental_collection_failed(),
1880 "Should have been noticed, reacted to and cleared");
1881 _cmsGen->set_incremental_collection_failed();
1882 }
1883 *should_compact =
1884 UseCMSCompactAtFullCollection &&
1885 ((_full_gcs_since_conc_gc >= CMSFullGCsBeforeCompaction) ||
1886 GCCause::is_user_requested_gc(gch->gc_cause()) ||
1887 gch->incremental_collection_will_fail());
1888 *should_start_over = false;
1889 if (clear_all_soft_refs && !*should_compact) {
1890 // We are about to do a last ditch collection attempt
1891 // so it would normally make sense to do a compaction
1892 // to reclaim as much space as possible.
1893 if (CMSCompactWhenClearAllSoftRefs) {
1894 // Default: The rationale is that in this case either
1895 // we are past the final marking phase, in which case
1896 // we'd have to start over, or so little has been done
1897 // that there's little point in saving that work. Compaction
1898 // appears to be the sensible choice in either case.
1899 *should_compact = true;
1900 } else {
1901 // We have been asked to clear all soft refs, but not to
1902 // compact. Make sure that we aren't past the final checkpoint
1903 // phase, for that is where we process soft refs. If we are already
1904 // past that phase, we'll need to redo the refs discovery phase and
1905 // if necessary clear soft refs that weren't previously
1906 // cleared. We do so by remembering the phase in which
1907 // we came in, and if we are past the refs processing
1908 // phase, we'll choose to just redo the mark-sweep
1909 // collection from scratch.
1910 if (_collectorState > FinalMarking) {
1911 // We are past the refs processing phase;
1912 // start over and do a fresh synchronous CMS cycle
1913 _collectorState = Resetting; // skip to reset to start new cycle
1914 reset(false /* == !asynch */);
1915 *should_start_over = true;
1916 } // else we can continue a possibly ongoing current cycle
1917 }
1918 }
1919 }
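// Decision summary (a descriptive restatement of the logic above, not new
// behavior):
//
//   *should_compact    <- UseCMSCompactAtFullCollection &&
//                         (full GCs since last concurrent cycle >=
//                            CMSFullGCsBeforeCompaction ||
//                          user-requested GC ||
//                          incremental collection will fail)
//                         ... and additionally when clear_all_soft_refs is
//                         set and CMSCompactWhenClearAllSoftRefs is enabled.
//   *should_start_over <- clear_all_soft_refs, no compaction, and we were
//                         already past FinalMarking (i.e. past the point at
//                         which soft refs are processed).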
1921 // A work method used by the foreground collector to do
1922 // a mark-sweep-compact.
1923 void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
1924 GenCollectedHeap* gch = GenCollectedHeap::heap();
1925 TraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, gclog_or_tty);
1926 if (PrintGC && Verbose && !(GCCause::is_user_requested_gc(gch->gc_cause()))) {
1927 gclog_or_tty->print_cr("Compact ConcurrentMarkSweepGeneration after %d "
1928 "collections passed to foreground collector", _full_gcs_since_conc_gc);
1929 }
1931 // Sample collection interval time and reset for collection pause.
1932 if (UseAdaptiveSizePolicy) {
1933 size_policy()->msc_collection_begin();
1934 }
1936 // Temporarily widen the span of the weak reference processing to
1937 // the entire heap.
1938 MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
1939 ReferenceProcessorSpanMutator x(ref_processor(), new_span);
1941 // Temporarily, clear the "is_alive_non_header" field of the
1942 // reference processor.
1943 ReferenceProcessorIsAliveMutator y(ref_processor(), NULL);
1945 // Temporarily make reference _processing_ single threaded (non-MT).
1946 ReferenceProcessorMTProcMutator z(ref_processor(), false);
1948 // Temporarily make refs discovery atomic
1949 ReferenceProcessorAtomicMutator w(ref_processor(), true);
1951 ref_processor()->set_enqueuing_is_done(false);
1952 ref_processor()->enable_discovery();
1953 // If an asynchronous collection finishes, the _modUnionTable is
1954 // all clear. If we are taking over the collection from an asynchronous
1955 // collection, clear the _modUnionTable.
1956 assert(_collectorState != Idling || _modUnionTable.isAllClear(),
1957 "_modUnionTable should be clear if the baton was not passed");
1958 _modUnionTable.clear_all();
1960 // We must adjust the allocation statistics being maintained
1961 // in the free list space. We do so by reading and clearing
1962 // the sweep timer and updating the block flux rate estimates below.
1963 assert(_sweep_timer.is_active(), "We should never see the timer inactive");
1964 _sweep_timer.stop();
1965 // Note that we do not use this sample to update the _sweep_estimate.
1966 _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_sweep_timer.seconds()),
1967 _sweep_estimate.padded_average());
1969 GenMarkSweep::invoke_at_safepoint(_cmsGen->level(),
1970 ref_processor(), clear_all_soft_refs);
1971 #ifdef ASSERT
1972 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
1973 size_t free_size = cms_space->free();
1974 assert(free_size ==
1975 pointer_delta(cms_space->end(), cms_space->compaction_top())
1976 * HeapWordSize,
1977 "All the free space should be compacted into one chunk at top");
1978 assert(cms_space->dictionary()->totalChunkSize(
1979 debug_only(cms_space->freelistLock())) == 0 ||
1980 cms_space->totalSizeInIndexedFreeLists() == 0,
1981 "All the free space should be in a single chunk");
1982 size_t num = cms_space->totalCount();
1983 assert((free_size == 0 && num == 0) ||
1984 (free_size > 0 && (num == 1 || num == 2)),
1985 "There should be at most 2 free chunks after compaction");
1986 #endif // ASSERT
1987 _collectorState = Resetting;
1988 assert(_restart_addr == NULL,
1989 "Should have been NULL'd before baton was passed");
1990 reset(false /* == !asynch */);
1991 _cmsGen->reset_after_compaction();
1992 _concurrent_cycles_since_last_unload = 0;
1994 if (verifying() && !should_unload_classes()) {
1995 perm_gen_verify_bit_map()->clear_all();
1996 }
1998 // Clear any data recorded in the PLAB chunk arrays.
1999 if (_survivor_plab_array != NULL) {
2000 reset_survivor_plab_arrays();
2001 }
2003 // Adjust the per-size allocation stats for the next epoch.
2004 _cmsGen->cmsSpace()->endSweepFLCensus(sweepCount() /* fake */);
2005 // Restart the "sweep timer" for next epoch.
2006 _sweep_timer.reset();
2007 _sweep_timer.start();
2009 // Sample collection pause time and reset for collection interval.
2010 if (UseAdaptiveSizePolicy) {
2011 size_policy()->msc_collection_end(gch->gc_cause());
2012 }
2014 // For a mark-sweep-compact, compute_new_size() will be called
2015 // in the heap's do_collection() method.
2016 }
2018 // A work method used by the foreground collector to do
2019 // a mark-sweep, after taking over from a possibly on-going
2020 // concurrent mark-sweep collection.
2021 void CMSCollector::do_mark_sweep_work(bool clear_all_soft_refs,
2022 CollectorState first_state, bool should_start_over) {
2023 if (PrintGC && Verbose) {
2024 gclog_or_tty->print_cr("Pass concurrent collection to foreground "
2025 "collector with count %d",
2026 _full_gcs_since_conc_gc);
2027 }
2028 switch (_collectorState) {
2029 case Idling:
2030 if (first_state == Idling || should_start_over) {
2031 // The background GC was not active, or should be
2032 // restarted from scratch; start the cycle.
2033 _collectorState = InitialMarking;
2034 }
2035 // If first_state was not Idling, then a background GC
2036 // was in progress and has now finished. No need to do it
2037 // again. Leave the state as Idling.
2038 break;
2039 case Precleaning:
2040 // In the foreground case don't do the precleaning since
2041 // it is not done concurrently and there is extra work
2042 // required.
2043 _collectorState = FinalMarking;
2044 }
2045 if (PrintGCDetails &&
2046 (_collectorState > Idling ||
2047 !GCCause::is_user_requested_gc(GenCollectedHeap::heap()->gc_cause()))) {
2048 gclog_or_tty->print(" (concurrent mode failure)");
2049 }
2050 collect_in_foreground(clear_all_soft_refs);
2052 // For a mark-sweep, compute_new_size() will be called
2053 // in the heap's do_collection() method.
2054 }
2057 void CMSCollector::getFreelistLocks() const {
2058 // Get locks for all free lists in all generations that this
2059 // collector is responsible for
2060 _cmsGen->freelistLock()->lock_without_safepoint_check();
2061 _permGen->freelistLock()->lock_without_safepoint_check();
2062 }
2064 void CMSCollector::releaseFreelistLocks() const {
2065 // Release locks for all free lists in all generations that this
2066 // collector is responsible for
2067 _cmsGen->freelistLock()->unlock();
2068 _permGen->freelistLock()->unlock();
2069 }
2071 bool CMSCollector::haveFreelistLocks() const {
2072 // Check locks for all free lists in all generations that this
2073 // collector is responsible for
2074 assert_lock_strong(_cmsGen->freelistLock());
2075 assert_lock_strong(_permGen->freelistLock());
2076 PRODUCT_ONLY(ShouldNotReachHere());
2077 return true;
2078 }
2080 // A utility class that is used by the CMS collector to
2081 // temporarily "release" the foreground collector from its
2082 // usual obligation to wait for the background collector to
2083 // complete an ongoing phase before proceeding.
2084 class ReleaseForegroundGC: public StackObj {
2085 private:
2086 CMSCollector* _c;
2087 public:
2088 ReleaseForegroundGC(CMSCollector* c) : _c(c) {
2089 assert(_c->_foregroundGCShouldWait, "Else should not need to call");
2090 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2091 // allow a potentially blocked foreground collector to proceed
2092 _c->_foregroundGCShouldWait = false;
2093 if (_c->_foregroundGCIsActive) {
2094 CGC_lock->notify();
2095 }
2096 assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2097 "Possible deadlock");
2098 }
2100 ~ReleaseForegroundGC() {
2101 assert(!_c->_foregroundGCShouldWait, "Usage protocol violation?");
2102 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2103 _c->_foregroundGCShouldWait = true;
2104 }
2105 };
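//
// Illustrative usage sketch (mirrors the pattern in collect_in_background()
// below; it adds no new functionality): the background collector brackets a
// stop-the-world request with a ReleaseForegroundGC so that a foreground
// collection blocked on _foregroundGCShouldWait can proceed while the VM
// operation is pending:
//
//   {
//     ReleaseForegroundGC x(this);          // clears _foregroundGCShouldWait
//     VM_CMS_Initial_Mark initial_mark_op(this);
//     VMThread::execute(&initial_mark_op);
//   }                                       // destructor re-asserts the wait
//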
2107 // There are separate collect_in_background and collect_in_foreground because of
2108 // the different locking requirements of the background collector and the
2109 // foreground collector. There was originally an attempt to share
2110 // one "collect" method between the background collector and the foreground
2111 // collector, but the if-then-else logic required made it cleaner to have
2112 // separate methods.
2113 void CMSCollector::collect_in_background(bool clear_all_soft_refs) {
2114 assert(Thread::current()->is_ConcurrentGC_thread(),
2115 "A CMS asynchronous collection is only allowed on a CMS thread.");
2117 GenCollectedHeap* gch = GenCollectedHeap::heap();
2118 {
2119 bool safepoint_check = Mutex::_no_safepoint_check_flag;
2120 MutexLockerEx hl(Heap_lock, safepoint_check);
2121 FreelistLocker fll(this);
2122 MutexLockerEx x(CGC_lock, safepoint_check);
2123 if (_foregroundGCIsActive || !UseAsyncConcMarkSweepGC) {
2124 // The foreground collector is active or we're
2125 // not using asynchronous collections. Skip this
2126 // background collection.
2127 assert(!_foregroundGCShouldWait, "Should be clear");
2128 return;
2129 } else {
2130 assert(_collectorState == Idling, "Should be idling before start.");
2131 _collectorState = InitialMarking;
2132 // Reset the expansion cause, now that we are about to begin
2133 // a new cycle.
2134 clear_expansion_cause();
2135 }
2136 // Decide if we want to enable class unloading as part of the
2137 // ensuing concurrent GC cycle.
2138 update_should_unload_classes();
2139 _full_gc_requested = false; // acks all outstanding full gc requests
2140 // Signal that we are about to start a collection
2141 gch->increment_total_full_collections(); // ... starting a collection cycle
2142 _collection_count_start = gch->total_full_collections();
2143 }
2145 // Used for PrintGC
2146 size_t prev_used;
2147 if (PrintGC && Verbose) {
2148 prev_used = _cmsGen->used(); // XXXPERM
2149 }
2151 // The change of the collection state is normally done at this level;
2152 // the exceptions are phases that are executed while the world is
2153 // stopped. For those phases the change of state is done while the
2154 // world is stopped. For baton passing purposes this allows the
2155 // background collector to finish the phase and change state atomically.
2156 // The foreground collector cannot wait on a phase that is done
2157 // while the world is stopped because the foreground collector already
2158 // has the world stopped and would deadlock.
2159 while (_collectorState != Idling) {
2160 if (TraceCMSState) {
2161 gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
2162 Thread::current(), _collectorState);
2163 }
2164 // The foreground collector
2165 // holds the Heap_lock throughout its collection.
2166 // holds the CMS token (but not the lock)
2167 // except while it is waiting for the background collector to yield.
2168 //
2169 // The foreground collector should be blocked (not for long)
2170 // if the background collector is about to start a phase
2171 // executed with world stopped. If the background
2172 // collector has already started such a phase, the
2173 // foreground collector is blocked waiting for the
2174 // Heap_lock. The stop-world phases (InitialMarking and FinalMarking)
2175 // are executed in the VM thread.
2176 //
2177 // The locking order is
2178 // PendingListLock (PLL) -- if applicable (FinalMarking)
2179 // Heap_lock (both this & PLL locked in VM_CMS_Operation::prologue())
2180 // CMS token (claimed in
2181 // stop_world_and_do() -->
2182 // safepoint_synchronize() -->
2183 // CMSThread::synchronize())
2185 {
2186 // Check if the FG collector wants us to yield.
2187 CMSTokenSync x(true); // is cms thread
2188 if (waitForForegroundGC()) {
2189 // We yielded to a foreground GC, nothing more to be
2190 // done this round.
2191 assert(_foregroundGCShouldWait == false, "We set it to false in "
2192 "waitForForegroundGC()");
2193 if (TraceCMSState) {
2194 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2195 " exiting collection CMS state %d",
2196 Thread::current(), _collectorState);
2197 }
2198 return;
2199 } else {
2200 // The background collector can run but check to see if the
2201 // foreground collector has done a collection while the
2202 // background collector was waiting to get the CGC_lock
2203 // above. If yes, break so that _foregroundGCShouldWait
2204 // is cleared before returning.
2205 if (_collectorState == Idling) {
2206 break;
2207 }
2208 }
2209 }
2211 assert(_foregroundGCShouldWait, "Foreground collector, if active, "
2212 "should be waiting");
2214 switch (_collectorState) {
2215 case InitialMarking:
2216 {
2217 ReleaseForegroundGC x(this);
2218 stats().record_cms_begin();
2220 VM_CMS_Initial_Mark initial_mark_op(this);
2221 VMThread::execute(&initial_mark_op);
2222 }
2223 // The collector state may be any legal state at this point
2224 // since the background collector may have yielded to the
2225 // foreground collector.
2226 break;
2227 case Marking:
2228 // initial marking in checkpointRootsInitialWork has been completed
2229 if (markFromRoots(true)) { // we were successful
2230 assert(_collectorState == Precleaning, "Collector state should "
2231 "have changed");
2232 } else {
2233 assert(_foregroundGCIsActive, "Internal state inconsistency");
2234 }
2235 break;
2236 case Precleaning:
2237 if (UseAdaptiveSizePolicy) {
2238 size_policy()->concurrent_precleaning_begin();
2239 }
2240 // marking from roots in markFromRoots has been completed
2241 preclean();
2242 if (UseAdaptiveSizePolicy) {
2243 size_policy()->concurrent_precleaning_end();
2244 }
2245 assert(_collectorState == AbortablePreclean ||
2246 _collectorState == FinalMarking,
2247 "Collector state should have changed");
2248 break;
2249 case AbortablePreclean:
2250 if (UseAdaptiveSizePolicy) {
2251 size_policy()->concurrent_phases_resume();
2252 }
2253 abortable_preclean();
2254 if (UseAdaptiveSizePolicy) {
2255 size_policy()->concurrent_precleaning_end();
2256 }
2257 assert(_collectorState == FinalMarking, "Collector state should "
2258 "have changed");
2259 break;
2260 case FinalMarking:
2261 {
2262 ReleaseForegroundGC x(this);
2264 VM_CMS_Final_Remark final_remark_op(this);
2265 VMThread::execute(&final_remark_op);
2266 }
2267 assert(_foregroundGCShouldWait, "block post-condition");
2268 break;
2269 case Sweeping:
2270 if (UseAdaptiveSizePolicy) {
2271 size_policy()->concurrent_sweeping_begin();
2272 }
2273 // final marking in checkpointRootsFinal has been completed
2274 sweep(true);
2275 assert(_collectorState == Resizing, "Collector state change "
2276 "to Resizing must be done under the free_list_lock");
2277 _full_gcs_since_conc_gc = 0;
2279 // Stop the timers for adaptive size policy for the concurrent phases
2280 if (UseAdaptiveSizePolicy) {
2281 size_policy()->concurrent_sweeping_end();
2282 size_policy()->concurrent_phases_end(gch->gc_cause(),
2283 gch->prev_gen(_cmsGen)->capacity(),
2284 _cmsGen->free());
2285 }
2287 case Resizing: {
2288 // Sweeping has been completed...
2289 // At this point the background collection has completed.
2290 // Don't move the call to compute_new_size() down
2291 // into code that might be executed if the background
2292 // collection was preempted.
2293 {
2294 ReleaseForegroundGC x(this); // unblock FG collection
2295 MutexLockerEx y(Heap_lock, Mutex::_no_safepoint_check_flag);
2296 CMSTokenSync z(true); // not strictly needed.
2297 if (_collectorState == Resizing) {
2298 compute_new_size();
2299 _collectorState = Resetting;
2300 } else {
2301 assert(_collectorState == Idling, "The state should only change"
2302 " because the foreground collector has finished the collection");
2303 }
2304 }
2305 break;
2306 }
2307 case Resetting:
2308 // CMS heap resizing has been completed
2309 reset(true);
2310 assert(_collectorState == Idling, "Collector state should "
2311 "have changed");
2312 stats().record_cms_end();
2313 // Don't move the concurrent_phases_end() and compute_new_size()
2314 // calls to here because a preempted background collection
2315 // has its state set to "Resetting".
2316 break;
2317 case Idling:
2318 default:
2319 ShouldNotReachHere();
2320 break;
2321 }
2322 if (TraceCMSState) {
2323 gclog_or_tty->print_cr(" Thread " INTPTR_FORMAT " done - next CMS state %d",
2324 Thread::current(), _collectorState);
2325 }
2326 assert(_foregroundGCShouldWait, "block post-condition");
2327 }
2329 // Should this be in gc_epilogue?
2330 collector_policy()->counters()->update_counters();
2332 {
2333 // Clear _foregroundGCShouldWait and, in the event that the
2334 // foreground collector is waiting, notify it, before
2335 // returning.
2336 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2337 _foregroundGCShouldWait = false;
2338 if (_foregroundGCIsActive) {
2339 CGC_lock->notify();
2340 }
2341 assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2342 "Possible deadlock");
2343 }
2344 if (TraceCMSState) {
2345 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2346 " exiting collection CMS state %d",
2347 Thread::current(), _collectorState);
2348 }
2349 if (PrintGC && Verbose) {
2350 _cmsGen->print_heap_change(prev_used);
2351 }
2352 }
2354 void CMSCollector::collect_in_foreground(bool clear_all_soft_refs) {
2355 assert(_foregroundGCIsActive && !_foregroundGCShouldWait,
2356 "Foreground collector should be waiting, not executing");
2357 assert(Thread::current()->is_VM_thread(), "A foreground collection "
2358 "may only be done by the VM Thread with the world stopped");
2359 assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
2360 "VM thread should have CMS token");
2362 NOT_PRODUCT(TraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose,
2363 true, gclog_or_tty);)
2364 if (UseAdaptiveSizePolicy) {
2365 size_policy()->ms_collection_begin();
2366 }
2367 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
2369 HandleMark hm; // Discard invalid handles created during verification
2371 if (VerifyBeforeGC &&
2372 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2373 Universe::verify(true);
2374 }
2376 bool init_mark_was_synchronous = false; // until proven otherwise
2377 while (_collectorState != Idling) {
2378 if (TraceCMSState) {
2379 gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
2380 Thread::current(), _collectorState);
2381 }
2382 switch (_collectorState) {
2383 case InitialMarking:
2384 init_mark_was_synchronous = true; // fact to be exploited in re-mark
2385 checkpointRootsInitial(false);
2386 assert(_collectorState == Marking, "Collector state should have changed"
2387 " within checkpointRootsInitial()");
2388 break;
2389 case Marking:
2390 // initial marking in checkpointRootsInitialWork has been completed
2391 if (VerifyDuringGC &&
2392 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2393 gclog_or_tty->print("Verify before initial mark: ");
2394 Universe::verify(true);
2395 }
2396 {
2397 bool res = markFromRoots(false);
2398 assert(res && _collectorState == FinalMarking, "Collector state should "
2399 "have changed");
2400 break;
2401 }
2402 case FinalMarking:
2403 if (VerifyDuringGC &&
2404 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2405 gclog_or_tty->print("Verify before re-mark: ");
2406 Universe::verify(true);
2407 }
2408 checkpointRootsFinal(false, clear_all_soft_refs,
2409 init_mark_was_synchronous);
2410 assert(_collectorState == Sweeping, "Collector state should not "
2411 "have changed within checkpointRootsFinal()");
2412 break;
2413 case Sweeping:
2414 // final marking in checkpointRootsFinal has been completed
2415 if (VerifyDuringGC &&
2416 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2417 gclog_or_tty->print("Verify before sweep: ");
2418 Universe::verify(true);
2419 }
2420 sweep(false);
2421 assert(_collectorState == Resizing, "Incorrect state");
2422 break;
2423 case Resizing: {
2424 // Sweeping has been completed; the actual resize in this case
2425 // is done separately; nothing to be done in this state.
2426 _collectorState = Resetting;
2427 break;
2428 }
2429 case Resetting:
2430 // The heap has been resized.
2431 if (VerifyDuringGC &&
2432 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2433 gclog_or_tty->print("Verify before reset: ");
2434 Universe::verify(true);
2435 }
2436 reset(false);
2437 assert(_collectorState == Idling, "Collector state should "
2438 "have changed");
2439 break;
2440 case Precleaning:
2441 case AbortablePreclean:
2442 // Elide the preclean phase
2443 _collectorState = FinalMarking;
2444 break;
2445 default:
2446 ShouldNotReachHere();
2447 }
2448 if (TraceCMSState) {
2449 gclog_or_tty->print_cr(" Thread " INTPTR_FORMAT " done - next CMS state %d",
2450 Thread::current(), _collectorState);
2451 }
2452 }
2454 if (UseAdaptiveSizePolicy) {
2455 GenCollectedHeap* gch = GenCollectedHeap::heap();
2456 size_policy()->ms_collection_end(gch->gc_cause());
2457 }
2459 if (VerifyAfterGC &&
2460 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2461 Universe::verify(true);
2462 }
2463 if (TraceCMSState) {
2464 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2465 " exiting collection CMS state %d",
2466 Thread::current(), _collectorState);
2467 }
2468 }
2470 bool CMSCollector::waitForForegroundGC() {
2471 bool res = false;
2472 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2473 "CMS thread should have CMS token");
2474 // Block the foreground collector until the
2475 // background collector decides whether to
2476 // yield.
2477 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2478 _foregroundGCShouldWait = true;
2479 if (_foregroundGCIsActive) {
2480 // The background collector yields to the
2481 // foreground collector and returns a value
2482 // indicating that it has yielded. The foreground
2483 // collector can proceed.
2484 res = true;
2485 _foregroundGCShouldWait = false;
2486 ConcurrentMarkSweepThread::clear_CMS_flag(
2487 ConcurrentMarkSweepThread::CMS_cms_has_token);
2488 ConcurrentMarkSweepThread::set_CMS_flag(
2489 ConcurrentMarkSweepThread::CMS_cms_wants_token);
2490 // Get a possibly blocked foreground thread going
2491 CGC_lock->notify();
2492 if (TraceCMSState) {
2493 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " waiting at CMS state %d",
2494 Thread::current(), _collectorState);
2495 }
2496 while (_foregroundGCIsActive) {
2497 CGC_lock->wait(Mutex::_no_safepoint_check_flag);
2498 }
2499 ConcurrentMarkSweepThread::set_CMS_flag(
2500 ConcurrentMarkSweepThread::CMS_cms_has_token);
2501 ConcurrentMarkSweepThread::clear_CMS_flag(
2502 ConcurrentMarkSweepThread::CMS_cms_wants_token);
2503 }
2504 if (TraceCMSState) {
2505 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " continuing at CMS state %d",
2506 Thread::current(), _collectorState);
2507 }
2508 return res;
2509 }
2511 // Because of the need to lock the free lists and other structures in
2512 // the collector, common to all the generations that the collector is
2513 // collecting, we need the gc_prologues of individual CMS generations
2514 // to delegate to their collector. It may have been simpler had the
2515 // current infrastructure allowed one to call a prologue on a
2516 // collector. In the absence of that we have the generation's
2517 // prologue delegate to the collector, which delegates back
2518 // some "local" work to a worker method in the individual generations
2519 // that it's responsible for collecting, while itself doing any
2520 // work common to all generations it's responsible for. A similar
2521 // comment applies to the gc_epilogue()'s.
2522 // The role of the variable _between_prologue_and_epilogue is to
2523 // enforce the invocation protocol.
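//
// Call-graph sketch of the resulting protocol (descriptive only, derived
// from the methods below):
//
//   GenCollectedHeap::gc_prologue(full)
//     -> ConcurrentMarkSweepGeneration::gc_prologue(full)   // one call per gen
//          -> CMSCollector::gc_prologue(full)               // shared work done
//               -> _cmsGen->gc_prologue_work(...)           //   exactly once,
//               -> _permGen->gc_prologue_work(...)          //   guarded by the
//                                                           //   flag above
//
// gc_epilogue() mirrors this structure, with the same flag preventing the
// shared work from being repeated when the second generation delegates up.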
2524 void CMSCollector::gc_prologue(bool full) {
2525 // Call gc_prologue_work() for each CMSGen and PermGen that
2526 // we are responsible for.
2528 // The following locking discipline assumes that we are only called
2529 // when the world is stopped.
2530 assert(SafepointSynchronize::is_at_safepoint(), "world is stopped assumption");
2532 // The CMSCollector prologue must call the gc_prologues for the
2533 // "generations" (including PermGen if any) that it's responsible
2534 // for.
2536 assert( Thread::current()->is_VM_thread()
2537 || ( CMSScavengeBeforeRemark
2538 && Thread::current()->is_ConcurrentGC_thread()),
2539 "Incorrect thread type for prologue execution");
2541 if (_between_prologue_and_epilogue) {
2542 // We have already been invoked; this is a gc_prologue delegation
2543 // from yet another CMS generation that we are responsible for, just
2544 // ignore it since all relevant work has already been done.
2545 return;
2546 }
2548 // set a bit saying prologue has been called; cleared in epilogue
2549 _between_prologue_and_epilogue = true;
2550 // Claim locks for common data structures, then call gc_prologue_work()
2551 // for each CMSGen and PermGen that we are responsible for.
2553 getFreelistLocks(); // gets free list locks on constituent spaces
2554 bitMapLock()->lock_without_safepoint_check();
2556 // Should call gc_prologue_work() for all cms gens we are responsible for
2557 bool registerClosure = _collectorState >= Marking
2558 && _collectorState < Sweeping;
2559 ModUnionClosure* muc = ParallelGCThreads > 0 ? &_modUnionClosurePar
2560 : &_modUnionClosure;
2561 _cmsGen->gc_prologue_work(full, registerClosure, muc);
2562 _permGen->gc_prologue_work(full, registerClosure, muc);
2564 if (!full) {
2565 stats().record_gc0_begin();
2566 }
2567 }
2569 void ConcurrentMarkSweepGeneration::gc_prologue(bool full) {
2570 // Delegate to CMScollector which knows how to coordinate between
2571 // this and any other CMS generations that it is responsible for
2572 // collecting.
2573 collector()->gc_prologue(full);
2574 }
2576 // This is a "private" interface for use by this generation's CMSCollector.
2577 // Not to be called directly by any other entity (for instance,
2578 // GenCollectedHeap, which calls the "public" gc_prologue method above).
2579 void ConcurrentMarkSweepGeneration::gc_prologue_work(bool full,
2580 bool registerClosure, ModUnionClosure* modUnionClosure) {
2581 assert(!incremental_collection_failed(), "Shouldn't be set yet");
2582 assert(cmsSpace()->preconsumptionDirtyCardClosure() == NULL,
2583 "Should be NULL");
2584 if (registerClosure) {
2585 cmsSpace()->setPreconsumptionDirtyCardClosure(modUnionClosure);
2586 }
2587 cmsSpace()->gc_prologue();
2588 // Clear stat counters
2589 NOT_PRODUCT(
2590 assert(_numObjectsPromoted == 0, "check");
2591 assert(_numWordsPromoted == 0, "check");
2592 if (Verbose && PrintGC) {
2593 gclog_or_tty->print("Allocated "SIZE_FORMAT" objects, "
2594 SIZE_FORMAT" bytes concurrently",
2595 _numObjectsAllocated, _numWordsAllocated*sizeof(HeapWord));
2596 }
2597 _numObjectsAllocated = 0;
2598 _numWordsAllocated = 0;
2599 )
2600 }
2602 void CMSCollector::gc_epilogue(bool full) {
2603 // The following locking discipline assumes that we are only called
2604 // when the world is stopped.
2605 assert(SafepointSynchronize::is_at_safepoint(),
2606 "world is stopped assumption");
2608 // Currently the CMS epilogue (see CompactibleFreeListSpace) merely checks
2609 // if linear allocation blocks need to be appropriately marked to allow the
2610 // blocks to be parsable. We also check here whether we need to nudge the
2611 // CMS collector thread to start a new cycle (if it's not already active).
2612 assert( Thread::current()->is_VM_thread()
2613 || ( CMSScavengeBeforeRemark
2614 && Thread::current()->is_ConcurrentGC_thread()),
2615 "Incorrect thread type for epilogue execution");
2617 if (!_between_prologue_and_epilogue) {
2618 // We have already been invoked; this is a gc_epilogue delegation
2619 // from yet another CMS generation that we are responsible for, just
2620 // ignore it since all relevant work has already been done.
2621 return;
2622 }
2623 assert(haveFreelistLocks(), "must have freelist locks");
2624 assert_lock_strong(bitMapLock());
2626 _cmsGen->gc_epilogue_work(full);
2627 _permGen->gc_epilogue_work(full);
2629 if (_collectorState == AbortablePreclean || _collectorState == Precleaning) {
2630 // in case sampling was not already enabled, enable it
2631 _start_sampling = true;
2632 }
2633 // reset _eden_chunk_array so sampling starts afresh
2634 _eden_chunk_index = 0;
2636 size_t cms_used = _cmsGen->cmsSpace()->used();
2637 size_t perm_used = _permGen->cmsSpace()->used();
2639 // update performance counters - this uses a special version of
2640 // update_counters() that allows the utilization to be passed as a
2641 // parameter, avoiding multiple calls to used().
2642 //
2643 _cmsGen->update_counters(cms_used);
2644 _permGen->update_counters(perm_used);
2646 if (CMSIncrementalMode) {
2647 icms_update_allocation_limits();
2648 }
2650 bitMapLock()->unlock();
2651 releaseFreelistLocks();
2653 _between_prologue_and_epilogue = false; // ready for next cycle
2654 }
2656 void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) {
2657 collector()->gc_epilogue(full);
2659 // Also reset promotion tracking in par gc thread states.
2660 if (ParallelGCThreads > 0) {
2661 for (uint i = 0; i < ParallelGCThreads; i++) {
2662 _par_gc_thread_states[i]->promo.stopTrackingPromotions();
2663 }
2664 }
2665 }
2667 void ConcurrentMarkSweepGeneration::gc_epilogue_work(bool full) {
2668 assert(!incremental_collection_failed(), "Should have been cleared");
2669 cmsSpace()->setPreconsumptionDirtyCardClosure(NULL);
2670 cmsSpace()->gc_epilogue();
2671 // Print stat counters
2672 NOT_PRODUCT(
2673 assert(_numObjectsAllocated == 0, "check");
2674 assert(_numWordsAllocated == 0, "check");
2675 if (Verbose && PrintGC) {
2676 gclog_or_tty->print("Promoted "SIZE_FORMAT" objects, "
2677 SIZE_FORMAT" bytes",
2678 _numObjectsPromoted, _numWordsPromoted*sizeof(HeapWord));
2679 }
2680 _numObjectsPromoted = 0;
2681 _numWordsPromoted = 0;
2682 )
2684 if (PrintGC && Verbose) {
2685 // The call down the chain in contiguous_available() needs the freelistLock,
2686 // so print this out before releasing the freelistLock.
2687 gclog_or_tty->print(" Contiguous available "SIZE_FORMAT" bytes ",
2688 contiguous_available());
2689 }
2690 }
2692 #ifndef PRODUCT
2693 bool CMSCollector::have_cms_token() {
2694 Thread* thr = Thread::current();
2695 if (thr->is_VM_thread()) {
2696 return ConcurrentMarkSweepThread::vm_thread_has_cms_token();
2697 } else if (thr->is_ConcurrentGC_thread()) {
2698 return ConcurrentMarkSweepThread::cms_thread_has_cms_token();
2699 } else if (thr->is_GC_task_thread()) {
2700 return ConcurrentMarkSweepThread::vm_thread_has_cms_token() &&
2701 ParGCRareEvent_lock->owned_by_self();
2702 }
2703 return false;
2704 }
2705 #endif
2707 // Check reachability of the given heap address in CMS generation,
2708 // treating all other generations as roots.
2709 bool CMSCollector::is_cms_reachable(HeapWord* addr) {
2710 // We could "guarantee" below, rather than assert, but I'll
2711 // leave these as "asserts" so that an adventurous debugger
2712 // could try this in the product build provided some subset of
2713 // the conditions were met, they were interested in the
2714 // results and knew that the computation below wouldn't interfere
2715 // with other concurrent computations mutating the structures
2716 // being read or written.
2717 assert(SafepointSynchronize::is_at_safepoint(),
2718 "Else mutations in object graph will make answer suspect");
2719 assert(have_cms_token(), "Should hold cms token");
2720 assert(haveFreelistLocks(), "must hold free list locks");
2721 assert_lock_strong(bitMapLock());
2723 // Clear the marking bit map array before starting, but, just
2724 // for kicks, first report if the given address is already marked
2725 gclog_or_tty->print_cr("Start: Address 0x%x is%s marked", addr,
2726 _markBitMap.isMarked(addr) ? "" : " not");
2728 if (verify_after_remark()) {
2729 MutexLockerEx x(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2730 bool result = verification_mark_bm()->isMarked(addr);
2731 gclog_or_tty->print_cr("TransitiveMark: Address 0x%x %s marked", addr,
2732 result ? "IS" : "is NOT");
2733 return result;
2734 } else {
2735 gclog_or_tty->print_cr("Could not compute result");
2736 return false;
2737 }
2738 }
2740 ////////////////////////////////////////////////////////
2741 // CMS Verification Support
2742 ////////////////////////////////////////////////////////
2743 // Following the remark phase, the following invariant
2744 // should hold -- each object in the CMS heap which is
2745 // marked in the verification_mark_bm() should also be marked in markBitMap().
2747 class VerifyMarkedClosure: public BitMapClosure {
2748 CMSBitMap* _marks;
2749 bool _failed;
2751 public:
2752 VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {}
2754 bool do_bit(size_t offset) {
2755 HeapWord* addr = _marks->offsetToHeapWord(offset);
2756 if (!_marks->isMarked(addr)) {
2757 oop(addr)->print();
2758 gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr);
2759 _failed = true;
2760 }
2761 return true;
2762 }
2764 bool failed() { return _failed; }
2765 };
2767 bool CMSCollector::verify_after_remark() {
2768 gclog_or_tty->print(" [Verifying CMS Marking... ");
2769 MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2770 static bool init = false;
2772 assert(SafepointSynchronize::is_at_safepoint(),
2773 "Else mutations in object graph will make answer suspect");
2774 assert(have_cms_token(),
2775 "Else there may be mutual interference in use of "
2776 " verification data structures");
2777 assert(_collectorState > Marking && _collectorState <= Sweeping,
2778 "Else marking info checked here may be obsolete");
2779 assert(haveFreelistLocks(), "must hold free list locks");
2780 assert_lock_strong(bitMapLock());
2783 // Allocate marking bit map if not already allocated
2784 if (!init) { // first time
2785 if (!verification_mark_bm()->allocate(_span)) {
2786 return false;
2787 }
2788 init = true;
2789 }
2791 assert(verification_mark_stack()->isEmpty(), "Should be empty");
2793 // Turn off refs discovery -- so we will be tracing through refs.
2794 // This is as intended, because by this time
2795 // GC must already have cleared any refs that need to be cleared,
2796 // and traced those that need to be marked; moreover,
2797 // the marking done here is not going to interfere in any
2798 // way with the marking information used by GC.
2799 NoRefDiscovery no_discovery(ref_processor());
2801 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
2803 // Clear any marks from a previous round
2804 verification_mark_bm()->clear_all();
2805 assert(verification_mark_stack()->isEmpty(), "markStack should be empty");
2806 assert(overflow_list_is_empty(), "overflow list should be empty");
2808 GenCollectedHeap* gch = GenCollectedHeap::heap();
2809 gch->ensure_parsability(false); // fill TLABs, but no need to retire them
2810 // Update the saved marks which may affect the root scans.
2811 gch->save_marks();
2813 if (CMSRemarkVerifyVariant == 1) {
2814 // In this first variant of verification, we complete
2815 // all marking, then check if the new marks-vector is
2816 // a subset of the CMS marks-vector.
2817 verify_after_remark_work_1();
2818 } else if (CMSRemarkVerifyVariant == 2) {
2819 // In this second variant of verification, we flag an error
2820 // (i.e. an object reachable in the new marks-vector not reachable
2821 // in the CMS marks-vector) immediately, also indicating the
2822 // identity of an object (A) that references the unmarked object (B) --
2823 // presumably, a mutation to A failed to be picked up by preclean/remark?
2824 verify_after_remark_work_2();
2825 } else {
2826 warning("Unrecognized value %d for CMSRemarkVerifyVariant",
2827 CMSRemarkVerifyVariant);
2828 }
2829 gclog_or_tty->print(" done] ");
2830 return true;
2831 }
2833 void CMSCollector::verify_after_remark_work_1() {
2834 ResourceMark rm;
2835 HandleMark hm;
2836 GenCollectedHeap* gch = GenCollectedHeap::heap();
2838 // Mark from roots one level into CMS
2839 MarkRefsIntoClosure notOlder(_span, verification_mark_bm(), true /* nmethods */);
2840 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2842 gch->gen_process_strong_roots(_cmsGen->level(),
2843 true, // younger gens are roots
2844 true, // collecting perm gen
2845 SharedHeap::ScanningOption(roots_scanning_options()),
2846 NULL, &notOlder);
2848 // Now mark from the roots
2849 assert(_revisitStack.isEmpty(), "Should be empty");
2850 MarkFromRootsClosure markFromRootsClosure(this, _span,
2851 verification_mark_bm(), verification_mark_stack(), &_revisitStack,
2852 false /* don't yield */, true /* verifying */);
2853 assert(_restart_addr == NULL, "Expected pre-condition");
2854 verification_mark_bm()->iterate(&markFromRootsClosure);
2855 while (_restart_addr != NULL) {
2856 // Deal with stack overflow: by restarting at the indicated
2857 // address.
2858 HeapWord* ra = _restart_addr;
2859 markFromRootsClosure.reset(ra);
2860 _restart_addr = NULL;
2861 verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2862 }
2863 assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2864 verify_work_stacks_empty();
2865 // Should reset the revisit stack above, since no class tree
2866 // surgery is forthcoming.
2867 _revisitStack.reset(); // throwing away all contents
2869 // Marking completed -- now verify that each bit marked in
2870 // verification_mark_bm() is also marked in markBitMap(); flag all
2871 // errors by printing corresponding objects.
2872 VerifyMarkedClosure vcl(markBitMap());
2873 verification_mark_bm()->iterate(&vcl);
2874 if (vcl.failed()) {
2875 gclog_or_tty->print("Verification failed");
2876 Universe::heap()->print();
2877 fatal(" ... aborting");
2878 }
2879 }
2881 void CMSCollector::verify_after_remark_work_2() {
2882 ResourceMark rm;
2883 HandleMark hm;
2884 GenCollectedHeap* gch = GenCollectedHeap::heap();
2886 // Mark from roots one level into CMS
2887 MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
2888 markBitMap(), true /* nmethods */);
2889 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2890 gch->gen_process_strong_roots(_cmsGen->level(),
2891 true, // younger gens are roots
2892 true, // collecting perm gen
2893 SharedHeap::ScanningOption(roots_scanning_options()),
2894 NULL, &notOlder);
2896 // Now mark from the roots
2897 assert(_revisitStack.isEmpty(), "Should be empty");
2898 MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
2899 verification_mark_bm(), markBitMap(), verification_mark_stack());
2900 assert(_restart_addr == NULL, "Expected pre-condition");
2901 verification_mark_bm()->iterate(&markFromRootsClosure);
2902 while (_restart_addr != NULL) {
2903 // Deal with stack overflow: by restarting at the indicated
2904 // address.
2905 HeapWord* ra = _restart_addr;
2906 markFromRootsClosure.reset(ra);
2907 _restart_addr = NULL;
2908 verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2909 }
2910 assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2911 verify_work_stacks_empty();
2912 // Should reset the revisit stack above, since no class tree
2913 // surgery is forthcoming.
2914 _revisitStack.reset(); // throwing away all contents
2916 // Marking completed -- now verify that each bit marked in
2917 // verification_mark_bm() is also marked in markBitMap(); flag all
2918 // errors by printing corresponding objects.
2919 VerifyMarkedClosure vcl(markBitMap());
2920 verification_mark_bm()->iterate(&vcl);
2921 assert(!vcl.failed(), "Else verification above should not have succeeded");
2922 }
2924 void ConcurrentMarkSweepGeneration::save_marks() {
2925 // delegate to CMS space
2926 cmsSpace()->save_marks();
2927 for (uint i = 0; i < ParallelGCThreads; i++) {
2928 _par_gc_thread_states[i]->promo.startTrackingPromotions();
2929 }
2930 }
2932 bool ConcurrentMarkSweepGeneration::no_allocs_since_save_marks() {
2933 return cmsSpace()->no_allocs_since_save_marks();
2934 }
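// The macro below (instantiated for each closure type via
// ALL_SINCE_SAVE_MARKS_CLOSURES) generates the family of
// oop_since_save_marks_iterate##nv_suffix() methods: each one points the
// closure at this generation, delegates the iteration to the CMS space,
// and then re-saves marks.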
2936 #define CMS_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
2937 \
2938 void ConcurrentMarkSweepGeneration:: \
2939 oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
2940 cl->set_generation(this); \
2941 cmsSpace()->oop_since_save_marks_iterate##nv_suffix(cl); \
2942 cl->reset_generation(); \
2943 save_marks(); \
2944 }
2946 ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DEFN)
2948 void
2949 ConcurrentMarkSweepGeneration::object_iterate_since_last_GC(ObjectClosure* blk)
2950 {
2951 // Not currently implemented; need to do the following. -- ysr.
2952 // dld -- I think that is used for some sort of allocation profiler. So it
2953 // really means the objects allocated by the mutator since the last
2954 // GC. We could potentially implement this cheaply by recording only
2955 // the direct allocations in a side data structure.
2956 //
2957 // I think we probably ought not to be required to support these
2958 // iterations at any arbitrary point; I think there ought to be some
2959 // call to enable/disable allocation profiling in a generation/space,
2960 // and the iterator ought to return the objects allocated in the
2961 // gen/space since the enable call, or the last iterator call (which
2962 // will probably be at a GC.) That way, for gens like CM&S that would
2963 // require some extra data structure to support this, we only pay the
2964 // cost when it's in use...
2965 cmsSpace()->object_iterate_since_last_GC(blk);
2966 }
2968 void
2969 ConcurrentMarkSweepGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
2970 cl->set_generation(this);
2971 younger_refs_in_space_iterate(_cmsSpace, cl);
2972 cl->reset_generation();
2973 }
2975 void
2976 ConcurrentMarkSweepGeneration::oop_iterate(MemRegion mr, OopClosure* cl) {
2977 if (freelistLock()->owned_by_self()) {
2978 Generation::oop_iterate(mr, cl);
2979 } else {
2980 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2981 Generation::oop_iterate(mr, cl);
2982 }
2983 }
2985 void
2986 ConcurrentMarkSweepGeneration::oop_iterate(OopClosure* cl) {
2987 if (freelistLock()->owned_by_self()) {
2988 Generation::oop_iterate(cl);
2989 } else {
2990 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2991 Generation::oop_iterate(cl);
2992 }
2993 }
2995 void
2996 ConcurrentMarkSweepGeneration::object_iterate(ObjectClosure* cl) {
2997 if (freelistLock()->owned_by_self()) {
2998 Generation::object_iterate(cl);
2999 } else {
3000 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3001 Generation::object_iterate(cl);
3002 }
3003 }
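// The two mark-compact hooks below are no-ops for this generation.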
3005 void
3006 ConcurrentMarkSweepGeneration::pre_adjust_pointers() {
3007 }
3009 void
3010 ConcurrentMarkSweepGeneration::post_compact() {
3011 }
3013 void
3014 ConcurrentMarkSweepGeneration::prepare_for_verify() {
3015 // Fix the linear allocation blocks to look like free blocks.
3017 // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
3018 // are not called when the heap is verified during universe initialization and
3019 // at vm shutdown.
3020 if (freelistLock()->owned_by_self()) {
3021 cmsSpace()->prepare_for_verify();
3022 } else {
3023 MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
3024 cmsSpace()->prepare_for_verify();
3025 }
3026 }
3028 void
3029 ConcurrentMarkSweepGeneration::verify(bool allow_dirty /* ignored */) {
3030 // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
3031 // are not called when the heap is verified during universe initialization and
3032 // at vm shutdown.
3033 if (freelistLock()->owned_by_self()) {
3034 cmsSpace()->verify(false /* ignored */);
3035 } else {
3036 MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
3037 cmsSpace()->verify(false /* ignored */);
3038 }
3039 }
3041 void CMSCollector::verify(bool allow_dirty /* ignored */) {
3042 _cmsGen->verify(allow_dirty);
3043 _permGen->verify(allow_dirty);
3044 }
3046 #ifndef PRODUCT
3047 bool CMSCollector::overflow_list_is_empty() const {
3048 assert(_num_par_pushes >= 0, "Inconsistency");
3049 if (_overflow_list == NULL) {
3050 assert(_num_par_pushes == 0, "Inconsistency");
3051 }
3052 return _overflow_list == NULL;
3053 }
3055 // The methods verify_work_stacks_empty() and verify_overflow_empty()
3056 // merely consolidate assertion checks that appear to occur together frequently.
3057 void CMSCollector::verify_work_stacks_empty() const {
3058 assert(_markStack.isEmpty(), "Marking stack should be empty");
3059 assert(overflow_list_is_empty(), "Overflow list should be empty");
3060 }
3062 void CMSCollector::verify_overflow_empty() const {
3063 assert(overflow_list_is_empty(), "Overflow list should be empty");
3064 assert(no_preserved_marks(), "No preserved marks");
3065 }
3066 #endif // PRODUCT
3068 // Decide if we want to enable class unloading as part of the
3069 // ensuing concurrent GC cycle. We will collect the perm gen and
3070 // unload classes if it's the case that:
3071 // (1) an explicit gc request has been made and the flag
3072 // ExplicitGCInvokesConcurrentAndUnloadsClasses is set, OR
3073 // (2) (a) class unloading is enabled at the command line, and
3074 // (b) (i) perm gen threshold has been crossed, or
3075 // (ii) old gen is getting really full, or
3076 // (iii) the previous N CMS collections did not collect the
3077 // perm gen
3078 // NOTE: Provided there is no change in the state of the heap between
3079 // calls to this method, it should have idempotent results. Moreover,
3080 // its results should be monotonically increasing (i.e. going from 0 to 1,
3081 // but not 1 to 0) between successive calls between which the heap was
3082 // not collected. For the implementation below, it must thus rely on
3083 // the property that concurrent_cycles_since_last_unload()
3084 // will not decrease unless a collection cycle happened and that
3085 // _permGen->should_concurrent_collect() and _cmsGen->is_too_full() are
3086 // themselves also monotonic in that sense. See check_monotonicity()
3087 // below.
3088 bool CMSCollector::update_should_unload_classes() {
3089 _should_unload_classes = false;
3090 // Condition 1 above
3091 if (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) {
3092 _should_unload_classes = true;
3093 } else if (CMSClassUnloadingEnabled) { // Condition 2.a above
3094 // Disjuncts 2.b.(i,ii,iii) above
3095 _should_unload_classes = (concurrent_cycles_since_last_unload() >=
3096 CMSClassUnloadingMaxInterval)
3097 || _permGen->should_concurrent_collect()
3098 || _cmsGen->is_too_full();
3099 }
3100 return _should_unload_classes;
3101 }
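// The CMS generation is deemed "too full" when it both wants a concurrent
// collection and its occupancy exceeds CMSIsTooFullPercentage.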
3103 bool ConcurrentMarkSweepGeneration::is_too_full() const {
3104 bool res = should_concurrent_collect();
3105 res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0);
3106 return res;
3107 }
3109 void CMSCollector::setup_cms_unloading_and_verification_state() {
3110 const bool should_verify = VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
3111 || VerifyBeforeExit;
3112 const int rso = SharedHeap::SO_Symbols | SharedHeap::SO_Strings
3113 | SharedHeap::SO_CodeCache;
3115 if (should_unload_classes()) { // Should unload classes this cycle
3116 remove_root_scanning_option(rso); // Shrink the root set appropriately
3117 set_verifying(should_verify); // Set verification state for this cycle
3118 return; // Nothing else needs to be done at this time
3119 }
3121 // Not unloading classes this cycle
3122 assert(!should_unload_classes(), "Inconsistency!");
3123 if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
3124 // We were not verifying, or we _were_ unloading classes in the last cycle,
3125 // AND some verification options are enabled this cycle; in this case,
3126 // we must make sure that the deadness map is allocated if not already so,
3127 // and cleared (if already allocated previously --
3128 // CMSBitMap::sizeInBits() is used to determine if it's allocated).
3129 if (perm_gen_verify_bit_map()->sizeInBits() == 0) {
3130 if (!perm_gen_verify_bit_map()->allocate(_permGen->reserved())) {
3131 warning("Failed to allocate permanent generation verification CMS Bit Map;\n"
3132 "permanent generation verification disabled");
3133 return; // Note that we leave verification disabled, so we'll retry this
3134 // allocation next cycle. We _could_ remember this failure
3135 // and skip further attempts and permanently disable verification
3136 // attempts if that is considered more desirable.
3137 }
3138 assert(perm_gen_verify_bit_map()->covers(_permGen->reserved()),
3139 "_perm_gen_ver_bit_map inconsistency?");
3140 } else {
3141 perm_gen_verify_bit_map()->clear_all();
3142 }
3143 // Include symbols, strings and code cache elements to prevent their resurrection.
3144 add_root_scanning_option(rso);
3145 set_verifying(true);
3146 } else if (verifying() && !should_verify) {
3147 // We were verifying, but some verification flags got disabled.
3148 set_verifying(false);
3149 // Exclude symbols, strings and code cache elements from root scanning to
3150 // reduce IM and RM pauses.
3151 remove_root_scanning_option(rso);
3152 }
3153 }
3156 #ifndef PRODUCT
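// Debug-only helper: map an address within the collector's span to the start
// of the block containing it, dispatching to whichever constituent space
// (CMS gen or perm gen) covers the address; addresses outside the span
// yield NULL.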
3157 HeapWord* CMSCollector::block_start(const void* p) const {
3158 const HeapWord* addr = (HeapWord*)p;
3159 if (_span.contains(p)) {
3160 if (_cmsGen->cmsSpace()->is_in_reserved(addr)) {
3161 return _cmsGen->cmsSpace()->block_start(p);
3162 } else {
3163 assert(_permGen->cmsSpace()->is_in_reserved(addr),
3164 "Inconsistent _span?");
3165 return _permGen->cmsSpace()->block_start(p);
3166 }
3167 }
3168 return NULL;
3169 }
3170 #endif
3172 HeapWord*
3173 ConcurrentMarkSweepGeneration::expand_and_allocate(size_t word_size,
3174 bool tlab,
3175 bool parallel) {
3176 assert(!tlab, "Can't deal with TLAB allocation");
3177 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3178 expand(word_size*HeapWordSize, MinHeapDeltaBytes,
3179 CMSExpansionCause::_satisfy_allocation);
3180 if (GCExpandToAllocateDelayMillis > 0) {
3181 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3182 }
3183 return have_lock_and_allocate(word_size, tlab);
3184 }
3186 // YSR: All of this generation expansion/shrinking stuff is an exact copy of
3187 // OneContigSpaceCardGeneration, which makes me wonder if we should move this
3188 // to CardGeneration and share it...
3189 void ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes,
3190 CMSExpansionCause::Cause cause)
3191 {
3192 assert_locked_or_safepoint(Heap_lock);
3194 size_t aligned_bytes = ReservedSpace::page_align_size_up(bytes);
3195 size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
3196 bool success = false;
3197 if (aligned_expand_bytes > aligned_bytes) {
3198 success = grow_by(aligned_expand_bytes);
3199 }
3200 if (!success) {
3201 success = grow_by(aligned_bytes);
3202 }
3203 if (!success) {
3204 size_t remaining_bytes = _virtual_space.uncommitted_size();
3205 if (remaining_bytes > 0) {
3206 success = grow_by(remaining_bytes);
3207 }
3208 }
3209 if (GC_locker::is_active()) {
3210 if (PrintGC && Verbose) {
3211 gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead");
3212 }
3213 }
3214 // remember why we expanded; this information is used
3215 // by shouldConcurrentCollect() when making decisions on whether to start
3216 // a new CMS cycle.
3217 if (success) {
3218 set_expansion_cause(cause);
3219 if (PrintGCDetails && Verbose) {
3220 gclog_or_tty->print_cr("Expanded CMS gen for %s",
3221 CMSExpansionCause::to_string(cause));
3222 }
3223 }
3224 }
3226 HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) {
3227 HeapWord* res = NULL;
3228 MutexLocker x(ParGCRareEvent_lock);
3229 while (true) {
3230 // Expansion by some other thread might make alloc OK now:
3231 res = ps->lab.alloc(word_sz);
3232 if (res != NULL) return res;
3233 // If there's not enough expansion space available, give up.
3234 if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) {
3235 return NULL;
3236 }
3237 // Otherwise, we try expansion.
3238 expand(word_sz*HeapWordSize, MinHeapDeltaBytes,
3239 CMSExpansionCause::_allocate_par_lab);
3240 // Now go around the loop and try alloc again;
3241 // A competing par_promote might beat us to the expansion space,
3242 // so we may go around the loop again if promotion fails again.
3243 if (GCExpandToAllocateDelayMillis > 0) {
3244 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3245 }
3246 }
3247 }
3250 bool ConcurrentMarkSweepGeneration::expand_and_ensure_spooling_space(
3251 PromotionInfo* promo) {
3252 MutexLocker x(ParGCRareEvent_lock);
3253 size_t refill_size_bytes = promo->refillSize() * HeapWordSize;
3254 while (true) {
3255 // Expansion by some other thread might make alloc OK now:
3256 if (promo->ensure_spooling_space()) {
3257 assert(promo->has_spooling_space(),
3258 "Post-condition of successful ensure_spooling_space()");
3259 return true;
3260 }
3261 // If there's not enough expansion space available, give up.
3262 if (_virtual_space.uncommitted_size() < refill_size_bytes) {
3263 return false;
3264 }
3265 // Otherwise, we try expansion.
3266 expand(refill_size_bytes, MinHeapDeltaBytes,
3267 CMSExpansionCause::_allocate_par_spooling_space);
3268 // Now go around the loop and try alloc again;
3269 // A competing allocation might beat us to the expansion space,
3270 // so we may go around the loop again if allocation fails again.
3271 if (GCExpandToAllocateDelayMillis > 0) {
3272 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3273 }
3274 }
3275 }
3279 void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
3280 assert_locked_or_safepoint(Heap_lock);
3281 size_t size = ReservedSpace::page_align_size_down(bytes);
3282 if (size > 0) {
3283 shrink_by(size);
3284 }
3285 }
3287 bool ConcurrentMarkSweepGeneration::grow_by(size_t bytes) {
3288 assert_locked_or_safepoint(Heap_lock);
3289 bool result = _virtual_space.expand_by(bytes);
3290 if (result) {
3291 HeapWord* old_end = _cmsSpace->end();
3292 size_t new_word_size =
3293 heap_word_size(_virtual_space.committed_size());
3294 MemRegion mr(_cmsSpace->bottom(), new_word_size);
3295 _bts->resize(new_word_size); // resize the block offset shared array
3296 Universe::heap()->barrier_set()->resize_covered_region(mr);
3297 // Hmmmm... why doesn't CFLS::set_end verify locking?
3298 // This is quite ugly; FIX ME XXX
3299 _cmsSpace->assert_locked();
3300 _cmsSpace->set_end((HeapWord*)_virtual_space.high());
3302 // update the space and generation capacity counters
3303 if (UsePerfData) {
3304 _space_counters->update_capacity();
3305 _gen_counters->update_all();
3306 }
3308 if (Verbose && PrintGC) {
3309 size_t new_mem_size = _virtual_space.committed_size();
3310 size_t old_mem_size = new_mem_size - bytes;
3311 gclog_or_tty->print_cr("Expanding %s from %ldK by %ldK to %ldK",
3312 name(), old_mem_size/K, bytes/K, new_mem_size/K);
3313 }
3314 }
3315 return result;
3316 }
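// Commit whatever remains of the reserved-but-uncommitted virtual space.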
3318 bool ConcurrentMarkSweepGeneration::grow_to_reserved() {
3319 assert_locked_or_safepoint(Heap_lock);
3320 bool success = true;
3321 const size_t remaining_bytes = _virtual_space.uncommitted_size();
3322 if (remaining_bytes > 0) {
3323 success = grow_by(remaining_bytes);
3324 DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
3325 }
3326 return success;
3327 }
3329 void ConcurrentMarkSweepGeneration::shrink_by(size_t bytes) {
3330 assert_locked_or_safepoint(Heap_lock);
3331 assert_lock_strong(freelistLock());
3332 // XXX Fix when compaction is implemented.
3333 warning("Shrinking of CMS not yet implemented");
3334 return;
3335 }
3338 // Simple ctor/dtor wrapper for accounting & timer chores around concurrent
3339 // phases.
3340 class CMSPhaseAccounting: public StackObj {
3341 public:
3342 CMSPhaseAccounting(CMSCollector *collector,
3343 const char *phase,
3344 bool print_cr = true);
3345 ~CMSPhaseAccounting();
3347 private:
3348 CMSCollector *_collector;
3349 const char *_phase;
3350 elapsedTimer _wallclock;
3351 bool _print_cr;
3353 public:
3354 // Not MT-safe; so do not pass around these StackObj's
3355 // where they may be accessed by other threads.
3356 jlong wallclock_millis() {
3357 assert(_wallclock.is_active(), "Wall clock should not stop");
3358 _wallclock.stop(); // to record time
3359 jlong ret = _wallclock.milliseconds();
3360 _wallclock.start(); // restart
3361 return ret;
3362 }
3363 };
3365 CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
3366 const char *phase,
3367 bool print_cr) :
3368 _collector(collector), _phase(phase), _print_cr(print_cr) {
3370 if (PrintCMSStatistics != 0) {
3371 _collector->resetYields();
3372 }
3373 if (PrintGCDetails && PrintGCTimeStamps) {
3374 gclog_or_tty->date_stamp(PrintGCDateStamps);
3375 gclog_or_tty->stamp();
3376 gclog_or_tty->print_cr(": [%s-concurrent-%s-start]",
3377 _collector->cmsGen()->short_name(), _phase);
3378 }
3379 _collector->resetTimer();
3380 _wallclock.start();
3381 _collector->startTimer();
3382 }
3384 CMSPhaseAccounting::~CMSPhaseAccounting() {
3385 assert(_wallclock.is_active(), "Wall clock should not have stopped");
3386 _collector->stopTimer();
3387 _wallclock.stop();
3388 if (PrintGCDetails) {
3389 gclog_or_tty->date_stamp(PrintGCDateStamps);
3390 if (PrintGCTimeStamps) {
3391 gclog_or_tty->stamp();
3392 gclog_or_tty->print(": ");
3393 }
3394 gclog_or_tty->print("[%s-concurrent-%s: %3.3f/%3.3f secs]",
3395 _collector->cmsGen()->short_name(),
3396 _phase, _collector->timerValue(), _wallclock.seconds());
3397 if (_print_cr) {
3398 gclog_or_tty->print_cr("");
3399 }
3400 if (PrintCMSStatistics != 0) {
3401 gclog_or_tty->print_cr(" (CMS-concurrent-%s yielded %d times)", _phase,
3402 _collector->yields());
3403 }
3404 }
3405 }
3407 // CMS work
3409 // Checkpoint the roots into this generation from outside
3410 // this generation. [Note this initial checkpoint need only
3411 // be approximate -- we'll do a catch up phase subsequently.]
3412 void CMSCollector::checkpointRootsInitial(bool asynch) {
3413 assert(_collectorState == InitialMarking, "Wrong collector state");
3414 check_correct_thread_executing();
3415 ReferenceProcessor* rp = ref_processor();
3416 SpecializationStats::clear();
3417 assert(_restart_addr == NULL, "Control point invariant");
3418 if (asynch) {
3419 // acquire locks for subsequent manipulations
3420 MutexLockerEx x(bitMapLock(),
3421 Mutex::_no_safepoint_check_flag);
3422 checkpointRootsInitialWork(asynch);
3423 rp->verify_no_references_recorded();
3424 rp->enable_discovery(); // enable ("weak") refs discovery
3425 _collectorState = Marking;
3426 } else {
3427 // (Weak) Refs discovery: this is controlled from genCollectedHeap::do_collection
3428 // which recognizes if we are a CMS generation, and doesn't try to turn on
3429 // discovery; verify that they aren't meddling.
3430 assert(!rp->discovery_is_atomic(),
3431 "incorrect setting of discovery predicate");
3432 assert(!rp->discovery_enabled(), "genCollectedHeap shouldn't control "
3433 "ref discovery for this generation kind");
3434 // already have locks
3435 checkpointRootsInitialWork(asynch);
3436 rp->enable_discovery(); // now enable ("weak") refs discovery
3437 _collectorState = Marking;
3438 }
3439 SpecializationStats::print();
3440 }
3442 void CMSCollector::checkpointRootsInitialWork(bool asynch) {
3443 assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
3444 assert(_collectorState == InitialMarking, "just checking");
3446 // If there has not been a GC[n-1] since last GC[n] cycle completed,
3447 // precede our marking with a collection of all
3448 // younger generations to keep floating garbage to a minimum.
3449 // XXX: we won't do this for now -- it's an optimization to be done later.
3451 // already have locks
3452 assert_lock_strong(bitMapLock());
3453 assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");
3455 // Setup the verification and class unloading state for this
3456 // CMS collection cycle.
3457 setup_cms_unloading_and_verification_state();
3459 NOT_PRODUCT(TraceTime t("\ncheckpointRootsInitialWork",
3460 PrintGCDetails && Verbose, true, gclog_or_tty);)
3461 if (UseAdaptiveSizePolicy) {
3462 size_policy()->checkpoint_roots_initial_begin();
3463 }
3465 // Reset all the PLAB chunk arrays if necessary.
3466 if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
3467 reset_survivor_plab_arrays();
3468 }
3470 ResourceMark rm;
3471 HandleMark hm;
3473 FalseClosure falseClosure;
3474 // In the case of a synchronous collection, we will elide the
3475 // remark step, so it's important to catch all the nmethod oops
3476 // in this step; hence the last argument to the constructor below.
3477 MarkRefsIntoClosure notOlder(_span, &_markBitMap, !asynch /* nmethods */);
3478 GenCollectedHeap* gch = GenCollectedHeap::heap();
3480 verify_work_stacks_empty();
3481 verify_overflow_empty();
3483 gch->ensure_parsability(false); // fill TLABs, but no need to retire them
3484 // Update the saved marks which may affect the root scans.
3485 gch->save_marks();
3487 // weak reference processing has not started yet.
3488 ref_processor()->set_enqueuing_is_done(false);
3490 {
3491 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
3492 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3493 gch->gen_process_strong_roots(_cmsGen->level(),
3494 true, // younger gens are roots
3495 true, // collecting perm gen
3496 SharedHeap::ScanningOption(roots_scanning_options()),
3497 NULL, &notOlder);
3498 }
3500 // Clear mod-union table; it will be dirtied in the prologue of
3501 // CMS generation per each younger generation collection.
3503 assert(_modUnionTable.isAllClear(),
3504 "Was cleared in most recent final checkpoint phase"
3505 " or no bits are set in the gc_prologue before the start of the next "
3506 "subsequent marking phase.");
3508 // Temporarily disabled, since pre/post-consumption closures don't
3509 // care about precleaned cards
3510 #if 0
3511 {
3512 MemRegion mr = MemRegion((HeapWord*)_virtual_space.low(),
3513 (HeapWord*)_virtual_space.high());
3514 _ct->ct_bs()->preclean_dirty_cards(mr);
3515 }
3516 #endif
3518 // Save the end of the used_region of the constituent generations
3519 // to be used to limit the extent of sweep in each generation.
3520 save_sweep_limits();
3521 if (UseAdaptiveSizePolicy) {
3522 size_policy()->checkpoint_roots_initial_end(gch->gc_cause());
3523 }
3524 verify_overflow_empty();
3525 }
3527 bool CMSCollector::markFromRoots(bool asynch) {
3528 // we might be tempted to assert that:
3529 // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
3530 // "inconsistent argument?");
3531 // However that wouldn't be right, because it's possible that
3532 // a safepoint is indeed in progress as a younger generation
3533 // stop-the-world GC happens even as we mark in this generation.
3534 assert(_collectorState == Marking, "inconsistent state?");
3535 check_correct_thread_executing();
3536 verify_overflow_empty();
3538 bool res;
3539 if (asynch) {
3541 // Start the timers for adaptive size policy for the concurrent phases
3542 // Do it here so that the foreground MS can use the concurrent
3543 // timer since a foreground MS might have the sweep done concurrently
3544 // or STW.
3545 if (UseAdaptiveSizePolicy) {
3546 size_policy()->concurrent_marking_begin();
3547 }
3549 // Weak ref discovery note: We may be discovering weak
3550 // refs in this generation concurrent (but interleaved) with
3551 // weak ref discovery by a younger generation collector.
3553 CMSTokenSyncWithLocks ts(true, bitMapLock());
3554 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
3555 CMSPhaseAccounting pa(this, "mark", !PrintGCDetails);
3556 res = markFromRootsWork(asynch);
3557 if (res) {
3558 _collectorState = Precleaning;
3559 } else { // We failed and a foreground collection wants to take over
3560 assert(_foregroundGCIsActive, "internal state inconsistency");
3561 assert(_restart_addr == NULL, "foreground will restart from scratch");
3562 if (PrintGCDetails) {
3563 gclog_or_tty->print_cr("bailing out to foreground collection");
3564 }
3565 }
3566 if (UseAdaptiveSizePolicy) {
3567 size_policy()->concurrent_marking_end();
3568 }
3569 } else {
3570 assert(SafepointSynchronize::is_at_safepoint(),
3571 "inconsistent with asynch == false");
3572 if (UseAdaptiveSizePolicy) {
3573 size_policy()->ms_collection_marking_begin();
3574 }
3575 // already have locks
3576 res = markFromRootsWork(asynch);
3577 _collectorState = FinalMarking;
3578 if (UseAdaptiveSizePolicy) {
3579 GenCollectedHeap* gch = GenCollectedHeap::heap();
3580 size_policy()->ms_collection_marking_end(gch->gc_cause());
3581 }
3582 }
3583 verify_overflow_empty();
3584 return res;
3585 }
3587 bool CMSCollector::markFromRootsWork(bool asynch) {
3588 // iterate over marked bits in bit map, doing a full scan and mark
3589 // from these roots using the following algorithm:
3590 // . if oop is to the right of the current scan pointer,
3591 // mark corresponding bit (we'll process it later)
3592 // . else (oop is to left of current scan pointer)
3593 // push oop on marking stack
3594 // . drain the marking stack
3596 // Note that when we do a marking step we need to hold the
3597 // bit map lock -- recall that direct allocation (by mutators)
3598 // and promotion (by younger generation collectors) is also
3599 // marking the bit map. [the so-called allocate live policy.]
3600 // Because the implementation of bit map marking is not
3601 // robust wrt simultaneous marking of bits in the same word,
3602 // we need to make sure that there is no such interference
3603 // between concurrent such updates.
3605 // already have locks
3606 assert_lock_strong(bitMapLock());
3608 // Clear the revisit stack, just in case there are any
3609 // obsolete contents from a short-circuited previous CMS cycle.
3610 _revisitStack.reset();
3611 verify_work_stacks_empty();
3612 verify_overflow_empty();
3613 assert(_revisitStack.isEmpty(), "tabula rasa");
3615 bool result = false;
3616 if (CMSConcurrentMTEnabled && ParallelCMSThreads > 0) {
3617 result = do_marking_mt(asynch);
3618 } else {
3619 result = do_marking_st(asynch);
3620 }
3621 return result;
3622 }
3624 // Forward decl
3625 class CMSConcMarkingTask;
3627 class CMSConcMarkingTerminator: public ParallelTaskTerminator {
3628 CMSCollector* _collector;
3629 CMSConcMarkingTask* _task;
3630 bool _yield;
3631 protected:
3632 virtual void yield();
3633 public:
3634 // "n_threads" is the number of threads to be terminated.
3635 // "queue_set" is a set of work queues of other threads.
3636 // "collector" is the CMS collector associated with this task terminator.
3637 // "yield" indicates whether we need the gang as a whole to yield.
3638 CMSConcMarkingTerminator(int n_threads, TaskQueueSetSuper* queue_set,
3639 CMSCollector* collector, bool yield) :
3640 ParallelTaskTerminator(n_threads, queue_set),
3641 _collector(collector),
3642 _yield(yield) { }
3644 void set_task(CMSConcMarkingTask* task) {
3645 _task = task;
3646 }
3647 };
3649 // MT Concurrent Marking Task
3650 class CMSConcMarkingTask: public YieldingFlexibleGangTask {
3651 CMSCollector* _collector;
3652 YieldingFlexibleWorkGang* _workers; // the whole gang
3653 int _n_workers; // requested/desired # workers
3654 bool _asynch;
3655 bool _result;
3656 CompactibleFreeListSpace* _cms_space;
3657 CompactibleFreeListSpace* _perm_space;
3658 HeapWord* _global_finger;
3660 // Exposed here for yielding support
3661 Mutex* const _bit_map_lock;
3663 // The per thread work queues, available here for stealing
3664 OopTaskQueueSet* _task_queues;
3665 CMSConcMarkingTerminator _term;
3667 public:
3668 CMSConcMarkingTask(CMSCollector* collector,
3669 CompactibleFreeListSpace* cms_space,
3670 CompactibleFreeListSpace* perm_space,
3671 bool asynch, int n_workers,
3672 YieldingFlexibleWorkGang* workers,
3673 OopTaskQueueSet* task_queues):
3674 YieldingFlexibleGangTask("Concurrent marking done multi-threaded"),
3675 _collector(collector),
3676 _cms_space(cms_space),
3677 _perm_space(perm_space),
3678 _asynch(asynch), _n_workers(n_workers), _result(true),
3679 _workers(workers), _task_queues(task_queues),
3680 _term(n_workers, task_queues, _collector, asynch),
3681 _bit_map_lock(collector->bitMapLock())
3682 {
3683 assert(n_workers <= workers->total_workers(),
3684 "Else termination won't work correctly today"); // XXX FIX ME!
3685 _requested_size = n_workers;
3686 _term.set_task(this);
3687 assert(_cms_space->bottom() < _perm_space->bottom(),
3688 "Finger incorrectly initialized below");
3689 _global_finger = _cms_space->bottom();
3690 }
3693 OopTaskQueueSet* task_queues() { return _task_queues; }
3695 OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
3697 HeapWord** global_finger_addr() { return &_global_finger; }
3699 CMSConcMarkingTerminator* terminator() { return &_term; }
3701 void work(int i);
3703 virtual void coordinator_yield(); // stuff done by coordinator
3704 bool result() { return _result; }
3706 void reset(HeapWord* ra) {
3707 _term.reset_for_reuse();
3708 }
3710 static bool get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3711 OopTaskQueue* work_q);
3713 private:
3714 void do_scan_and_mark(int i, CompactibleFreeListSpace* sp);
3715 void do_work_steal(int i);
3716 void bump_global_finger(HeapWord* f);
3717 };
3719 void CMSConcMarkingTerminator::yield() {
3720 if (ConcurrentMarkSweepThread::should_yield() &&
3721 !_collector->foregroundGCIsActive() &&
3722 _yield) {
3723 _task->yield();
3724 } else {
3725 ParallelTaskTerminator::yield();
3726 }
3727 }
3729 ////////////////////////////////////////////////////////////////
3730 // Concurrent Marking Algorithm Sketch
3731 ////////////////////////////////////////////////////////////////
3732 // Until all tasks exhausted (both spaces):
3733 // -- claim next available chunk
3734 // -- bump global finger via CAS
3735 // -- find first object that starts in this chunk
3736 // and start scanning bitmap from that position
3737 // -- scan marked objects for oops
3738 // -- CAS-mark target, and if successful:
3739 // . if target oop is above global finger (volatile read)
3740 // nothing to do
3741 // . if target oop is in chunk and above local finger
3742 // then nothing to do
3743 // . else push on work-queue
3744 // -- Deal with possible overflow issues:
3745 // . local work-queue overflow causes stuff to be pushed on
3746 // global (common) overflow queue
3747 // . always first empty local work queue
3748 // . then get a batch of oops from global work queue if any
3749 // . then do work stealing
3750 // -- When all tasks claimed (both spaces)
3751 // and local work queue empty,
3752 // then in a loop do:
3753 // . check global overflow stack; steal a batch of oops and trace
3754 // . try to steal from other threads if GOS is empty
3755 // . if neither is available, offer termination
3756 // -- Terminate and return result
3757 //
3758 void CMSConcMarkingTask::work(int i) {
3759 elapsedTimer _timer;
3760 ResourceMark rm;
3761 HandleMark hm;
3763 DEBUG_ONLY(_collector->verify_overflow_empty();)
3765 // Before we begin work, our work queue should be empty
3766 assert(work_queue(i)->size() == 0, "Expected to be empty");
3767 // Scan the bitmap covering _cms_space, tracing through grey objects.
3768 _timer.start();
3769 do_scan_and_mark(i, _cms_space);
3770 _timer.stop();
3771 if (PrintCMSStatistics != 0) {
3772 gclog_or_tty->print_cr("Finished cms space scanning in %dth thread: %3.3f sec",
3773 i, _timer.seconds()); // XXX: need xxx/xxx type of notation, two timers
3774 }
3776 // ... do the same for the _perm_space
3777 _timer.reset();
3778 _timer.start();
3779 do_scan_and_mark(i, _perm_space);
3780 _timer.stop();
3781 if (PrintCMSStatistics != 0) {
3782 gclog_or_tty->print_cr("Finished perm space scanning in %dth thread: %3.3f sec",
3783 i, _timer.seconds()); // XXX: need xxx/xxx type of notation, two timers
3784 }
3786 // ... do work stealing
3787 _timer.reset();
3788 _timer.start();
3789 do_work_steal(i);
3790 _timer.stop();
3791 if (PrintCMSStatistics != 0) {
3792 gclog_or_tty->print_cr("Finished work stealing in %dth thread: %3.3f sec",
3793 i, _timer.seconds()); // XXX: need xxx/xxx type of notation, two timers
3794 }
3795 assert(_collector->_markStack.isEmpty(), "Should have been emptied");
3796 assert(work_queue(i)->size() == 0, "Should have been emptied");
3797 // Note that under the current task protocol, the
3798 // following assertion is true even if the spaces
3799 // expanded since the completion of the concurrent
3800 // marking. XXX This will likely change under a strict
3801 // ABORT semantics.
3802 assert(_global_finger > _cms_space->end() &&
3803 _global_finger >= _perm_space->end(),
3804 "All tasks have been completed");
3805 DEBUG_ONLY(_collector->verify_overflow_empty();)
3806 }
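// Advance the shared _global_finger to at least f. The CAS loop below retries
// until either our value is installed or some other thread has already pushed
// the finger to (or past) f; the finger therefore only ever moves forward.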
3808 void CMSConcMarkingTask::bump_global_finger(HeapWord* f) {
3809 HeapWord* read = _global_finger;
3810 HeapWord* cur = read;
3811 while (f > read) {
3812 cur = read;
3813 read = (HeapWord*) Atomic::cmpxchg_ptr(f, &_global_finger, cur);
3814 if (cur == read) {
3815 // our cas succeeded
3816 assert(_global_finger >= f, "protocol consistency");
3817 break;
3818 }
3819 }
3820 }
3822 // This is really inefficient, and should be redone by
3823 // using (not yet available) block-read and -write interfaces to the
3824 // stack and the work_queue. XXX FIX ME !!!
3825 bool CMSConcMarkingTask::get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3826 OopTaskQueue* work_q) {
3827 // Fast lock-free check
3828 if (ovflw_stk->length() == 0) {
3829 return false;
3830 }
3831 assert(work_q->size() == 0, "Shouldn't steal");
3832 MutexLockerEx ml(ovflw_stk->par_lock(),
3833 Mutex::_no_safepoint_check_flag);
3834 // Grab up to 1/4 the size of the work queue
3835 size_t num = MIN2((size_t)work_q->max_elems()/4,
3836 (size_t)ParGCDesiredObjsFromOverflowList);
3837 num = MIN2(num, ovflw_stk->length());
3838 for (int i = (int) num; i > 0; i--) {
3839 oop cur = ovflw_stk->pop();
3840 assert(cur != NULL, "Counted wrong?");
3841 work_q->push(cur);
3842 }
3843 return num > 0;
3844 }
3846 void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
3847 SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
3848 int n_tasks = pst->n_tasks();
3849 // We allow that there may be no tasks to do here because
3850 // we are restarting after a stack overflow.
3851 assert(pst->valid() || n_tasks == 0, "Uninitialized use?");
3852 int nth_task = 0;
3854 HeapWord* start = sp->bottom();
3855 size_t chunk_size = sp->marking_task_size();
3856 while (!pst->is_task_claimed(/* reference */ nth_task)) {
3857 // Having claimed the nth task in this space,
3858 // compute the chunk that it corresponds to:
3859 MemRegion span = MemRegion(start + nth_task*chunk_size,
3860 start + (nth_task+1)*chunk_size);
3861 // Try and bump the global finger via a CAS;
3862 // note that we need to do the global finger bump
3863 // _before_ taking the intersection below, because
3864 // the task corresponding to that region will be
3865 // deemed done even if the used_region() expands
3866 // because of allocation -- as it almost certainly will
3867 // during start-up while the threads yield in the
3868 // closure below.
3869 HeapWord* finger = span.end();
3870 bump_global_finger(finger); // atomically
3871 // There are null tasks here corresponding to chunks
3872 // beyond the "top" address of the space.
3873 span = span.intersection(sp->used_region());
3874 if (!span.is_empty()) { // Non-null task
3875 // We want to skip the first object because
3876 // the protocol is to scan any object in its entirety
3877 // that _starts_ in this span; a fortiori, any
3878 // object starting in an earlier span is scanned
3879 // as part of an earlier claimed task.
3880 // Below we use the "careful" version of block_start
3881 // so we do not try to navigate uninitialized objects.
3882 HeapWord* prev_obj = sp->block_start_careful(span.start());
3883 // Below we use a variant of block_size that uses the
3884 // Printezis bits to avoid waiting for allocated
3885 // objects to become initialized/parsable.
3886 while (prev_obj < span.start()) {
3887 size_t sz = sp->block_size_no_stall(prev_obj, _collector);
3888 if (sz > 0) {
3889 prev_obj += sz;
3890 } else {
3891 // In this case we may end up doing a bit of redundant
3892 // scanning, but that appears unavoidable, short of
3893 // locking the free list locks; see bug 6324141.
3894 break;
3895 }
3896 }
3897 if (prev_obj < span.end()) {
3898 MemRegion my_span = MemRegion(prev_obj, span.end());
3899 // Do the marking work within a non-empty span --
3900 // the last argument to the constructor indicates whether the
3901 // iteration should be incremental with periodic yields.
3902 Par_MarkFromRootsClosure cl(this, _collector, my_span,
3903 &_collector->_markBitMap,
3904 work_queue(i),
3905 &_collector->_markStack,
3906 &_collector->_revisitStack,
3907 _asynch);
3908 _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end());
3909 } // else nothing to do for this task
3910 } // else nothing to do for this task
3911 }
3912 // We'd be tempted to assert here that since there are no
3913 // more tasks left to claim in this space, the global_finger
3914 // must exceed space->top() and a fortiori space->end(). However,
3915 // that would not quite be correct because the bumping of
3916 // global_finger occurs strictly after the claiming of a task,
3917 // so by the time we reach here the global finger may not yet
3918 // have been bumped up by the thread that claimed the last
3919 // task.
3920 pst->all_tasks_completed();
3921 }
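// Closure used in the work-stealing phase of concurrent marking: for each oop
// in the CMS span that is not yet marked, it greys the object by marking the
// bit map and pushing it on the worker's queue (spilling to the shared
// overflow stack), and copes with stack overflow by recording a restart
// address via handle_stack_overflow().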
3923 class Par_ConcMarkingClosure: public OopClosure {
3924 private:
3925 CMSCollector* _collector;
3926 MemRegion _span;
3927 CMSBitMap* _bit_map;
3928 CMSMarkStack* _overflow_stack;
3929 CMSMarkStack* _revisit_stack; // XXXXXX Check proper use
3930 OopTaskQueue* _work_queue;
3931 protected:
3932 DO_OOP_WORK_DEFN
3933 public:
3934 Par_ConcMarkingClosure(CMSCollector* collector, OopTaskQueue* work_queue,
3935 CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
3936 _collector(collector),
3937 _span(_collector->_span),
3938 _work_queue(work_queue),
3939 _bit_map(bit_map),
3940 _overflow_stack(overflow_stack) { } // need to initialize revisit stack etc.
3941 virtual void do_oop(oop* p);
3942 virtual void do_oop(narrowOop* p);
3943 void trim_queue(size_t max);
3944 void handle_stack_overflow(HeapWord* lost);
3945 };
3947 // Grey object rescan during work stealing phase --
3948 // the salient assumption here is that stolen oops must
3949 // always be initialized, so we do not need to check for
3950 // uninitialized objects before scanning here.
3951 void Par_ConcMarkingClosure::do_oop(oop obj) {
3952 assert(obj->is_oop_or_null(), "expected an oop or NULL");
3953 HeapWord* addr = (HeapWord*)obj;
3954 // Check if oop points into the CMS generation
3955 // and is not marked
3956 if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
3957 // a white object ...
3958 // If we manage to "claim" the object, by being the
3959 // first thread to mark it, then we push it on our
3960 // marking stack
3961 if (_bit_map->par_mark(addr)) { // ... now grey
3962 // push on work queue (grey set)
3963 bool simulate_overflow = false;
3964 NOT_PRODUCT(
3965 if (CMSMarkStackOverflowALot &&
3966 _collector->simulate_overflow()) {
3967 // simulate a stack overflow
3968 simulate_overflow = true;
3969 }
3970 )
3971 if (simulate_overflow ||
3972 !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
3973 // stack overflow
3974 if (PrintCMSStatistics != 0) {
3975 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
3976 SIZE_FORMAT, _overflow_stack->capacity());
3977 }
3978 // We cannot assert that the overflow stack is full because
3979 // it may have been emptied since.
3980 assert(simulate_overflow ||
3981 _work_queue->size() == _work_queue->max_elems(),
3982 "Else push should have succeeded");
3983 handle_stack_overflow(addr);
3984 }
3985 } // Else, some other thread got there first
3986 }
3987 }
3989 void Par_ConcMarkingClosure::do_oop(oop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
3990 void Par_ConcMarkingClosure::do_oop(narrowOop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
3992 void Par_ConcMarkingClosure::trim_queue(size_t max) {
3993 while (_work_queue->size() > max) {
3994 oop new_oop;
3995 if (_work_queue->pop_local(new_oop)) {
3996 assert(new_oop->is_oop(), "Should be an oop");
3997 assert(_bit_map->isMarked((HeapWord*)new_oop), "Grey object");
3998 assert(_span.contains((HeapWord*)new_oop), "Not in span");
3999 assert(new_oop->is_parsable(), "Should be parsable");
4000 new_oop->oop_iterate(this); // do_oop() above
4001 }
4002 }
4003 }
4005 // Upon stack overflow, we discard (part of) the stack,
4006 // remembering the least address amongst those discarded
4007 // in CMSCollector's _restart_address.
4008 void Par_ConcMarkingClosure::handle_stack_overflow(HeapWord* lost) {
4009 // We need to do this under a mutex to prevent other
4010 // workers from interfering with the expansion below.
4011 MutexLockerEx ml(_overflow_stack->par_lock(),
4012 Mutex::_no_safepoint_check_flag);
4013 // Remember the least grey address discarded
4014 HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
4015 _collector->lower_restart_addr(ra);
4016 _overflow_stack->reset(); // discard stack contents
4017 _overflow_stack->expand(); // expand the stack if possible
4018 }
4021 void CMSConcMarkingTask::do_work_steal(int i) {
4022 OopTaskQueue* work_q = work_queue(i);
4023 oop obj_to_scan;
4024 CMSBitMap* bm = &(_collector->_markBitMap);
4025 CMSMarkStack* ovflw = &(_collector->_markStack);
4026 int* seed = _collector->hash_seed(i);
4027 Par_ConcMarkingClosure cl(_collector, work_q, bm, ovflw);
4028 while (true) {
4029 cl.trim_queue(0);
4030 assert(work_q->size() == 0, "Should have been emptied above");
4031 if (get_work_from_overflow_stack(ovflw, work_q)) {
4032 // Can't assert below because the work obtained from the
4033 // overflow stack may already have been stolen from us.
4034 // assert(work_q->size() > 0, "Work from overflow stack");
4035 continue;
4036 } else if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
4037 assert(obj_to_scan->is_oop(), "Should be an oop");
4038 assert(bm->isMarked((HeapWord*)obj_to_scan), "Grey object");
4039 obj_to_scan->oop_iterate(&cl);
4040 } else if (terminator()->offer_termination()) {
4041 assert(work_q->size() == 0, "Impossible!");
4042 break;
4043 }
4044 }
4045 }
4047 // This is run by the CMS (coordinator) thread.
4048 void CMSConcMarkingTask::coordinator_yield() {
4049 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
4050 "CMS thread should hold CMS token");
4052 // First give up the locks, then yield, then re-lock
4053 // We should probably use a constructor/destructor idiom to
4054 // do this unlock/lock or modify the MutexUnlocker class to
4055 // serve our purpose. XXX
4056 assert_lock_strong(_bit_map_lock);
4057 _bit_map_lock->unlock();
4058 ConcurrentMarkSweepThread::desynchronize(true);
4059 ConcurrentMarkSweepThread::acknowledge_yield_request();
4060 _collector->stopTimer();
4061 if (PrintCMSStatistics != 0) {
4062 _collector->incrementYields();
4063 }
4064 _collector->icms_wait();
4066 // It is possible for whichever thread initiated the yield request
4067 // not to get a chance to wake up and take the bitmap lock between
4068 // this thread releasing it and reacquiring it. So, while the
4069 // should_yield() flag is on, let's sleep for a bit to give the
4070 // other thread a chance to wake up. The limit imposed on the number
4071 // of iterations is defensive, to avoid any unforeseen circumstances
4072 // putting us into an infinite loop. Since it's always been this
4073 // (coordinator_yield()) method that was observed to cause the
4074 // problem, we are using a parameter (CMSCoordinatorYieldSleepCount)
4075 // which is by default non-zero. For the other seven methods that
4076 // also perform the yield operation, we are using a different
4077 // parameter (CMSYieldSleepCount) which is by default zero. This way we
4078 // can enable the sleeping for those methods too, if necessary.
4079 // See 6442774.
4080 //
4081 // We really need to reconsider the synchronization between the GC
4082 // thread and the yield-requesting threads in the future and we
4083 // should really use wait/notify, which is the recommended
4084 // way of doing this type of interaction. Additionally, we should
4085 // consolidate the eight methods that do the yield operation, which
4086 // are almost identical, into one for better maintainability and
4087 // readability. See 6445193.
4088 //
4089 // Tony 2006.06.29
4090 for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
4091 ConcurrentMarkSweepThread::should_yield() &&
4092 !CMSCollector::foregroundGCIsActive(); ++i) {
4093 os::sleep(Thread::current(), 1, false);
4094 ConcurrentMarkSweepThread::acknowledge_yield_request();
4095 }
4097 ConcurrentMarkSweepThread::synchronize(true);
4098 _bit_map_lock->lock_without_safepoint_check();
4099 _collector->startTimer();
4100 }
4102 bool CMSCollector::do_marking_mt(bool asynch) {
4103 assert(ParallelCMSThreads > 0 && conc_workers() != NULL, "precondition");
4104 // In the future this would be determined ergonomically, based
4105 // on #cpu's, # active mutator threads (and load), and mutation rate.
4106 int num_workers = ParallelCMSThreads;
4108 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
4109 CompactibleFreeListSpace* perm_space = _permGen->cmsSpace();
4111 CMSConcMarkingTask tsk(this, cms_space, perm_space,
4112 asynch, num_workers /* number requested XXX */,
4113 conc_workers(), task_queues());
4115 // Since the actual number of workers we get may be different
4116 // from the number we requested above, do we need to do anything different
4117 // below? In particular, maybe we need to subclass the SequentialSubTasksDone
4118 // class? XXX
4119 cms_space ->initialize_sequential_subtasks_for_marking(num_workers);
4120 perm_space->initialize_sequential_subtasks_for_marking(num_workers);
4122 // Refs discovery is already non-atomic.
4123 assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic");
4124 // Mutate the Refs discovery so it is MT during the
4125 // multi-threaded marking phase.
4126 ReferenceProcessorMTMutator mt(ref_processor(), num_workers > 1);
4128 conc_workers()->start_task(&tsk);
4129 while (tsk.yielded()) {
4130 tsk.coordinator_yield();
4131 conc_workers()->continue_task(&tsk);
4132 }
4133 // If the task was aborted, _restart_addr will be non-NULL
4134 assert(tsk.completed() || _restart_addr != NULL, "Inconsistency");
4135 while (_restart_addr != NULL) {
4136 // XXX For now we do not make use of ABORTED state and have not
4137 // yet implemented the right abort semantics (even in the original
4138 // single-threaded CMS case). That needs some more investigation
4139 // and is deferred for now; see CR# TBF. 07252005YSR. XXX
4140 assert(!CMSAbortSemantics || tsk.aborted(), "Inconsistency");
4141 // If _restart_addr is non-NULL, a marking stack overflow
4142 // occurred; we need to do a fresh marking iteration from the
4143 // indicated restart address.
4144 if (_foregroundGCIsActive && asynch) {
4145 // We may be running into repeated stack overflows, having
4146 // reached the limit of the stack size, while making very
4147 // slow forward progress. It may be best to bail out and
4148 // let the foreground collector do its job.
4149 // Clear _restart_addr, so that foreground GC
4150 // works from scratch. This avoids the headache of
4151 // a "rescan" which would otherwise be needed because
4152 // of the dirty mod union table & card table.
4153 _restart_addr = NULL;
4154 return false;
4155 }
4156 // Adjust the task to restart from _restart_addr
4157 tsk.reset(_restart_addr);
4158 cms_space ->initialize_sequential_subtasks_for_marking(num_workers,
4159 _restart_addr);
4160 perm_space->initialize_sequential_subtasks_for_marking(num_workers,
4161 _restart_addr);
4162 _restart_addr = NULL;
4163 // Get the workers going again
4164 conc_workers()->start_task(&tsk);
4165 while (tsk.yielded()) {
4166 tsk.coordinator_yield();
4167 conc_workers()->continue_task(&tsk);
4168 }
4169 }
4170 assert(tsk.completed(), "Inconsistency");
4171 assert(tsk.result() == true, "Inconsistency");
4172 return true;
4173 }
4175 bool CMSCollector::do_marking_st(bool asynch) {
4176 ResourceMark rm;
4177 HandleMark hm;
4179 MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap,
4180 &_markStack, &_revisitStack, CMSYield && asynch);
4181 // the last argument to iterate indicates whether the iteration
4182 // should be incremental with periodic yields.
4183 _markBitMap.iterate(&markFromRootsClosure);
4184 // If _restart_addr is non-NULL, a marking stack overflow
4185 // occurred; we need to do a fresh iteration from the
4186 // indicated restart address.
4187 while (_restart_addr != NULL) {
4188 if (_foregroundGCIsActive && asynch) {
4189 // We may be running into repeated stack overflows, having
4190 // reached the limit of the stack size, while making very
4191 // slow forward progress. It may be best to bail out and
4192 // let the foreground collector do its job.
4193 // Clear _restart_addr, so that foreground GC
4194 // works from scratch. This avoids the headache of
4195 // a "rescan" which would otherwise be needed because
4196 // of the dirty mod union table & card table.
4197 _restart_addr = NULL;
4198 return false; // indicating failure to complete marking
4199 }
4200 // Deal with stack overflow:
4201 // we restart marking from _restart_addr
4202 HeapWord* ra = _restart_addr;
4203 markFromRootsClosure.reset(ra);
4204 _restart_addr = NULL;
4205 _markBitMap.iterate(&markFromRootsClosure, ra, _span.end());
4206 }
4207 return true;
4208 }
4210 void CMSCollector::preclean() {
4211 check_correct_thread_executing();
4212 assert(Thread::current()->is_ConcurrentGC_thread(), "Wrong thread");
4213 verify_work_stacks_empty();
4214 verify_overflow_empty();
4215 _abort_preclean = false;
4216 if (CMSPrecleaningEnabled) {
4217 _eden_chunk_index = 0;
4218 size_t used = get_eden_used();
4219 size_t capacity = get_eden_capacity();
4220 // Don't start sampling unless we will get sufficiently
4221 // many samples.
4222 if (used < (capacity/(CMSScheduleRemarkSamplingRatio * 100)
4223 * CMSScheduleRemarkEdenPenetration)) {
4224 _start_sampling = true;
4225 } else {
4226 _start_sampling = false;
4227 }
4228 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
4229 CMSPhaseAccounting pa(this, "preclean", !PrintGCDetails);
4230 preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
4231 }
4232 CMSTokenSync x(true); // is cms thread
4233 if (CMSPrecleaningEnabled) {
4234 sample_eden();
4235 _collectorState = AbortablePreclean;
4236 } else {
4237 _collectorState = FinalMarking;
4238 }
4239 verify_work_stacks_empty();
4240 verify_overflow_empty();
4241 }
4243 // Try and schedule the remark such that young gen
4244 // occupancy is CMSScheduleRemarkEdenPenetration %.
4245 void CMSCollector::abortable_preclean() {
4246 check_correct_thread_executing();
4247 assert(CMSPrecleaningEnabled, "Inconsistent control state");
4248 assert(_collectorState == AbortablePreclean, "Inconsistent control state");
4250 // If Eden's current occupancy is below this threshold,
4251 // immediately schedule the remark; else preclean
4252 // past the next scavenge in an effort to
4253 // schedule the pause as described above. By choosing
4254 // CMSScheduleRemarkEdenSizeThreshold >= max eden size
4255 // we will never do an actual abortable preclean cycle.
4256 if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
4257 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
4258 CMSPhaseAccounting pa(this, "abortable-preclean", !PrintGCDetails);
4259 // We need more smarts in the abortable preclean
4260 // loop below to deal with cases where allocation
4261 // in young gen is very very slow, and our precleaning
4262 // is running a losing race against a horde of
4263 // mutators intent on flooding us with CMS updates
4264 // (dirty cards).
4265 // One, admittedly dumb, strategy is to give up
4266 // after a certain number of abortable precleaning loops
4267 // or after a certain maximum time. We want to make
4268 // this smarter in the next iteration.
4269 // XXX FIX ME!!! YSR
4270 size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0;
4271 while (!(should_abort_preclean() ||
4272 ConcurrentMarkSweepThread::should_terminate())) {
4273 workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2);
4274 cumworkdone += workdone;
4275 loops++;
4276 // Voluntarily terminate abortable preclean phase if we have
4277 // been at it for too long.
4278 if ((CMSMaxAbortablePrecleanLoops != 0) &&
4279 loops >= CMSMaxAbortablePrecleanLoops) {
4280 if (PrintGCDetails) {
4281 gclog_or_tty->print(" CMS: abort preclean due to loops ");
4282 }
4283 break;
4284 }
4285 if (pa.wallclock_millis() > CMSMaxAbortablePrecleanTime) {
4286 if (PrintGCDetails) {
4287 gclog_or_tty->print(" CMS: abort preclean due to time ");
4288 }
4289 break;
4290 }
4291 // If we are doing little work each iteration, we should
4292 // take a short break.
4293 if (workdone < CMSAbortablePrecleanMinWorkPerIteration) {
4294 // Sleep for some time, waiting for work to accumulate
4295 stopTimer();
4296 cmsThread()->wait_on_cms_lock(CMSAbortablePrecleanWaitMillis);
4297 startTimer();
4298 waited++;
4299 }
4300 }
4301 if (PrintCMSStatistics > 0) {
4302 gclog_or_tty->print(" [%d iterations, %d waits, %d cards] ",
4303 loops, waited, cumworkdone);
4304 }
4305 }
4306 CMSTokenSync x(true); // is cms thread
4307 if (_collectorState != Idling) {
4308 assert(_collectorState == AbortablePreclean,
4309 "Spontaneous state transition?");
4310 _collectorState = FinalMarking;
4311 } // Else, a foreground collection completed this CMS cycle.
4312 return;
4313 }
4315 // Respond to an Eden sampling opportunity
4316 void CMSCollector::sample_eden() {
4317 // Make sure a young gc cannot sneak in between our
4318 // reading and recording of a sample.
4319 assert(Thread::current()->is_ConcurrentGC_thread(),
4320 "Only the cms thread may collect Eden samples");
4321 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
4322 "Should collect samples while holding CMS token");
4323 if (!_start_sampling) {
4324 return;
4325 }
4326 if (_eden_chunk_array) {
4327 if (_eden_chunk_index < _eden_chunk_capacity) {
4328 _eden_chunk_array[_eden_chunk_index] = *_top_addr; // take sample
4329 assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
4330 "Unexpected state of Eden");
4331 // We'd like to check that what we just sampled is an oop-start address;
4332 // however, we cannot do that here since the object may not yet have been
4333 // initialized. So we'll instead do the check when we _use_ this sample
4334 // later.
4335 if (_eden_chunk_index == 0 ||
4336 (pointer_delta(_eden_chunk_array[_eden_chunk_index],
4337 _eden_chunk_array[_eden_chunk_index-1])
4338 >= CMSSamplingGrain)) {
4339 _eden_chunk_index++; // commit sample
4340 }
4341 }
4342 }
4343 if ((_collectorState == AbortablePreclean) && !_abort_preclean) {
4344 size_t used = get_eden_used();
4345 size_t capacity = get_eden_capacity();
4346 assert(used <= capacity, "Unexpected state of Eden");
4347 if (used > (capacity/100 * CMSScheduleRemarkEdenPenetration)) {
4348 _abort_preclean = true;
4349 }
4350 }
4351 }
4354 size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
4355 assert(_collectorState == Precleaning ||
4356 _collectorState == AbortablePreclean, "incorrect state");
4357 ResourceMark rm;
4358 HandleMark hm;
4359 // Do one pass of scrubbing the discovered reference lists
4360 // to remove any reference objects with strongly-reachable
4361 // referents.
4362 if (clean_refs) {
4363 ReferenceProcessor* rp = ref_processor();
4364 CMSPrecleanRefsYieldClosure yield_cl(this);
4365 assert(rp->span().equals(_span), "Spans should be equal");
4366 CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
4367 &_markStack);
4368 CMSDrainMarkingStackClosure complete_trace(this,
4369 _span, &_markBitMap, &_markStack,
4370 &keep_alive);
4372 // We don't want this step to interfere with a young
4373 // collection because we don't want to take CPU
4374 // or memory bandwidth away from the young GC threads
4375 // (which may be as many as there are CPUs).
4376 // Note that we don't need to protect ourselves from
4377 // interference with mutators because they can't
4378 // manipulate the discovered reference lists nor affect
4379 // the computed reachability of the referents, the
4380 // only properties manipulated by the precleaning
4381 // of these reference lists.
4382 stopTimer();
4383 CMSTokenSyncWithLocks x(true /* is cms thread */,
4384 bitMapLock());
4385 startTimer();
4386 sample_eden();
4387 // The following will yield to allow foreground
4388 // collection to proceed promptly. XXX YSR:
4389 // The code in this method may need further
4390 // tweaking for better performance and some restructuring
4391 // for cleaner interfaces.
4392 rp->preclean_discovered_references(
4393 rp->is_alive_non_header(), &keep_alive, &complete_trace,
4394 &yield_cl);
4395 }
4397 if (clean_survivor) { // preclean the active survivor space(s)
4398 assert(_young_gen->kind() == Generation::DefNew ||
4399 _young_gen->kind() == Generation::ParNew ||
4400 _young_gen->kind() == Generation::ASParNew,
4401 "incorrect type for cast");
4402 DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
4403 PushAndMarkClosure pam_cl(this, _span, ref_processor(),
4404 &_markBitMap, &_modUnionTable,
4405 &_markStack, &_revisitStack,
4406 true /* precleaning phase */);
4407 stopTimer();
4408 CMSTokenSyncWithLocks ts(true /* is cms thread */,
4409 bitMapLock());
4410 startTimer();
4411 unsigned int before_count =
4412 GenCollectedHeap::heap()->total_collections();
4413 SurvivorSpacePrecleanClosure
4414 sss_cl(this, _span, &_markBitMap, &_markStack,
4415 &pam_cl, before_count, CMSYield);
4416 dng->from()->object_iterate_careful(&sss_cl);
4417 dng->to()->object_iterate_careful(&sss_cl);
4418 }
4419 MarkRefsIntoAndScanClosure
4420 mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
4421 &_markStack, &_revisitStack, this, CMSYield,
4422 true /* precleaning phase */);
4423 // CAUTION: The following closure has persistent state that may need to
4424 // be reset upon a decrease in the sequence of addresses it
4425 // processes.
4426 ScanMarkedObjectsAgainCarefullyClosure
4427 smoac_cl(this, _span,
4428 &_markBitMap, &_markStack, &_revisitStack, &mrias_cl, CMSYield);
4430 // Preclean dirty cards in ModUnionTable and CardTable using
4431 // appropriate convergence criterion;
4432 // repeat CMSPrecleanIter times unless we find that
4433 // we are losing.
4434 assert(CMSPrecleanIter < 10, "CMSPrecleanIter is too large");
4435 assert(CMSPrecleanNumerator < CMSPrecleanDenominator,
4436 "Bad convergence multiplier");
4437 assert(CMSPrecleanThreshold >= 100,
4438 "Unreasonably low CMSPrecleanThreshold");
4440 size_t numIter, cumNumCards, lastNumCards, curNumCards;
4441 for (numIter = 0, cumNumCards = lastNumCards = curNumCards = 0;
4442 numIter < CMSPrecleanIter;
4443 numIter++, lastNumCards = curNumCards, cumNumCards += curNumCards) {
4444 curNumCards = preclean_mod_union_table(_cmsGen, &smoac_cl);
4445 if (CMSPermGenPrecleaningEnabled) {
4446 curNumCards += preclean_mod_union_table(_permGen, &smoac_cl);
4447 }
4448 if (Verbose && PrintGCDetails) {
4449 gclog_or_tty->print(" (modUnionTable: %d cards)", curNumCards);
4450 }
4451 // Either there are very few dirty cards, so re-mark
4452 // pause will be small anyway, or our pre-cleaning isn't
4453 // that much faster than the rate at which cards are being
4454 // dirtied, so we might as well stop and re-mark since
4455 // precleaning won't improve our re-mark time by much.
4456 if (curNumCards <= CMSPrecleanThreshold ||
4457 (numIter > 0 &&
4458 (curNumCards * CMSPrecleanDenominator >
4459 lastNumCards * CMSPrecleanNumerator))) {
4460 numIter++;
4461 cumNumCards += curNumCards;
4462 break;
4463 }
4464 }
4465 curNumCards = preclean_card_table(_cmsGen, &smoac_cl);
4466 if (CMSPermGenPrecleaningEnabled) {
4467 curNumCards += preclean_card_table(_permGen, &smoac_cl);
4468 }
4469 cumNumCards += curNumCards;
4470 if (PrintGCDetails && PrintCMSStatistics != 0) {
4471 gclog_or_tty->print_cr(" (cardTable: %d cards, re-scanned %d cards, %d iterations)",
4472 curNumCards, cumNumCards, numIter);
4473 }
4474 return cumNumCards; // as a measure of useful work done
4475 }
4477 // PRECLEANING NOTES:
4478 // Precleaning involves:
4479 // . reading the bits of the modUnionTable and clearing the set bits.
4480 // . For the cards corresponding to the set bits, we scan the
4481 // objects on those cards. This means we need the free_list_lock
4482 // so that we can safely iterate over the CMS space when scanning
4483 // for oops.
4484 // . When we scan the objects, we'll be both reading and setting
4485 // marks in the marking bit map, so we'll need the marking bit map.
4486 // . For protecting _collector_state transitions, we take the CGC_lock.
4487 // Note that any races in the reading of card table entries by the
4488 // CMS thread on the one hand and the clearing of those entries by the
4489 // VM thread or the setting of those entries by the mutator threads on the
4490 // other are quite benign. However, for efficiency it makes sense to keep
4491 // the VM thread from racing with the CMS thread while the latter is
4492 // transferring dirty card info to the modUnionTable. We therefore also use the
4493 // CGC_lock to protect the reading of the card table and the mod union
4494 // table by the CMS thread.
4495 // . We run concurrently with mutator updates, so scanning
4496 // needs to be done carefully -- we should not try to scan
4497 // potentially uninitialized objects.
4498 //
4499 // Locking strategy: While holding the CGC_lock, we scan over and
4500 // reset a maximal dirty range of the mod union / card tables, then lock
4501 // the free_list_lock and bitmap lock to do a full marking, then
4502 // release these locks; and repeat the cycle. This allows for a
4503 // certain amount of fairness in the sharing of these locks between
4504 // the CMS collector on the one hand, and the VM thread and the
4505 // mutators on the other.
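// In outline, each iteration of the loops in the two methods below does:
//   1. under the CMS token alone: find and clear a maximal contiguous run of
//      dirty cards (in the mod union table or card table, respectively);
//   2. under the CMS token plus the freelistLock and bitMapLock: carefully
//      scan the objects on those cards, marking and pushing as needed;
//   3. release the locks and resume from the end of the range just scanned
//      (or from the next block boundary, if the scan stopped early).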
4507 // NOTE: preclean_mod_union_table() and preclean_card_table()
4508 // further below are largely identical; if you need to modify
4509 // one of these methods, please check the other method too.
4511 size_t CMSCollector::preclean_mod_union_table(
4512 ConcurrentMarkSweepGeneration* gen,
4513 ScanMarkedObjectsAgainCarefullyClosure* cl) {
4514 verify_work_stacks_empty();
4515 verify_overflow_empty();
4517 // strategy: starting with the first card, accumulate contiguous
4518 // ranges of dirty cards; clear these cards, then scan the region
4519 // covered by these cards.
4521 // Since all of the MUT is committed ahead, we can just use
4522 // that, in case the generations expand while we are precleaning.
4523 // It might also be fine to just use the committed part of the
4524 // generation, but we might potentially miss cards when the
4525 // generation is rapidly expanding while we are in the midst
4526 // of precleaning.
4527 HeapWord* startAddr = gen->reserved().start();
4528 HeapWord* endAddr = gen->reserved().end();
4530 cl->setFreelistLock(gen->freelistLock()); // needed for yielding
4532 size_t numDirtyCards, cumNumDirtyCards;
4533 HeapWord *nextAddr, *lastAddr;
4534 for (cumNumDirtyCards = numDirtyCards = 0,
4535 nextAddr = lastAddr = startAddr;
4536 nextAddr < endAddr;
4537 nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
4539 ResourceMark rm;
4540 HandleMark hm;
4542 MemRegion dirtyRegion;
4543 {
4544 stopTimer();
4545 CMSTokenSync ts(true);
4546 startTimer();
4547 sample_eden();
4548       // Get dirty region starting at nextAddr (inclusive),
4549 // simultaneously clearing it.
4550 dirtyRegion =
4551 _modUnionTable.getAndClearMarkedRegion(nextAddr, endAddr);
4552 assert(dirtyRegion.start() >= nextAddr,
4553 "returned region inconsistent?");
4554 }
4555 // Remember where the next search should begin.
4556 // The returned region (if non-empty) is a right open interval,
4557     // so lastAddr is obtained from the right end of that
4558 // interval.
4559 lastAddr = dirtyRegion.end();
4560 // Should do something more transparent and less hacky XXX
4561 numDirtyCards =
4562 _modUnionTable.heapWordDiffToOffsetDiff(dirtyRegion.word_size());
4564 // We'll scan the cards in the dirty region (with periodic
4565 // yields for foreground GC as needed).
4566 if (!dirtyRegion.is_empty()) {
4567 assert(numDirtyCards > 0, "consistency check");
4568 HeapWord* stop_point = NULL;
4569 {
4570 stopTimer();
4571 CMSTokenSyncWithLocks ts(true, gen->freelistLock(),
4572 bitMapLock());
4573 startTimer();
4574 verify_work_stacks_empty();
4575 verify_overflow_empty();
4576 sample_eden();
4577 stop_point =
4578 gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4579 }
4580 if (stop_point != NULL) {
4581 // The careful iteration stopped early either because it found an
4582 // uninitialized object, or because we were in the midst of an
4583 // "abortable preclean", which should now be aborted. Redirty
4584 // the bits corresponding to the partially-scanned or unscanned
4585 // cards. We'll either restart at the next block boundary or
4586 // abort the preclean.
4587 assert((CMSPermGenPrecleaningEnabled && (gen == _permGen)) ||
4588 (_collectorState == AbortablePreclean && should_abort_preclean()),
4589 "Unparsable objects should only be in perm gen.");
4591 stopTimer();
4592 CMSTokenSyncWithLocks ts(true, bitMapLock());
4593 startTimer();
4594 _modUnionTable.mark_range(MemRegion(stop_point, dirtyRegion.end()));
4595 if (should_abort_preclean()) {
4596 break; // out of preclean loop
4597 } else {
4598 // Compute the next address at which preclean should pick up;
4599 // might need bitMapLock in order to read P-bits.
4600 lastAddr = next_card_start_after_block(stop_point);
4601 }
4602 }
4603 } else {
4604 assert(lastAddr == endAddr, "consistency check");
4605 assert(numDirtyCards == 0, "consistency check");
4606 break;
4607 }
4608 }
4609 verify_work_stacks_empty();
4610 verify_overflow_empty();
4611 return cumNumDirtyCards;
4612 }
4614 // NOTE: preclean_mod_union_table() above and preclean_card_table()
4615 // below are largely identical; if you need to modify
4616 // one of these methods, please check the other method too.
4618 size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* gen,
4619 ScanMarkedObjectsAgainCarefullyClosure* cl) {
4620   // strategy: it's similar to preclean_mod_union_table above, in that
4621 // we accumulate contiguous ranges of dirty cards, mark these cards
4622 // precleaned, then scan the region covered by these cards.
4623 HeapWord* endAddr = (HeapWord*)(gen->_virtual_space.high());
4624 HeapWord* startAddr = (HeapWord*)(gen->_virtual_space.low());
4626 cl->setFreelistLock(gen->freelistLock()); // needed for yielding
4628 size_t numDirtyCards, cumNumDirtyCards;
4629 HeapWord *lastAddr, *nextAddr;
4631 for (cumNumDirtyCards = numDirtyCards = 0,
4632 nextAddr = lastAddr = startAddr;
4633 nextAddr < endAddr;
4634 nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
4636 ResourceMark rm;
4637 HandleMark hm;
4639 MemRegion dirtyRegion;
4640 {
4641 // See comments in "Precleaning notes" above on why we
4642 // do this locking. XXX Could the locking overheads be
4643 // too high when dirty cards are sparse? [I don't think so.]
4644 stopTimer();
4645 CMSTokenSync x(true); // is cms thread
4646 startTimer();
4647 sample_eden();
4648 // Get and clear dirty region from card table
4649 dirtyRegion = _ct->ct_bs()->dirty_card_range_after_reset(
4650 MemRegion(nextAddr, endAddr),
4651 true,
4652 CardTableModRefBS::precleaned_card_val());
4654 assert(dirtyRegion.start() >= nextAddr,
4655 "returned region inconsistent?");
4656 }
4657 lastAddr = dirtyRegion.end();
4658 numDirtyCards =
4659 dirtyRegion.word_size()/CardTableModRefBS::card_size_in_words;
4661 if (!dirtyRegion.is_empty()) {
4662 stopTimer();
4663 CMSTokenSyncWithLocks ts(true, gen->freelistLock(), bitMapLock());
4664 startTimer();
4665 sample_eden();
4666 verify_work_stacks_empty();
4667 verify_overflow_empty();
4668 HeapWord* stop_point =
4669 gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4670 if (stop_point != NULL) {
4671 // The careful iteration stopped early because it found an
4672 // uninitialized object. Redirty the bits corresponding to the
4673 // partially-scanned or unscanned cards, and start again at the
4674 // next block boundary.
4675 assert(CMSPermGenPrecleaningEnabled ||
4676 (_collectorState == AbortablePreclean && should_abort_preclean()),
4677 "Unparsable objects should only be in perm gen.");
4678 _ct->ct_bs()->invalidate(MemRegion(stop_point, dirtyRegion.end()));
4679 if (should_abort_preclean()) {
4680 break; // out of preclean loop
4681 } else {
4682 // Compute the next address at which preclean should pick up.
4683 lastAddr = next_card_start_after_block(stop_point);
4684 }
4685 }
4686 } else {
4687 break;
4688 }
4689 }
4690 verify_work_stacks_empty();
4691 verify_overflow_empty();
4692 return cumNumDirtyCards;
4693 }
4695 void CMSCollector::checkpointRootsFinal(bool asynch,
4696 bool clear_all_soft_refs, bool init_mark_was_synchronous) {
4697 assert(_collectorState == FinalMarking, "incorrect state transition?");
4698 check_correct_thread_executing();
4699 // world is stopped at this checkpoint
4700 assert(SafepointSynchronize::is_at_safepoint(),
4701 "world should be stopped");
4702 verify_work_stacks_empty();
4703 verify_overflow_empty();
4705 SpecializationStats::clear();
4706 if (PrintGCDetails) {
4707 gclog_or_tty->print("[YG occupancy: "SIZE_FORMAT" K ("SIZE_FORMAT" K)]",
4708 _young_gen->used() / K,
4709 _young_gen->capacity() / K);
4710 }
4711 if (asynch) {
4712 if (CMSScavengeBeforeRemark) {
4713 GenCollectedHeap* gch = GenCollectedHeap::heap();
4714       // Temporarily set flag to false, since GCH->do_collection expects
4715       // it to be false and will set it to true itself.
4716 FlagSetting fl(gch->_is_gc_active, false);
4717 NOT_PRODUCT(TraceTime t("Scavenge-Before-Remark",
4718 PrintGCDetails && Verbose, true, gclog_or_tty);)
4719 int level = _cmsGen->level() - 1;
4720 if (level >= 0) {
4721 gch->do_collection(true, // full (i.e. force, see below)
4722 false, // !clear_all_soft_refs
4723 0, // size
4724 false, // is_tlab
4725 level // max_level
4726 );
4727 }
4728 }
4729 FreelistLocker x(this);
4730 MutexLockerEx y(bitMapLock(),
4731 Mutex::_no_safepoint_check_flag);
4732 assert(!init_mark_was_synchronous, "but that's impossible!");
4733 checkpointRootsFinalWork(asynch, clear_all_soft_refs, false);
4734 } else {
4735 // already have all the locks
4736 checkpointRootsFinalWork(asynch, clear_all_soft_refs,
4737 init_mark_was_synchronous);
4738 }
4739 verify_work_stacks_empty();
4740 verify_overflow_empty();
4741 SpecializationStats::print();
4742 }
4744 void CMSCollector::checkpointRootsFinalWork(bool asynch,
4745 bool clear_all_soft_refs, bool init_mark_was_synchronous) {
4747 NOT_PRODUCT(TraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, gclog_or_tty);)
4749 assert(haveFreelistLocks(), "must have free list locks");
4750 assert_lock_strong(bitMapLock());
4752 if (UseAdaptiveSizePolicy) {
4753 size_policy()->checkpoint_roots_final_begin();
4754 }
4756 ResourceMark rm;
4757 HandleMark hm;
4759 GenCollectedHeap* gch = GenCollectedHeap::heap();
4761 if (should_unload_classes()) {
4762 CodeCache::gc_prologue();
4763 }
4764 assert(haveFreelistLocks(), "must have free list locks");
4765 assert_lock_strong(bitMapLock());
4767 if (!init_mark_was_synchronous) {
4768 // We might assume that we need not fill TLAB's when
4769 // CMSScavengeBeforeRemark is set, because we may have just done
4770 // a scavenge which would have filled all TLAB's -- and besides
4771 // Eden would be empty. This however may not always be the case --
4772 // for instance although we asked for a scavenge, it may not have
4773 // happened because of a JNI critical section. We probably need
4774 // a policy for deciding whether we can in that case wait until
4775 // the critical section releases and then do the remark following
4776 // the scavenge, and skip it here. In the absence of that policy,
4777 // or of an indication of whether the scavenge did indeed occur,
4778 // we cannot rely on TLAB's having been filled and must do
4779 // so here just in case a scavenge did not happen.
4780 gch->ensure_parsability(false); // fill TLAB's, but no need to retire them
4781 // Update the saved marks which may affect the root scans.
4782 gch->save_marks();
4784 {
4785 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
4787 // Note on the role of the mod union table:
4788 // Since the marker in "markFromRoots" marks concurrently with
4789 // mutators, it is possible for some reachable objects not to have been
4790       // scanned. For instance, the only reference to an object A was
4791 // placed in object B after the marker scanned B. Unless B is rescanned,
4792 // A would be collected. Such updates to references in marked objects
4793 // are detected via the mod union table which is the set of all cards
4794 // dirtied since the first checkpoint in this GC cycle and prior to
4795 // the most recent young generation GC, minus those cleaned up by the
4796 // concurrent precleaning.
4797 if (CMSParallelRemarkEnabled && ParallelGCThreads > 0) {
4798 TraceTime t("Rescan (parallel) ", PrintGCDetails, false, gclog_or_tty);
4799 do_remark_parallel();
4800 } else {
4801 TraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
4802 gclog_or_tty);
4803 do_remark_non_parallel();
4804 }
4805 }
4806 } else {
4807 assert(!asynch, "Can't have init_mark_was_synchronous in asynch mode");
4808 // The initial mark was stop-world, so there's no rescanning to
4809 // do; go straight on to the next step below.
4810 }
4811 verify_work_stacks_empty();
4812 verify_overflow_empty();
4814 {
4815 NOT_PRODUCT(TraceTime ts("refProcessingWork", PrintGCDetails, false, gclog_or_tty);)
4816 refProcessingWork(asynch, clear_all_soft_refs);
4817 }
4818 verify_work_stacks_empty();
4819 verify_overflow_empty();
4821 if (should_unload_classes()) {
4822 CodeCache::gc_epilogue();
4823 }
4825 // If we encountered any (marking stack / work queue) overflow
4826 // events during the current CMS cycle, take appropriate
4827 // remedial measures, where possible, so as to try and avoid
4828 // recurrence of that condition.
4829 assert(_markStack.isEmpty(), "No grey objects");
4830 size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
4831 _ser_kac_ovflw;
4832 if (ser_ovflw > 0) {
4833 if (PrintCMSStatistics != 0) {
4834 gclog_or_tty->print_cr("Marking stack overflow (benign) "
4835 "(pmc_pc="SIZE_FORMAT", pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT")",
4836 _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw,
4837 _ser_kac_ovflw);
4838 }
4839 _markStack.expand();
4840 _ser_pmc_remark_ovflw = 0;
4841 _ser_pmc_preclean_ovflw = 0;
4842 _ser_kac_ovflw = 0;
4843 }
4844 if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) {
4845 if (PrintCMSStatistics != 0) {
4846 gclog_or_tty->print_cr("Work queue overflow (benign) "
4847 "(pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT")",
4848 _par_pmc_remark_ovflw, _par_kac_ovflw);
4849 }
4850 _par_pmc_remark_ovflw = 0;
4851 _par_kac_ovflw = 0;
4852 }
4853 if (PrintCMSStatistics != 0) {
4854 if (_markStack._hit_limit > 0) {
4855 gclog_or_tty->print_cr(" (benign) Hit max stack size limit ("SIZE_FORMAT")",
4856 _markStack._hit_limit);
4857 }
4858 if (_markStack._failed_double > 0) {
4859 gclog_or_tty->print_cr(" (benign) Failed stack doubling ("SIZE_FORMAT"),"
4860 " current capacity "SIZE_FORMAT,
4861 _markStack._failed_double,
4862 _markStack.capacity());
4863 }
4864 }
4865 _markStack._hit_limit = 0;
4866 _markStack._failed_double = 0;
4868 if ((VerifyAfterGC || VerifyDuringGC) &&
4869 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
4870 verify_after_remark();
4871 }
4873 // Change under the freelistLocks.
4874 _collectorState = Sweeping;
4875 // Call isAllClear() under bitMapLock
4876 assert(_modUnionTable.isAllClear(), "Should be clear by end of the"
4877 " final marking");
4878 if (UseAdaptiveSizePolicy) {
4879 size_policy()->checkpoint_roots_final_end(gch->gc_cause());
4880 }
4881 }
4883 // Parallel remark task
4884 class CMSParRemarkTask: public AbstractGangTask {
4885 CMSCollector* _collector;
4886 WorkGang* _workers;
4887 int _n_workers;
4888 CompactibleFreeListSpace* _cms_space;
4889 CompactibleFreeListSpace* _perm_space;
4891 // The per-thread work queues, available here for stealing.
4892 OopTaskQueueSet* _task_queues;
4893 ParallelTaskTerminator _term;
4895 public:
4896 CMSParRemarkTask(CMSCollector* collector,
4897 CompactibleFreeListSpace* cms_space,
4898 CompactibleFreeListSpace* perm_space,
4899 int n_workers, WorkGang* workers,
4900 OopTaskQueueSet* task_queues):
4901 AbstractGangTask("Rescan roots and grey objects in parallel"),
4902 _collector(collector),
4903 _cms_space(cms_space), _perm_space(perm_space),
4904 _n_workers(n_workers),
4905 _workers(workers),
4906 _task_queues(task_queues),
4907 _term(workers->total_workers(), task_queues) { }
4909 OopTaskQueueSet* task_queues() { return _task_queues; }
4911 OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
4913 ParallelTaskTerminator* terminator() { return &_term; }
4915 void work(int i);
4917 private:
4918 // Work method in support of parallel rescan ... of young gen spaces
4919 void do_young_space_rescan(int i, Par_MarkRefsIntoAndScanClosure* cl,
4920 ContiguousSpace* space,
4921 HeapWord** chunk_array, size_t chunk_top);
4923 // ... of dirty cards in old space
4924 void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i,
4925 Par_MarkRefsIntoAndScanClosure* cl);
4927 // ... work stealing for the above
4928 void do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl, int* seed);
4929 };
4931 void CMSParRemarkTask::work(int i) {
4932 elapsedTimer _timer;
4933 ResourceMark rm;
4934 HandleMark hm;
4936 // ---------- rescan from roots --------------
4937 _timer.start();
4938 GenCollectedHeap* gch = GenCollectedHeap::heap();
4939 Par_MarkRefsIntoAndScanClosure par_mrias_cl(_collector,
4940 _collector->_span, _collector->ref_processor(),
4941 &(_collector->_markBitMap),
4942 work_queue(i), &(_collector->_revisitStack));
4944 // Rescan young gen roots first since these are likely
4945 // coarsely partitioned and may, on that account, constitute
4946 // the critical path; thus, it's best to start off that
4947 // work first.
4948 // ---------- young gen roots --------------
4949 {
4950 DefNewGeneration* dng = _collector->_young_gen->as_DefNewGeneration();
4951 EdenSpace* eden_space = dng->eden();
4952 ContiguousSpace* from_space = dng->from();
4953 ContiguousSpace* to_space = dng->to();
4955 HeapWord** eca = _collector->_eden_chunk_array;
4956 size_t ect = _collector->_eden_chunk_index;
4957 HeapWord** sca = _collector->_survivor_chunk_array;
4958 size_t sct = _collector->_survivor_chunk_index;
4960 assert(ect <= _collector->_eden_chunk_capacity, "out of bounds");
4961 assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds");
4963 do_young_space_rescan(i, &par_mrias_cl, to_space, NULL, 0);
4964 do_young_space_rescan(i, &par_mrias_cl, from_space, sca, sct);
4965 do_young_space_rescan(i, &par_mrias_cl, eden_space, eca, ect);
4967 _timer.stop();
4968 if (PrintCMSStatistics != 0) {
4969 gclog_or_tty->print_cr(
4970 "Finished young gen rescan work in %dth thread: %3.3f sec",
4971 i, _timer.seconds());
4972 }
4973 }
4975 // ---------- remaining roots --------------
4976 _timer.reset();
4977 _timer.start();
4978 gch->gen_process_strong_roots(_collector->_cmsGen->level(),
4979 false, // yg was scanned above
4980 true, // collecting perm gen
4981 SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
4982 NULL, &par_mrias_cl);
4983 _timer.stop();
4984 if (PrintCMSStatistics != 0) {
4985 gclog_or_tty->print_cr(
4986 "Finished remaining root rescan work in %dth thread: %3.3f sec",
4987 i, _timer.seconds());
4988 }
4990 // ---------- rescan dirty cards ------------
4991 _timer.reset();
4992 _timer.start();
4994 // Do the rescan tasks for each of the two spaces
4995 // (cms_space and perm_space) in turn.
4996 do_dirty_card_rescan_tasks(_cms_space, i, &par_mrias_cl);
4997 do_dirty_card_rescan_tasks(_perm_space, i, &par_mrias_cl);
4998 _timer.stop();
4999 if (PrintCMSStatistics != 0) {
5000 gclog_or_tty->print_cr(
5001 "Finished dirty card rescan work in %dth thread: %3.3f sec",
5002 i, _timer.seconds());
5003 }
5005 // ---------- steal work from other threads ...
5006 // ---------- ... and drain overflow list.
5007 _timer.reset();
5008 _timer.start();
5009 do_work_steal(i, &par_mrias_cl, _collector->hash_seed(i));
5010 _timer.stop();
5011 if (PrintCMSStatistics != 0) {
5012 gclog_or_tty->print_cr(
5013 "Finished work stealing in %dth thread: %3.3f sec",
5014 i, _timer.seconds());
5015 }
5016 }
5018 void
5019 CMSParRemarkTask::do_young_space_rescan(int i,
5020 Par_MarkRefsIntoAndScanClosure* cl, ContiguousSpace* space,
5021 HeapWord** chunk_array, size_t chunk_top) {
5022 // Until all tasks completed:
5023 // . claim an unclaimed task
5024 // . compute region boundaries corresponding to task claimed
5025 // using chunk_array
5026 // . par_oop_iterate(cl) over that region
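  // The region for task k, given chunk_array[0 .. chunk_top) of sampled
  // boundaries, is:
  //   k == 0:              [space->bottom(), chunk_array[0])
  //   0 < k < chunk_top:   [chunk_array[k-1], chunk_array[k])
  //   k == chunk_top:      [chunk_array[chunk_top-1], space->top())
  // and when no samples were taken (chunk_top == 0) there is a single task
  // covering [space->bottom(), space->top()).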
5028 ResourceMark rm;
5029 HandleMark hm;
5031 SequentialSubTasksDone* pst = space->par_seq_tasks();
5032 assert(pst->valid(), "Uninitialized use?");
5034 int nth_task = 0;
5035 int n_tasks = pst->n_tasks();
5037 HeapWord *start, *end;
5038 while (!pst->is_task_claimed(/* reference */ nth_task)) {
5039 // We claimed task # nth_task; compute its boundaries.
5040 if (chunk_top == 0) { // no samples were taken
5041 assert(nth_task == 0 && n_tasks == 1, "Can have only 1 EdenSpace task");
5042 start = space->bottom();
5043 end = space->top();
5044 } else if (nth_task == 0) {
5045 start = space->bottom();
5046 end = chunk_array[nth_task];
5047 } else if (nth_task < (jint)chunk_top) {
5048 assert(nth_task >= 1, "Control point invariant");
5049 start = chunk_array[nth_task - 1];
5050 end = chunk_array[nth_task];
5051 } else {
5052 assert(nth_task == (jint)chunk_top, "Control point invariant");
5053 start = chunk_array[chunk_top - 1];
5054 end = space->top();
5055 }
5056 MemRegion mr(start, end);
5057 // Verify that mr is in space
5058 assert(mr.is_empty() || space->used_region().contains(mr),
5059 "Should be in space");
5060 // Verify that "start" is an object boundary
5061 assert(mr.is_empty() || oop(mr.start())->is_oop(),
5062 "Should be an oop");
5063 space->par_oop_iterate(mr, cl);
5064 }
5065 pst->all_tasks_completed();
5066 }
5068 void
5069 CMSParRemarkTask::do_dirty_card_rescan_tasks(
5070 CompactibleFreeListSpace* sp, int i,
5071 Par_MarkRefsIntoAndScanClosure* cl) {
5072 // Until all tasks completed:
5073 // . claim an unclaimed task
5074 // . compute region boundaries corresponding to task claimed
5075 // . transfer dirty bits ct->mut for that region
5076 // . apply rescanclosure to dirty mut bits for that region
5078 ResourceMark rm;
5079 HandleMark hm;
5081 OopTaskQueue* work_q = work_queue(i);
5082 ModUnionClosure modUnionClosure(&(_collector->_modUnionTable));
5083 // CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION!
5084 // CAUTION: This closure has state that persists across calls to
5085 // the work method dirty_range_iterate_clear() in that it has
5086   // embedded in it a (subtype of) UpwardsObjectClosure. The
5087   // use of that state in the embedded UpwardsObjectClosure instance
5088 // assumes that the cards are always iterated (even if in parallel
5089 // by several threads) in monotonically increasing order per each
5090 // thread. This is true of the implementation below which picks
5091 // card ranges (chunks) in monotonically increasing order globally
5092   // and, a fortiori, in monotonically increasing order per thread
5093 // (the latter order being a subsequence of the former).
5094 // If the work code below is ever reorganized into a more chaotic
5095 // work-partitioning form than the current "sequential tasks"
5096 // paradigm, the use of that persistent state will have to be
5097 // revisited and modified appropriately. See also related
5098   // bug 4756801, work on which should examine this code to make
5099 // sure that the changes there do not run counter to the
5100 // assumptions made here and necessary for correctness and
5101 // efficiency. Note also that this code might yield inefficient
5102 // behaviour in the case of very large objects that span one or
5103 // more work chunks. Such objects would potentially be scanned
5104 // several times redundantly. Work on 4756801 should try and
5105 // address that performance anomaly if at all possible. XXX
5106 MemRegion full_span = _collector->_span;
5107 CMSBitMap* bm = &(_collector->_markBitMap); // shared
5108 CMSMarkStack* rs = &(_collector->_revisitStack); // shared
5109 MarkFromDirtyCardsClosure
5110 greyRescanClosure(_collector, full_span, // entire span of interest
5111 sp, bm, work_q, rs, cl);
5113 SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
5114 assert(pst->valid(), "Uninitialized use?");
5115 int nth_task = 0;
5116 const int alignment = CardTableModRefBS::card_size * BitsPerWord;
5117 MemRegion span = sp->used_region();
5118 HeapWord* start_addr = span.start();
5119 HeapWord* end_addr = (HeapWord*)round_to((intptr_t)span.end(),
5120 alignment);
5121 const size_t chunk_size = sp->rescan_task_size(); // in HeapWord units
5122 assert((HeapWord*)round_to((intptr_t)start_addr, alignment) ==
5123 start_addr, "Check alignment");
5124 assert((size_t)round_to((intptr_t)chunk_size, alignment) ==
5125 chunk_size, "Check alignment");
5127 while (!pst->is_task_claimed(/* reference */ nth_task)) {
5128 // Having claimed the nth_task, compute corresponding mem-region,
5129     // which is a fortiori aligned correctly (i.e. at a MUT boundary).
5130 // The alignment restriction ensures that we do not need any
5131 // synchronization with other gang-workers while setting or
5132     // clearing bits in this chunk of the MUT.
5133 MemRegion this_span = MemRegion(start_addr + nth_task*chunk_size,
5134 start_addr + (nth_task+1)*chunk_size);
5135 // The last chunk's end might be way beyond end of the
5136 // used region. In that case pull back appropriately.
5137 if (this_span.end() > end_addr) {
5138 this_span.set_end(end_addr);
5139 assert(!this_span.is_empty(), "Program logic (calculation of n_tasks)");
5140 }
5141 // Iterate over the dirty cards covering this chunk, marking them
5142 // precleaned, and setting the corresponding bits in the mod union
5143 // table. Since we have been careful to partition at Card and MUT-word
5144 // boundaries no synchronization is needed between parallel threads.
5145 _collector->_ct->ct_bs()->dirty_card_iterate(this_span,
5146 &modUnionClosure);
5148 // Having transferred these marks into the modUnionTable,
5149 // rescan the marked objects on the dirty cards in the modUnionTable.
5150 // Even if this is at a synchronous collection, the initial marking
5151 // may have been done during an asynchronous collection so there
5152 // may be dirty bits in the mod-union table.
5153 _collector->_modUnionTable.dirty_range_iterate_clear(
5154 this_span, &greyRescanClosure);
5155 _collector->_modUnionTable.verifyNoOneBitsInRange(
5156 this_span.start(),
5157 this_span.end());
5158 }
5159 pst->all_tasks_completed(); // declare that i am done
5160 }
5162 // . see if we can share work_queues with ParNew? XXX
5163 void
5164 CMSParRemarkTask::do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl,
5165 int* seed) {
5166 OopTaskQueue* work_q = work_queue(i);
5167 NOT_PRODUCT(int num_steals = 0;)
5168 oop obj_to_scan;
5169 CMSBitMap* bm = &(_collector->_markBitMap);
5170 size_t num_from_overflow_list =
5171 MIN2((size_t)work_q->max_elems()/4,
5172 (size_t)ParGCDesiredObjsFromOverflowList);
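  // The loop below interleaves three sources of work until global
  // termination: drain the local queue completely, then refill it from the
  // global overflow list (at most a quarter of the queue's capacity, capped
  // by ParGCDesiredObjsFromOverflowList, per refill), and only when both are
  // empty try to steal from other workers; termination is offered only once
  // all three come up empty.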
5174 while (true) {
5175 // Completely finish any left over work from (an) earlier round(s)
5176 cl->trim_queue(0);
5177 // Now check if there's any work in the overflow list
5178 if (_collector->par_take_from_overflow_list(num_from_overflow_list,
5179 work_q)) {
5180 // found something in global overflow list;
5181 // not yet ready to go stealing work from others.
5182 // We'd like to assert(work_q->size() != 0, ...)
5183 // because we just took work from the overflow list,
5184 // but of course we can't since all of that could have
5185 // been already stolen from us.
5186 // "He giveth and He taketh away."
5187 continue;
5188 }
5189 // Verify that we have no work before we resort to stealing
5190 assert(work_q->size() == 0, "Have work, shouldn't steal");
5191 // Try to steal from other queues that have work
5192 if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
5193 NOT_PRODUCT(num_steals++;)
5194 assert(obj_to_scan->is_oop(), "Oops, not an oop!");
5195 assert(bm->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
5196 // Do scanning work
5197 obj_to_scan->oop_iterate(cl);
5198 // Loop around, finish this work, and try to steal some more
5199 } else if (terminator()->offer_termination()) {
5200 break; // nirvana from the infinite cycle
5201 }
5202 }
5203 NOT_PRODUCT(
5204 if (PrintCMSStatistics != 0) {
5205 gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
5206 }
5207 )
5208 assert(work_q->size() == 0 && _collector->overflow_list_is_empty(),
5209 "Else our work is not yet done");
5210 }
5212 // Return a thread-local PLAB recording array, as appropriate.
5213 void* CMSCollector::get_data_recorder(int thr_num) {
5214 if (_survivor_plab_array != NULL &&
5215 (CMSPLABRecordAlways ||
5216 (_collectorState > Marking && _collectorState < FinalMarking))) {
5217 assert(thr_num < (int)ParallelGCThreads, "thr_num is out of bounds");
5218 ChunkArray* ca = &_survivor_plab_array[thr_num];
5219 ca->reset(); // clear it so that fresh data is recorded
5220 return (void*) ca;
5221 } else {
5222 return NULL;
5223 }
5224 }
5226 // Reset all the thread-local PLAB recording arrays
5227 void CMSCollector::reset_survivor_plab_arrays() {
5228 for (uint i = 0; i < ParallelGCThreads; i++) {
5229 _survivor_plab_array[i].reset();
5230 }
5231 }
5233 // Merge the per-thread plab arrays into the global survivor chunk
5234 // array which will provide the partitioning of the survivor space
5235 // for CMS rescan.
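// This is a k-way merge (k == ParallelGCThreads) of the per-thread arrays,
// each of which records addresses in increasing order: every round picks the
// smallest unconsumed address across all threads, appends it to
// _survivor_chunk_array, and advances that thread's cursor, stopping once
// every per-thread array is exhausted.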
5236 void CMSCollector::merge_survivor_plab_arrays(ContiguousSpace* surv) {
5237 assert(_survivor_plab_array != NULL, "Error");
5238 assert(_survivor_chunk_array != NULL, "Error");
5239 assert(_collectorState == FinalMarking, "Error");
5240 for (uint j = 0; j < ParallelGCThreads; j++) {
5241 _cursor[j] = 0;
5242 }
5243 HeapWord* top = surv->top();
5244 size_t i;
5245 for (i = 0; i < _survivor_chunk_capacity; i++) { // all sca entries
5246 HeapWord* min_val = top; // Higher than any PLAB address
5247 uint min_tid = 0; // position of min_val this round
5248 for (uint j = 0; j < ParallelGCThreads; j++) {
5249 ChunkArray* cur_sca = &_survivor_plab_array[j];
5250 if (_cursor[j] == cur_sca->end()) {
5251 continue;
5252 }
5253 assert(_cursor[j] < cur_sca->end(), "ctl pt invariant");
5254 HeapWord* cur_val = cur_sca->nth(_cursor[j]);
5255 assert(surv->used_region().contains(cur_val), "Out of bounds value");
5256 if (cur_val < min_val) {
5257 min_tid = j;
5258 min_val = cur_val;
5259 } else {
5260 assert(cur_val < top, "All recorded addresses should be less");
5261 }
5262 }
5263 // At this point min_val and min_tid are respectively
5264 // the least address in _survivor_plab_array[j]->nth(_cursor[j])
5265 // and the thread (j) that witnesses that address.
5266 // We record this address in the _survivor_chunk_array[i]
5267 // and increment _cursor[min_tid] prior to the next round i.
5268 if (min_val == top) {
5269 break;
5270 }
5271 _survivor_chunk_array[i] = min_val;
5272 _cursor[min_tid]++;
5273 }
5274 // We are all done; record the size of the _survivor_chunk_array
5275 _survivor_chunk_index = i; // exclusive: [0, i)
5276 if (PrintCMSStatistics > 0) {
5277     gclog_or_tty->print(" (Survivor:" SIZE_FORMAT " chunks) ", i);
5278 }
5279 // Verify that we used up all the recorded entries
5280 #ifdef ASSERT
5281 size_t total = 0;
5282 for (uint j = 0; j < ParallelGCThreads; j++) {
5283 assert(_cursor[j] == _survivor_plab_array[j].end(), "Ctl pt invariant");
5284 total += _cursor[j];
5285 }
5286 assert(total == _survivor_chunk_index, "Ctl Pt Invariant");
5287 // Check that the merged array is in sorted order
5288 if (total > 0) {
5289 for (size_t i = 0; i < total - 1; i++) {
5290 if (PrintCMSStatistics > 0) {
5291 gclog_or_tty->print(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ",
5292 i, _survivor_chunk_array[i]);
5293 }
5294 assert(_survivor_chunk_array[i] < _survivor_chunk_array[i+1],
5295 "Not sorted");
5296 }
5297 }
5298 #endif // ASSERT
5299 }
5301 // Set up the space's par_seq_tasks structure for work claiming
5302 // for parallel rescan of young gen.
5303 // See ParRescanTask where this is currently used.
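// The task counts set up below are: _eden_chunk_index + 1 tasks for eden,
// _survivor_chunk_index + 1 tasks for from-space, and a single task for
// to-space; the "+ 1" covers the tail region above the last recorded chunk
// boundary.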
5304 void
5305 CMSCollector::
5306 initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
5307 assert(n_threads > 0, "Unexpected n_threads argument");
5308 DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
5310 // Eden space
5311 {
5312 SequentialSubTasksDone* pst = dng->eden()->par_seq_tasks();
5313 assert(!pst->valid(), "Clobbering existing data?");
5314 // Each valid entry in [0, _eden_chunk_index) represents a task.
5315 size_t n_tasks = _eden_chunk_index + 1;
5316 assert(n_tasks == 1 || _eden_chunk_array != NULL, "Error");
5317 pst->set_par_threads(n_threads);
5318 pst->set_n_tasks((int)n_tasks);
5319 }
5321 // Merge the survivor plab arrays into _survivor_chunk_array
5322 if (_survivor_plab_array != NULL) {
5323 merge_survivor_plab_arrays(dng->from());
5324 } else {
5325 assert(_survivor_chunk_index == 0, "Error");
5326 }
5328 // To space
5329 {
5330 SequentialSubTasksDone* pst = dng->to()->par_seq_tasks();
5331 assert(!pst->valid(), "Clobbering existing data?");
5332 pst->set_par_threads(n_threads);
5333 pst->set_n_tasks(1);
5334 assert(pst->valid(), "Error");
5335 }
5337 // From space
5338 {
5339 SequentialSubTasksDone* pst = dng->from()->par_seq_tasks();
5340 assert(!pst->valid(), "Clobbering existing data?");
5341 size_t n_tasks = _survivor_chunk_index + 1;
5342 assert(n_tasks == 1 || _survivor_chunk_array != NULL, "Error");
5343 pst->set_par_threads(n_threads);
5344 pst->set_n_tasks((int)n_tasks);
5345 assert(pst->valid(), "Error");
5346 }
5347 }
5349 // Parallel version of remark
5350 void CMSCollector::do_remark_parallel() {
5351 GenCollectedHeap* gch = GenCollectedHeap::heap();
5352 WorkGang* workers = gch->workers();
5353 assert(workers != NULL, "Need parallel worker threads.");
5354 int n_workers = workers->total_workers();
5355 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
5356 CompactibleFreeListSpace* perm_space = _permGen->cmsSpace();
5358 CMSParRemarkTask tsk(this,
5359 cms_space, perm_space,
5360 n_workers, workers, task_queues());
5362 // Set up for parallel process_strong_roots work.
5363 gch->set_par_threads(n_workers);
5364 gch->change_strong_roots_parity();
5365 // We won't be iterating over the cards in the card table updating
5366   // the younger_gen cards, so we shouldn't call the following; else
5367 // the verification code as well as subsequent younger_refs_iterate
5368 // code would get confused. XXX
5369 // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
5371 // The young gen rescan work will not be done as part of
5372   // process_strong_roots (which currently doesn't know how to
5373 // parallelize such a scan), but rather will be broken up into
5374 // a set of parallel tasks (via the sampling that the [abortable]
5375 // preclean phase did of EdenSpace, plus the [two] tasks of
5376   // scanning the [two] survivor spaces). Further fine-grain
5377 // parallelization of the scanning of the survivor spaces
5378 // themselves, and of precleaning of the younger gen itself
5379 // is deferred to the future.
5380 initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
5382 // The dirty card rescan work is broken up into a "sequence"
5383 // of parallel tasks (per constituent space) that are dynamically
5384 // claimed by the parallel threads.
5385 cms_space->initialize_sequential_subtasks_for_rescan(n_workers);
5386 perm_space->initialize_sequential_subtasks_for_rescan(n_workers);
5388 // It turns out that even when we're using 1 thread, doing the work in a
5389 // separate thread causes wide variance in run times. We can't help this
5390 // in the multi-threaded case, but we special-case n=1 here to get
5391 // repeatable measurements of the 1-thread overhead of the parallel code.
5392 if (n_workers > 1) {
5393 // Make refs discovery MT-safe
5394 ReferenceProcessorMTMutator mt(ref_processor(), true);
5395 workers->run_task(&tsk);
5396 } else {
5397 tsk.work(0);
5398 }
5399 gch->set_par_threads(0); // 0 ==> non-parallel.
5400 // restore, single-threaded for now, any preserved marks
5401 // as a result of work_q overflow
5402 restore_preserved_marks_if_any();
5403 }
5405 // Non-parallel version of remark
5406 void CMSCollector::do_remark_non_parallel() {
5407 ResourceMark rm;
5408 HandleMark hm;
5409 GenCollectedHeap* gch = GenCollectedHeap::heap();
5410 MarkRefsIntoAndScanClosure
5411 mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
5412 &_markStack, &_revisitStack, this,
5413 false /* should_yield */, false /* not precleaning */);
5414 MarkFromDirtyCardsClosure
5415 markFromDirtyCardsClosure(this, _span,
5416 NULL, // space is set further below
5417 &_markBitMap, &_markStack, &_revisitStack,
5418 &mrias_cl);
5419 {
5420 TraceTime t("grey object rescan", PrintGCDetails, false, gclog_or_tty);
5421 // Iterate over the dirty cards, setting the corresponding bits in the
5422 // mod union table.
5423 {
5424 ModUnionClosure modUnionClosure(&_modUnionTable);
5425 _ct->ct_bs()->dirty_card_iterate(
5426 _cmsGen->used_region(),
5427 &modUnionClosure);
5428 _ct->ct_bs()->dirty_card_iterate(
5429 _permGen->used_region(),
5430 &modUnionClosure);
5431 }
5432 // Having transferred these marks into the modUnionTable, we just need
5433 // to rescan the marked objects on the dirty cards in the modUnionTable.
5434 // The initial marking may have been done during an asynchronous
5435 // collection so there may be dirty bits in the mod-union table.
5436 const int alignment =
5437 CardTableModRefBS::card_size * BitsPerWord;
5438 {
5439 // ... First handle dirty cards in CMS gen
5440 markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace());
5441 MemRegion ur = _cmsGen->used_region();
5442 HeapWord* lb = ur.start();
5443 HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
5444 MemRegion cms_span(lb, ub);
5445 _modUnionTable.dirty_range_iterate_clear(cms_span,
5446 &markFromDirtyCardsClosure);
5447 verify_work_stacks_empty();
5448 if (PrintCMSStatistics != 0) {
5449 gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in cms gen) ",
5450 markFromDirtyCardsClosure.num_dirty_cards());
5451 }
5452 }
5453 {
5454 // .. and then repeat for dirty cards in perm gen
5455 markFromDirtyCardsClosure.set_space(_permGen->cmsSpace());
5456 MemRegion ur = _permGen->used_region();
5457 HeapWord* lb = ur.start();
5458 HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
5459 MemRegion perm_span(lb, ub);
5460 _modUnionTable.dirty_range_iterate_clear(perm_span,
5461 &markFromDirtyCardsClosure);
5462 verify_work_stacks_empty();
5463 if (PrintCMSStatistics != 0) {
5464 gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in perm gen) ",
5465 markFromDirtyCardsClosure.num_dirty_cards());
5466 }
5467 }
5468 }
5469 if (VerifyDuringGC &&
5470 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5471 HandleMark hm; // Discard invalid handles created during verification
5472 Universe::verify(true);
5473 }
5474 {
5475 TraceTime t("root rescan", PrintGCDetails, false, gclog_or_tty);
5477 verify_work_stacks_empty();
5479 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
5480 gch->gen_process_strong_roots(_cmsGen->level(),
5481 true, // younger gens as roots
5482 true, // collecting perm gen
5483 SharedHeap::ScanningOption(roots_scanning_options()),
5484 NULL, &mrias_cl);
5485 }
5486 verify_work_stacks_empty();
5487 // Restore evacuated mark words, if any, used for overflow list links
5488 if (!CMSOverflowEarlyRestoration) {
5489 restore_preserved_marks_if_any();
5490 }
5491 verify_overflow_empty();
5492 }
5494 ////////////////////////////////////////////////////////
5495 // Parallel Reference Processing Task Proxy Class
5496 ////////////////////////////////////////////////////////
5497 class CMSRefProcTaskProxy: public AbstractGangTask {
5498 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
5499 CMSCollector* _collector;
5500 CMSBitMap* _mark_bit_map;
5501 const MemRegion _span;
5502 OopTaskQueueSet* _task_queues;
5503 ParallelTaskTerminator _term;
5504 ProcessTask& _task;
5506 public:
5507 CMSRefProcTaskProxy(ProcessTask& task,
5508 CMSCollector* collector,
5509 const MemRegion& span,
5510 CMSBitMap* mark_bit_map,
5511 int total_workers,
5512 OopTaskQueueSet* task_queues):
5513 AbstractGangTask("Process referents by policy in parallel"),
5514 _task(task),
5515 _collector(collector), _span(span), _mark_bit_map(mark_bit_map),
5516 _task_queues(task_queues),
5517 _term(total_workers, task_queues)
5518 {
5519 assert(_collector->_span.equals(_span) && !_span.is_empty(),
5520 "Inconsistency in _span");
5521 }
5523 OopTaskQueueSet* task_queues() { return _task_queues; }
5525 OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
5527 ParallelTaskTerminator* terminator() { return &_term; }
5529 void do_work_steal(int i,
5530 CMSParDrainMarkingStackClosure* drain,
5531 CMSParKeepAliveClosure* keep_alive,
5532 int* seed);
5534 virtual void work(int i);
5535 };
5537 void CMSRefProcTaskProxy::work(int i) {
5538 assert(_collector->_span.equals(_span), "Inconsistency in _span");
5539 CMSParKeepAliveClosure par_keep_alive(_collector, _span,
5540 _mark_bit_map, work_queue(i));
5541 CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span,
5542 _mark_bit_map, work_queue(i));
5543 CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map);
5544 _task.work(i, is_alive_closure, par_keep_alive, par_drain_stack);
5545 if (_task.marks_oops_alive()) {
5546 do_work_steal(i, &par_drain_stack, &par_keep_alive,
5547 _collector->hash_seed(i));
5548 }
5549 assert(work_queue(i)->size() == 0, "work_queue should be empty");
5550 assert(_collector->_overflow_list == NULL, "non-empty _overflow_list");
5551 }
5553 class CMSRefEnqueueTaskProxy: public AbstractGangTask {
5554 typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
5555 EnqueueTask& _task;
5557 public:
5558 CMSRefEnqueueTaskProxy(EnqueueTask& task)
5559 : AbstractGangTask("Enqueue reference objects in parallel"),
5560 _task(task)
5561 { }
5563 virtual void work(int i)
5564 {
5565 _task.work(i);
5566 }
5567 };
5569 CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector,
5570 MemRegion span, CMSBitMap* bit_map, OopTaskQueue* work_queue):
5571 _collector(collector),
5572 _span(span),
5573 _bit_map(bit_map),
5574 _work_queue(work_queue),
5575 _mark_and_push(collector, span, bit_map, work_queue),
5576 _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
5577 (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads)))
5578 { }
5580 // . see if we can share work_queues with ParNew? XXX
5581 void CMSRefProcTaskProxy::do_work_steal(int i,
5582 CMSParDrainMarkingStackClosure* drain,
5583 CMSParKeepAliveClosure* keep_alive,
5584 int* seed) {
5585 OopTaskQueue* work_q = work_queue(i);
5586 NOT_PRODUCT(int num_steals = 0;)
5587 oop obj_to_scan;
5588 size_t num_from_overflow_list =
5589 MIN2((size_t)work_q->max_elems()/4,
5590 (size_t)ParGCDesiredObjsFromOverflowList);
5592 while (true) {
5593 // Completely finish any left over work from (an) earlier round(s)
5594 drain->trim_queue(0);
5595 // Now check if there's any work in the overflow list
5596 if (_collector->par_take_from_overflow_list(num_from_overflow_list,
5597 work_q)) {
5598 // Found something in global overflow list;
5599 // not yet ready to go stealing work from others.
5600 // We'd like to assert(work_q->size() != 0, ...)
5601 // because we just took work from the overflow list,
5602 // but of course we can't, since all of that might have
5603 // been already stolen from us.
5604 continue;
5605 }
5606 // Verify that we have no work before we resort to stealing
5607 assert(work_q->size() == 0, "Have work, shouldn't steal");
5608 // Try to steal from other queues that have work
5609 if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
5610 NOT_PRODUCT(num_steals++;)
5611 assert(obj_to_scan->is_oop(), "Oops, not an oop!");
5612 assert(_mark_bit_map->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
5613 // Do scanning work
5614 obj_to_scan->oop_iterate(keep_alive);
5615 // Loop around, finish this work, and try to steal some more
5616 } else if (terminator()->offer_termination()) {
5617 break; // nirvana from the infinite cycle
5618 }
5619 }
5620 NOT_PRODUCT(
5621 if (PrintCMSStatistics != 0) {
5622 gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
5623 }
5624 )
5625 }
5627 void CMSRefProcTaskExecutor::execute(ProcessTask& task)
5628 {
5629 GenCollectedHeap* gch = GenCollectedHeap::heap();
5630 WorkGang* workers = gch->workers();
5631 assert(workers != NULL, "Need parallel worker threads.");
5632 int n_workers = workers->total_workers();
5633 CMSRefProcTaskProxy rp_task(task, &_collector,
5634 _collector.ref_processor()->span(),
5635 _collector.markBitMap(),
5636 n_workers, _collector.task_queues());
5637 workers->run_task(&rp_task);
5638 }
5640 void CMSRefProcTaskExecutor::execute(EnqueueTask& task)
5641 {
5643 GenCollectedHeap* gch = GenCollectedHeap::heap();
5644 WorkGang* workers = gch->workers();
5645 assert(workers != NULL, "Need parallel worker threads.");
5646 CMSRefEnqueueTaskProxy enq_task(task);
5647 workers->run_task(&enq_task);
5648 }
5650 void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
5652 ResourceMark rm;
5653 HandleMark hm;
5654 ReferencePolicy* soft_ref_policy;
5656 assert(!ref_processor()->enqueuing_is_done(), "Enqueuing should not be complete");
5657 // Process weak references.
5658 if (clear_all_soft_refs) {
5659 soft_ref_policy = new AlwaysClearPolicy();
5660 } else {
5661 #ifdef COMPILER2
5662 soft_ref_policy = new LRUMaxHeapPolicy();
5663 #else
5664 soft_ref_policy = new LRUCurrentHeapPolicy();
5665 #endif // COMPILER2
5666 }
5667 verify_work_stacks_empty();
5669 ReferenceProcessor* rp = ref_processor();
5670 assert(rp->span().equals(_span), "Spans should be equal");
5671 CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
5672 &_markStack);
5673 CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
5674 _span, &_markBitMap, &_markStack,
5675 &cmsKeepAliveClosure);
5676 {
5677 TraceTime t("weak refs processing", PrintGCDetails, false, gclog_or_tty);
5678 if (rp->processing_is_mt()) {
5679 CMSRefProcTaskExecutor task_executor(*this);
5680 rp->process_discovered_references(soft_ref_policy,
5681 &_is_alive_closure,
5682 &cmsKeepAliveClosure,
5683 &cmsDrainMarkingStackClosure,
5684 &task_executor);
5685 } else {
5686 rp->process_discovered_references(soft_ref_policy,
5687 &_is_alive_closure,
5688 &cmsKeepAliveClosure,
5689 &cmsDrainMarkingStackClosure,
5690 NULL);
5691 }
5692 verify_work_stacks_empty();
5693 }
5695 if (should_unload_classes()) {
5696 {
5697 TraceTime t("class unloading", PrintGCDetails, false, gclog_or_tty);
5699 // Follow SystemDictionary roots and unload classes
5700 bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
5702 // Follow CodeCache roots and unload any methods marked for unloading
5703 CodeCache::do_unloading(&_is_alive_closure,
5704 &cmsKeepAliveClosure,
5705 purged_class);
5707 cmsDrainMarkingStackClosure.do_void();
5708 verify_work_stacks_empty();
5710 // Update subklass/sibling/implementor links in KlassKlass descendants
5711 assert(!_revisitStack.isEmpty(), "revisit stack should not be empty");
5712 oop k;
5713 while ((k = _revisitStack.pop()) != NULL) {
5714 ((Klass*)(oopDesc*)k)->follow_weak_klass_links(
5715 &_is_alive_closure,
5716 &cmsKeepAliveClosure);
5717 }
5718 assert(!ClassUnloading ||
5719 (_markStack.isEmpty() && overflow_list_is_empty()),
5720 "Should not have found new reachable objects");
5721 assert(_revisitStack.isEmpty(), "revisit stack should have been drained");
5722 cmsDrainMarkingStackClosure.do_void();
5723 verify_work_stacks_empty();
5724 }
5726 {
5727 TraceTime t("scrub symbol & string tables", PrintGCDetails, false, gclog_or_tty);
5728 // Now clean up stale oops in SymbolTable and StringTable
5729 SymbolTable::unlink(&_is_alive_closure);
5730 StringTable::unlink(&_is_alive_closure);
5731 }
5732 }
5734 verify_work_stacks_empty();
5735 // Restore any preserved marks as a result of mark stack or
5736 // work queue overflow
5737 restore_preserved_marks_if_any(); // done single-threaded for now
5739 rp->set_enqueuing_is_done(true);
5740 if (rp->processing_is_mt()) {
5741 CMSRefProcTaskExecutor task_executor(*this);
5742 rp->enqueue_discovered_references(&task_executor);
5743 } else {
5744 rp->enqueue_discovered_references(NULL);
5745 }
5746 rp->verify_no_references_recorded();
5747 assert(!rp->discovery_enabled(), "should have been disabled");
5749 // JVMTI object tagging is based on JNI weak refs. If any of these
5750 // refs were cleared then JVMTI needs to update its maps and
5751 // maybe post ObjectFrees to agents.
5752 JvmtiExport::cms_ref_processing_epilogue();
5753 }
5755 #ifndef PRODUCT
5756 void CMSCollector::check_correct_thread_executing() {
5757 Thread* t = Thread::current();
5758 // Only the VM thread or the CMS thread should be here.
5759 assert(t->is_ConcurrentGC_thread() || t->is_VM_thread(),
5760 "Unexpected thread type");
5761 // If this is the vm thread, the foreground process
5762 // should not be waiting. Note that _foregroundGCIsActive is
5763 // true while the foreground collector is waiting.
5764 if (_foregroundGCShouldWait) {
5765 // We cannot be the VM thread
5766 assert(t->is_ConcurrentGC_thread(),
5767 "Should be CMS thread");
5768 } else {
5769 // We can be the CMS thread only if we are in a stop-world
5770 // phase of CMS collection.
5771 if (t->is_ConcurrentGC_thread()) {
5772 assert(_collectorState == InitialMarking ||
5773 _collectorState == FinalMarking,
5774 "Should be a stop-world phase");
5775 // The CMS thread should be holding the CMS_token.
5776 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5777 "Potential interference with concurrently "
5778 "executing VM thread");
5779 }
5780 }
5781 }
5782 #endif
5784 void CMSCollector::sweep(bool asynch) {
5785 assert(_collectorState == Sweeping, "just checking");
5786 check_correct_thread_executing();
5787 verify_work_stacks_empty();
5788 verify_overflow_empty();
5789 incrementSweepCount();
5790 _sweep_timer.stop();
5791 _sweep_estimate.sample(_sweep_timer.seconds());
5792 size_policy()->avg_cms_free_at_sweep()->sample(_cmsGen->free());
5794 // PermGen verification support: If perm gen sweeping is disabled in
5795 // this cycle, we preserve the perm gen object "deadness" information
5796 // in the perm_gen_verify_bit_map. In order to do that we traverse
5797 // all blocks in perm gen and mark all dead objects.
5798 if (verifying() && !should_unload_classes()) {
5799 assert(perm_gen_verify_bit_map()->sizeInBits() != 0,
5800 "Should have already been allocated");
5801 MarkDeadObjectsClosure mdo(this, _permGen->cmsSpace(),
5802 markBitMap(), perm_gen_verify_bit_map());
5803 if (asynch) {
5804 CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
5805 bitMapLock());
5806 _permGen->cmsSpace()->blk_iterate(&mdo);
5807 } else {
5808 // In the case of synchronous sweep, we already have
5809 // the requisite locks/tokens.
5810 _permGen->cmsSpace()->blk_iterate(&mdo);
5811 }
5812 }
5814 if (asynch) {
5815 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
5816 CMSPhaseAccounting pa(this, "sweep", !PrintGCDetails);
5817 // First sweep the old gen then the perm gen
5818 {
5819 CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
5820 bitMapLock());
5821 sweepWork(_cmsGen, asynch);
5822 }
5824 // Now repeat for perm gen
5825 if (should_unload_classes()) {
5826 CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
5827 bitMapLock());
5828 sweepWork(_permGen, asynch);
5829 }
5831 // Update Universe::_heap_*_at_gc figures.
5832 // We need all the free list locks to make the abstract state
5833 // transition from Sweeping to Resetting. See detailed note
5834 // further below.
5835 {
5836 CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
5837 _permGen->freelistLock());
5838 // Update heap occupancy information which is used as
5839 // input to soft ref clearing policy at the next gc.
5840 Universe::update_heap_info_at_gc();
5841 _collectorState = Resizing;
5842 }
5843 } else {
5844 // already have needed locks
5845 sweepWork(_cmsGen, asynch);
5847 if (should_unload_classes()) {
5848 sweepWork(_permGen, asynch);
5849 }
5850 // Update heap occupancy information which is used as
5851 // input to soft ref clearing policy at the next gc.
5852 Universe::update_heap_info_at_gc();
5853 _collectorState = Resizing;
5854 }
5855 verify_work_stacks_empty();
5856 verify_overflow_empty();
5858 _sweep_timer.reset();
5859 _sweep_timer.start();
5861 update_time_of_last_gc(os::javaTimeMillis());
5863 // NOTE on abstract state transitions:
5864 // Mutators allocate-live and/or mark the mod-union table dirty
5865 // based on the state of the collection. The former is done in
5866 // the interval [Marking, Sweeping] and the latter in the interval
5867 // [Marking, Sweeping). Thus the transitions into the Marking state
5868 // and out of the Sweeping state must be synchronously visible
5869 // globally to the mutators.
5870 // The transition into the Marking state happens with the world
5871 // stopped so the mutators will globally see it. Sweeping is
5872 // done asynchronously by the background collector so the transition
5873 // from the Sweeping state to the Resizing state must be done
5874 // under the freelistLock (as is the check for whether to
5875 // allocate-live and whether to dirty the mod-union table).
5876 assert(_collectorState == Resizing, "Change of collector state to"
5877 " Resizing must be done under the freelistLocks (plural)");
5879 // Now that sweeping has been completed, if the GCH's
5880 // incremental_collection_will_fail flag is set, clear it,
5881 // thus inviting a younger gen collection to promote into
5882 // this generation. If such a promotion may still fail,
5883 // the flag will be set again when a young collection is
5884 // attempted.
5885 // I think the incremental_collection_will_fail flag's use
5886 // is specific to a two-generation collection policy, so I'll
5887 // assert that that's the configuration we are operating within.
5888 // The use of the flag can and should be generalized appropriately
5889 // in the future to deal with a general n-generation system.
5891 GenCollectedHeap* gch = GenCollectedHeap::heap();
5892 assert(gch->collector_policy()->is_two_generation_policy(),
5893 "Resetting of incremental_collection_will_fail flag"
5894 " may be incorrect otherwise");
5895 gch->clear_incremental_collection_will_fail();
5896 gch->update_full_collections_completed(_collection_count_start);
5897 }
5899 // FIX ME!!! Looks like this belongs in CFLSpace, with
5900 // CMSGen merely delegating to it.
5901 void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
5902 double nearLargestPercent = 0.999;
5903 HeapWord* minAddr = _cmsSpace->bottom();
5904 HeapWord* largestAddr =
5905 (HeapWord*) _cmsSpace->dictionary()->findLargestDict();
5906 if (largestAddr == 0) {
5907 // The dictionary appears to be empty. In this case
5908 // try to coalesce at the end of the heap.
5909 largestAddr = _cmsSpace->end();
5910 }
5911 size_t largestOffset = pointer_delta(largestAddr, minAddr);
5912 size_t nearLargestOffset =
5913 (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
5914 _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
5915 }
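// Illustrative sketch of the computation above (numbers are assumed, not
// taken from any real heap): if the space spans 1,000,000 HeapWords from
// bottom() and the largest free chunk found by the dictionary starts
// 800,000 words in, then with nearLargestPercent == 0.999 the threshold is
//   bottom() + (size_t)(800,000 * 0.999) - MinChunkSize
//   == bottom() + 799,200 - MinChunkSize
// i.e. just below the start of the largest chunk, presumably so that the
// sweeper (see isNearLargestChunk() below) coalesces more aggressively
// once it reaches this tail region.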
5917 bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) {
5918 return addr >= _cmsSpace->nearLargestChunk();
5919 }
5921 FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
5922 return _cmsSpace->find_chunk_at_end();
5923 }
5925 void ConcurrentMarkSweepGeneration::update_gc_stats(int current_level,
5926 bool full) {
5927 // The next lower level has been collected. Gather any statistics
5928 // that are of interest at this point.
5929 if (!full && (current_level + 1) == level()) {
5930 // Gather statistics on the young generation collection.
5931 collector()->stats().record_gc0_end(used());
5932 }
5933 }
5935 CMSAdaptiveSizePolicy* ConcurrentMarkSweepGeneration::size_policy() {
5936 GenCollectedHeap* gch = GenCollectedHeap::heap();
5937 assert(gch->kind() == CollectedHeap::GenCollectedHeap,
5938 "Wrong type of heap");
5939 CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
5940 gch->gen_policy()->size_policy();
5941 assert(sp->is_gc_cms_adaptive_size_policy(),
5942 "Wrong type of size policy");
5943 return sp;
5944 }
5946 void ConcurrentMarkSweepGeneration::rotate_debug_collection_type() {
5947 if (PrintGCDetails && Verbose) {
5948 gclog_or_tty->print("Rotate from %d ", _debug_collection_type);
5949 }
5950 _debug_collection_type = (CollectionTypes) (_debug_collection_type + 1);
5951 _debug_collection_type =
5952 (CollectionTypes) (_debug_collection_type % Unknown_collection_type);
5953 if (PrintGCDetails && Verbose) {
5954 gclog_or_tty->print_cr("to %d ", _debug_collection_type);
5955 }
5956 }
5958 void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen,
5959 bool asynch) {
5960 // We iterate over the space(s) underlying this generation,
5961 // checking the mark bit map to see if the bits corresponding
5962 // to specific blocks are marked or not. Blocks that are
5963 // marked are live and are not swept up. All remaining blocks
5964 // are swept up, with coalescing on-the-fly as we sweep up
5965 // contiguous free and/or garbage blocks:
5966 // We need to ensure that the sweeper synchronizes with allocators
5967 // and stop-the-world collectors. In particular, the following
5968 // locks are used:
5969 // . CMS token: if this is held, a stop the world collection cannot occur
5970 // . freelistLock: if this is held no allocation can occur from this
5971 // generation by another thread
5972 // . bitMapLock: if this is held, no other thread can access or update
5973 //               the marking bit map
5975 // Note that we need to hold the freelistLock if we use
5976 // block iterate below; else the iterator might go awry if
5977 // a mutator (or promotion) causes block contents to change
5978 // (for instance if the allocator divvies up a block).
5979 // If we hold the free list lock, for all practical purposes
5980 // young generation GC's can't occur (they'll usually need to
5981 // promote), so we might as well prevent all young generation
5982 // GC's while we do a sweeping step. For the same reason, we might
5983 // as well take the bit map lock for the entire duration of the sweep.
5985 // check that we hold the requisite locks
5986 assert(have_cms_token(), "Should hold cms token");
5987 assert( (asynch && ConcurrentMarkSweepThread::cms_thread_has_cms_token())
5988 || (!asynch && ConcurrentMarkSweepThread::vm_thread_has_cms_token()),
5989 "Should possess CMS token to sweep");
5990 assert_lock_strong(gen->freelistLock());
5991 assert_lock_strong(bitMapLock());
5993 assert(!_sweep_timer.is_active(), "Was switched off in an outer context");
5994 gen->cmsSpace()->beginSweepFLCensus((float)(_sweep_timer.seconds()),
5995 _sweep_estimate.padded_average());
5996 gen->setNearLargestChunk();
5998 {
5999 SweepClosure sweepClosure(this, gen, &_markBitMap,
6000 CMSYield && asynch);
6001 gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
6002 // We need to free-up/coalesce garbage/blocks from a
6003 // co-terminal free run. This is done in the SweepClosure
6004 // destructor; so, do not remove this scope, else the
6005 // end-of-sweep-census below will be off by a little bit.
6006 }
6007 gen->cmsSpace()->sweep_completed();
6008 gen->cmsSpace()->endSweepFLCensus(sweepCount());
6009 if (should_unload_classes()) { // unloaded classes this cycle,
6010 _concurrent_cycles_since_last_unload = 0; // ... reset count
6011 } else { // did not unload classes,
6012 _concurrent_cycles_since_last_unload++; // ... increment count
6013 }
6014 }
6016 // Reset CMS data structures (for now just the marking bit map)
6017 // preparatory for the next cycle.
6018 void CMSCollector::reset(bool asynch) {
6019 GenCollectedHeap* gch = GenCollectedHeap::heap();
6020 CMSAdaptiveSizePolicy* sp = size_policy();
6021 AdaptiveSizePolicyOutput(sp, gch->total_collections());
6022 if (asynch) {
6023 CMSTokenSyncWithLocks ts(true, bitMapLock());
6025 // If the state is not "Resetting", the foreground thread
6026 // has done a collection and the resetting.
6027 if (_collectorState != Resetting) {
6028 assert(_collectorState == Idling, "The state should only change"
6029 " because the foreground collector has finished the collection");
6030 return;
6031 }
6033 // Clear the mark bitmap (no grey objects to start with)
6034 // for the next cycle.
6035 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
6036 CMSPhaseAccounting cmspa(this, "reset", !PrintGCDetails);
6038 HeapWord* curAddr = _markBitMap.startWord();
6039 while (curAddr < _markBitMap.endWord()) {
6040 size_t remaining = pointer_delta(_markBitMap.endWord(), curAddr);
6041 MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
6042 _markBitMap.clear_large_range(chunk);
6043 if (ConcurrentMarkSweepThread::should_yield() &&
6044 !foregroundGCIsActive() &&
6045 CMSYield) {
6046 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6047 "CMS thread should hold CMS token");
6048 assert_lock_strong(bitMapLock());
6049 bitMapLock()->unlock();
6050 ConcurrentMarkSweepThread::desynchronize(true);
6051 ConcurrentMarkSweepThread::acknowledge_yield_request();
6052 stopTimer();
6053 if (PrintCMSStatistics != 0) {
6054 incrementYields();
6055 }
6056 icms_wait();
6058 // See the comment in coordinator_yield()
6059 for (unsigned i = 0; i < CMSYieldSleepCount &&
6060 ConcurrentMarkSweepThread::should_yield() &&
6061 !CMSCollector::foregroundGCIsActive(); ++i) {
6062 os::sleep(Thread::current(), 1, false);
6063 ConcurrentMarkSweepThread::acknowledge_yield_request();
6064 }
6066 ConcurrentMarkSweepThread::synchronize(true);
6067 bitMapLock()->lock_without_safepoint_check();
6068 startTimer();
6069 }
6070 curAddr = chunk.end();
6071 }
6072 _collectorState = Idling;
6073 } else {
6074 // already have the lock
6075 assert(_collectorState == Resetting, "just checking");
6076 assert_lock_strong(bitMapLock());
6077 _markBitMap.clear_all();
6078 _collectorState = Idling;
6079 }
6081 // Stop incremental mode after a cycle completes, so that any future cycles
6082 // are triggered by allocation.
6083 stop_icms();
6085 NOT_PRODUCT(
6086 if (RotateCMSCollectionTypes) {
6087 _cmsGen->rotate_debug_collection_type();
6088 }
6089 )
6090 }
6092 void CMSCollector::do_CMS_operation(CMS_op_type op) {
6093 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
6094 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
6095 TraceTime t("GC", PrintGC, !PrintGCDetails, gclog_or_tty);
6096 TraceCollectorStats tcs(counters());
6098 switch (op) {
6099 case CMS_op_checkpointRootsInitial: {
6100 checkpointRootsInitial(true); // asynch
6101 if (PrintGC) {
6102 _cmsGen->printOccupancy("initial-mark");
6103 }
6104 break;
6105 }
6106 case CMS_op_checkpointRootsFinal: {
6107 checkpointRootsFinal(true, // asynch
6108 false, // !clear_all_soft_refs
6109 false); // !init_mark_was_synchronous
6110 if (PrintGC) {
6111 _cmsGen->printOccupancy("remark");
6112 }
6113 break;
6114 }
6115 default:
6116 fatal("No such CMS_op");
6117 }
6118 }
6120 #ifndef PRODUCT
6121 size_t const CMSCollector::skip_header_HeapWords() {
6122 return FreeChunk::header_size();
6123 }
6125 // Try and collect here conditions that should hold when
6126 // CMS thread is exiting. The idea is that the foreground GC
6127 // thread should not be blocked if it wants to terminate
6128 // the CMS thread and yet continue to run the VM for a while
6129 // after that.
6130 void CMSCollector::verify_ok_to_terminate() const {
6131 assert(Thread::current()->is_ConcurrentGC_thread(),
6132 "should be called by CMS thread");
6133 assert(!_foregroundGCShouldWait, "should be false");
6134 // We could check here that all the various low-level locks
6135 // are not held by the CMS thread, but that is overkill; see
6136 // also CMSThread::verify_ok_to_terminate() where the CGC_lock
6137 // is checked.
6138 }
6139 #endif
6141 size_t CMSCollector::block_size_using_printezis_bits(HeapWord* addr) const {
6142 assert(_markBitMap.isMarked(addr) && _markBitMap.isMarked(addr + 1),
6143 "missing Printezis mark?");
6144 HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
6145 size_t size = pointer_delta(nextOneAddr + 1, addr);
6146 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6147 "alignment problem");
6148 assert(size >= 3, "Necessary for Printezis marks to work");
6149 return size;
6150 }
6152 // A variant of the above (block_size_using_printezis_bits()) except
6153 // that we return 0 if the P-bits are not yet set.
6154 size_t CMSCollector::block_size_if_printezis_bits(HeapWord* addr) const {
6155 if (_markBitMap.isMarked(addr)) {
6156 assert(_markBitMap.isMarked(addr + 1), "Missing Printezis bit?");
6157 HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
6158 size_t size = pointer_delta(nextOneAddr + 1, addr);
6159 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6160 "alignment problem");
6161 assert(size >= 3, "Necessary for Printezis marks to work");
6162 return size;
6163 } else {
6164 assert(!_markBitMap.isMarked(addr + 1), "Bit map inconsistency?");
6165 return 0;
6166 }
6167 }
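// Illustrative example of the Printezis-marks convention assumed by the two
// functions above (numbers are made up): for a block of 5 HeapWords at addr
// that has been allocated but not yet initialized, CMS marks addr, addr+1
// and addr+size-1 == addr+4. getNextMarkedWordAddress(addr+2) then finds
// addr+4, and pointer_delta(addr+4 + 1, addr) == 5 recovers the block size
// without having to parse the (possibly uninitialized) object itself; this
// is also why size >= 3 is required for the encoding to be unambiguous.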
6169 HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const {
6170 size_t sz = 0;
6171 oop p = (oop)addr;
6172 if (p->klass() != NULL && p->is_parsable()) {
6173 sz = CompactibleFreeListSpace::adjustObjectSize(p->size());
6174 } else {
6175 sz = block_size_using_printezis_bits(addr);
6176 }
6177 assert(sz > 0, "size must be nonzero");
6178 HeapWord* next_block = addr + sz;
6179 HeapWord* next_card = (HeapWord*)round_to((uintptr_t)next_block,
6180 CardTableModRefBS::card_size);
6181 assert(round_down((uintptr_t)addr, CardTableModRefBS::card_size) <
6182 round_down((uintptr_t)next_card, CardTableModRefBS::card_size),
6183 "must be different cards");
6184 return next_card;
6185 }
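// Rough worked example for the card arithmetic above (all values assumed;
// CardTableModRefBS::card_size is 512 bytes): for a 3-HeapWord block at
// addr == 0x1040 (64 bytes into the card that starts at 0x1000) on a
// 64-bit VM, next_block == 0x1058 and round_to(0x1058, 512) == 0x1200,
// the start of the following card; the final assert merely checks that
// 0x1000 (addr's card) and 0x1200 really are different cards.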
6188 // CMS Bit Map Wrapper /////////////////////////////////////////
6190 // Construct a CMS bit map infrastructure, but don't create the
6191 // bit vector itself. That is done by a separate call to CMSBitMap::allocate()
6192 // further below.
6193 CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name):
6194 _bm(),
6195 _shifter(shifter),
6196 _lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true) : NULL)
6197 {
6198 _bmStartWord = 0;
6199 _bmWordSize = 0;
6200 }
6202 bool CMSBitMap::allocate(MemRegion mr) {
6203 _bmStartWord = mr.start();
6204 _bmWordSize = mr.word_size();
6205 ReservedSpace brs(ReservedSpace::allocation_align_size_up(
6206 (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
6207 if (!brs.is_reserved()) {
6208 warning("CMS bit map allocation failure");
6209 return false;
6210 }
6211 // For now we'll just commit all of the bit map up front.
6212 // Later on we'll try to be more parsimonious with swap.
6213 if (!_virtual_space.initialize(brs, brs.size())) {
6214 warning("CMS bit map backing store failure");
6215 return false;
6216 }
6217 assert(_virtual_space.committed_size() == brs.size(),
6218 "didn't reserve backing store for all of CMS bit map?");
6219 _bm.set_map((BitMap::bm_word_t*)_virtual_space.low());
6220 assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
6221 _bmWordSize, "inconsistency in bit map sizing");
6222 _bm.set_size(_bmWordSize >> _shifter);
6224 // bm.clear(); // can we rely on getting zero'd memory? verify below
6225 assert(isAllClear(),
6226 "Expected zero'd memory from ReservedSpace constructor");
6227 assert(_bm.size() == heapWordDiffToOffsetDiff(sizeInWords()),
6228 "consistency check");
6229 return true;
6230 }
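// Sizing sketch for the reservation above (illustrative numbers): with
// _shifter == 0 (one bit per HeapWord, as for the marking bit map) and
// LogBitsPerByte == 3, covering a 512 MB generation on a 64-bit VM gives
// _bmWordSize == 64M words, so the request is
//   (64M >> (0 + 3)) + 1 bytes == 8 MB + 1
// rounded up to the allocation alignment by allocation_align_size_up().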
6232 void CMSBitMap::dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl) {
6233 HeapWord *next_addr, *end_addr, *last_addr;
6234 assert_locked();
6235 assert(covers(mr), "out-of-range error");
6236 // XXX assert that start and end are appropriately aligned
6237 for (next_addr = mr.start(), end_addr = mr.end();
6238 next_addr < end_addr; next_addr = last_addr) {
6239 MemRegion dirty_region = getAndClearMarkedRegion(next_addr, end_addr);
6240 last_addr = dirty_region.end();
6241 if (!dirty_region.is_empty()) {
6242 cl->do_MemRegion(dirty_region);
6243 } else {
6244 assert(last_addr == end_addr, "program logic");
6245 return;
6246 }
6247 }
6248 }
6250 #ifndef PRODUCT
6251 void CMSBitMap::assert_locked() const {
6252 CMSLockVerifier::assert_locked(lock());
6253 }
6255 bool CMSBitMap::covers(MemRegion mr) const {
6256 // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
6257 assert((size_t)_bm.size() == (_bmWordSize >> _shifter),
6258 "size inconsistency");
6259 return (mr.start() >= _bmStartWord) &&
6260 (mr.end() <= endWord());
6261 }
6263 bool CMSBitMap::covers(HeapWord* start, size_t size) const {
6264 return (start >= _bmStartWord && (start + size) <= endWord());
6265 }
6267 void CMSBitMap::verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) {
6268 // verify that there are no 1 bits in the interval [left, right)
6269 FalseBitMapClosure falseBitMapClosure;
6270 iterate(&falseBitMapClosure, left, right);
6271 }
6273 void CMSBitMap::region_invariant(MemRegion mr)
6274 {
6275 assert_locked();
6276 // mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
6277 assert(!mr.is_empty(), "unexpected empty region");
6278 assert(covers(mr), "mr should be covered by bit map");
6279 // convert address range into offset range
6280 size_t start_ofs = heapWordToOffset(mr.start());
6281 // Make sure that end() is appropriately aligned
6282 assert(mr.end() == (HeapWord*)round_to((intptr_t)mr.end(),
6283 (1 << (_shifter+LogHeapWordSize))),
6284 "Misaligned mr.end()");
6285 size_t end_ofs = heapWordToOffset(mr.end());
6286 assert(end_ofs > start_ofs, "Should mark at least one bit");
6287 }
6289 #endif
6291 bool CMSMarkStack::allocate(size_t size) {
6292 // allocate a stack of the requisite depth
6293 ReservedSpace rs(ReservedSpace::allocation_align_size_up(
6294 size * sizeof(oop)));
6295 if (!rs.is_reserved()) {
6296 warning("CMSMarkStack allocation failure");
6297 return false;
6298 }
6299 if (!_virtual_space.initialize(rs, rs.size())) {
6300 warning("CMSMarkStack backing store failure");
6301 return false;
6302 }
6303 assert(_virtual_space.committed_size() == rs.size(),
6304 "didn't reserve backing store for all of CMS stack?");
6305 _base = (oop*)(_virtual_space.low());
6306 _index = 0;
6307 _capacity = size;
6308 NOT_PRODUCT(_max_depth = 0);
6309 return true;
6310 }
6312 // XXX FIX ME !!! In the MT case we come in here holding a
6313 // leaf lock. For printing we need to take a further lock
6314 // which has lower rank. We need to recalibrate the two
6315 // lock-ranks involved in order to be able to print the
6316 // messages below. (Or defer the printing to the caller.)
6317 // For now we take the expedient path of just disabling the
6318 // messages for the problematic case.
6319 void CMSMarkStack::expand() {
6320 assert(_capacity <= CMSMarkStackSizeMax, "stack bigger than permitted");
6321 if (_capacity == CMSMarkStackSizeMax) {
6322 if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
6323 // We print a warning message only once per CMS cycle.
6324 gclog_or_tty->print_cr(" (benign) Hit CMSMarkStack max size limit");
6325 }
6326 return;
6327 }
6328 // Double capacity if possible
6329 size_t new_capacity = MIN2(_capacity*2, CMSMarkStackSizeMax);
6330 // Do not give up existing stack until we have managed to
6331 // get the double capacity that we desired.
6332 ReservedSpace rs(ReservedSpace::allocation_align_size_up(
6333 new_capacity * sizeof(oop)));
6334 if (rs.is_reserved()) {
6335 // Release the backing store associated with old stack
6336 _virtual_space.release();
6337 // Reinitialize virtual space for new stack
6338 if (!_virtual_space.initialize(rs, rs.size())) {
6339 fatal("Not enough swap for expanded marking stack");
6340 }
6341 _base = (oop*)(_virtual_space.low());
6342 _index = 0;
6343 _capacity = new_capacity;
6344 } else if (_failed_double++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
6345 // Failed to double capacity, continue;
6346 // we print a detail message only once per CMS cycle.
6347 gclog_or_tty->print(" (benign) Failed to expand marking stack from "SIZE_FORMAT"K to "
6348 SIZE_FORMAT"K",
6349 _capacity / K, new_capacity / K);
6350 }
6351 }
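// Growth sketch (assumed numbers): starting from a CMSMarkStackSize of,
// say, 32K oops, successive calls to expand() attempt 64K, 128K, ... up to
// CMSMarkStackSizeMax, each time reserving the doubled backing store before
// the old one is released. Note that, as written above, the old contents
// are NOT copied (_index is reset to 0); callers such as
// handle_stack_overflow() further below reset the stack before expanding.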
6354 // Closures
6355 // XXX: there seems to be a lot of code duplication here;
6356 // should refactor and consolidate common code.
6358 // This closure is used to mark refs into the CMS generation in
6359 // the CMS bit map. Called at the first checkpoint. This closure
6360 // assumes that we do not need to re-mark dirty cards; if the CMS
6361 // generation on which this is used is not an oldest (modulo perm gen)
6362 // generation then this will lose younger_gen cards!
6364 MarkRefsIntoClosure::MarkRefsIntoClosure(
6365 MemRegion span, CMSBitMap* bitMap, bool should_do_nmethods):
6366 _span(span),
6367 _bitMap(bitMap),
6368 _should_do_nmethods(should_do_nmethods)
6369 {
6370 assert(_ref_processor == NULL, "deliberately left NULL");
6371 assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
6372 }
6374 void MarkRefsIntoClosure::do_oop(oop obj) {
6375 // if obj points into _span, then mark the corresponding bit in _bitMap
6376 assert(obj->is_oop(), "expected an oop");
6377 HeapWord* addr = (HeapWord*)obj;
6378 if (_span.contains(addr)) {
6379 // this should be made more efficient
6380 _bitMap->mark(addr);
6381 }
6382 }
6384 void MarkRefsIntoClosure::do_oop(oop* p) { MarkRefsIntoClosure::do_oop_work(p); }
6385 void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
6387 // A variant of the above, used for CMS marking verification.
6388 MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
6389 MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm,
6390 bool should_do_nmethods):
6391 _span(span),
6392 _verification_bm(verification_bm),
6393 _cms_bm(cms_bm),
6394 _should_do_nmethods(should_do_nmethods) {
6395 assert(_ref_processor == NULL, "deliberately left NULL");
6396 assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
6397 }
6399 void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
6400 // if obj points into _span, then mark the corresponding bit in _verification_bm
6401 assert(obj->is_oop(), "expected an oop");
6402 HeapWord* addr = (HeapWord*)obj;
6403 if (_span.contains(addr)) {
6404 _verification_bm->mark(addr);
6405 if (!_cms_bm->isMarked(addr)) {
6406 oop(addr)->print();
6407 gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", addr);
6408 fatal("... aborting");
6409 }
6410 }
6411 }
6413 void MarkRefsIntoVerifyClosure::do_oop(oop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
6414 void MarkRefsIntoVerifyClosure::do_oop(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
6416 //////////////////////////////////////////////////
6417 // MarkRefsIntoAndScanClosure
6418 //////////////////////////////////////////////////
6420 MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
6421 ReferenceProcessor* rp,
6422 CMSBitMap* bit_map,
6423 CMSBitMap* mod_union_table,
6424 CMSMarkStack* mark_stack,
6425 CMSMarkStack* revisit_stack,
6426 CMSCollector* collector,
6427 bool should_yield,
6428 bool concurrent_precleaning):
6429 _collector(collector),
6430 _span(span),
6431 _bit_map(bit_map),
6432 _mark_stack(mark_stack),
6433 _pushAndMarkClosure(collector, span, rp, bit_map, mod_union_table,
6434 mark_stack, revisit_stack, concurrent_precleaning),
6435 _yield(should_yield),
6436 _concurrent_precleaning(concurrent_precleaning),
6437 _freelistLock(NULL)
6438 {
6439 _ref_processor = rp;
6440 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
6441 }
6443 // This closure is used to mark refs into the CMS generation at the
6444 // second (final) checkpoint, and to scan and transitively follow
6445 // the unmarked oops. It is also used during the concurrent precleaning
6446 // phase while scanning objects on dirty cards in the CMS generation.
6447 // The marks are made in the marking bit map and the marking stack is
6448 // used for keeping the (newly) grey objects during the scan.
6449 // The parallel version (Par_...) appears further below.
6450 void MarkRefsIntoAndScanClosure::do_oop(oop obj) {
6451 if (obj != NULL) {
6452 assert(obj->is_oop(), "expected an oop");
6453 HeapWord* addr = (HeapWord*)obj;
6454 assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
6455 assert(_collector->overflow_list_is_empty(),
6456 "overflow list should be empty");
6457 if (_span.contains(addr) &&
6458 !_bit_map->isMarked(addr)) {
6459 // mark bit map (object is now grey)
6460 _bit_map->mark(addr);
6461 // push on marking stack (stack should be empty), and drain the
6462 // stack by applying this closure to the oops in the oops popped
6463 // from the stack (i.e. blacken the grey objects)
6464 bool res = _mark_stack->push(obj);
6465 assert(res, "Should have space to push on empty stack");
6466 do {
6467 oop new_oop = _mark_stack->pop();
6468 assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
6469 assert(new_oop->is_parsable(), "Found unparsable oop");
6470 assert(_bit_map->isMarked((HeapWord*)new_oop),
6471 "only grey objects on this stack");
6472 // iterate over the oops in this oop, marking and pushing
6473 // the ones in CMS heap (i.e. in _span).
6474 new_oop->oop_iterate(&_pushAndMarkClosure);
6475 // check if it's time to yield
6476 do_yield_check();
6477 } while (!_mark_stack->isEmpty() ||
6478 (!_concurrent_precleaning && take_from_overflow_list()));
6479 // if marking stack is empty, and we are not doing this
6480 // during precleaning, then check the overflow list
6481 }
6482 assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
6483 assert(_collector->overflow_list_is_empty(),
6484 "overflow list was drained above");
6485 // We could restore evacuated mark words, if any, used for
6486 // overflow list links here because the overflow list is
6487 // provably empty here. That would reduce the maximum
6488 // size requirements for preserved_{oop,mark}_stack.
6489 // But we'll just postpone it until we are all done
6490 // so we can just stream through.
6491 if (!_concurrent_precleaning && CMSOverflowEarlyRestoration) {
6492 _collector->restore_preserved_marks_if_any();
6493 assert(_collector->no_preserved_marks(), "No preserved marks");
6494 }
6495 assert(!CMSOverflowEarlyRestoration || _collector->no_preserved_marks(),
6496 "All preserved marks should have been restored above");
6497 }
6498 }
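// Drainage sketch for the loop above (illustrative): marking object A grey
// pushes it onto the (empty) stack; popping A and applying
// _pushAndMarkClosure to its fields may push, say, B and C; the do/while
// keeps popping until the stack -- and, outside precleaning, the global
// overflow list -- is empty, at which point everything transitively
// reachable from A within _span has been blackened.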
6500 void MarkRefsIntoAndScanClosure::do_oop(oop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
6501 void MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
6503 void MarkRefsIntoAndScanClosure::do_yield_work() {
6504 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6505 "CMS thread should hold CMS token");
6506 assert_lock_strong(_freelistLock);
6507 assert_lock_strong(_bit_map->lock());
6508 // relinquish the free_list_lock and bitMaplock()
6509 _bit_map->lock()->unlock();
6510 _freelistLock->unlock();
6511 ConcurrentMarkSweepThread::desynchronize(true);
6512 ConcurrentMarkSweepThread::acknowledge_yield_request();
6513 _collector->stopTimer();
6514 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
6515 if (PrintCMSStatistics != 0) {
6516 _collector->incrementYields();
6517 }
6518 _collector->icms_wait();
6520 // See the comment in coordinator_yield()
6521 for (unsigned i = 0;
6522 i < CMSYieldSleepCount &&
6523 ConcurrentMarkSweepThread::should_yield() &&
6524 !CMSCollector::foregroundGCIsActive();
6525 ++i) {
6526 os::sleep(Thread::current(), 1, false);
6527 ConcurrentMarkSweepThread::acknowledge_yield_request();
6528 }
6530 ConcurrentMarkSweepThread::synchronize(true);
6531 _freelistLock->lock_without_safepoint_check();
6532 _bit_map->lock()->lock_without_safepoint_check();
6533 _collector->startTimer();
6534 }
6536 ///////////////////////////////////////////////////////////
6537 // Par_MarkRefsIntoAndScanClosure: a parallel version of
6538 // MarkRefsIntoAndScanClosure
6539 ///////////////////////////////////////////////////////////
6540 Par_MarkRefsIntoAndScanClosure::Par_MarkRefsIntoAndScanClosure(
6541 CMSCollector* collector, MemRegion span, ReferenceProcessor* rp,
6542 CMSBitMap* bit_map, OopTaskQueue* work_queue, CMSMarkStack* revisit_stack):
6543 _span(span),
6544 _bit_map(bit_map),
6545 _work_queue(work_queue),
6546 _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
6547 (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads))),
6548 _par_pushAndMarkClosure(collector, span, rp, bit_map, work_queue,
6549 revisit_stack)
6550 {
6551 _ref_processor = rp;
6552 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
6553 }
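// Illustrative sizing of _low_water_mark above (all flag values assumed):
// with work_queue->max_elems() == 16K, CMSWorkQueueDrainThreshold == 10 and
// ParallelGCThreads == 8, the low water mark is MIN2(16K/4, 10*8) == 80,
// so do_oop() below trims the queue back to roughly 80 entries after each
// push rather than letting it fill completely.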
6555 // This closure is used to mark refs into the CMS generation at the
6556 // second (final) checkpoint, and to scan and transitively follow
6557 // the unmarked oops. The marks are made in the marking bit map and
6558 // the work_queue is used for keeping the (newly) grey objects during
6559 // the scan phase whence they are also available for stealing by parallel
6560 // threads. Since the marking bit map is shared, updates are
6561 // synchronized (via CAS).
6562 void Par_MarkRefsIntoAndScanClosure::do_oop(oop obj) {
6563 if (obj != NULL) {
6564 // Ignore mark word because this could be an already marked oop
6565 // that may be chained at the end of the overflow list.
6566 assert(obj->is_oop(), "expected an oop");
6567 HeapWord* addr = (HeapWord*)obj;
6568 if (_span.contains(addr) &&
6569 !_bit_map->isMarked(addr)) {
6570 // mark bit map (object will become grey):
6571 // It is possible for several threads to be
6572 // trying to "claim" this object concurrently;
6573 // the unique thread that succeeds in marking the
6574 // object first will do the subsequent push on
6575 // to the work queue (or overflow list).
6576 if (_bit_map->par_mark(addr)) {
6577 // push on work_queue (which may not be empty), and trim the
6578 // queue to an appropriate length by applying this closure to
6579 // the oops in the oops popped from the stack (i.e. blacken the
6580 // grey objects)
6581 bool res = _work_queue->push(obj);
6582 assert(res, "Low water mark should be less than capacity?");
6583 trim_queue(_low_water_mark);
6584 } // Else, another thread claimed the object
6585 }
6586 }
6587 }
6589 void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
6590 void Par_MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
6592 // This closure is used to rescan the marked objects on the dirty cards
6593 // in the mod union table and the card table proper.
6594 size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
6595 oop p, MemRegion mr) {
6597 size_t size = 0;
6598 HeapWord* addr = (HeapWord*)p;
6599 DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6600 assert(_span.contains(addr), "we are scanning the CMS generation");
6601 // check if it's time to yield
6602 if (do_yield_check()) {
6603 // We yielded for some foreground stop-world work,
6604 // and we have been asked to abort this ongoing preclean cycle.
6605 return 0;
6606 }
6607 if (_bitMap->isMarked(addr)) {
6608 // it's marked; is it potentially uninitialized?
6609 if (p->klass() != NULL) {
6610 if (CMSPermGenPrecleaningEnabled && !p->is_parsable()) {
6611 // Signal precleaning to redirty the card since
6612 // the klass pointer is already installed.
6613 assert(size == 0, "Initial value");
6614 } else {
6615 assert(p->is_parsable(), "must be parsable.");
6616 // an initialized object; ignore mark word in verification below
6617 // since we are running concurrent with mutators
6618 assert(p->is_oop(true), "should be an oop");
6619 if (p->is_objArray()) {
6620 // objArrays are precisely marked; restrict scanning
6621 // to dirty cards only.
6622 size = p->oop_iterate(_scanningClosure, mr);
6623 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6624 "adjustObjectSize should be the identity for array sizes, "
6625 "which are necessarily larger than minimum object size of "
6626 "two heap words");
6627 } else {
6628 // A non-array may have been imprecisely marked; we need
6629 // to scan the object in its entirety.
6630 size = CompactibleFreeListSpace::adjustObjectSize(
6631 p->oop_iterate(_scanningClosure));
6632 }
6633 #ifdef DEBUG
6634 size_t direct_size =
6635 CompactibleFreeListSpace::adjustObjectSize(p->size());
6636 assert(size == direct_size, "Inconsistency in size");
6637 assert(size >= 3, "Necessary for Printezis marks to work");
6638 if (!_bitMap->isMarked(addr+1)) {
6639 _bitMap->verifyNoOneBitsInRange(addr+2, addr+size);
6640 } else {
6641 _bitMap->verifyNoOneBitsInRange(addr+2, addr+size-1);
6642 assert(_bitMap->isMarked(addr+size-1),
6643 "inconsistent Printezis mark");
6644 }
6645 #endif // DEBUG
6646 }
6647 } else {
6648 // an uninitialized object
6649 assert(_bitMap->isMarked(addr+1), "missing Printezis mark?");
6650 HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
6651 size = pointer_delta(nextOneAddr + 1, addr);
6652 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6653 "alignment problem");
6654 // Note that pre-cleaning needn't redirty the card. OopDesc::set_klass()
6655 // will dirty the card when the klass pointer is installed in the
6656 // object (signalling the completion of initialization).
6657 }
6658 } else {
6659 // Either a not yet marked object or an uninitialized object
6660 if (p->klass() == NULL || !p->is_parsable()) {
6661 // An uninitialized object, skip to the next card, since
6662 // we may not be able to read its P-bits yet.
6663 assert(size == 0, "Initial value");
6664 } else {
6665 // An object not (yet) reached by marking: we merely need to
6666 // compute its size so as to go look at the next block.
6667 assert(p->is_oop(true), "should be an oop");
6668 size = CompactibleFreeListSpace::adjustObjectSize(p->size());
6669 }
6670 }
6671 DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6672 return size;
6673 }
6675 void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
6676 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6677 "CMS thread should hold CMS token");
6678 assert_lock_strong(_freelistLock);
6679 assert_lock_strong(_bitMap->lock());
6680 // relinquish the free_list_lock and bitMaplock()
6681 _bitMap->lock()->unlock();
6682 _freelistLock->unlock();
6683 ConcurrentMarkSweepThread::desynchronize(true);
6684 ConcurrentMarkSweepThread::acknowledge_yield_request();
6685 _collector->stopTimer();
6686 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
6687 if (PrintCMSStatistics != 0) {
6688 _collector->incrementYields();
6689 }
6690 _collector->icms_wait();
6692 // See the comment in coordinator_yield()
6693 for (unsigned i = 0; i < CMSYieldSleepCount &&
6694 ConcurrentMarkSweepThread::should_yield() &&
6695 !CMSCollector::foregroundGCIsActive(); ++i) {
6696 os::sleep(Thread::current(), 1, false);
6697 ConcurrentMarkSweepThread::acknowledge_yield_request();
6698 }
6700 ConcurrentMarkSweepThread::synchronize(true);
6701 _freelistLock->lock_without_safepoint_check();
6702 _bitMap->lock()->lock_without_safepoint_check();
6703 _collector->startTimer();
6704 }
6707 //////////////////////////////////////////////////////////////////
6708 // SurvivorSpacePrecleanClosure
6709 //////////////////////////////////////////////////////////////////
6710 // This (single-threaded) closure is used to preclean the oops in
6711 // the survivor spaces.
6712 size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {
6714 HeapWord* addr = (HeapWord*)p;
6715 DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6716 assert(!_span.contains(addr), "we are scanning the survivor spaces");
6717 assert(p->klass() != NULL, "object should be initialized");
6718 assert(p->is_parsable(), "must be parsable.");
6719 // an initialized object; ignore mark word in verification below
6720 // since we are running concurrent with mutators
6721 assert(p->is_oop(true), "should be an oop");
6722 // Note that we do not yield while we iterate over
6723 // the interior oops of p, pushing the relevant ones
6724 // on our marking stack.
6725 size_t size = p->oop_iterate(_scanning_closure);
6726 do_yield_check();
6727 // Observe that below, we do not abandon the preclean
6728 // phase as soon as we should; rather we empty the
6729 // marking stack before returning. This is to satisfy
6730 // some existing assertions. In general, it may be a
6731 // good idea to abort immediately and complete the marking
6732 // from the grey objects at a later time.
6733 while (!_mark_stack->isEmpty()) {
6734 oop new_oop = _mark_stack->pop();
6735 assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
6736 assert(new_oop->is_parsable(), "Found unparsable oop");
6737 assert(_bit_map->isMarked((HeapWord*)new_oop),
6738 "only grey objects on this stack");
6739 // iterate over the oops in this oop, marking and pushing
6740 // the ones in CMS heap (i.e. in _span).
6741 new_oop->oop_iterate(_scanning_closure);
6742 // check if it's time to yield
6743 do_yield_check();
6744 }
6745 unsigned int after_count =
6746 GenCollectedHeap::heap()->total_collections();
6747 bool abort = (_before_count != after_count) ||
6748 _collector->should_abort_preclean();
6749 return abort ? 0 : size;
6750 }
6752 void SurvivorSpacePrecleanClosure::do_yield_work() {
6753 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6754 "CMS thread should hold CMS token");
6755 assert_lock_strong(_bit_map->lock());
6756 // Relinquish the bit map lock
6757 _bit_map->lock()->unlock();
6758 ConcurrentMarkSweepThread::desynchronize(true);
6759 ConcurrentMarkSweepThread::acknowledge_yield_request();
6760 _collector->stopTimer();
6761 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
6762 if (PrintCMSStatistics != 0) {
6763 _collector->incrementYields();
6764 }
6765 _collector->icms_wait();
6767 // See the comment in coordinator_yield()
6768 for (unsigned i = 0; i < CMSYieldSleepCount &&
6769 ConcurrentMarkSweepThread::should_yield() &&
6770 !CMSCollector::foregroundGCIsActive(); ++i) {
6771 os::sleep(Thread::current(), 1, false);
6772 ConcurrentMarkSweepThread::acknowledge_yield_request();
6773 }
6775 ConcurrentMarkSweepThread::synchronize(true);
6776 _bit_map->lock()->lock_without_safepoint_check();
6777 _collector->startTimer();
6778 }
6780 // This closure is used to rescan the marked objects on the dirty cards
6781 // in the mod union table and the card table proper. In the parallel
6782 // case, although the bitMap is shared, we do a single read so the
6783 // isMarked() query is "safe".
6784 bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
6785 // Ignore mark word because we are running concurrent with mutators
6786 assert(p->is_oop_or_null(true), "expected an oop or null");
6787 HeapWord* addr = (HeapWord*)p;
6788 assert(_span.contains(addr), "we are scanning the CMS generation");
6789 bool is_obj_array = false;
6790 #ifdef DEBUG
6791 if (!_parallel) {
6792 assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
6793 assert(_collector->overflow_list_is_empty(),
6794 "overflow list should be empty");
6796 }
6797 #endif // DEBUG
6798 if (_bit_map->isMarked(addr)) {
6799 // Obj arrays are precisely marked, non-arrays are not;
6800 // so we scan objArrays precisely and non-arrays in their
6801 // entirety.
6802 if (p->is_objArray()) {
6803 is_obj_array = true;
6804 if (_parallel) {
6805 p->oop_iterate(_par_scan_closure, mr);
6806 } else {
6807 p->oop_iterate(_scan_closure, mr);
6808 }
6809 } else {
6810 if (_parallel) {
6811 p->oop_iterate(_par_scan_closure);
6812 } else {
6813 p->oop_iterate(_scan_closure);
6814 }
6815 }
6816 }
6817 #ifdef DEBUG
6818 if (!_parallel) {
6819 assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
6820 assert(_collector->overflow_list_is_empty(),
6821 "overflow list should be empty");
6823 }
6824 #endif // DEBUG
6825 return is_obj_array;
6826 }
6828 MarkFromRootsClosure::MarkFromRootsClosure(CMSCollector* collector,
6829 MemRegion span,
6830 CMSBitMap* bitMap, CMSMarkStack* markStack,
6831 CMSMarkStack* revisitStack,
6832 bool should_yield, bool verifying):
6833 _collector(collector),
6834 _span(span),
6835 _bitMap(bitMap),
6836 _mut(&collector->_modUnionTable),
6837 _markStack(markStack),
6838 _revisitStack(revisitStack),
6839 _yield(should_yield),
6840 _skipBits(0)
6841 {
6842 assert(_markStack->isEmpty(), "stack should be empty");
6843 _finger = _bitMap->startWord();
6844 _threshold = _finger;
6845 assert(_collector->_restart_addr == NULL, "Sanity check");
6846 assert(_span.contains(_finger), "Out of bounds _finger?");
6847 DEBUG_ONLY(_verifying = verifying;)
6848 }
6850 void MarkFromRootsClosure::reset(HeapWord* addr) {
6851 assert(_markStack->isEmpty(), "would cause duplicates on stack");
6852 assert(_span.contains(addr), "Out of bounds _finger?");
6853 _finger = addr;
6854 _threshold = (HeapWord*)round_to(
6855 (intptr_t)_finger, CardTableModRefBS::card_size);
6856 }
6858 // Should revisit to see if this should be restructured for
6859 // greater efficiency.
6860 bool MarkFromRootsClosure::do_bit(size_t offset) {
6861 if (_skipBits > 0) {
6862 _skipBits--;
6863 return true;
6864 }
6865 // convert offset into a HeapWord*
6866 HeapWord* addr = _bitMap->startWord() + offset;
6867 assert(_bitMap->endWord() && addr < _bitMap->endWord(),
6868 "address out of range");
6869 assert(_bitMap->isMarked(addr), "tautology");
6870 if (_bitMap->isMarked(addr+1)) {
6871 // this is an allocated but not yet initialized object
6872 assert(_skipBits == 0, "tautology");
6873 _skipBits = 2; // skip next two marked bits ("Printezis-marks")
6874 oop p = oop(addr);
6875 if (p->klass() == NULL || !p->is_parsable()) {
6876 DEBUG_ONLY(if (!_verifying) {)
6877 // We re-dirty the cards on which this object lies and increase
6878 // the _threshold so that we'll come back to scan this object
6879 // during the preclean or remark phase. (CMSCleanOnEnter)
6880 if (CMSCleanOnEnter) {
6881 size_t sz = _collector->block_size_using_printezis_bits(addr);
6882 HeapWord* start_card_addr = (HeapWord*)round_down(
6883 (intptr_t)addr, CardTableModRefBS::card_size);
6884 HeapWord* end_card_addr = (HeapWord*)round_to(
6885 (intptr_t)(addr+sz), CardTableModRefBS::card_size);
6886 MemRegion redirty_range = MemRegion(start_card_addr, end_card_addr);
6887 assert(!redirty_range.is_empty(), "Arithmetical tautology");
6888 // Bump _threshold to end_card_addr; note that
6889 // _threshold cannot possibly exceed end_card_addr, anyhow.
6890 // This prevents future clearing of the card as the scan proceeds
6891 // to the right.
6892 assert(_threshold <= end_card_addr,
6893 "Because we are just scanning into this object");
6894 if (_threshold < end_card_addr) {
6895 _threshold = end_card_addr;
6896 }
6897 if (p->klass() != NULL) {
6898 // Redirty the range of cards...
6899 _mut->mark_range(redirty_range);
6900 } // ...else the setting of klass will dirty the card anyway.
6901 }
6902 DEBUG_ONLY(})
6903 return true;
6904 }
6905 }
6906 scanOopsInOop(addr);
6907 return true;
6908 }
6910 // We take a break if we've been at this for a while,
6911 // so as to avoid monopolizing the locks involved.
6912 void MarkFromRootsClosure::do_yield_work() {
6913 // First give up the locks, then yield, then re-lock
6914 // We should probably use a constructor/destructor idiom to
6915 // do this unlock/lock or modify the MutexUnlocker class to
6916 // serve our purpose. XXX
6917 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6918 "CMS thread should hold CMS token");
6919 assert_lock_strong(_bitMap->lock());
6920 _bitMap->lock()->unlock();
6921 ConcurrentMarkSweepThread::desynchronize(true);
6922 ConcurrentMarkSweepThread::acknowledge_yield_request();
6923 _collector->stopTimer();
6924 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
6925 if (PrintCMSStatistics != 0) {
6926 _collector->incrementYields();
6927 }
6928 _collector->icms_wait();
6930 // See the comment in coordinator_yield()
6931 for (unsigned i = 0; i < CMSYieldSleepCount &&
6932 ConcurrentMarkSweepThread::should_yield() &&
6933 !CMSCollector::foregroundGCIsActive(); ++i) {
6934 os::sleep(Thread::current(), 1, false);
6935 ConcurrentMarkSweepThread::acknowledge_yield_request();
6936 }
6938 ConcurrentMarkSweepThread::synchronize(true);
6939 _bitMap->lock()->lock_without_safepoint_check();
6940 _collector->startTimer();
6941 }
6943 void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
6944 assert(_bitMap->isMarked(ptr), "expected bit to be set");
6945 assert(_markStack->isEmpty(),
6946 "should drain stack to limit stack usage");
6947 // convert ptr to an oop preparatory to scanning
6948 oop obj = oop(ptr);
6949 // Ignore mark word in verification below, since we
6950 // may be running concurrent with mutators.
6951 assert(obj->is_oop(true), "should be an oop");
6952 assert(_finger <= ptr, "_finger runneth ahead");
6953 // advance the finger to right end of this object
6954 _finger = ptr + obj->size();
6955 assert(_finger > ptr, "we just incremented it above");
6956 // On large heaps, it may take us some time to get through
6957 // the marking phase (especially if running iCMS). During
6958 // this time it's possible that a lot of mutations have
6959 // accumulated in the card table and the mod union table --
6960 // these mutation records are redundant until we have
6961 // actually traced into the corresponding card.
6962 // Here, we check whether advancing the finger would make
6963 // us cross into a new card, and if so clear corresponding
6964 // cards in the MUT (preclean them in the card-table in the
6965 // future).
6967 DEBUG_ONLY(if (!_verifying) {)
6968 // The clean-on-enter optimization is disabled by default,
6969 // until we fix 6178663.
6970 if (CMSCleanOnEnter && (_finger > _threshold)) {
6971 // [_threshold, _finger) represents the interval
6972 // of cards to be cleared in MUT (or precleaned in card table).
6973 // The set of cards to be cleared is all those that overlap
6974 // with the interval [_threshold, _finger); note that
6975 // _threshold is always kept card-aligned but _finger isn't
6976 // always card-aligned.
6977 HeapWord* old_threshold = _threshold;
6978 assert(old_threshold == (HeapWord*)round_to(
6979 (intptr_t)old_threshold, CardTableModRefBS::card_size),
6980 "_threshold should always be card-aligned");
6981 _threshold = (HeapWord*)round_to(
6982 (intptr_t)_finger, CardTableModRefBS::card_size);
6983 MemRegion mr(old_threshold, _threshold);
6984 assert(!mr.is_empty(), "Control point invariant");
6985 assert(_span.contains(mr), "Should clear within span");
6986 // XXX When _finger crosses from old gen into perm gen
6987 // we may be doing unnecessary cleaning; do better in the
6988 // future by detecting that condition and clearing fewer
6989 // MUT/CT entries.
6990 _mut->clear_range(mr);
6991 }
6992 DEBUG_ONLY(})
6994 // Note: the finger doesn't advance while we drain
6995 // the stack below.
6996 PushOrMarkClosure pushOrMarkClosure(_collector,
6997 _span, _bitMap, _markStack,
6998 _revisitStack,
6999 _finger, this);
7000 bool res = _markStack->push(obj);
7001 assert(res, "Empty non-zero size stack should have space for single push");
7002 while (!_markStack->isEmpty()) {
7003 oop new_oop = _markStack->pop();
7004 // Skip verifying header mark word below because we are
7005 // running concurrent with mutators.
7006 assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
7007 // now scan this oop's oops
7008 new_oop->oop_iterate(&pushOrMarkClosure);
7009 do_yield_check();
7010 }
7011 assert(_markStack->isEmpty(), "tautology, emphasizing post-condition");
7012 }
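// Card-clearing sketch for the CMSCleanOnEnter path above (illustrative
// numbers; 512-byte cards): if _threshold is card-aligned at 0x2000 and
// scanning an object advances _finger to 0x2350, the new _threshold becomes
// round_to(0x2350, 512) == 0x2400 and the MUT entries covering
// [0x2000, 0x2400) are cleared -- mutation records for those cards are
// redundant because marking is just now tracing through them.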
7014 Par_MarkFromRootsClosure::Par_MarkFromRootsClosure(CMSConcMarkingTask* task,
7015 CMSCollector* collector, MemRegion span,
7016 CMSBitMap* bit_map,
7017 OopTaskQueue* work_queue,
7018 CMSMarkStack* overflow_stack,
7019 CMSMarkStack* revisit_stack,
7020 bool should_yield):
7021 _collector(collector),
7022 _whole_span(collector->_span),
7023 _span(span),
7024 _bit_map(bit_map),
7025 _mut(&collector->_modUnionTable),
7026 _work_queue(work_queue),
7027 _overflow_stack(overflow_stack),
7028 _revisit_stack(revisit_stack),
7029 _yield(should_yield),
7030 _skip_bits(0),
7031 _task(task)
7032 {
7033 assert(_work_queue->size() == 0, "work_queue should be empty");
7034 _finger = span.start();
7035 _threshold = _finger; // XXX Defer clear-on-enter optimization for now
7036 assert(_span.contains(_finger), "Out of bounds _finger?");
7037 }
7039 // Should revisit to see if this should be restructured for
7040 // greater efficiency.
7041 bool Par_MarkFromRootsClosure::do_bit(size_t offset) {
7042 if (_skip_bits > 0) {
7043 _skip_bits--;
7044 return true;
7045 }
7046 // convert offset into a HeapWord*
7047 HeapWord* addr = _bit_map->startWord() + offset;
7048 assert(_bit_map->endWord() && addr < _bit_map->endWord(),
7049 "address out of range");
7050 assert(_bit_map->isMarked(addr), "tautology");
7051 if (_bit_map->isMarked(addr+1)) {
7052 // this is an allocated object that might not yet be initialized
7053 assert(_skip_bits == 0, "tautology");
7054 _skip_bits = 2; // skip next two marked bits ("Printezis-marks")
7055 oop p = oop(addr);
7056 if (p->klass() == NULL || !p->is_parsable()) {
7057 // in the case of Clean-on-Enter optimization, redirty card
7058 // and avoid clearing card by increasing the threshold.
7059 return true;
7060 }
7061 }
7062 scan_oops_in_oop(addr);
7063 return true;
7064 }
7066 void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
7067 assert(_bit_map->isMarked(ptr), "expected bit to be set");
7068 // Should we assert that our work queue is empty or
7069 // below some drain limit?
7070 assert(_work_queue->size() == 0,
7071 "should drain stack to limit stack usage");
7072 // convert ptr to an oop preparatory to scanning
7073 oop obj = oop(ptr);
7074 // Ignore mark word in verification below, since we
7075 // may be running concurrent with mutators.
7076 assert(obj->is_oop(true), "should be an oop");
7077 assert(_finger <= ptr, "_finger runneth ahead");
7078 // advance the finger to right end of this object
7079 _finger = ptr + obj->size();
7080 assert(_finger > ptr, "we just incremented it above");
7081 // On large heaps, it may take us some time to get through
7082 // the marking phase (especially if running iCMS). During
7083 // this time it's possible that a lot of mutations have
7084 // accumulated in the card table and the mod union table --
7085 // these mutation records are redundant until we have
7086 // actually traced into the corresponding card.
7087 // Here, we check whether advancing the finger would make
7088 // us cross into a new card, and if so clear corresponding
7089 // cards in the MUT (preclean them in the card-table in the
7090 // future).
7092 // The clean-on-enter optimization is disabled by default,
7093 // until we fix 6178663.
7094 if (CMSCleanOnEnter && (_finger > _threshold)) {
7095 // [_threshold, _finger) represents the interval
7096 // of cards to be cleared in MUT (or precleaned in card table).
7097 // The set of cards to be cleared is all those that overlap
7098 // with the interval [_threshold, _finger); note that
7099 // _threshold is always kept card-aligned but _finger isn't
7100 // always card-aligned.
7101 HeapWord* old_threshold = _threshold;
7102 assert(old_threshold == (HeapWord*)round_to(
7103 (intptr_t)old_threshold, CardTableModRefBS::card_size),
7104 "_threshold should always be card-aligned");
7105 _threshold = (HeapWord*)round_to(
7106 (intptr_t)_finger, CardTableModRefBS::card_size);
7107 MemRegion mr(old_threshold, _threshold);
7108 assert(!mr.is_empty(), "Control point invariant");
7109 assert(_span.contains(mr), "Should clear within span"); // _whole_span ??
7110 // XXX When _finger crosses from old gen into perm gen
7111 // we may be doing unnecessary cleaning; do better in the
7112 // future by detecting that condition and clearing fewer
7113 // MUT/CT entries.
7114 _mut->clear_range(mr);
7115 }
7117 // Note: the local finger doesn't advance while we drain
7118 // the stack below, but the global finger sure can and will.
7119 HeapWord** gfa = _task->global_finger_addr();
7120 Par_PushOrMarkClosure pushOrMarkClosure(_collector,
7121 _span, _bit_map,
7122 _work_queue,
7123 _overflow_stack,
7124 _revisit_stack,
7125 _finger,
7126 gfa, this);
7127 bool res = _work_queue->push(obj); // overflow could occur here
7128 assert(res, "Will hold once we use workqueues");
7129 while (true) {
7130 oop new_oop;
7131 if (!_work_queue->pop_local(new_oop)) {
7132 // We emptied our work_queue; check if there's stuff that can
7133 // be gotten from the overflow stack.
7134 if (CMSConcMarkingTask::get_work_from_overflow_stack(
7135 _overflow_stack, _work_queue)) {
7136 do_yield_check();
7137 continue;
7138 } else { // done
7139 break;
7140 }
7141 }
7142 // Skip verifying header mark word below because we are
7143 // running concurrent with mutators.
7144 assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
7145 // now scan this oop's oops
7146 new_oop->oop_iterate(&pushOrMarkClosure);
7147 do_yield_check();
7148 }
7149 assert(_work_queue->size() == 0, "tautology, emphasizing post-condition");
7150 }
7152 // Yield in response to a request from VM Thread or
7153 // from mutators.
7154 void Par_MarkFromRootsClosure::do_yield_work() {
7155 assert(_task != NULL, "sanity");
7156 _task->yield();
7157 }
7159 // A variant of the above used for verifying CMS marking work.
7160 MarkFromRootsVerifyClosure::MarkFromRootsVerifyClosure(CMSCollector* collector,
7161 MemRegion span,
7162 CMSBitMap* verification_bm, CMSBitMap* cms_bm,
7163 CMSMarkStack* mark_stack):
7164 _collector(collector),
7165 _span(span),
7166 _verification_bm(verification_bm),
7167 _cms_bm(cms_bm),
7168 _mark_stack(mark_stack),
7169 _pam_verify_closure(collector, span, verification_bm, cms_bm,
7170 mark_stack)
7171 {
7172 assert(_mark_stack->isEmpty(), "stack should be empty");
7173 _finger = _verification_bm->startWord();
7174 assert(_collector->_restart_addr == NULL, "Sanity check");
7175 assert(_span.contains(_finger), "Out of bounds _finger?");
7176 }
7178 void MarkFromRootsVerifyClosure::reset(HeapWord* addr) {
7179 assert(_mark_stack->isEmpty(), "would cause duplicates on stack");
7180 assert(_span.contains(addr), "Out of bounds _finger?");
7181 _finger = addr;
7182 }
7184 // Should revisit to see if this should be restructured for
7185 // greater efficiency.
7186 bool MarkFromRootsVerifyClosure::do_bit(size_t offset) {
7187 // convert offset into a HeapWord*
7188 HeapWord* addr = _verification_bm->startWord() + offset;
7189 assert(_verification_bm->endWord() && addr < _verification_bm->endWord(),
7190 "address out of range");
7191 assert(_verification_bm->isMarked(addr), "tautology");
7192 assert(_cms_bm->isMarked(addr), "tautology");
7194 assert(_mark_stack->isEmpty(),
7195 "should drain stack to limit stack usage");
7196 // convert addr to an oop preparatory to scanning
7197 oop obj = oop(addr);
7198 assert(obj->is_oop(), "should be an oop");
7199 assert(_finger <= addr, "_finger runneth ahead");
7200 // advance the finger to right end of this object
7201 _finger = addr + obj->size();
7202 assert(_finger > addr, "we just incremented it above");
7203 // Note: the finger doesn't advance while we drain
7204 // the stack below.
7205 bool res = _mark_stack->push(obj);
7206 assert(res, "Empty non-zero size stack should have space for single push");
7207 while (!_mark_stack->isEmpty()) {
7208 oop new_oop = _mark_stack->pop();
7209 assert(new_oop->is_oop(), "Oops! expected to pop an oop");
7210 // now scan this oop's oops
7211 new_oop->oop_iterate(&_pam_verify_closure);
7212 }
7213 assert(_mark_stack->isEmpty(), "tautology, emphasizing post-condition");
7214 return true;
7215 }
7217 PushAndMarkVerifyClosure::PushAndMarkVerifyClosure(
7218 CMSCollector* collector, MemRegion span,
7219 CMSBitMap* verification_bm, CMSBitMap* cms_bm,
7220 CMSMarkStack* mark_stack):
7221 OopClosure(collector->ref_processor()),
7222 _collector(collector),
7223 _span(span),
7224 _verification_bm(verification_bm),
7225 _cms_bm(cms_bm),
7226 _mark_stack(mark_stack)
7227 { }
7229 void PushAndMarkVerifyClosure::do_oop(oop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
7230 void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
7232 // Upon stack overflow, we discard (part of) the stack,
7233 // remembering the least address amongst those discarded
7234 // in CMSCollector's _restart_addr.
7235 void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
7236 // Remember the least grey address discarded
7237 HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost);
7238 _collector->lower_restart_addr(ra);
7239 _mark_stack->reset(); // discard stack contents
7240 _mark_stack->expand(); // expand the stack if possible
7241 }
7243 void PushAndMarkVerifyClosure::do_oop(oop obj) {
7244 assert(obj->is_oop_or_null(), "expected an oop or NULL");
7245 HeapWord* addr = (HeapWord*)obj;
7246 if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
7247 // Oop lies in _span and isn't yet grey or black
7248 _verification_bm->mark(addr); // now grey
7249 if (!_cms_bm->isMarked(addr)) {
7250 oop(addr)->print();
7251 gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)",
7252 addr);
7253 fatal("... aborting");
7254 }
7256 if (!_mark_stack->push(obj)) { // stack overflow
7257 if (PrintCMSStatistics != 0) {
7258 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7259 SIZE_FORMAT, _mark_stack->capacity());
7260 }
7261 assert(_mark_stack->isFull(), "Else push should have succeeded");
7262 handle_stack_overflow(addr);
7263 }
7264 // anything including and to the right of _finger
7265 // will be scanned as we iterate over the remainder of the
7266 // bit map
7267 }
7268 }
7270 PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
7271 MemRegion span,
7272 CMSBitMap* bitMap, CMSMarkStack* markStack,
7273 CMSMarkStack* revisitStack,
7274 HeapWord* finger, MarkFromRootsClosure* parent) :
7275 OopClosure(collector->ref_processor()),
7276 _collector(collector),
7277 _span(span),
7278 _bitMap(bitMap),
7279 _markStack(markStack),
7280 _revisitStack(revisitStack),
7281 _finger(finger),
7282 _parent(parent),
7283 _should_remember_klasses(collector->should_unload_classes())
7284 { }
7286 Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
7287 MemRegion span,
7288 CMSBitMap* bit_map,
7289 OopTaskQueue* work_queue,
7290 CMSMarkStack* overflow_stack,
7291 CMSMarkStack* revisit_stack,
7292 HeapWord* finger,
7293 HeapWord** global_finger_addr,
7294 Par_MarkFromRootsClosure* parent) :
7295 OopClosure(collector->ref_processor()),
7296 _collector(collector),
7297 _whole_span(collector->_span),
7298 _span(span),
7299 _bit_map(bit_map),
7300 _work_queue(work_queue),
7301 _overflow_stack(overflow_stack),
7302 _revisit_stack(revisit_stack),
7303 _finger(finger),
7304 _global_finger_addr(global_finger_addr),
7305 _parent(parent),
7306 _should_remember_klasses(collector->should_unload_classes())
7307 { }
7309 void CMSCollector::lower_restart_addr(HeapWord* low) {
7310 assert(_span.contains(low), "Out of bounds addr");
7311 if (_restart_addr == NULL) {
7312 _restart_addr = low;
7313 } else {
7314 _restart_addr = MIN2(_restart_addr, low);
7315 }
7316 }
7318 // Upon stack overflow, we discard (part of) the stack,
7319 // remembering the least address amongst those discarded
7320 // in CMSCollector's _restart_address.
7321 void PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
7322 // Remember the least grey address discarded
7323 HeapWord* ra = (HeapWord*)_markStack->least_value(lost);
7324 _collector->lower_restart_addr(ra);
7325 _markStack->reset(); // discard stack contents
7326 _markStack->expand(); // expand the stack if possible
7327 }
7329 // Upon stack overflow, we discard (part of) the stack,
7330 // remembering the least address amongst those discarded
7331 // in CMSCollector's _restart_address.
7332 void Par_PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
7333 // We need to do this under a mutex to prevent other
7334 // workers from interfering with the expansion below.
7335 MutexLockerEx ml(_overflow_stack->par_lock(),
7336 Mutex::_no_safepoint_check_flag);
7337 // Remember the least grey address discarded
7338 HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
7339 _collector->lower_restart_addr(ra);
7340 _overflow_stack->reset(); // discard stack contents
7341 _overflow_stack->expand(); // expand the stack if possible
7342 }
7344 void PushOrMarkClosure::do_oop(oop obj) {
7345 // Ignore mark word because we are running concurrent with mutators.
7346 assert(obj->is_oop_or_null(true), "expected an oop or NULL");
7347 HeapWord* addr = (HeapWord*)obj;
7348 if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
7349 // Oop lies in _span and isn't yet grey or black
7350 _bitMap->mark(addr); // now grey
7351 if (addr < _finger) {
7352 // the bit map iteration has already either passed, or
7353 // sampled, this bit in the bit map; we'll need to
7354 // use the marking stack to scan this oop's oops.
7355 bool simulate_overflow = false;
7356 NOT_PRODUCT(
7357 if (CMSMarkStackOverflowALot &&
7358 _collector->simulate_overflow()) {
7359 // simulate a stack overflow
7360 simulate_overflow = true;
7361 }
7362 )
7363 if (simulate_overflow || !_markStack->push(obj)) { // stack overflow
7364 if (PrintCMSStatistics != 0) {
7365 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7366 SIZE_FORMAT, _markStack->capacity());
7367 }
7368 assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded");
7369 handle_stack_overflow(addr);
7370 }
7371 }
7372 // anything including and to the right of _finger
7373 // will be scanned as we iterate over the remainder of the
7374 // bit map
7375 do_yield_check();
7376 }
7377 }
7379 void PushOrMarkClosure::do_oop(oop* p) { PushOrMarkClosure::do_oop_work(p); }
7380 void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
7382 void Par_PushOrMarkClosure::do_oop(oop obj) {
7383 // Ignore mark word because we are running concurrent with mutators.
7384 assert(obj->is_oop_or_null(true), "expected an oop or NULL");
7385 HeapWord* addr = (HeapWord*)obj;
7386 if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
7388 // Oop lies in _whole_span and isn't yet grey or black
7388 // We read the global_finger (volatile read) strictly after marking oop
7389 bool res = _bit_map->par_mark(addr); // now grey
7390 volatile HeapWord** gfa = (volatile HeapWord**)_global_finger_addr;
7391 // Should we push this marked oop on our stack?
7392 // -- if someone else marked it, nothing to do
7393 // -- if target oop is above global finger nothing to do
7394 // -- if target oop is in chunk and above local finger
7395 // then nothing to do
7396 // -- else push on work queue
7397 if ( !res // someone else marked it, they will deal with it
7398 || (addr >= *gfa) // will be scanned in a later task
7399 || (_span.contains(addr) && addr >= _finger)) { // later in this chunk
7400 return;
7401 }
7402 // the bit map iteration has already either passed, or
7403 // sampled, this bit in the bit map; we'll need to
7404 // use the marking stack to scan this oop's oops.
7405 bool simulate_overflow = false;
7406 NOT_PRODUCT(
7407 if (CMSMarkStackOverflowALot &&
7408 _collector->simulate_overflow()) {
7409 // simulate a stack overflow
7410 simulate_overflow = true;
7411 }
7412 )
7413 if (simulate_overflow ||
7414 !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
7415 // stack overflow
7416 if (PrintCMSStatistics != 0) {
7417 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7418 SIZE_FORMAT, _overflow_stack->capacity());
7419 }
7420 // We cannot assert that the overflow stack is full because
7421 // it may have been emptied since.
7422 assert(simulate_overflow ||
7423 _work_queue->size() == _work_queue->max_elems(),
7424 "Else push should have succeeded");
7425 handle_stack_overflow(addr);
7426 }
7427 do_yield_check();
7428 }
7429 }
7431 void Par_PushOrMarkClosure::do_oop(oop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
7432 void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
7434 PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
7435 MemRegion span,
7436 ReferenceProcessor* rp,
7437 CMSBitMap* bit_map,
7438 CMSBitMap* mod_union_table,
7439 CMSMarkStack* mark_stack,
7440 CMSMarkStack* revisit_stack,
7441 bool concurrent_precleaning):
7442 OopClosure(rp),
7443 _collector(collector),
7444 _span(span),
7445 _bit_map(bit_map),
7446 _mod_union_table(mod_union_table),
7447 _mark_stack(mark_stack),
7448 _revisit_stack(revisit_stack),
7449 _concurrent_precleaning(concurrent_precleaning),
7450 _should_remember_klasses(collector->should_unload_classes())
7451 {
7452 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7453 }
7455 // Grey object rescan during pre-cleaning and second checkpoint phases --
7456 // the non-parallel version (the parallel version appears further below.)
7457 void PushAndMarkClosure::do_oop(oop obj) {
7458 // Ignore mark word verification. If during concurrent precleaning,
7459 // the object monitor may be locked. If during the checkpoint
7460 // phases, the object may already have been reached by a different
7461 // path and may be at the end of the global overflow list (so
7462 // the mark word may be NULL).
7463 assert(obj->is_oop_or_null(true /* ignore mark word */),
7464 "expected an oop or NULL");
7465 HeapWord* addr = (HeapWord*)obj;
7466 // Check if oop points into the CMS generation
7467 // and is not marked
7468 if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
7469 // a white object ...
7470 _bit_map->mark(addr); // ... now grey
7471 // push on the marking stack (grey set)
7472 bool simulate_overflow = false;
7473 NOT_PRODUCT(
7474 if (CMSMarkStackOverflowALot &&
7475 _collector->simulate_overflow()) {
7476 // simulate a stack overflow
7477 simulate_overflow = true;
7478 }
7479 )
7480 if (simulate_overflow || !_mark_stack->push(obj)) {
7481 if (_concurrent_precleaning) {
7482 // During precleaning we can just dirty the appropriate card
7483 // in the mod union table, thus ensuring that the object remains
7484 // in the grey set, and continue. Note that no one can be interfering
7485 // with us in this action of dirtying the mod union table, so
7486 // no locking is required.
7487 _mod_union_table->mark(addr);
7488 _collector->_ser_pmc_preclean_ovflw++;
7489 } else {
7490 // During the remark phase, we need to remember this oop
7491 // in the overflow list.
7492 _collector->push_on_overflow_list(obj);
7493 _collector->_ser_pmc_remark_ovflw++;
7494 }
7495 }
7496 }
7497 }
7499 Par_PushAndMarkClosure::Par_PushAndMarkClosure(CMSCollector* collector,
7500 MemRegion span,
7501 ReferenceProcessor* rp,
7502 CMSBitMap* bit_map,
7503 OopTaskQueue* work_queue,
7504 CMSMarkStack* revisit_stack):
7505 OopClosure(rp),
7506 _collector(collector),
7507 _span(span),
7508 _bit_map(bit_map),
7509 _work_queue(work_queue),
7510 _revisit_stack(revisit_stack),
7511 _should_remember_klasses(collector->should_unload_classes())
7512 {
7513 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7514 }
7516 void PushAndMarkClosure::do_oop(oop* p) { PushAndMarkClosure::do_oop_work(p); }
7517 void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
7519 // Grey object rescan during second checkpoint phase --
7520 // the parallel version.
7521 void Par_PushAndMarkClosure::do_oop(oop obj) {
7522 // In the assert below, we ignore the mark word because
7523 // this oop may point to an already visited object that is
7524 // on the overflow stack (in which case the mark word has
7525 // been hijacked for chaining into the overflow stack --
7526 // if this is the last object in the overflow stack then
7527 // its mark word will be NULL). Because this object may
7528 // have been subsequently popped off the global overflow
7529 // stack, and the mark word possibly restored to the prototypical
7530 // value, by the time we get to examine this failing assert in
7531 // the debugger, is_oop_or_null(false) may subsequently start
7532 // to hold.
7533 assert(obj->is_oop_or_null(true),
7534 "expected an oop or NULL");
7535 HeapWord* addr = (HeapWord*)obj;
7536 // Check if oop points into the CMS generation
7537 // and is not marked
7538 if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
7539 // a white object ...
7540 // If we manage to "claim" the object, by being the
7541 // first thread to mark it, then we push it on our
7542 // marking stack
7543 if (_bit_map->par_mark(addr)) { // ... now grey
7544 // push on work queue (grey set)
7545 bool simulate_overflow = false;
7546 NOT_PRODUCT(
7547 if (CMSMarkStackOverflowALot &&
7548 _collector->par_simulate_overflow()) {
7549 // simulate a stack overflow
7550 simulate_overflow = true;
7551 }
7552 )
7553 if (simulate_overflow || !_work_queue->push(obj)) {
7554 _collector->par_push_on_overflow_list(obj);
7555 _collector->_par_pmc_remark_ovflw++; // imprecise OK: no need to CAS
7556 }
7557 } // Else, some other thread got there first
7558 }
7559 }
7561 void Par_PushAndMarkClosure::do_oop(oop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
7562 void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
7564 void PushAndMarkClosure::remember_klass(Klass* k) {
7565 if (!_revisit_stack->push(oop(k))) {
7566 fatal("Revisit stack overflowed in PushAndMarkClosure");
7567 }
7568 }
7570 void Par_PushAndMarkClosure::remember_klass(Klass* k) {
7571 if (!_revisit_stack->par_push(oop(k))) {
7572 fatal("Revisit stack overflowed in Par_PushAndMarkClosure");
7573 }
7574 }
7576 void CMSPrecleanRefsYieldClosure::do_yield_work() {
7577 Mutex* bml = _collector->bitMapLock();
7578 assert_lock_strong(bml);
7579 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7580 "CMS thread should hold CMS token");
7582 bml->unlock();
7583 ConcurrentMarkSweepThread::desynchronize(true);
7585 ConcurrentMarkSweepThread::acknowledge_yield_request();
7587 _collector->stopTimer();
7588 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
7589 if (PrintCMSStatistics != 0) {
7590 _collector->incrementYields();
7591 }
7592 _collector->icms_wait();
7594 // See the comment in coordinator_yield()
7595 for (unsigned i = 0; i < CMSYieldSleepCount &&
7596 ConcurrentMarkSweepThread::should_yield() &&
7597 !CMSCollector::foregroundGCIsActive(); ++i) {
7598 os::sleep(Thread::current(), 1, false);
7599 ConcurrentMarkSweepThread::acknowledge_yield_request();
7600 }
7602 ConcurrentMarkSweepThread::synchronize(true);
7603 bml->lock();
7605 _collector->startTimer();
7606 }
7608 bool CMSPrecleanRefsYieldClosure::should_return() {
7609 if (ConcurrentMarkSweepThread::should_yield()) {
7610 do_yield_work();
7611 }
7612 return _collector->foregroundGCIsActive();
7613 }
7615 void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) {
7616 assert(((size_t)mr.start())%CardTableModRefBS::card_size_in_words == 0,
7617 "mr should be aligned to start at a card boundary");
7618 // We'd like to assert:
7619 // assert(mr.word_size()%CardTableModRefBS::card_size_in_words == 0,
7620 // "mr should be a range of cards");
7621 // However, that would be too strong in one case -- the last
7622 // partition ends at _unallocated_block which, in general, can be
7623 // an arbitrary boundary, not necessarily card aligned.
7624 if (PrintCMSStatistics != 0) {
7625 _num_dirty_cards +=
7626 mr.word_size()/CardTableModRefBS::card_size_in_words;
7627 }
7628 _space->object_iterate_mem(mr, &_scan_cl);
7629 }
7631 SweepClosure::SweepClosure(CMSCollector* collector,
7632 ConcurrentMarkSweepGeneration* g,
7633 CMSBitMap* bitMap, bool should_yield) :
7634 _collector(collector),
7635 _g(g),
7636 _sp(g->cmsSpace()),
7637 _limit(_sp->sweep_limit()),
7638 _freelistLock(_sp->freelistLock()),
7639 _bitMap(bitMap),
7640 _yield(should_yield),
7641 _inFreeRange(false), // No free range at beginning of sweep
7642 _freeRangeInFreeLists(false), // No free range at beginning of sweep
7643 _lastFreeRangeCoalesced(false),
7644 _freeFinger(g->used_region().start())
7645 {
7646 NOT_PRODUCT(
7647 _numObjectsFreed = 0;
7648 _numWordsFreed = 0;
7649 _numObjectsLive = 0;
7650 _numWordsLive = 0;
7651 _numObjectsAlreadyFree = 0;
7652 _numWordsAlreadyFree = 0;
7653 _last_fc = NULL;
7655 _sp->initializeIndexedFreeListArrayReturnedBytes();
7656 _sp->dictionary()->initializeDictReturnedBytes();
7657 )
7658 assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7659 "sweep _limit out of bounds");
7660 if (CMSTraceSweeper) {
7661 gclog_or_tty->print("\n====================\nStarting new sweep\n");
7662 }
7663 }
7665 // We need this destructor to reclaim any free space at the end
7666 // of the space that do_blk below may not have added back to
7667 // the free lists. [basically dealing with the "fringe effect"]
7668 SweepClosure::~SweepClosure() {
7669 assert_lock_strong(_freelistLock);
7670 // This should be treated as the end of a free run, if any.
7671 // The current free range should be returned to the free lists
7672 // as one coalesced chunk.
7673 if (inFreeRange()) {
7674 flushCurFreeChunk(freeFinger(),
7675 pointer_delta(_limit, freeFinger()));
7676 assert(freeFinger() < _limit, "the finger pointeth off base");
7677 if (CMSTraceSweeper) {
7678 gclog_or_tty->print("destructor:");
7679 gclog_or_tty->print("Sweep:put_free_blk 0x%x ("SIZE_FORMAT") "
7680 "[coalesced:"SIZE_FORMAT"]\n",
7681 freeFinger(), pointer_delta(_limit, freeFinger()),
7682 lastFreeRangeCoalesced());
7683 }
7684 }
7685 NOT_PRODUCT(
7686 if (Verbose && PrintGC) {
7687 gclog_or_tty->print("Collected "SIZE_FORMAT" objects, "
7688 SIZE_FORMAT " bytes",
7689 _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
7690 gclog_or_tty->print_cr("\nLive "SIZE_FORMAT" objects, "
7691 SIZE_FORMAT" bytes "
7692 "Already free "SIZE_FORMAT" objects, "SIZE_FORMAT" bytes",
7693 _numObjectsLive, _numWordsLive*sizeof(HeapWord),
7694 _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
7695 size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree) *
7696 sizeof(HeapWord);
7697 gclog_or_tty->print_cr("Total sweep: "SIZE_FORMAT" bytes", totalBytes);
7699 if (PrintCMSStatistics && CMSVerifyReturnedBytes) {
7700 size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
7701 size_t dictReturnedBytes = _sp->dictionary()->sumDictReturnedBytes();
7702 size_t returnedBytes = indexListReturnedBytes + dictReturnedBytes;
7703 gclog_or_tty->print("Returned "SIZE_FORMAT" bytes", returnedBytes);
7704 gclog_or_tty->print(" Indexed List Returned "SIZE_FORMAT" bytes",
7705 indexListReturnedBytes);
7706 gclog_or_tty->print_cr(" Dictionary Returned "SIZE_FORMAT" bytes",
7707 dictReturnedBytes);
7708 }
7709 }
7710 )
7711 // Now, in debug mode, just null out the sweep_limit
7712 NOT_PRODUCT(_sp->clear_sweep_limit();)
7713 if (CMSTraceSweeper) {
7714 gclog_or_tty->print("end of sweep\n================\n");
7715 }
7716 }
7718 void SweepClosure::initialize_free_range(HeapWord* freeFinger,
7719 bool freeRangeInFreeLists) {
7720 if (CMSTraceSweeper) {
7721 gclog_or_tty->print("---- Start free range 0x%x with free block [%d] (%d)\n",
7722 freeFinger, _sp->block_size(freeFinger),
7723 freeRangeInFreeLists);
7724 }
7725 assert(!inFreeRange(), "Trampling existing free range");
7726 set_inFreeRange(true);
7727 set_lastFreeRangeCoalesced(false);
7729 set_freeFinger(freeFinger);
7730 set_freeRangeInFreeLists(freeRangeInFreeLists);
7731 if (CMSTestInFreeList) {
7732 if (freeRangeInFreeLists) {
7733 FreeChunk* fc = (FreeChunk*) freeFinger;
7734 assert(fc->isFree(), "A chunk on the free list should be free.");
7735 assert(fc->size() > 0, "Free range should have a size");
7736 assert(_sp->verifyChunkInFreeLists(fc), "Chunk is not in free lists");
7737 }
7738 }
7739 }
7741 // Note that the sweeper runs concurrently with mutators. Thus,
7742 // it is possible for direct allocation in this generation to happen
7743 // in the middle of the sweep. Note that the sweeper also coalesces
7744 // contiguous free blocks. Thus, unless the sweeper and the allocator
7745 // synchronize appropriately, freshly allocated blocks may get swept up.
7746 // This is accomplished by the sweeper locking the free lists while
7747 // it is sweeping. Thus blocks that are determined to be free are
7748 // indeed free. There is however one additional complication:
7749 // blocks that have been allocated since the final checkpoint and
7750 // mark, will not have been marked and so would be treated as
7751 // unreachable and swept up. To prevent this, the allocator marks
7752 // the bit map when allocating during the sweep phase. This leads,
7753 // however, to a further complication -- objects may have been allocated
7754 // but not yet initialized -- in the sense that the header isn't yet
7755 // installed. The sweeper cannot then determine the size of the block
7756 // in order to skip over it. To deal with this case, we use a technique
7757 // (due to Printezis) to encode such uninitialized block sizes in the
7758 // bit map. Since the bit map uses a bit per every HeapWord, but the
7759 // CMS generation has a minimum object size of 3 HeapWords, it follows
7760 // that "normal marks" won't be adjacent in the bit map (there will
7761 // always be at least two 0 bits between successive 1 bits). We make use
7762 // of these "unused" bits to represent uninitialized blocks -- the bit
7763 // corresponding to the start of the uninitialized object and the next
7764 // bit are both set. Finally, a 1 bit marks the end of the object that
7765 // started with the two consecutive 1 bits to indicate its potentially
7766 // uninitialized state.
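//
// Illustrative example (added for exposition; the offsets are hypothetical):
// suppose an object whose eventual size is 5 HeapWords is allocated during
// the sweep, starting at the word whose bit offset in the map is k. The
// allocator sets bits k and k+1 (the two consecutive 1 bits signalling an
// uninitialized header) and bit k+4 (the last word of the block):
//
//   bit offset:  k   k+1  k+2  k+3  k+4
//   bit value:   1    1    0    0    1
//
// Seeing bits k and k+1 both set, the sweeper computes the block size from
// the bit map alone as (k+4) - k + 1 = 5 words (see the Printezis-marks
// branch of doLiveChunk() below), rather than parsing the not-yet-installed
// object header. A fully initialized live object of size 5 would have only
// bit k set.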
7768 size_t SweepClosure::do_blk_careful(HeapWord* addr) {
7769 FreeChunk* fc = (FreeChunk*)addr;
7770 size_t res;
7772 // check if we are done sweeping
7773 if (addr == _limit) { // we have swept up to the limit, do nothing more
7774 assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7775 "sweep _limit out of bounds");
7776 // help the closure application finish
7777 return pointer_delta(_sp->end(), _limit);
7778 }
7779 assert(addr <= _limit, "sweep invariant");
7781 // check if we should yield
7782 do_yield_check(addr);
7783 if (fc->isFree()) {
7784 // Chunk that is already free
7785 res = fc->size();
7786 doAlreadyFreeChunk(fc);
7787 debug_only(_sp->verifyFreeLists());
7788 assert(res == fc->size(), "Don't expect the size to change");
7789 NOT_PRODUCT(
7790 _numObjectsAlreadyFree++;
7791 _numWordsAlreadyFree += res;
7792 )
7793 NOT_PRODUCT(_last_fc = fc;)
7794 } else if (!_bitMap->isMarked(addr)) {
7795 // Chunk is fresh garbage
7796 res = doGarbageChunk(fc);
7797 debug_only(_sp->verifyFreeLists());
7798 NOT_PRODUCT(
7799 _numObjectsFreed++;
7800 _numWordsFreed += res;
7801 )
7802 } else {
7803 // Chunk that is alive.
7804 res = doLiveChunk(fc);
7805 debug_only(_sp->verifyFreeLists());
7806 NOT_PRODUCT(
7807 _numObjectsLive++;
7808 _numWordsLive += res;
7809 )
7810 }
7811 return res;
7812 }
7814 // For the smart allocation, record the following
7815 // split deaths - a free chunk is removed from its free list because
7816 // it is being split into two or more chunks.
7817 // split birth - a free chunk is being added to its free list because
7818 // a larger free chunk has been split and resulted in this free chunk.
7819 // coal death - a free chunk is being removed from its free list because
7820 // it is being coalesced into a large free chunk.
7821 // coal birth - a free chunk is being added to its free list because
7822 // it was created when two or more free chunks were coalesced into
7823 // this free chunk.
7824 //
7825 // These statistics are used to determine the desired number of free
7826 // chunks of a given size. The desired number is chosen to be relative
7827 // to the end of a CMS sweep. The desired number at the end of a sweep
7828 // is the
7829 // count-at-end-of-previous-sweep (an amount that was enough)
7830 // - count-at-beginning-of-current-sweep (the excess)
7831 // + split-births (gains in this size during interval)
7832 // - split-deaths (demands on this size during interval)
7833 // where the interval is from the end of one sweep to the end of the
7834 // next.
7835 //
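// Illustrative arithmetic (hypothetical numbers, added for exposition):
// for chunks of a given size, suppose the previous sweep ended with 100
// chunks on the free list, the current sweep began with 40 still there,
// and the interval saw 25 split births and 10 split deaths. The desired
// count at the end of this sweep would then be 100 - 40 + 25 - 10 = 75.
//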
7836 // When sweeping, the sweeper maintains an accumulated chunk which is
7837 // the chunk that is made up of chunks that have been coalesced. That
7838 // will be termed the left-hand chunk. A new chunk of garbage that
7839 // is being considered for coalescing will be referred to as the
7840 // right-hand chunk.
7841 //
7842 // When making a decision on whether to coalesce a right-hand chunk with
7843 // the current left-hand chunk, the current count vs. the desired count
7844 // of the left-hand chunk is considered. Also if the right-hand chunk
7845 // is near the large chunk at the end of the heap (see
7846 // ConcurrentMarkSweepGeneration::isNearLargestChunk()), then the
7847 // left-hand chunk is coalesced.
7848 //
7849 // When making a decision about whether to split a chunk, the desired count
7850 // vs. the current count of the candidate to be split is also considered.
7851 // If the candidate is underpopulated (currently fewer chunks than desired)
7852 // a chunk of an overpopulated (currently more chunks than desired) size may
7853 // be chosen. The "hint" associated with a free list, if non-null, points
7854 // to a free list which may be overpopulated.
7855 //
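// Concretely, as implemented in doPostIsFreeOrGarbageChunk() below: with
// the default FLSCoalescePolicy of 2, a free or garbage chunk on the right
// is coalesced into the current left-hand range when that range's size
// class is currently overpopulated (more chunks than desired), or when the
// right-hand chunk lies near the largest chunk at the end of the heap;
// otherwise any accumulated left-hand range is flushed to the free lists
// and a new range is started at the right-hand chunk.
//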
7857 void SweepClosure::doAlreadyFreeChunk(FreeChunk* fc) {
7858 size_t size = fc->size();
7859 // Chunks that cannot be coalesced are not in the
7860 // free lists.
7861 if (CMSTestInFreeList && !fc->cantCoalesce()) {
7862 assert(_sp->verifyChunkInFreeLists(fc),
7863 "free chunk should be in free lists");
7864 }
7865 // a chunk that is already free, should not have been
7866 // marked in the bit map
7867 HeapWord* addr = (HeapWord*) fc;
7868 assert(!_bitMap->isMarked(addr), "free chunk should be unmarked");
7869 // Verify that the bit map has no bits marked between
7870 // addr and purported end of this block.
7871 _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
7873 // Some chunks cannot be coalesced under any circumstances.
7874 // See the definition of cantCoalesce().
7875 if (!fc->cantCoalesce()) {
7876 // This chunk can potentially be coalesced.
7877 if (_sp->adaptive_freelists()) {
7878 // All the work is done in
7879 doPostIsFreeOrGarbageChunk(fc, size);
7880 } else { // Not adaptive free lists
7881 // this is a free chunk that can potentially be coalesced by the sweeper;
7882 if (!inFreeRange()) {
7883 // if the next chunk is a free block that can't be coalesced
7884 // it doesn't make sense to remove this chunk from the free lists
7885 FreeChunk* nextChunk = (FreeChunk*)(addr + size);
7886 assert((HeapWord*)nextChunk <= _limit, "sweep invariant");
7887 if ((HeapWord*)nextChunk < _limit && // there's a next chunk...
7888 nextChunk->isFree() && // which is free...
7889 nextChunk->cantCoalesce()) { // ... but can't be coalesced
7890 // nothing to do
7891 } else {
7892 // Potentially the start of a new free range:
7893 // Don't eagerly remove it from the free lists.
7894 // No need to remove it if it will just be put
7895 // back again. (Also, from a pragmatic point of view,
7896 // if it is a free block in a region that is beyond
7897 // any allocated blocks, an assertion will fail)
7898 // Remember the start of a free run.
7899 initialize_free_range(addr, true);
7900 // end - can coalesce with next chunk
7901 }
7902 } else {
7903 // the midst of a free range, we are coalescing
7904 debug_only(record_free_block_coalesced(fc);)
7905 if (CMSTraceSweeper) {
7906 gclog_or_tty->print(" -- pick up free block 0x%x (%d)\n", fc, size);
7907 }
7908 // remove it from the free lists
7909 _sp->removeFreeChunkFromFreeLists(fc);
7910 set_lastFreeRangeCoalesced(true);
7911 // If the chunk is being coalesced and the current free range is
7912 // in the free lists, remove the current free range so that it
7913 // will be returned to the free lists in its entirety - all
7914 // the coalesced pieces included.
7915 if (freeRangeInFreeLists()) {
7916 FreeChunk* ffc = (FreeChunk*) freeFinger();
7917 assert(ffc->size() == pointer_delta(addr, freeFinger()),
7918 "Size of free range is inconsistent with chunk size.");
7919 if (CMSTestInFreeList) {
7920 assert(_sp->verifyChunkInFreeLists(ffc),
7921 "free range is not in free lists");
7922 }
7923 _sp->removeFreeChunkFromFreeLists(ffc);
7924 set_freeRangeInFreeLists(false);
7925 }
7926 }
7927 }
7928 } else {
7929 // Code path common to both original and adaptive free lists.
7931 // can't coalesce with the previous block; this should be treated
7932 // as the end of a free run if any
7933 if (inFreeRange()) {
7934 // we kicked some butt; time to pick up the garbage
7935 assert(freeFinger() < addr, "the finger pointeth off base");
7936 flushCurFreeChunk(freeFinger(), pointer_delta(addr, freeFinger()));
7937 }
7938 // else, nothing to do, just continue
7939 }
7940 }
7942 size_t SweepClosure::doGarbageChunk(FreeChunk* fc) {
7943 // This is a chunk of garbage. It is not in any free list.
7944 // Add it to a free list or let it possibly be coalesced into
7945 // a larger chunk.
7946 HeapWord* addr = (HeapWord*) fc;
7947 size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
7949 if (_sp->adaptive_freelists()) {
7950 // Verify that the bit map has no bits marked between
7951 // addr and purported end of just dead object.
7952 _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
7954 doPostIsFreeOrGarbageChunk(fc, size);
7955 } else {
7956 if (!inFreeRange()) {
7957 // start of a new free range
7958 assert(size > 0, "A free range should have a size");
7959 initialize_free_range(addr, false);
7961 } else {
7962 // this will be swept up when we hit the end of the
7963 // free range
7964 if (CMSTraceSweeper) {
7965 gclog_or_tty->print(" -- pick up garbage 0x%x (%d) \n", fc, size);
7966 }
7967 // If the chunk is being coalesced and the current free range is
7968 // in the free lists, remove the current free range so that it
7969 // will be returned to the free lists in its entirety - all
7970 // the coalesced pieces included.
7971 if (freeRangeInFreeLists()) {
7972 FreeChunk* ffc = (FreeChunk*)freeFinger();
7973 assert(ffc->size() == pointer_delta(addr, freeFinger()),
7974 "Size of free range is inconsistent with chunk size.");
7975 if (CMSTestInFreeList) {
7976 assert(_sp->verifyChunkInFreeLists(ffc),
7977 "free range is not in free lists");
7978 }
7979 _sp->removeFreeChunkFromFreeLists(ffc);
7980 set_freeRangeInFreeLists(false);
7981 }
7982 set_lastFreeRangeCoalesced(true);
7983 }
7984 // this will be swept up when we hit the end of the free range
7986 // Verify that the bit map has no bits marked between
7987 // addr and purported end of just dead object.
7988 _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
7989 }
7990 return size;
7991 }
7993 size_t SweepClosure::doLiveChunk(FreeChunk* fc) {
7994 HeapWord* addr = (HeapWord*) fc;
7995 // The sweeper has just found a live object. Return any accumulated
7996 // left hand chunk to the free lists.
7997 if (inFreeRange()) {
7998 if (_sp->adaptive_freelists()) {
7999 flushCurFreeChunk(freeFinger(),
8000 pointer_delta(addr, freeFinger()));
8001 } else { // not adaptive freelists
8002 set_inFreeRange(false);
8003 // Add the free range back to the free list if it is not already
8004 // there.
8005 if (!freeRangeInFreeLists()) {
8006 assert(freeFinger() < addr, "the finger pointeth off base");
8007 if (CMSTraceSweeper) {
8008 gclog_or_tty->print("Sweep:put_free_blk 0x%x (%d) "
8009 "[coalesced:%d]\n",
8010 freeFinger(), pointer_delta(addr, freeFinger()),
8011 lastFreeRangeCoalesced());
8012 }
8013 _sp->addChunkAndRepairOffsetTable(freeFinger(),
8014 pointer_delta(addr, freeFinger()), lastFreeRangeCoalesced());
8015 }
8016 }
8017 }
8019 // Common code path for original and adaptive free lists.
8021 // this object is live: we'd normally expect this to be
8022 // an oop, and would like to assert the following:
8023 // assert(oop(addr)->is_oop(), "live block should be an oop");
8024 // However, as we commented above, this may be an object whose
8025 // header hasn't yet been initialized.
8026 size_t size;
8027 assert(_bitMap->isMarked(addr), "Tautology for this control point");
8028 if (_bitMap->isMarked(addr + 1)) {
8029 // Determine the size from the bit map, rather than trying to
8030 // compute it from the object header.
8031 HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
8032 size = pointer_delta(nextOneAddr + 1, addr);
8033 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
8034 "alignment problem");
8036 #ifdef DEBUG
8037 if (oop(addr)->klass() != NULL &&
8038 ( !_collector->should_unload_classes()
8039 || oop(addr)->is_parsable())) {
8040 // Ignore mark word because we are running concurrent with mutators
8041 assert(oop(addr)->is_oop(true), "live block should be an oop");
8042 assert(size ==
8043 CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
8044 "P-mark and computed size do not agree");
8045 }
8046 #endif
8048 } else {
8049 // This should be an initialized object that's alive.
8050 assert(oop(addr)->klass() != NULL &&
8051 (!_collector->should_unload_classes()
8052 || oop(addr)->is_parsable()),
8053 "Should be an initialized object");
8054 // Ignore mark word because we are running concurrent with mutators
8055 assert(oop(addr)->is_oop(true), "live block should be an oop");
8056 // Verify that the bit map has no bits marked between
8057 // addr and purported end of this block.
8058 size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
8059 assert(size >= 3, "Necessary for Printezis marks to work");
8060 assert(!_bitMap->isMarked(addr+1), "Tautology for this control point");
8061 DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);)
8062 }
8063 return size;
8064 }
8066 void SweepClosure::doPostIsFreeOrGarbageChunk(FreeChunk* fc,
8067 size_t chunkSize) {
8068 // doPostIsFreeOrGarbageChunk() should only be called in the smart allocation
8069 // scheme.
8070 bool fcInFreeLists = fc->isFree();
8071 assert(_sp->adaptive_freelists(), "Should only be used in this case.");
8072 assert((HeapWord*)fc <= _limit, "sweep invariant");
8073 if (CMSTestInFreeList && fcInFreeLists) {
8074 assert(_sp->verifyChunkInFreeLists(fc),
8075 "free chunk is not in free lists");
8076 }
8079 if (CMSTraceSweeper) {
8080 gclog_or_tty->print_cr(" -- pick up another chunk at 0x%x (%d)", fc, chunkSize);
8081 }
8083 HeapWord* addr = (HeapWord*) fc;
8085 bool coalesce;
8086 size_t left = pointer_delta(addr, freeFinger());
8087 size_t right = chunkSize;
8088 switch (FLSCoalescePolicy) {
8089 // numeric value forms a coalescing aggressiveness metric
8090 case 0: { // never coalesce
8091 coalesce = false;
8092 break;
8093 }
8094 case 1: { // coalesce if left & right chunks on overpopulated lists
8095 coalesce = _sp->coalOverPopulated(left) &&
8096 _sp->coalOverPopulated(right);
8097 break;
8098 }
8099 case 2: { // coalesce if left chunk on overpopulated list (default)
8100 coalesce = _sp->coalOverPopulated(left);
8101 break;
8102 }
8103 case 3: { // coalesce if left OR right chunk on overpopulated list
8104 coalesce = _sp->coalOverPopulated(left) ||
8105 _sp->coalOverPopulated(right);
8106 break;
8107 }
8108 case 4: { // always coalesce
8109 coalesce = true;
8110 break;
8111 }
8112 default:
8113 ShouldNotReachHere();
8114 }
8116 // Should the current free range be coalesced?
8117 // If the chunk is in a free range and either we decided to coalesce above
8118 // or the chunk is near the large block at the end of the heap
8119 // (isNearLargestChunk() returns true), then coalesce this chunk.
8120 bool doCoalesce = inFreeRange() &&
8121 (coalesce || _g->isNearLargestChunk((HeapWord*)fc));
8122 if (doCoalesce) {
8123 // Coalesce the current free range on the left with the new
8124 // chunk on the right. If either is on a free list,
8125 // it must be removed from the list and stashed in the closure.
8126 if (freeRangeInFreeLists()) {
8127 FreeChunk* ffc = (FreeChunk*)freeFinger();
8128 assert(ffc->size() == pointer_delta(addr, freeFinger()),
8129 "Size of free range is inconsistent with chunk size.");
8130 if (CMSTestInFreeList) {
8131 assert(_sp->verifyChunkInFreeLists(ffc),
8132 "Chunk is not in free lists");
8133 }
8134 _sp->coalDeath(ffc->size());
8135 _sp->removeFreeChunkFromFreeLists(ffc);
8136 set_freeRangeInFreeLists(false);
8137 }
8138 if (fcInFreeLists) {
8139 _sp->coalDeath(chunkSize);
8140 assert(fc->size() == chunkSize,
8141 "The chunk has the wrong size or is not in the free lists");
8142 _sp->removeFreeChunkFromFreeLists(fc);
8143 }
8144 set_lastFreeRangeCoalesced(true);
8145 } else { // not in a free range and/or should not coalesce
8146 // Return the current free range and start a new one.
8147 if (inFreeRange()) {
8148 // In a free range but cannot coalesce with the right hand chunk.
8149 // Put the current free range into the free lists.
8150 flushCurFreeChunk(freeFinger(),
8151 pointer_delta(addr, freeFinger()));
8152 }
8153 // Set up for new free range. Pass along whether the right hand
8154 // chunk is in the free lists.
8155 initialize_free_range((HeapWord*)fc, fcInFreeLists);
8156 }
8157 }
8158 void SweepClosure::flushCurFreeChunk(HeapWord* chunk, size_t size) {
8159 assert(inFreeRange(), "Should only be called if currently in a free range.");
8160 assert(size > 0,
8161 "A zero sized chunk cannot be added to the free lists.");
8162 if (!freeRangeInFreeLists()) {
8163 if(CMSTestInFreeList) {
8164 FreeChunk* fc = (FreeChunk*) chunk;
8165 fc->setSize(size);
8166 assert(!_sp->verifyChunkInFreeLists(fc),
8167 "chunk should not be in free lists yet");
8168 }
8169 if (CMSTraceSweeper) {
8170 gclog_or_tty->print_cr(" -- add free block 0x%x (%d) to free lists",
8171 chunk, size);
8172 }
8173 // A new free range is going to be starting. The current
8174 // free range has not been added to the free lists yet or
8175 // was removed so add it back.
8176 // If the current free range was coalesced, then the death
8177 // of the free range was recorded. Record a birth now.
8178 if (lastFreeRangeCoalesced()) {
8179 _sp->coalBirth(size);
8180 }
8181 _sp->addChunkAndRepairOffsetTable(chunk, size,
8182 lastFreeRangeCoalesced());
8183 }
8184 set_inFreeRange(false);
8185 set_freeRangeInFreeLists(false);
8186 }
8188 // We take a break if we've been at this for a while,
8189 // so as to avoid monopolizing the locks involved.
8190 void SweepClosure::do_yield_work(HeapWord* addr) {
8191 // Return current free chunk being used for coalescing (if any)
8192 // to the appropriate freelist. After yielding, the next
8193 // free block encountered will start a coalescing range of
8194 // free blocks. If the next free block is adjacent to the
8195 // chunk just flushed, they will need to wait for the next
8196 // sweep to be coalesced.
8197 if (inFreeRange()) {
8198 flushCurFreeChunk(freeFinger(), pointer_delta(addr, freeFinger()));
8199 }
8201 // First give up the locks, then yield, then re-lock.
8202 // We should probably use a constructor/destructor idiom to
8203 // do this unlock/lock or modify the MutexUnlocker class to
8204 // serve our purpose. XXX
8205 assert_lock_strong(_bitMap->lock());
8206 assert_lock_strong(_freelistLock);
8207 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
8208 "CMS thread should hold CMS token");
8209 _bitMap->lock()->unlock();
8210 _freelistLock->unlock();
8211 ConcurrentMarkSweepThread::desynchronize(true);
8212 ConcurrentMarkSweepThread::acknowledge_yield_request();
8213 _collector->stopTimer();
8214 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
8215 if (PrintCMSStatistics != 0) {
8216 _collector->incrementYields();
8217 }
8218 _collector->icms_wait();
8220 // See the comment in coordinator_yield()
8221 for (unsigned i = 0; i < CMSYieldSleepCount &&
8222 ConcurrentMarkSweepThread::should_yield() &&
8223 !CMSCollector::foregroundGCIsActive(); ++i) {
8224 os::sleep(Thread::current(), 1, false);
8225 ConcurrentMarkSweepThread::acknowledge_yield_request();
8226 }
8228 ConcurrentMarkSweepThread::synchronize(true);
8229 _freelistLock->lock();
8230 _bitMap->lock()->lock_without_safepoint_check();
8231 _collector->startTimer();
8232 }
8234 #ifndef PRODUCT
8235 // This is actually very useful in a product build if it can
8236 // be called from the debugger. Compile it into the product
8237 // as needed.
8238 bool debug_verifyChunkInFreeLists(FreeChunk* fc) {
8239 return debug_cms_space->verifyChunkInFreeLists(fc);
8240 }
8242 void SweepClosure::record_free_block_coalesced(FreeChunk* fc) const {
8243 if (CMSTraceSweeper) {
8244 gclog_or_tty->print("Sweep:coal_free_blk 0x%x (%d)\n", fc, fc->size());
8245 }
8246 }
8247 #endif
8249 // CMSIsAliveClosure
8250 bool CMSIsAliveClosure::do_object_b(oop obj) {
8251 HeapWord* addr = (HeapWord*)obj;
8252 return addr != NULL &&
8253 (!_span.contains(addr) || _bit_map->isMarked(addr));
8254 }
8256 // CMSKeepAliveClosure: the serial version
8257 void CMSKeepAliveClosure::do_oop(oop obj) {
8258 HeapWord* addr = (HeapWord*)obj;
8259 if (_span.contains(addr) &&
8260 !_bit_map->isMarked(addr)) {
8261 _bit_map->mark(addr);
8262 bool simulate_overflow = false;
8263 NOT_PRODUCT(
8264 if (CMSMarkStackOverflowALot &&
8265 _collector->simulate_overflow()) {
8266 // simulate a stack overflow
8267 simulate_overflow = true;
8268 }
8269 )
8270 if (simulate_overflow || !_mark_stack->push(obj)) {
8271 _collector->push_on_overflow_list(obj);
8272 _collector->_ser_kac_ovflw++;
8273 }
8274 }
8275 }
8277 void CMSKeepAliveClosure::do_oop(oop* p) { CMSKeepAliveClosure::do_oop_work(p); }
8278 void CMSKeepAliveClosure::do_oop(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
8280 // CMSParKeepAliveClosure: a parallel version of the above.
8281 // The work queues are private to each closure (thread),
8282 // but (may be) available for stealing by other threads.
8283 void CMSParKeepAliveClosure::do_oop(oop obj) {
8284 HeapWord* addr = (HeapWord*)obj;
8285 if (_span.contains(addr) &&
8286 !_bit_map->isMarked(addr)) {
8287 // In general, during recursive tracing, several threads
8288 // may be concurrently getting here; the first one to
8289 // "tag" it, claims it.
8290 if (_bit_map->par_mark(addr)) {
8291 bool res = _work_queue->push(obj);
8292 assert(res, "Low water mark should be much less than capacity");
8293 // Do a recursive trim in the hope that this will keep
8294 // stack usage lower, but leave some oops for potential stealers
8295 trim_queue(_low_water_mark);
8296 } // Else, another thread got there first
8297 }
8298 }
8300 void CMSParKeepAliveClosure::do_oop(oop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
8301 void CMSParKeepAliveClosure::do_oop(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
8303 void CMSParKeepAliveClosure::trim_queue(uint max) {
8304 while (_work_queue->size() > max) {
8305 oop new_oop;
8306 if (_work_queue->pop_local(new_oop)) {
8307 assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
8308 assert(_bit_map->isMarked((HeapWord*)new_oop),
8309 "no white objects on this stack!");
8310 assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
8311 // iterate over the oops in this oop, marking and pushing
8312 // the ones in CMS heap (i.e. in _span).
8313 new_oop->oop_iterate(&_mark_and_push);
8314 }
8315 }
8316 }
8318 void CMSInnerParMarkAndPushClosure::do_oop(oop obj) {
8319 HeapWord* addr = (HeapWord*)obj;
8320 if (_span.contains(addr) &&
8321 !_bit_map->isMarked(addr)) {
8322 if (_bit_map->par_mark(addr)) {
8323 bool simulate_overflow = false;
8324 NOT_PRODUCT(
8325 if (CMSMarkStackOverflowALot &&
8326 _collector->par_simulate_overflow()) {
8327 // simulate a stack overflow
8328 simulate_overflow = true;
8329 }
8330 )
8331 if (simulate_overflow || !_work_queue->push(obj)) {
8332 _collector->par_push_on_overflow_list(obj);
8333 _collector->_par_kac_ovflw++;
8334 }
8335 } // Else another thread got there already
8336 }
8337 }
8339 void CMSInnerParMarkAndPushClosure::do_oop(oop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
8340 void CMSInnerParMarkAndPushClosure::do_oop(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
8342 //////////////////////////////////////////////////////////////////
8343 // CMSExpansionCause /////////////////////////////
8344 //////////////////////////////////////////////////////////////////
8345 const char* CMSExpansionCause::to_string(CMSExpansionCause::Cause cause) {
8346 switch (cause) {
8347 case _no_expansion:
8348 return "No expansion";
8349 case _satisfy_free_ratio:
8350 return "Free ratio";
8351 case _satisfy_promotion:
8352 return "Satisfy promotion";
8353 case _satisfy_allocation:
8354 return "allocation";
8355 case _allocate_par_lab:
8356 return "Par LAB";
8357 case _allocate_par_spooling_space:
8358 return "Par Spooling Space";
8359 case _adaptive_size_policy:
8360 return "Ergonomics";
8361 default:
8362 return "unknown";
8363 }
8364 }
8366 void CMSDrainMarkingStackClosure::do_void() {
8367 // the max number to take from overflow list at a time
8368 const size_t num = _mark_stack->capacity()/4;
8369 while (!_mark_stack->isEmpty() ||
8370 // if stack is empty, check the overflow list
8371 _collector->take_from_overflow_list(num, _mark_stack)) {
8372 oop obj = _mark_stack->pop();
8373 HeapWord* addr = (HeapWord*)obj;
8374 assert(_span.contains(addr), "Should be within span");
8375 assert(_bit_map->isMarked(addr), "Should be marked");
8376 assert(obj->is_oop(), "Should be an oop");
8377 obj->oop_iterate(_keep_alive);
8378 }
8379 }
8381 void CMSParDrainMarkingStackClosure::do_void() {
8382 // drain queue
8383 trim_queue(0);
8384 }
8386 // Trim our work_queue so its length is below max at return
8387 void CMSParDrainMarkingStackClosure::trim_queue(uint max) {
8388 while (_work_queue->size() > max) {
8389 oop new_oop;
8390 if (_work_queue->pop_local(new_oop)) {
8391 assert(new_oop->is_oop(), "Expected an oop");
8392 assert(_bit_map->isMarked((HeapWord*)new_oop),
8393 "no white objects on this stack!");
8394 assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
8395 // iterate over the oops in this oop, marking and pushing
8396 // the ones in CMS heap (i.e. in _span).
8397 new_oop->oop_iterate(&_mark_and_push);
8398 }
8399 }
8400 }
8402 ////////////////////////////////////////////////////////////////////
8403 // Support for Marking Stack Overflow list handling and related code
8404 ////////////////////////////////////////////////////////////////////
8405 // Much of the following code is similar in shape and spirit to the
8406 // code used in ParNewGC. We should try and share that code
8407 // as much as possible in the future.
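// A brief sketch of the representation used below (derived from the code
// that follows): the overflow list is a singly-linked list threaded
// through the objects' own mark words -- each object's mark word holds
// the next object on the list, and _overflow_list points at the head:
//
//   _overflow_list -> objA[mark = objB] -> objB[mark = objC] -> objC[mark = NULL]
//
// Before a mark word is hijacked this way, any non-trivial mark is saved
// via preserve_mark_if_necessary() (see below) and restored at the next
// safepoint by restore_preserved_marks_if_any(). Popping an element reads
// the next pointer out of the mark word and resets the mark word to its
// prototypical value.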
8409 #ifndef PRODUCT
8410 // Debugging support for CMSStackOverflowALot
8412 // It's OK to call this multi-threaded; the worst thing
8413 // that can happen is that we'll get a bunch of closely
8414 // spaced simulated overflows, but that's OK, in fact
8415 // probably good as it would exercise the overflow code
8416 // under contention.
8417 bool CMSCollector::simulate_overflow() {
8418 if (_overflow_counter-- <= 0) { // just being defensive
8419 _overflow_counter = CMSMarkStackOverflowInterval;
8420 return true;
8421 } else {
8422 return false;
8423 }
8424 }
8426 bool CMSCollector::par_simulate_overflow() {
8427 return simulate_overflow();
8428 }
8429 #endif
8431 // Single-threaded
8432 bool CMSCollector::take_from_overflow_list(size_t num, CMSMarkStack* stack) {
8433 assert(stack->isEmpty(), "Expected precondition");
8434 assert(stack->capacity() > num, "Shouldn't bite more than can chew");
8435 size_t i = num;
8436 oop cur = _overflow_list;
8437 const markOop proto = markOopDesc::prototype();
8438 NOT_PRODUCT(size_t n = 0;)
8439 for (oop next; i > 0 && cur != NULL; cur = next, i--) {
8440 next = oop(cur->mark());
8441 cur->set_mark(proto); // until proven otherwise
8442 assert(cur->is_oop(), "Should be an oop");
8443 bool res = stack->push(cur);
8444 assert(res, "Bit off more than can chew?");
8445 NOT_PRODUCT(n++;)
8446 }
8447 _overflow_list = cur;
8448 #ifndef PRODUCT
8449 assert(_num_par_pushes >= n, "Too many pops?");
8450 _num_par_pushes -= n;
8451 #endif
8452 return !stack->isEmpty();
8453 }
8455 // Multi-threaded; use CAS to break off a prefix
8456 bool CMSCollector::par_take_from_overflow_list(size_t num,
8457 OopTaskQueue* work_q) {
8458 assert(work_q->size() == 0, "That's the current policy");
8459 assert(num < work_q->max_elems(), "Can't bite more than we can chew");
8460 if (_overflow_list == NULL) {
8461 return false;
8462 }
8463 // Grab the entire list; we'll put back a suffix
8464 oop prefix = (oop)Atomic::xchg_ptr(NULL, &_overflow_list);
8465 if (prefix == NULL) { // someone grabbed it before we did ...
8466 // ... we could spin for a short while, but for now we don't
8467 return false;
8468 }
8469 size_t i = num;
8470 oop cur = prefix;
8471 for (; i > 1 && cur->mark() != NULL; cur = oop(cur->mark()), i--);
8472 if (cur->mark() != NULL) {
8473 oop suffix_head = cur->mark(); // suffix will be put back on global list
8474 cur->set_mark(NULL); // break off suffix
8475 // Find tail of suffix so we can prepend suffix to global list
8476 for (cur = suffix_head; cur->mark() != NULL; cur = (oop)(cur->mark()));
8477 oop suffix_tail = cur;
8478 assert(suffix_tail != NULL && suffix_tail->mark() == NULL,
8479 "Tautology");
8480 oop observed_overflow_list = _overflow_list;
8481 do {
8482 cur = observed_overflow_list;
8483 suffix_tail->set_mark(markOop(cur));
8484 observed_overflow_list =
8485 (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur);
8486 } while (cur != observed_overflow_list);
8487 }
8489 // Push the prefix elements on work_q
8490 assert(prefix != NULL, "control point invariant");
8491 const markOop proto = markOopDesc::prototype();
8492 oop next;
8493 NOT_PRODUCT(size_t n = 0;)
8494 for (cur = prefix; cur != NULL; cur = next) {
8495 next = oop(cur->mark());
8496 cur->set_mark(proto); // until proven otherwise
8497 assert(cur->is_oop(), "Should be an oop");
8498 bool res = work_q->push(cur);
8499 assert(res, "Bit off more than we can chew?");
8500 NOT_PRODUCT(n++;)
8501 }
8502 #ifndef PRODUCT
8503 assert(_num_par_pushes >= n, "Too many pops?");
8504 Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
8505 #endif
8506 return true;
8507 }
8509 // Single-threaded
8510 void CMSCollector::push_on_overflow_list(oop p) {
8511 NOT_PRODUCT(_num_par_pushes++;)
8512 assert(p->is_oop(), "Not an oop");
8513 preserve_mark_if_necessary(p);
8514 p->set_mark((markOop)_overflow_list);
8515 _overflow_list = p;
8516 }
8518 // Multi-threaded; use CAS to prepend to overflow list
8519 void CMSCollector::par_push_on_overflow_list(oop p) {
8520 NOT_PRODUCT(Atomic::inc_ptr(&_num_par_pushes);)
8521 assert(p->is_oop(), "Not an oop");
8522 par_preserve_mark_if_necessary(p);
8523 oop observed_overflow_list = _overflow_list;
8524 oop cur_overflow_list;
8525 do {
8526 cur_overflow_list = observed_overflow_list;
8527 p->set_mark(markOop(cur_overflow_list));
8528 observed_overflow_list =
8529 (oop) Atomic::cmpxchg_ptr(p, &_overflow_list, cur_overflow_list);
8530 } while (cur_overflow_list != observed_overflow_list);
8531 }
8533 // Single threaded
8534 // General Note on GrowableArray: pushes may silently fail
8535 // because we are (temporarily) out of C-heap for expanding
8536 // the stack. The problem is quite ubiquitous and affects
8537 // a lot of code in the JVM. The prudent thing for GrowableArray
8538 // to do (for now) is to exit with an error. However, that may
8539 // be too draconian in some cases because the caller may be
8540 // able to recover without much harm. For such cases, we
8541 // should probably introduce a "soft_push" method which returns
8542 // an indication of success or failure with the assumption that
8543 // the caller may be able to recover from a failure; code in
8544 // the VM can then be changed, incrementally, to deal with such
8545 // failures where possible, thus, incrementally hardening the VM
8546 // in such low resource situations.
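// A hypothetical shape for such an interface (illustration only; no such
// method exists in GrowableArray today):
//
//   // Returns false, rather than exiting the VM, when the C-heap
//   // expansion needed to accommodate the push fails.
//   bool soft_push(const E& elem);
//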
8547 void CMSCollector::preserve_mark_work(oop p, markOop m) {
8548 int PreserveMarkStackSize = 128;
8550 if (_preserved_oop_stack == NULL) {
8551 assert(_preserved_mark_stack == NULL,
8552 "bijection with preserved_oop_stack");
8553 // Allocate the stacks
8554 _preserved_oop_stack = new (ResourceObj::C_HEAP)
8555 GrowableArray<oop>(PreserveMarkStackSize, true);
8556 _preserved_mark_stack = new (ResourceObj::C_HEAP)
8557 GrowableArray<markOop>(PreserveMarkStackSize, true);
8558 if (_preserved_oop_stack == NULL || _preserved_mark_stack == NULL) {
8559 vm_exit_out_of_memory(2 * PreserveMarkStackSize * sizeof(oop) /* punt */,
8560 "Preserved Mark/Oop Stack for CMS (C-heap)");
8561 }
8562 }
8563 _preserved_oop_stack->push(p);
8564 _preserved_mark_stack->push(m);
8565 assert(m == p->mark(), "Mark word changed");
8566 assert(_preserved_oop_stack->length() == _preserved_mark_stack->length(),
8567 "bijection");
8568 }
8570 // Single threaded
8571 void CMSCollector::preserve_mark_if_necessary(oop p) {
8572 markOop m = p->mark();
8573 if (m->must_be_preserved(p)) {
8574 preserve_mark_work(p, m);
8575 }
8576 }
8578 void CMSCollector::par_preserve_mark_if_necessary(oop p) {
8579 markOop m = p->mark();
8580 if (m->must_be_preserved(p)) {
8581 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
8582 // Even though we read the mark word without holding
8583 // the lock, we are assured that it will not change
8584 // because we "own" this oop, so no other thread can
8585 // be trying to push it on the overflow list; see
8586 // the assertion in preserve_mark_work() that checks
8587 // that m == p->mark().
8588 preserve_mark_work(p, m);
8589 }
8590 }
8592 // We should be able to do this multi-threaded,
8593 // a chunk of stack being a task (this is
8594 // correct because each oop only ever appears
8595 // once in the overflow list). However, it's
8596 // not very easy to completely overlap this with
8597 // other operations, so it will generally not be done
8598 // until all work's been completed. Because we
8599 // expect the preserved oop stack (set) to be small,
8600 // it's probably fine to do this single-threaded.
8601 // We can explore cleverer concurrent/overlapped/parallel
8602 // processing of preserved marks if we feel the
8603 // need for this in the future. Stack overflow should
8604 // be so rare in practice and, when it happens, its
8605 // effect on performance so great that this will
8606 // likely just be in the noise anyway.
8607 void CMSCollector::restore_preserved_marks_if_any() {
8608 if (_preserved_oop_stack == NULL) {
8609 assert(_preserved_mark_stack == NULL,
8610 "bijection with preserved_oop_stack");
8611 return;
8612 }
8614 assert(SafepointSynchronize::is_at_safepoint(),
8615 "world should be stopped");
8616 assert(Thread::current()->is_ConcurrentGC_thread() ||
8617 Thread::current()->is_VM_thread(),
8618 "should be single-threaded");
8620 int length = _preserved_oop_stack->length();
8621 assert(_preserved_mark_stack->length() == length, "bijection");
8622 for (int i = 0; i < length; i++) {
8623 oop p = _preserved_oop_stack->at(i);
8624 assert(p->is_oop(), "Should be an oop");
8625 assert(_span.contains(p), "oop should be in _span");
8626 assert(p->mark() == markOopDesc::prototype(),
8627 "Set when taken from overflow list");
8628 markOop m = _preserved_mark_stack->at(i);
8629 p->set_mark(m);
8630 }
8631 _preserved_mark_stack->clear();
8632 _preserved_oop_stack->clear();
8633 assert(_preserved_mark_stack->is_empty() &&
8634 _preserved_oop_stack->is_empty(),
8635 "stacks were cleared above");
8636 }
8638 #ifndef PRODUCT
8639 bool CMSCollector::no_preserved_marks() const {
8640 return ( ( _preserved_mark_stack == NULL
8641 && _preserved_oop_stack == NULL)
8642 || ( _preserved_mark_stack->is_empty()
8643 && _preserved_oop_stack->is_empty()));
8644 }
8645 #endif
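8646 // Return the heap's size policy, asserting that it is the CMS adaptive kind.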
8647 CMSAdaptiveSizePolicy* ASConcurrentMarkSweepGeneration::cms_size_policy() const
8648 {
8649 GenCollectedHeap* gch = (GenCollectedHeap*) GenCollectedHeap::heap();
8650 CMSAdaptiveSizePolicy* size_policy =
8651 (CMSAdaptiveSizePolicy*) gch->gen_policy()->size_policy();
8652 assert(size_policy->is_gc_cms_adaptive_size_policy(),
8653 "Wrong type for size policy");
8654 return size_policy;
8655 }
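8656 // Grow or shrink the tenured generation towards the desired promotion space size.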
8657 void ASConcurrentMarkSweepGeneration::resize(size_t cur_promo_size,
8658 size_t desired_promo_size) {
8659 if (cur_promo_size < desired_promo_size) {
8660 size_t expand_bytes = desired_promo_size - cur_promo_size;
8661 if (PrintAdaptiveSizePolicy && Verbose) {
8662 gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
8663 "Expanding tenured generation by " SIZE_FORMAT " (bytes)",
8664 expand_bytes);
8665 }
8666 expand(expand_bytes,
8667 MinHeapDeltaBytes,
8668 CMSExpansionCause::_adaptive_size_policy);
8669 } else if (desired_promo_size < cur_promo_size) {
8670 size_t shrink_bytes = cur_promo_size - desired_promo_size;
8671 if (PrintAdaptiveSizePolicy && Verbose) {
8672 gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
8673 "Shrinking tenured generation by " SIZE_FORMAT " (bytes)",
8674 shrink_bytes);
8675 }
8676 shrink(shrink_bytes);
8677 }
8678 }
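8679 // Return the heap's GC policy counters, asserting that they are the CMS adaptive kind.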
8680 CMSGCAdaptivePolicyCounters* ASConcurrentMarkSweepGeneration::gc_adaptive_policy_counters() {
8681 GenCollectedHeap* gch = GenCollectedHeap::heap();
8682 CMSGCAdaptivePolicyCounters* counters =
8683 (CMSGCAdaptivePolicyCounters*) gch->collector_policy()->counters();
8684 assert(counters->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
8685 "Wrong kind of counters");
8686 return counters;
8687 }
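8688 // Refresh the space and generation performance counters and push the
8689 // current CMS GC statistics into the adaptive policy counters.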
8690 void ASConcurrentMarkSweepGeneration::update_counters() {
8691 if (UsePerfData) {
8692 _space_counters->update_all();
8693 _gen_counters->update_all();
8694 CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
8695 GenCollectedHeap* gch = GenCollectedHeap::heap();
8696 CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
8697 assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
8698 "Wrong gc statistics type");
8699 counters->update_counters(gc_stats_l);
8700 }
8701 }
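8702 // As above, but with the used size supplied by the caller.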
8703 void ASConcurrentMarkSweepGeneration::update_counters(size_t used) {
8704 if (UsePerfData) {
8705 _space_counters->update_used(used);
8706 _space_counters->update_capacity();
8707 _gen_counters->update_all();
8709 CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
8710 GenCollectedHeap* gch = GenCollectedHeap::heap();
8711 CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
8712 assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
8713 "Wrong gc statistics type");
8714 counters->update_counters(gc_stats_l);
8715 }
8716 }
8718 // The desired expansion delta is computed so that at least
8719 // the desired free percentage remains available.
8720 void ASConcurrentMarkSweepGeneration::compute_new_size() {
8721 assert_locked_or_safepoint(Heap_lock);
8723 GenCollectedHeap* gch = (GenCollectedHeap*) GenCollectedHeap::heap();
8725 // If incremental collection failed, we just want to expand
8726 // to the limit.
8727 if (incremental_collection_failed()) {
8728 clear_incremental_collection_failed();
8729 grow_to_reserved();
8730 return;
8731 }
8733 assert(UseAdaptiveSizePolicy, "Should be using adaptive sizing");
8735 assert(gch->kind() == CollectedHeap::GenCollectedHeap,
8736 "Wrong type of heap");
8737 int prev_level = level() - 1;
8738   assert(prev_level >= 0, "The cms generation should not be the lowest generation");
8739 Generation* prev_gen = gch->get_gen(prev_level);
8740 assert(prev_gen->kind() == Generation::ASParNew,
8741 "Wrong type of young generation");
8742 ParNewGeneration* younger_gen = (ParNewGeneration*) prev_gen;
8743 size_t cur_eden = younger_gen->eden()->capacity();
8744 CMSAdaptiveSizePolicy* size_policy = cms_size_policy();
8745 size_t cur_promo = free();
8746 size_policy->compute_tenured_generation_free_space(cur_promo,
8747 max_available(),
8748 cur_eden);
8749 resize(cur_promo, size_policy->promo_size());
8751   // Record the new size of the space in the cms generation
8752   // that is available for promotions. This is a temporary
8753   // measure; ideally this should record the desired promo size.
8754 size_policy->avg_cms_promo()->sample(free());
8755 size_policy->avg_old_live()->sample(used());
8757 if (UsePerfData) {
8758 CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
8759 counters->update_cms_capacity_counter(capacity());
8760 }
8761 }
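8762 // Shrink by up to desired_bytes by trimming the free chunk at the end of the space.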
8763 void ASConcurrentMarkSweepGeneration::shrink_by(size_t desired_bytes) {
8764 assert_locked_or_safepoint(Heap_lock);
8765 assert_lock_strong(freelistLock());
8766 HeapWord* old_end = _cmsSpace->end();
8767 HeapWord* unallocated_start = _cmsSpace->unallocated_block();
8768 assert(old_end >= unallocated_start, "Miscalculation of unallocated_start");
8769 FreeChunk* chunk_at_end = find_chunk_at_end();
8770 if (chunk_at_end == NULL) {
8771 // No room to shrink
8772 if (PrintGCDetails && Verbose) {
8773 gclog_or_tty->print_cr("No room to shrink: old_end "
8774 PTR_FORMAT " unallocated_start " PTR_FORMAT
8775 " chunk_at_end " PTR_FORMAT,
8776 old_end, unallocated_start, chunk_at_end);
8777 }
8778 return;
8779 } else {
8781 // Find the chunk at the end of the space and determine
8782 // how much it can be shrunk.
8783 size_t shrinkable_size_in_bytes = chunk_at_end->size();
8784 size_t aligned_shrinkable_size_in_bytes =
8785 align_size_down(shrinkable_size_in_bytes, os::vm_page_size());
8786 assert(unallocated_start <= chunk_at_end->end(),
8787 "Inconsistent chunk at end of space");
8788 size_t bytes = MIN2(desired_bytes, aligned_shrinkable_size_in_bytes);
8789 size_t word_size_before = heap_word_size(_virtual_space.committed_size());
8791 // Shrink the underlying space
8792 _virtual_space.shrink_by(bytes);
8793 if (PrintGCDetails && Verbose) {
8794 gclog_or_tty->print_cr("ConcurrentMarkSweepGeneration::shrink_by:"
8795 " desired_bytes " SIZE_FORMAT
8796 " shrinkable_size_in_bytes " SIZE_FORMAT
8797 " aligned_shrinkable_size_in_bytes " SIZE_FORMAT
8798 " bytes " SIZE_FORMAT,
8799 desired_bytes, shrinkable_size_in_bytes,
8800 aligned_shrinkable_size_in_bytes, bytes);
8801     gclog_or_tty->print_cr("  old_end  " PTR_FORMAT
8802       "  unallocated_start  " PTR_FORMAT,
8803 old_end, unallocated_start);
8804 }
8806 // If the space did shrink (shrinking is not guaranteed),
8807 // shrink the chunk at the end by the appropriate amount.
8808 if (((HeapWord*)_virtual_space.high()) < old_end) {
8809 size_t new_word_size =
8810 heap_word_size(_virtual_space.committed_size());
8812       // The chunk must be removed from the dictionary because it is
8813       // changing size and would otherwise be filed in the wrong place.
8815       // Remove the chunk at the end, shrink it, and return it
8816       // to the dictionary.
8817 _cmsSpace->removeChunkFromDictionary(chunk_at_end);
8818 size_t word_size_change = word_size_before - new_word_size;
8819 size_t chunk_at_end_old_size = chunk_at_end->size();
8820 assert(chunk_at_end_old_size >= word_size_change,
8821 "Shrink is too large");
8822 chunk_at_end->setSize(chunk_at_end_old_size -
8823 word_size_change);
8824 _cmsSpace->freed((HeapWord*) chunk_at_end->end(),
8825 word_size_change);
8827 _cmsSpace->returnChunkToDictionary(chunk_at_end);
8829 MemRegion mr(_cmsSpace->bottom(), new_word_size);
8830 _bts->resize(new_word_size); // resize the block offset shared array
8831 Universe::heap()->barrier_set()->resize_covered_region(mr);
8832 _cmsSpace->assert_locked();
8833 _cmsSpace->set_end((HeapWord*)_virtual_space.high());
8835 NOT_PRODUCT(_cmsSpace->dictionary()->verify());
8837 // update the space and generation capacity counters
8838 if (UsePerfData) {
8839 _space_counters->update_capacity();
8840 _gen_counters->update_all();
8841 }
8843 if (Verbose && PrintGCDetails) {
8844 size_t new_mem_size = _virtual_space.committed_size();
8845 size_t old_mem_size = new_mem_size + bytes;
8846         gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
8847           name(), old_mem_size/K, bytes/K, new_mem_size/K);
8848 }
8849 }
8851 assert(_cmsSpace->unallocated_block() <= _cmsSpace->end(),
8852 "Inconsistency at end of space");
8853 assert(chunk_at_end->end() == _cmsSpace->end(),
8854 "Shrinking is inconsistent");
8855 return;
8856 }
8857 }
8859 // Transfer some number of overflowed objects to the usual marking
8860 // stack. Return true if some objects were transferred.
8861 bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
8862 size_t num = MIN2((size_t)_mark_stack->capacity()/4,
8863 (size_t)ParGCDesiredObjsFromOverflowList);
8865 bool res = _collector->take_from_overflow_list(num, _mark_stack);
8866 assert(_collector->overflow_list_is_empty() || res,
8867 "If list is not empty, we should have taken something");
8868 assert(!res || !_mark_stack->isEmpty(),
8869 "If we took something, it should now be on our stack");
8870 return res;
8871 }
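8872 // If the block at addr is an object that is not marked live, record it
8873 // in the dead bit map; return the size of the block.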
8873 size_t MarkDeadObjectsClosure::do_blk(HeapWord* addr) {
8874 size_t res = _sp->block_size_no_stall(addr, _collector);
8875 assert(res != 0, "Should always be able to compute a size");
8876 if (_sp->block_is_obj(addr)) {
8877 if (_live_bit_map->isMarked(addr)) {
8878 // It can't have been dead in a previous cycle
8879 guarantee(!_dead_bit_map->isMarked(addr), "No resurrection!");
8880 } else {
8881 _dead_bit_map->mark(addr); // mark the dead object
8882 }
8883 }
8884 return res;
8885 }