Fri, 09 May 2008 08:55:13 -0700
6670684: 4/5 SA command universe did not print out CMS space information
Summary: Forward port of Yumin's fix for 6670684 from HSX-11; Yumin verified the port was correct.
Reviewed-by: dcubed
1 /*
2 * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
25 # include "incls/_precompiled.incl"
26 # include "incls/_concurrentMarkSweepGeneration.cpp.incl"
28 // statics
29 CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
30 bool CMSCollector::_full_gc_requested = false;
32 //////////////////////////////////////////////////////////////////
33 // In support of CMS/VM thread synchronization
34 //////////////////////////////////////////////////////////////////
35 // We split use of the CGC_lock into 2 "levels".
36 // The low-level locking is of the usual CGC_lock monitor. We introduce
37 // a higher level "token" (hereafter "CMS token") built on top of the
38 // low level monitor (hereafter "CGC lock").
39 // The token-passing protocol gives priority to the VM thread. The
40 // CMS-lock doesn't provide any fairness guarantees, but clients
41 // should ensure that it is only held for very short, bounded
42 // durations.
43 //
44 // When either of the CMS thread or the VM thread is involved in
45 // collection operations during which it does not want the other
46 // thread to interfere, it obtains the CMS token.
47 //
48 // If either thread tries to get the token while the other has
49 // it, that thread waits. However, if the VM thread and CMS thread
50 // both want the token, then the VM thread gets priority while the
51 // CMS thread waits. This ensures, for instance, that the "concurrent"
52 // phases of the CMS thread's work do not block out the VM thread
53 // for long periods of time as the CMS thread continues to hog
54 // the token. (See bug 4616232).
55 //
56 // The baton-passing functions are, however, controlled by the
57 // flags _foregroundGCShouldWait and _foregroundGCIsActive,
58 // and here the low-level CMS lock, not the high level token,
59 // ensures mutual exclusion.
60 //
61 // Two important conditions that we have to satisfy:
62 // 1. if a thread does a low-level wait on the CMS lock, then it
63 // relinquishes the CMS token if it was holding that token
64 // when it acquired the low-level CMS lock.
65 // 2. any low-level notifications on the low-level lock
66 // should only be sent when a thread has relinquished the token.
67 //
68 // In the absence of either property, we'd have potential deadlock.
69 //
70 // We protect each of the CMS (concurrent and sequential) phases
71 // with the CMS _token_, not the CMS _lock_.
72 //
73 // The only code protected by CMS lock is the token acquisition code
74 // itself, see ConcurrentMarkSweepThread::[de]synchronize(), and the
75 // baton-passing code.
76 //
77 // Unfortunately, I couldn't come up with a good abstraction to factor and
78 // hide the naked CGC_lock manipulation in the baton-passing code
79 // further below. That's something we should try to do. Also, the proof
80 // of correctness of this 2-level locking scheme is far from obvious,
81 // and potentially quite slippery. We have an uneasy suspicion, for instance,
82 // that there may be a theoretical possibility of delay/starvation in the
83 // low-level lock/wait/notify scheme used for the baton-passing because of
84 // potential interference with the priority scheme embodied in the
85 // CMS-token-passing protocol. See related comments at a CGC_lock->wait()
86 // invocation further below and marked with "XXX 20011219YSR".
87 // Indeed, as we note elsewhere, this may become yet more slippery
88 // in the presence of multiple CMS and/or multiple VM threads. XXX
90 class CMSTokenSync: public StackObj {
91 private:
92 bool _is_cms_thread;
93 public:
94 CMSTokenSync(bool is_cms_thread):
95 _is_cms_thread(is_cms_thread) {
96 assert(is_cms_thread == Thread::current()->is_ConcurrentGC_thread(),
97 "Incorrect argument to constructor");
98 ConcurrentMarkSweepThread::synchronize(_is_cms_thread);
99 }
101 ~CMSTokenSync() {
102 assert(_is_cms_thread ?
103 ConcurrentMarkSweepThread::cms_thread_has_cms_token() :
104 ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
105 "Incorrect state");
106 ConcurrentMarkSweepThread::desynchronize(_is_cms_thread);
107 }
108 };
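As a rough usage sketch (the function below is hypothetical and only illustrates the intended pattern; the actual call sites are the CMS phase methods further below in this file):

// Hypothetical illustration only, not part of this change.
void example_cms_phase() {
  // Pass true on the CMS thread and false on the VM thread; the constructor
  // blocks until the CMS token is acquired and the destructor releases it
  // when the scope exits.
  CMSTokenSync ts(true /* is_cms_thread */);
  // ... phase work that must not be interleaved with the VM thread ...
}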
110 // Convenience class that does a CMSTokenSync, and then acquires
111 // up to three locks.
112 class CMSTokenSyncWithLocks: public CMSTokenSync {
113 private:
114 // Note: locks are acquired in textual declaration order
115 // and released in the opposite order
116 MutexLockerEx _locker1, _locker2, _locker3;
117 public:
118 CMSTokenSyncWithLocks(bool is_cms_thread, Mutex* mutex1,
119 Mutex* mutex2 = NULL, Mutex* mutex3 = NULL):
120 CMSTokenSync(is_cms_thread),
121 _locker1(mutex1, Mutex::_no_safepoint_check_flag),
122 _locker2(mutex2, Mutex::_no_safepoint_check_flag),
123 _locker3(mutex3, Mutex::_no_safepoint_check_flag)
124 { }
125 };
128 // Wrapper class to temporarily disable icms during a foreground cms collection.
129 class ICMSDisabler: public StackObj {
130 public:
131 // The ctor disables icms and wakes up the thread so it notices the change;
132 // the dtor re-enables icms. Note that the CMSCollector methods will check
133 // CMSIncrementalMode.
134 ICMSDisabler() { CMSCollector::disable_icms(); CMSCollector::start_icms(); }
135 ~ICMSDisabler() { CMSCollector::enable_icms(); }
136 };
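A sketch of how this wrapper is meant to be used (hypothetical function name; the real foreground-collection call site is not shown in this excerpt):

// Hypothetical illustration only, not part of this change.
void example_foreground_collection() {
  ICMSDisabler icms_off;  // ctor: disable_icms(), then start_icms() to wake the CMS thread
  // ... do the foreground (stop-the-world) collection work ...
}                         // dtor: enable_icms()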
138 //////////////////////////////////////////////////////////////////
139 // Concurrent Mark-Sweep Generation /////////////////////////////
140 //////////////////////////////////////////////////////////////////
142 NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;)
144 // This class holds the per-thread state needed to support parallel
145 // young-gen collection.
146 class CMSParGCThreadState: public CHeapObj {
147 public:
148 CFLS_LAB lab;
149 PromotionInfo promo;
151 // Constructor.
152 CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
153 promo.setSpace(cfls);
154 }
155 };
157 ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
158 ReservedSpace rs, size_t initial_byte_size, int level,
159 CardTableRS* ct, bool use_adaptive_freelists,
160 FreeBlockDictionary::DictionaryChoice dictionaryChoice) :
161 CardGeneration(rs, initial_byte_size, level, ct),
162 _dilatation_factor(((double)MinChunkSize)/((double)(oopDesc::header_size()))),
163 _debug_collection_type(Concurrent_collection_type)
164 {
165 HeapWord* bottom = (HeapWord*) _virtual_space.low();
166 HeapWord* end = (HeapWord*) _virtual_space.high();
168 _direct_allocated_words = 0;
169 NOT_PRODUCT(
170 _numObjectsPromoted = 0;
171 _numWordsPromoted = 0;
172 _numObjectsAllocated = 0;
173 _numWordsAllocated = 0;
174 )
176 _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end),
177 use_adaptive_freelists,
178 dictionaryChoice);
179 NOT_PRODUCT(debug_cms_space = _cmsSpace;)
180 if (_cmsSpace == NULL) {
181 vm_exit_during_initialization(
182 "CompactibleFreeListSpace allocation failure");
183 }
184 _cmsSpace->_gen = this;
186 _gc_stats = new CMSGCStats();
188 // Verify the assumption that FreeChunk::_prev and OopDesc::_klass
189 // offsets match. The ability to tell free chunks from objects
190 // depends on this property.
191 debug_only(
192 FreeChunk* junk = NULL;
193 assert(junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
194 "Offset of FreeChunk::_prev within FreeChunk must match"
195 " that of OopDesc::_klass within OopDesc");
196 )
197 if (ParallelGCThreads > 0) {
198 typedef CMSParGCThreadState* CMSParGCThreadStatePtr;
199 _par_gc_thread_states =
200 NEW_C_HEAP_ARRAY(CMSParGCThreadStatePtr, ParallelGCThreads);
201 if (_par_gc_thread_states == NULL) {
202 vm_exit_during_initialization("Could not allocate par gc structs");
203 }
204 for (uint i = 0; i < ParallelGCThreads; i++) {
205 _par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace());
206 if (_par_gc_thread_states[i] == NULL) {
207 vm_exit_during_initialization("Could not allocate par gc structs");
208 }
209 }
210 } else {
211 _par_gc_thread_states = NULL;
212 }
213 _incremental_collection_failed = false;
214 // The "dilatation_factor" is the expansion that can occur on
215 // account of the fact that the minimum object size in the CMS
216 // generation may be larger than that in, say, a contiguous young
217 // generation.
218 // Ideally, in the calculation below, we'd compute the dilatation
219 // factor as: MinChunkSize/(promoting_gen's min object size)
220 // Since we do not have such a general query interface for the
221 // promoting generation, we'll instead just use the minimum
222 // object size (which today is a header's worth of space);
223 // note that all arithmetic is in units of HeapWords.
224 assert(MinChunkSize >= oopDesc::header_size(), "just checking");
225 assert(_dilatation_factor >= 1.0, "from previous assert");
226 }
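To make the preceding comment concrete, a purely illustrative calculation with hypothetical values (the actual MinChunkSize and header size depend on the platform and are not asserted here):

// Illustrative arithmetic only (hypothetical values):
//   if MinChunkSize were 4 heap words and oopDesc::header_size() were 2,
//   then _dilatation_factor = 4.0 / 2.0 = 2.0, i.e. in the worst case an
//   object of minimum size in the promoting generation may occupy twice
//   as many words once promoted into the CMS generation.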
228 void ConcurrentMarkSweepGeneration::ref_processor_init() {
229 assert(collector() != NULL, "no collector");
230 collector()->ref_processor_init();
231 }
233 void CMSCollector::ref_processor_init() {
234 if (_ref_processor == NULL) {
235 // Allocate and initialize a reference processor
236 _ref_processor = ReferenceProcessor::create_ref_processor(
237 _span, // span
238 _cmsGen->refs_discovery_is_atomic(), // atomic_discovery
239 _cmsGen->refs_discovery_is_mt(), // mt_discovery
240 &_is_alive_closure,
241 ParallelGCThreads,
242 ParallelRefProcEnabled);
243 // Initialize the _ref_processor field of CMSGen
244 _cmsGen->set_ref_processor(_ref_processor);
246 // Allocate a dummy ref processor for perm gen.
247 ReferenceProcessor* rp2 = new ReferenceProcessor();
248 if (rp2 == NULL) {
249 vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
250 }
251 _permGen->set_ref_processor(rp2);
252 }
253 }
255 CMSAdaptiveSizePolicy* CMSCollector::size_policy() {
256 GenCollectedHeap* gch = GenCollectedHeap::heap();
257 assert(gch->kind() == CollectedHeap::GenCollectedHeap,
258 "Wrong type of heap");
259 CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
260 gch->gen_policy()->size_policy();
261 assert(sp->is_gc_cms_adaptive_size_policy(),
262 "Wrong type of size policy");
263 return sp;
264 }
266 CMSGCAdaptivePolicyCounters* CMSCollector::gc_adaptive_policy_counters() {
267 CMSGCAdaptivePolicyCounters* results =
268 (CMSGCAdaptivePolicyCounters*) collector_policy()->counters();
269 assert(
270 results->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
271 "Wrong gc policy counter kind");
272 return results;
273 }
276 void ConcurrentMarkSweepGeneration::initialize_performance_counters() {
278 const char* gen_name = "old";
280 // Generation Counters - generation 1, 1 subspace
281 _gen_counters = new GenerationCounters(gen_name, 1, 1, &_virtual_space);
283 _space_counters = new GSpaceCounters(gen_name, 0,
284 _virtual_space.reserved_size(),
285 this, _gen_counters);
286 }
288 CMSStats::CMSStats(ConcurrentMarkSweepGeneration* cms_gen, unsigned int alpha):
289 _cms_gen(cms_gen)
290 {
291 assert(alpha <= 100, "bad value");
292 _saved_alpha = alpha;
294 // Initialize the alphas to the bootstrap value of 100.
295 _gc0_alpha = _cms_alpha = 100;
297 _cms_begin_time.update();
298 _cms_end_time.update();
300 _gc0_duration = 0.0;
301 _gc0_period = 0.0;
302 _gc0_promoted = 0;
304 _cms_duration = 0.0;
305 _cms_period = 0.0;
306 _cms_allocated = 0;
308 _cms_used_at_gc0_begin = 0;
309 _cms_used_at_gc0_end = 0;
310 _allow_duty_cycle_reduction = false;
311 _valid_bits = 0;
312 _icms_duty_cycle = CMSIncrementalDutyCycle;
313 }
315 // If promotion failure handling is on, use the padded average size
316 // promoted per young generation collection (rather than the full
317 // young gen capacity) as the expected promotion.
318 double CMSStats::time_until_cms_gen_full() const {
319 size_t cms_free = _cms_gen->cmsSpace()->free();
320 GenCollectedHeap* gch = GenCollectedHeap::heap();
321 size_t expected_promotion = gch->get_gen(0)->capacity();
322 if (HandlePromotionFailure) {
323 expected_promotion = MIN2(
324 (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average(),
325 expected_promotion);
326 }
327 if (cms_free > expected_promotion) {
328 // Start a cms collection if there isn't enough space to promote
329 // for the next minor collection. Use the padded average as
330 // a safety factor.
331 cms_free -= expected_promotion;
333 // Adjust by the safety factor.
334 double cms_free_dbl = (double)cms_free;
335 cms_free_dbl = cms_free_dbl * (100.0 - CMSIncrementalSafetyFactor) / 100.0;
337 if (PrintGCDetails && Verbose) {
338 gclog_or_tty->print_cr("CMSStats::time_until_cms_gen_full: cms_free "
339 SIZE_FORMAT " expected_promotion " SIZE_FORMAT,
340 cms_free, expected_promotion);
341 gclog_or_tty->print_cr(" cms_free_dbl %f cms_consumption_rate %f",
342 cms_free_dbl, cms_consumption_rate() + 1.0);
343 }
344 // Add 1 in case the consumption rate goes to zero.
345 return cms_free_dbl / (cms_consumption_rate() + 1.0);
346 }
347 return 0.0;
348 }
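A worked example with hypothetical numbers, treating free space and the expected promotion in bytes and the consumption rate in bytes per second, as in the code above:

// Illustrative arithmetic only (hypothetical values):
//   cms_free = 1000, expected_promotion = 200, CMSIncrementalSafetyFactor = 10
//     cms_free     = 1000 - 200 = 800
//     cms_free_dbl = 800 * (100 - 10) / 100 = 720
//   with cms_consumption_rate() = 99:
//     time_until_cms_gen_full = 720 / (99 + 1) = 7.2 seconds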
350 // Compare the duration of the cms collection to the
351 // time remaining before the cms generation is full.
352 // Note that the time from the start of the cms collection
353 // to the start of the cms sweep (less than the total
354 // duration of the cms collection) could be used instead.
355 // This has been tried, and some applications experienced
356 // promotion failures early in execution, possibly because
357 // the averages were not yet accurate enough at that
358 // point in the run.
359 double CMSStats::time_until_cms_start() const {
360 // We add "gc0_period" to the "work" calculation
361 // below because this query is done (mostly) at the
362 // end of a scavenge, so we need to conservatively
363 // account for that much possible delay
364 // in the query so as to avoid concurrent mode failures
365 // due to starting the collection just a wee bit too
366 // late.
367 double work = cms_duration() + gc0_period();
368 double deadline = time_until_cms_gen_full();
369 if (work > deadline) {
370 if (Verbose && PrintGCDetails) {
371 gclog_or_tty->print(
372 " CMSCollector: collect because of anticipated promotion "
373 "before full %3.7f + %3.7f > %3.7f ", cms_duration(),
374 gc0_period(), time_until_cms_gen_full());
375 }
376 return 0.0;
377 }
378 return work - deadline;
379 }
381 // Return a duty cycle based on old_duty_cycle and new_duty_cycle, limiting the
382 // amount of change to prevent wild oscillation.
383 unsigned int CMSStats::icms_damped_duty_cycle(unsigned int old_duty_cycle,
384 unsigned int new_duty_cycle) {
385 assert(old_duty_cycle <= 100, "bad input value");
386 assert(new_duty_cycle <= 100, "bad input value");
388 // Note: use subtraction with caution since it may underflow (values are
389 // unsigned). Addition is safe since we're in the range 0-100.
390 unsigned int damped_duty_cycle = new_duty_cycle;
391 if (new_duty_cycle < old_duty_cycle) {
392 const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 5U);
393 if (new_duty_cycle + largest_delta < old_duty_cycle) {
394 damped_duty_cycle = old_duty_cycle - largest_delta;
395 }
396 } else if (new_duty_cycle > old_duty_cycle) {
397 const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 15U);
398 if (new_duty_cycle > old_duty_cycle + largest_delta) {
399 damped_duty_cycle = MIN2(old_duty_cycle + largest_delta, 100U);
400 }
401 }
402 assert(damped_duty_cycle <= 100, "invalid duty cycle computed");
404 if (CMSTraceIncrementalPacing) {
405 gclog_or_tty->print(" [icms_damped_duty_cycle(%d,%d) = %d] ",
406 old_duty_cycle, new_duty_cycle, damped_duty_cycle);
407 }
408 return damped_duty_cycle;
409 }
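Some sample inputs and the damped values the code above produces (straightforward arithmetic, shown here only for illustration):

// Illustrative arithmetic only:
//   old = 80, new = 20 (sharp drop):  largest_delta = MAX2(80/4, 5)  = 20;
//                                     20 + 20 < 80, so damped = 80 - 20 = 60
//   old = 20, new = 90 (sharp rise):  largest_delta = MAX2(20/4, 15) = 15;
//                                     90 > 20 + 15, so damped = MIN2(35, 100) = 35
//   old = 40, new = 45 (small move):  45 <= 40 + 15, so the new value 45 is kept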
411 unsigned int CMSStats::icms_update_duty_cycle_impl() {
412 assert(CMSIncrementalPacing && valid(),
413 "should be handled in icms_update_duty_cycle()");
415 double cms_time_so_far = cms_timer().seconds();
416 double scaled_duration = cms_duration_per_mb() * _cms_used_at_gc0_end / M;
417 double scaled_duration_remaining = fabsd(scaled_duration - cms_time_so_far);
419 // Avoid division by 0.
420 double time_until_full = MAX2(time_until_cms_gen_full(), 0.01);
421 double duty_cycle_dbl = 100.0 * scaled_duration_remaining / time_until_full;
423 unsigned int new_duty_cycle = MIN2((unsigned int)duty_cycle_dbl, 100U);
424 if (new_duty_cycle > _icms_duty_cycle) {
425 // Avoid very small duty cycles (1 or 2); 0 is allowed.
426 if (new_duty_cycle > 2) {
427 _icms_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle,
428 new_duty_cycle);
429 }
430 } else if (_allow_duty_cycle_reduction) {
431 // The duty cycle is reduced only once per cms cycle (see record_cms_end()).
432 new_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle, new_duty_cycle);
433 // Respect the minimum duty cycle.
434 unsigned int min_duty_cycle = (unsigned int)CMSIncrementalDutyCycleMin;
435 _icms_duty_cycle = MAX2(new_duty_cycle, min_duty_cycle);
436 }
438 if (PrintGCDetails || CMSTraceIncrementalPacing) {
439 gclog_or_tty->print(" icms_dc=%d ", _icms_duty_cycle);
440 }
442 _allow_duty_cycle_reduction = false;
443 return _icms_duty_cycle;
444 }
446 #ifndef PRODUCT
447 void CMSStats::print_on(outputStream *st) const {
448 st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
449 st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
450 gc0_duration(), gc0_period(), gc0_promoted());
451 st->print(",cms_dur=%g,cms_dur_per_mb=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
452 cms_duration(), cms_duration_per_mb(),
453 cms_period(), cms_allocated());
454 st->print(",cms_since_beg=%g,cms_since_end=%g",
455 cms_time_since_begin(), cms_time_since_end());
456 st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
457 _cms_used_at_gc0_begin, _cms_used_at_gc0_end);
458 if (CMSIncrementalMode) {
459 st->print(",dc=%d", icms_duty_cycle());
460 }
462 if (valid()) {
463 st->print(",promo_rate=%g,cms_alloc_rate=%g",
464 promotion_rate(), cms_allocation_rate());
465 st->print(",cms_consumption_rate=%g,time_until_full=%g",
466 cms_consumption_rate(), time_until_cms_gen_full());
467 }
468 st->print(" ");
469 }
470 #endif // #ifndef PRODUCT
472 CMSCollector::CollectorState CMSCollector::_collectorState =
473 CMSCollector::Idling;
474 bool CMSCollector::_foregroundGCIsActive = false;
475 bool CMSCollector::_foregroundGCShouldWait = false;
477 CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
478 ConcurrentMarkSweepGeneration* permGen,
479 CardTableRS* ct,
480 ConcurrentMarkSweepPolicy* cp):
481 _cmsGen(cmsGen),
482 _permGen(permGen),
483 _ct(ct),
484 _ref_processor(NULL), // will be set later
485 _conc_workers(NULL), // may be set later
486 _abort_preclean(false),
487 _start_sampling(false),
488 _between_prologue_and_epilogue(false),
489 _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
490 _perm_gen_verify_bit_map(0, -1 /* no mutex */, "No_lock"),
491 _modUnionTable((CardTableModRefBS::card_shift - LogHeapWordSize),
492 -1 /* lock-free */, "No_lock" /* dummy */),
493 _modUnionClosure(&_modUnionTable),
494 _modUnionClosurePar(&_modUnionTable),
495 _is_alive_closure(&_markBitMap),
496 _restart_addr(NULL),
497 _overflow_list(NULL),
498 _preserved_oop_stack(NULL),
499 _preserved_mark_stack(NULL),
500 _stats(cmsGen),
501 _eden_chunk_array(NULL), // may be set in ctor body
502 _eden_chunk_capacity(0), // -- ditto --
503 _eden_chunk_index(0), // -- ditto --
504 _survivor_plab_array(NULL), // -- ditto --
505 _survivor_chunk_array(NULL), // -- ditto --
506 _survivor_chunk_capacity(0), // -- ditto --
507 _survivor_chunk_index(0), // -- ditto --
508 _ser_pmc_preclean_ovflw(0),
509 _ser_pmc_remark_ovflw(0),
510 _par_pmc_remark_ovflw(0),
511 _ser_kac_ovflw(0),
512 _par_kac_ovflw(0),
513 #ifndef PRODUCT
514 _num_par_pushes(0),
515 #endif
516 _collection_count_start(0),
517 _verifying(false),
518 _icms_start_limit(NULL),
519 _icms_stop_limit(NULL),
520 _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
521 _completed_initialization(false),
522 _collector_policy(cp),
523 _unload_classes(false),
524 _unloaded_classes_last_cycle(false),
525 _sweep_estimate(CMS_SweepWeight, CMS_SweepPadding)
526 {
527 if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
528 ExplicitGCInvokesConcurrent = true;
529 }
530 // Now expand the span and allocate the collection support structures
531 // (MUT, marking bit map etc.) to cover both generations subject to
532 // collection.
534 // First check that _permGen is adjacent to _cmsGen and above it.
535 assert( _cmsGen->reserved().word_size() > 0
536 && _permGen->reserved().word_size() > 0,
537 "generations should not be of zero size");
538 assert(_cmsGen->reserved().intersection(_permGen->reserved()).is_empty(),
539 "_cmsGen and _permGen should not overlap");
540 assert(_cmsGen->reserved().end() == _permGen->reserved().start(),
541 "_cmsGen->end() different from _permGen->start()");
543 // For use by dirty card to oop closures.
544 _cmsGen->cmsSpace()->set_collector(this);
545 _permGen->cmsSpace()->set_collector(this);
547 // Adjust my span to cover old (cms) gen and perm gen
548 _span = _cmsGen->reserved()._union(_permGen->reserved());
549 // Initialize the span of is_alive_closure
550 _is_alive_closure.set_span(_span);
552 // Allocate MUT and marking bit map
553 {
554 MutexLockerEx x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag);
555 if (!_markBitMap.allocate(_span)) {
556 warning("Failed to allocate CMS Bit Map");
557 return;
558 }
559 assert(_markBitMap.covers(_span), "_markBitMap inconsistency?");
560 }
561 {
562 _modUnionTable.allocate(_span);
563 assert(_modUnionTable.covers(_span), "_modUnionTable inconsistency?");
564 }
566 if (!_markStack.allocate(CMSMarkStackSize)) {
567 warning("Failed to allocate CMS Marking Stack");
568 return;
569 }
570 if (!_revisitStack.allocate(CMSRevisitStackSize)) {
571 warning("Failed to allocate CMS Revisit Stack");
572 return;
573 }
575 // Support for multi-threaded concurrent phases
576 if (ParallelGCThreads > 0 && CMSConcurrentMTEnabled) {
577 if (FLAG_IS_DEFAULT(ParallelCMSThreads)) {
578 // just for now
579 FLAG_SET_DEFAULT(ParallelCMSThreads, (ParallelGCThreads + 3)/4);
580 }
581 if (ParallelCMSThreads > 1) {
582 _conc_workers = new YieldingFlexibleWorkGang("Parallel CMS Threads",
583 ParallelCMSThreads, true);
584 if (_conc_workers == NULL) {
585 warning("GC/CMS: _conc_workers allocation failure: "
586 "forcing -CMSConcurrentMTEnabled");
587 CMSConcurrentMTEnabled = false;
588 }
589 } else {
590 CMSConcurrentMTEnabled = false;
591 }
592 }
593 if (!CMSConcurrentMTEnabled) {
594 ParallelCMSThreads = 0;
595 } else {
596 // Turn off CMSCleanOnEnter optimization temporarily for
597 // the MT case where it's not fixed yet; see 6178663.
598 CMSCleanOnEnter = false;
599 }
600 assert((_conc_workers != NULL) == (ParallelCMSThreads > 1),
601 "Inconsistency");
603 // Parallel task queues; these are shared for the
604 // concurrent and stop-world phases of CMS, but
605 // are not shared with parallel scavenge (ParNew).
606 {
607 uint i;
608 uint num_queues = (uint) MAX2(ParallelGCThreads, ParallelCMSThreads);
610 if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled
611 || ParallelRefProcEnabled)
612 && num_queues > 0) {
613 _task_queues = new OopTaskQueueSet(num_queues);
614 if (_task_queues == NULL) {
615 warning("task_queues allocation failure.");
616 return;
617 }
618 _hash_seed = NEW_C_HEAP_ARRAY(int, num_queues);
619 if (_hash_seed == NULL) {
620 warning("_hash_seed array allocation failure");
621 return;
622 }
624 // XXX use a global constant instead of 64!
625 typedef struct OopTaskQueuePadded {
626 OopTaskQueue work_queue;
627 char pad[64 - sizeof(OopTaskQueue)]; // prevent false sharing
628 } OopTaskQueuePadded;
630 for (i = 0; i < num_queues; i++) {
631 OopTaskQueuePadded *q_padded = new OopTaskQueuePadded();
632 if (q_padded == NULL) {
633 warning("work_queue allocation failure.");
634 return;
635 }
636 _task_queues->register_queue(i, &q_padded->work_queue);
637 }
638 for (i = 0; i < num_queues; i++) {
639 _task_queues->queue(i)->initialize();
640 _hash_seed[i] = 17; // copied from ParNew
641 }
642 }
643 }
645 // "initiatingOccupancy" is the occupancy ratio at which we trigger
646 // a new collection cycle. Unless explicitly specified via
647 // CMSInitiatingOccupancyFraction, it is calculated as follows:
648 //   Let "f" be MinHeapFreeRatio; then
649 //
650 //    initiatingOccupancy = 100-f +
651 //                          f * (CMSTriggerRatio/100)
652 // That is, if we assume the heap is at its desired maximum occupancy at the
653 // end of a collection, we let CMSTriggerRatio percent of the (purported)
654 // free space be allocated before initiating a new collection cycle.
655 if (CMSInitiatingOccupancyFraction > 0) {
656 _initiatingOccupancy = (double)CMSInitiatingOccupancyFraction / 100.0;
657 } else {
658 _initiatingOccupancy = ((100 - MinHeapFreeRatio) +
659 (double)(CMSTriggerRatio *
660 MinHeapFreeRatio) / 100.0)
661 / 100.0;
662 }
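A worked instance of the formula above (for example, with MinHeapFreeRatio = 40 and CMSTriggerRatio = 80):

// Illustrative arithmetic only:
//   _initiatingOccupancy = ((100 - 40) + (80 * 40) / 100.0) / 100.0
//                        = (60 + 32) / 100.0
//                        = 0.92, i.e. a new cycle is initiated at roughly
//                          92% occupancy of the CMS generation.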
663 // Clip CMSBootstrapOccupancy between 0 and 100.
664 _bootstrap_occupancy = ((double)MIN2((intx)100, MAX2((intx)0, CMSBootstrapOccupancy)))
665 /(double)100;
667 _full_gcs_since_conc_gc = 0;
669 // Now tell CMS generations the identity of their collector
670 ConcurrentMarkSweepGeneration::set_collector(this);
672 // Create & start a CMS thread for this CMS collector
673 _cmsThread = ConcurrentMarkSweepThread::start(this);
674 assert(cmsThread() != NULL, "CMS Thread should have been created");
675 assert(cmsThread()->collector() == this,
676 "CMS Thread should refer to this gen");
677 assert(CGC_lock != NULL, "Where's the CGC_lock?");
679 // Support for parallelizing young gen rescan
680 GenCollectedHeap* gch = GenCollectedHeap::heap();
681 _young_gen = gch->prev_gen(_cmsGen);
682 if (gch->supports_inline_contig_alloc()) {
683 _top_addr = gch->top_addr();
684 _end_addr = gch->end_addr();
685 assert(_young_gen != NULL, "no _young_gen");
686 _eden_chunk_index = 0;
687 _eden_chunk_capacity = (_young_gen->max_capacity()+CMSSamplingGrain)/CMSSamplingGrain;
688 _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity);
689 if (_eden_chunk_array == NULL) {
690 _eden_chunk_capacity = 0;
691 warning("GC/CMS: _eden_chunk_array allocation failure");
692 }
693 }
694 assert(_eden_chunk_array != NULL || _eden_chunk_capacity == 0, "Error");
696 // Support for parallelizing survivor space rescan
697 if (CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) {
698 size_t max_plab_samples = MaxNewSize/((SurvivorRatio+2)*MinTLABSize);
699 _survivor_plab_array = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads);
700 _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, 2*max_plab_samples);
701 _cursor = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads);
702 if (_survivor_plab_array == NULL || _survivor_chunk_array == NULL
703 || _cursor == NULL) {
704 warning("Failed to allocate survivor plab/chunk array");
705 if (_survivor_plab_array != NULL) {
706 FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array);
707 _survivor_plab_array = NULL;
708 }
709 if (_survivor_chunk_array != NULL) {
710 FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array);
711 _survivor_chunk_array = NULL;
712 }
713 if (_cursor != NULL) {
714 FREE_C_HEAP_ARRAY(size_t, _cursor);
715 _cursor = NULL;
716 }
717 } else {
718 _survivor_chunk_capacity = 2*max_plab_samples;
719 for (uint i = 0; i < ParallelGCThreads; i++) {
720 HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples);
721 if (vec == NULL) {
722 warning("Failed to allocate survivor plab array");
723 for (int j = i; j > 0; j--) {
724 FREE_C_HEAP_ARRAY(HeapWord*, _survivor_plab_array[j-1].array());
725 }
726 FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array);
727 FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array);
728 _survivor_plab_array = NULL;
729 _survivor_chunk_array = NULL;
730 _survivor_chunk_capacity = 0;
731 break;
732 } else {
733 ChunkArray* cur =
734 ::new (&_survivor_plab_array[i]) ChunkArray(vec,
735 max_plab_samples);
736 assert(cur->end() == 0, "Should be 0");
737 assert(cur->array() == vec, "Should be vec");
738 assert(cur->capacity() == max_plab_samples, "Error");
739 }
740 }
741 }
742 }
743 assert( ( _survivor_plab_array != NULL
744 && _survivor_chunk_array != NULL)
745 || ( _survivor_chunk_capacity == 0
746 && _survivor_chunk_index == 0),
747 "Error");
749 // Choose what strong roots should be scanned depending on verification options
750 // and perm gen collection mode.
751 if (!CMSClassUnloadingEnabled) {
752 // If class unloading is disabled we want to include all classes into the root set.
753 add_root_scanning_option(SharedHeap::SO_AllClasses);
754 } else {
755 add_root_scanning_option(SharedHeap::SO_SystemClasses);
756 }
758 NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
759 _gc_counters = new CollectorCounters("CMS", 1);
760 _completed_initialization = true;
761 _sweep_timer.start(); // start of time
762 }
764 const char* ConcurrentMarkSweepGeneration::name() const {
765 return "concurrent mark-sweep generation";
766 }
767 void ConcurrentMarkSweepGeneration::update_counters() {
768 if (UsePerfData) {
769 _space_counters->update_all();
770 _gen_counters->update_all();
771 }
772 }
774 // This is an optimized version of update_counters(); it takes the
775 // used value as a parameter rather than computing it.
776 //
777 void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
778 if (UsePerfData) {
779 _space_counters->update_used(used);
780 _space_counters->update_capacity();
781 _gen_counters->update_all();
782 }
783 }
785 void ConcurrentMarkSweepGeneration::print() const {
786 Generation::print();
787 cmsSpace()->print();
788 }
790 #ifndef PRODUCT
791 void ConcurrentMarkSweepGeneration::print_statistics() {
792 cmsSpace()->printFLCensus(0);
793 }
794 #endif
796 void ConcurrentMarkSweepGeneration::printOccupancy(const char *s) {
797 GenCollectedHeap* gch = GenCollectedHeap::heap();
798 if (PrintGCDetails) {
799 if (Verbose) {
800 gclog_or_tty->print(" [%d %s-%s: "SIZE_FORMAT"("SIZE_FORMAT")]",
801 level(), short_name(), s, used(), capacity());
802 } else {
803 gclog_or_tty->print(" [%d %s-%s: "SIZE_FORMAT"K("SIZE_FORMAT"K)]",
804 level(), short_name(), s, used() / K, capacity() / K);
805 }
806 }
807 if (Verbose) {
808 gclog_or_tty->print(" "SIZE_FORMAT"("SIZE_FORMAT")",
809 gch->used(), gch->capacity());
810 } else {
811 gclog_or_tty->print(" "SIZE_FORMAT"K("SIZE_FORMAT"K)",
812 gch->used() / K, gch->capacity() / K);
813 }
814 }
816 size_t
817 ConcurrentMarkSweepGeneration::contiguous_available() const {
818 // dld proposes an improvement in precision here. If the committed
819 // part of the space ends in a free block we should add that to
820 // uncommitted size in the calculation below. Will make this
821 // change later, staying with the approximation below for the
822 // time being. -- ysr.
823 return MAX2(_virtual_space.uncommitted_size(), unsafe_max_alloc_nogc());
824 }
826 size_t
827 ConcurrentMarkSweepGeneration::unsafe_max_alloc_nogc() const {
828 return _cmsSpace->max_alloc_in_words() * HeapWordSize;
829 }
831 size_t ConcurrentMarkSweepGeneration::max_available() const {
832 return free() + _virtual_space.uncommitted_size();
833 }
835 bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(
836 size_t max_promotion_in_bytes,
837 bool younger_handles_promotion_failure) const {
839 // This is the most conservative test. Full promotion is
840 // guaranteed if this is used. The multiplicative factor is to
841 // account for the worst case "dilatation".
842 double adjusted_max_promo_bytes = _dilatation_factor * max_promotion_in_bytes;
843 if (adjusted_max_promo_bytes > (double)max_uintx) { // larger than size_t
844 adjusted_max_promo_bytes = (double)max_uintx;
845 }
846 bool result = (max_contiguous_available() >= (size_t)adjusted_max_promo_bytes);
848 if (younger_handles_promotion_failure && !result) {
849 // Full promotion is not guaranteed because fragmentation
850 // of the cms generation can prevent the full promotion.
851 result = (max_available() >= (size_t)adjusted_max_promo_bytes);
853 if (!result) {
854 // With promotion failure handling the test for the ability
855 // to support the promotion does not have to be guaranteed.
856 // Use an average of the amount promoted.
857 result = max_available() >= (size_t)
858 gc_stats()->avg_promoted()->padded_average();
859 if (PrintGC && Verbose && result) {
860 gclog_or_tty->print_cr(
861 "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe"
862 " max_available: " SIZE_FORMAT
863 " avg_promoted: " SIZE_FORMAT,
864 max_available(), (size_t)
865 gc_stats()->avg_promoted()->padded_average());
866 }
867 } else {
868 if (PrintGC && Verbose) {
869 gclog_or_tty->print_cr(
870 "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe"
871 " max_available: " SIZE_FORMAT
872 " adj_max_promo_bytes: " SIZE_FORMAT,
873 max_available(), (size_t)adjusted_max_promo_bytes);
874 }
875 }
876 } else {
877 if (PrintGC && Verbose) {
878 gclog_or_tty->print_cr(
879 "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe"
880 " contiguous_available: " SIZE_FORMAT
881 " adj_max_promo_bytes: " SIZE_FORMAT,
882 max_contiguous_available(), (size_t)adjusted_max_promo_bytes);
883 }
884 }
885 return result;
886 }
888 CompactibleSpace*
889 ConcurrentMarkSweepGeneration::first_compaction_space() const {
890 return _cmsSpace;
891 }
893 void ConcurrentMarkSweepGeneration::reset_after_compaction() {
894 // Clear the promotion information. These pointers can be adjusted
895 // along with all the other pointers into the heap, but
896 // compaction is expected to be a rare event with
897 // a heap using cms, so don't do it without seeing the need.
898 if (ParallelGCThreads > 0) {
899 for (uint i = 0; i < ParallelGCThreads; i++) {
900 _par_gc_thread_states[i]->promo.reset();
901 }
902 }
903 }
905 void ConcurrentMarkSweepGeneration::space_iterate(SpaceClosure* blk, bool usedOnly) {
906 blk->do_space(_cmsSpace);
907 }
909 void ConcurrentMarkSweepGeneration::compute_new_size() {
910 assert_locked_or_safepoint(Heap_lock);
912 // If incremental collection failed, we just want to expand
913 // to the limit.
914 if (incremental_collection_failed()) {
915 clear_incremental_collection_failed();
916 grow_to_reserved();
917 return;
918 }
920 size_t expand_bytes = 0;
921 double free_percentage = ((double) free()) / capacity();
922 double desired_free_percentage = (double) MinHeapFreeRatio / 100;
923 double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;
925 // compute expansion delta needed for reaching desired free percentage
926 if (free_percentage < desired_free_percentage) {
927 size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
928 assert(desired_capacity >= capacity(), "invalid expansion size");
929 expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
930 }
931 if (expand_bytes > 0) {
932 if (PrintGCDetails && Verbose) {
933 size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
934 gclog_or_tty->print_cr("\nFrom compute_new_size: ");
935 gclog_or_tty->print_cr(" Free fraction %f", free_percentage);
936 gclog_or_tty->print_cr(" Desired free fraction %f",
937 desired_free_percentage);
938 gclog_or_tty->print_cr(" Maximum free fraction %f",
939 maximum_free_percentage);
940 gclog_or_tty->print_cr(" Capacity "SIZE_FORMAT, capacity()/1000);
941 gclog_or_tty->print_cr(" Desired capacity "SIZE_FORMAT,
942 desired_capacity/1000);
943 int prev_level = level() - 1;
944 if (prev_level >= 0) {
945 size_t prev_size = 0;
946 GenCollectedHeap* gch = GenCollectedHeap::heap();
947 Generation* prev_gen = gch->_gens[prev_level];
948 prev_size = prev_gen->capacity();
949 gclog_or_tty->print_cr(" Younger gen size "SIZE_FORMAT,
950 prev_size/1000);
951 }
952 gclog_or_tty->print_cr(" unsafe_max_alloc_nogc "SIZE_FORMAT,
953 unsafe_max_alloc_nogc()/1000);
954 gclog_or_tty->print_cr(" contiguous available "SIZE_FORMAT,
955 contiguous_available()/1000);
956 gclog_or_tty->print_cr(" Expand by "SIZE_FORMAT" (bytes)",
957 expand_bytes);
958 }
959 // safe if expansion fails
960 expand(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
961 if (PrintGCDetails && Verbose) {
962 gclog_or_tty->print_cr(" Expanded free fraction %f",
963 ((double) free()) / capacity());
964 }
965 }
966 }
968 Mutex* ConcurrentMarkSweepGeneration::freelistLock() const {
969 return cmsSpace()->freelistLock();
970 }
972 HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size,
973 bool tlab) {
974 CMSSynchronousYieldRequest yr;
975 MutexLockerEx x(freelistLock(),
976 Mutex::_no_safepoint_check_flag);
977 return have_lock_and_allocate(size, tlab);
978 }
980 HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
981 bool tlab) {
982 assert_lock_strong(freelistLock());
983 size_t adjustedSize = CompactibleFreeListSpace::adjustObjectSize(size);
984 HeapWord* res = cmsSpace()->allocate(adjustedSize);
985 // Allocate the object live (grey) if the background collector has
986 // started marking. This is necessary because the marker may
987 // have passed this address and consequently this object will
988 // not otherwise be greyed and would be incorrectly swept up.
989 // Note that if this object contains references, the writing
990 // of those references will dirty the card containing this object
991 // allowing the object to be blackened (and its references scanned)
992 // either during a preclean phase or at the final checkpoint.
993 if (res != NULL) {
994 collector()->direct_allocated(res, adjustedSize);
995 _direct_allocated_words += adjustedSize;
996 // allocation counters
997 NOT_PRODUCT(
998 _numObjectsAllocated++;
999 _numWordsAllocated += (int)adjustedSize;
1000 )
1001 }
1002 return res;
1003 }
1005 // In the case of direct allocation by mutators in a generation that
1006 // is being concurrently collected, the object must be allocated
1007 // live (grey) if the background collector has started marking.
1008 // This is necessary because the marker may
1009 // have passed this address and consequently this object will
1010 // not otherwise be greyed and would be incorrectly swept up.
1011 // Note that if this object contains references, the writing
1012 // of those references will dirty the card containing this object
1013 // allowing the object to be blackened (and its references scanned)
1014 // either during a preclean phase or at the final checkpoint.
1015 void CMSCollector::direct_allocated(HeapWord* start, size_t size) {
1016 assert(_markBitMap.covers(start, size), "Out of bounds");
1017 if (_collectorState >= Marking) {
1018 MutexLockerEx y(_markBitMap.lock(),
1019 Mutex::_no_safepoint_check_flag);
1020 // [see comments preceding SweepClosure::do_blk() below for details]
1021 // 1. need to mark the object as live so it isn't collected
1022 // 2. need to mark the 2nd bit to indicate the object may be uninitialized
1023 // 3. need to mark the end of the object so sweeper can skip over it
1024 // if it's uninitialized when the sweeper reaches it.
1025 _markBitMap.mark(start); // object is live
1026 _markBitMap.mark(start + 1); // object is potentially uninitialized?
1027 _markBitMap.mark(start + size - 1);
1028 // mark end of object
1029 }
1030 // check that oop looks uninitialized
1031 assert(oop(start)->klass() == NULL, "_klass should be NULL");
1032 }
1034 void CMSCollector::promoted(bool par, HeapWord* start,
1035 bool is_obj_array, size_t obj_size) {
1036 assert(_markBitMap.covers(start), "Out of bounds");
1037 // See comment in direct_allocated() about when objects should
1038 // be allocated live.
1039 if (_collectorState >= Marking) {
1040 // we already hold the marking bit map lock, taken in
1041 // the prologue
1042 if (par) {
1043 _markBitMap.par_mark(start);
1044 } else {
1045 _markBitMap.mark(start);
1046 }
1047 // We don't need to mark the object as uninitialized (as
1048 // in direct_allocated above) because this is being done with the
1049 // world stopped and the object will be initialized by the
1050 // time the sweeper gets to look at it.
1051 assert(SafepointSynchronize::is_at_safepoint(),
1052 "expect promotion only at safepoints");
1054 if (_collectorState < Sweeping) {
1055 // Mark the appropriate cards in the modUnionTable, so that
1056 // this object gets scanned before the sweep. If this is
1057 // not done, CMS generation references in the object might
1058 // not get marked.
1059 // For the case of arrays, which are otherwise precisely
1060 // marked, we need to dirty the entire array, not just its head.
1061 if (is_obj_array) {
1062 // The [par_]mark_range() method expects mr.end() below to
1063 // be aligned to the granularity of a bit's representation
1064 // in the heap. In the case of the MUT below, that's a
1065 // card size.
1066 MemRegion mr(start,
1067 (HeapWord*)round_to((intptr_t)(start + obj_size),
1068 CardTableModRefBS::card_size /* bytes */));
1069 if (par) {
1070 _modUnionTable.par_mark_range(mr);
1071 } else {
1072 _modUnionTable.mark_range(mr);
1073 }
1074 } else { // not an obj array; we can just mark the head
1075 if (par) {
1076 _modUnionTable.par_mark(start);
1077 } else {
1078 _modUnionTable.mark(start);
1079 }
1080 }
1081 }
1082 }
1083 }
1085 static inline size_t percent_of_space(Space* space, HeapWord* addr)
1086 {
1087 size_t delta = pointer_delta(addr, space->bottom());
1088 return (size_t)(delta * 100.0 / (space->capacity() / HeapWordSize));
1089 }
1091 void CMSCollector::icms_update_allocation_limits()
1092 {
1093 Generation* gen0 = GenCollectedHeap::heap()->get_gen(0);
1094 EdenSpace* eden = gen0->as_DefNewGeneration()->eden();
1096 const unsigned int duty_cycle = stats().icms_update_duty_cycle();
1097 if (CMSTraceIncrementalPacing) {
1098 stats().print();
1099 }
1101 assert(duty_cycle <= 100, "invalid duty cycle");
1102 if (duty_cycle != 0) {
1103 // The duty_cycle is a percentage between 0 and 100; convert to words and
1104 // then compute the offset from the endpoints of the space.
1105 size_t free_words = eden->free() / HeapWordSize;
1106 double free_words_dbl = (double)free_words;
1107 size_t duty_cycle_words = (size_t)(free_words_dbl * duty_cycle / 100.0);
1108 size_t offset_words = (free_words - duty_cycle_words) / 2;
1110 _icms_start_limit = eden->top() + offset_words;
1111 _icms_stop_limit = eden->end() - offset_words;
1113 // The limits may be adjusted (shifted to the right) by
1114 // CMSIncrementalOffset, to allow the application more mutator time after a
1115 // young gen gc (when all mutators were stopped) and before CMS starts and
1116 // takes away one or more cpus.
1117 if (CMSIncrementalOffset != 0) {
1118 double adjustment_dbl = free_words_dbl * CMSIncrementalOffset / 100.0;
1119 size_t adjustment = (size_t)adjustment_dbl;
1120 HeapWord* tmp_stop = _icms_stop_limit + adjustment;
1121 if (tmp_stop > _icms_stop_limit && tmp_stop < eden->end()) {
1122 _icms_start_limit += adjustment;
1123 _icms_stop_limit = tmp_stop;
1124 }
1125 }
1126 }
1127 if (duty_cycle == 0 || (_icms_start_limit == _icms_stop_limit)) {
1128 _icms_start_limit = _icms_stop_limit = eden->end();
1129 }
1131 // Install the new start limit.
1132 eden->set_soft_end(_icms_start_limit);
1134 if (CMSTraceIncrementalMode) {
1135 gclog_or_tty->print(" icms alloc limits: "
1136 PTR_FORMAT "," PTR_FORMAT
1137 " (" SIZE_FORMAT "%%," SIZE_FORMAT "%%) ",
1138 _icms_start_limit, _icms_stop_limit,
1139 percent_of_space(eden, _icms_start_limit),
1140 percent_of_space(eden, _icms_stop_limit));
1141 if (Verbose) {
1142 gclog_or_tty->print("eden: ");
1143 eden->print_on(gclog_or_tty);
1144 }
1145 }
1146 }
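A worked example of the limit placement with hypothetical numbers (the interpretation of the window follows the comments above and allocation_limit_reached() below):

// Illustrative arithmetic only (hypothetical values):
//   free_words = 1000, duty_cycle = 40
//     duty_cycle_words  = 1000 * 40 / 100  = 400
//     offset_words      = (1000 - 400) / 2 = 300
//     _icms_start_limit = eden->top() + 300,  _icms_stop_limit = eden->end() - 300
//   i.e. a 400-word window (40% of eden's free space), centered in the free
//   region, during which the incremental collector is allowed to run as
//   mutators allocate.
//   With CMSIncrementalOffset = 10, both limits shift right by
//   1000 * 10 / 100 = 100 words, provided the shifted stop limit stays
//   within eden.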
1148 // Any changes here should try to maintain the invariant
1149 // that if this method is called with _icms_start_limit
1150 // and _icms_stop_limit both NULL, then it should return NULL
1151 // and not notify the icms thread.
1152 HeapWord*
1153 CMSCollector::allocation_limit_reached(Space* space, HeapWord* top,
1154 size_t word_size)
1155 {
1156 // A start_limit equal to end() means the duty cycle is 0, so treat that as a
1157 // nop.
1158 if (CMSIncrementalMode && _icms_start_limit != space->end()) {
1159 if (top <= _icms_start_limit) {
1160 if (CMSTraceIncrementalMode) {
1161 space->print_on(gclog_or_tty);
1162 gclog_or_tty->stamp();
1163 gclog_or_tty->print_cr(" start limit top=" PTR_FORMAT
1164 ", new limit=" PTR_FORMAT
1165 " (" SIZE_FORMAT "%%)",
1166 top, _icms_stop_limit,
1167 percent_of_space(space, _icms_stop_limit));
1168 }
1169 ConcurrentMarkSweepThread::start_icms();
1170 assert(top < _icms_stop_limit, "Tautology");
1171 if (word_size < pointer_delta(_icms_stop_limit, top)) {
1172 return _icms_stop_limit;
1173 }
1175 // The allocation will cross both the _start and _stop limits, so do the
1176 // stop notification also and return end().
1177 if (CMSTraceIncrementalMode) {
1178 space->print_on(gclog_or_tty);
1179 gclog_or_tty->stamp();
1180 gclog_or_tty->print_cr(" +stop limit top=" PTR_FORMAT
1181 ", new limit=" PTR_FORMAT
1182 " (" SIZE_FORMAT "%%)",
1183 top, space->end(),
1184 percent_of_space(space, space->end()));
1185 }
1186 ConcurrentMarkSweepThread::stop_icms();
1187 return space->end();
1188 }
1190 if (top <= _icms_stop_limit) {
1191 if (CMSTraceIncrementalMode) {
1192 space->print_on(gclog_or_tty);
1193 gclog_or_tty->stamp();
1194 gclog_or_tty->print_cr(" stop limit top=" PTR_FORMAT
1195 ", new limit=" PTR_FORMAT
1196 " (" SIZE_FORMAT "%%)",
1197 top, space->end(),
1198 percent_of_space(space, space->end()));
1199 }
1200 ConcurrentMarkSweepThread::stop_icms();
1201 return space->end();
1202 }
1204 if (CMSTraceIncrementalMode) {
1205 space->print_on(gclog_or_tty);
1206 gclog_or_tty->stamp();
1207 gclog_or_tty->print_cr(" end limit top=" PTR_FORMAT
1208 ", new limit=" PTR_FORMAT,
1209 top, NULL);
1210 }
1211 }
1213 return NULL;
1214 }
1216 oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size, oop* ref) {
1217 assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1218 // allocate, copy and if necessary update promoinfo --
1219 // delegate to underlying space.
1220 assert_lock_strong(freelistLock());
1222 #ifndef PRODUCT
1223 if (Universe::heap()->promotion_should_fail()) {
1224 return NULL;
1225 }
1226 #endif // #ifndef PRODUCT
1228 oop res = _cmsSpace->promote(obj, obj_size, ref);
1229 if (res == NULL) {
1230 // expand and retry
1231 size_t s = _cmsSpace->expansionSpaceRequired(obj_size); // HeapWords
1232 expand(s*HeapWordSize, MinHeapDeltaBytes,
1233 CMSExpansionCause::_satisfy_promotion);
1234 // Since there's currently no next generation, we don't try to promote
1235 // into a more senior generation.
1236 assert(next_gen() == NULL, "assumption, based upon which no attempt "
1237 "is made to pass on a possibly failing "
1238 "promotion to next generation");
1239 res = _cmsSpace->promote(obj, obj_size, ref);
1240 }
1241 if (res != NULL) {
1242 // See comment in allocate() about when objects should
1243 // be allocated live.
1244 assert(obj->is_oop(), "Will dereference klass pointer below");
1245 collector()->promoted(false, // Not parallel
1246 (HeapWord*)res, obj->is_objArray(), obj_size);
1247 // promotion counters
1248 NOT_PRODUCT(
1249 _numObjectsPromoted++;
1250 _numWordsPromoted +=
1251 (int)(CompactibleFreeListSpace::adjustObjectSize(obj->size()));
1252 )
1253 }
1254 return res;
1255 }
1258 HeapWord*
1259 ConcurrentMarkSweepGeneration::allocation_limit_reached(Space* space,
1260 HeapWord* top,
1261 size_t word_sz)
1262 {
1263 return collector()->allocation_limit_reached(space, top, word_sz);
1264 }
1266 // Things to support parallel young-gen collection.
1267 oop
1268 ConcurrentMarkSweepGeneration::par_promote(int thread_num,
1269 oop old, markOop m,
1270 size_t word_sz) {
1271 #ifndef PRODUCT
1272 if (Universe::heap()->promotion_should_fail()) {
1273 return NULL;
1274 }
1275 #endif // #ifndef PRODUCT
1277 CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1278 PromotionInfo* promoInfo = &ps->promo;
1279 // if we are tracking promotions, then first ensure space for
1280 // promotion (including spooling space for saving header if necessary).
1281 // then allocate and copy, then track promoted info if needed.
1282 // When tracking (see PromotionInfo::track()), the mark word may
1283 // be displaced and in this case restoration of the mark word
1284 // occurs in the (oop_since_save_marks_)iterate phase.
1285 if (promoInfo->tracking() && !promoInfo->ensure_spooling_space()) {
1286 // Out of space for allocating spooling buffers;
1287 // try expanding and allocating spooling buffers.
1288 if (!expand_and_ensure_spooling_space(promoInfo)) {
1289 return NULL;
1290 }
1291 }
1292 assert(promoInfo->has_spooling_space(), "Control point invariant");
1293 HeapWord* obj_ptr = ps->lab.alloc(word_sz);
1294 if (obj_ptr == NULL) {
1295 obj_ptr = expand_and_par_lab_allocate(ps, word_sz);
1296 if (obj_ptr == NULL) {
1297 return NULL;
1298 }
1299 }
1300 oop obj = oop(obj_ptr);
1301 assert(obj->klass() == NULL, "Object should be uninitialized here.");
1302 // Otherwise, copy the object. Here we must be careful to insert the
1303 // klass pointer last, since this marks the block as an allocated object.
1304 HeapWord* old_ptr = (HeapWord*)old;
1305 if (word_sz > (size_t)oopDesc::header_size()) {
1306 Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(),
1307 obj_ptr + oopDesc::header_size(),
1308 word_sz - oopDesc::header_size());
1309 }
1310 // Restore the mark word copied above.
1311 obj->set_mark(m);
1312 // Now we can track the promoted object, if necessary. We take care
1313 // to delay the transition from uninitialized to full object
1314 // (i.e., insertion of the klass pointer) until after tracking, so that it
1315 // atomically becomes a promoted object.
1316 if (promoInfo->tracking()) {
1317 promoInfo->track((PromotedObject*)obj, old->klass());
1318 }
1319 // Finally, install the klass pointer.
1320 obj->set_klass(old->klass());
1322 assert(old->is_oop(), "Will dereference klass ptr below");
1323 collector()->promoted(true, // parallel
1324 obj_ptr, old->is_objArray(), word_sz);
1326 NOT_PRODUCT(
1327 Atomic::inc(&_numObjectsPromoted);
1328 Atomic::add((jint)CompactibleFreeListSpace::adjustObjectSize(obj->size()),
1329 &_numWordsPromoted);
1330 )
1332 return obj;
1333 }
1335 void
1336 ConcurrentMarkSweepGeneration::
1337 par_promote_alloc_undo(int thread_num,
1338 HeapWord* obj, size_t word_sz) {
1339 // CMS does not support promotion undo.
1340 ShouldNotReachHere();
1341 }
1343 void
1344 ConcurrentMarkSweepGeneration::
1345 par_promote_alloc_done(int thread_num) {
1346 CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1347 ps->lab.retire();
1348 #if CFLS_LAB_REFILL_STATS
1349 if (thread_num == 0) {
1350 _cmsSpace->print_par_alloc_stats();
1351 }
1352 #endif
1353 }
1355 void
1356 ConcurrentMarkSweepGeneration::
1357 par_oop_since_save_marks_iterate_done(int thread_num) {
1358 CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1359 ParScanWithoutBarrierClosure* dummy_cl = NULL;
1360 ps->promo.promoted_oops_iterate_nv(dummy_cl);
1361 }
1363 // XXXPERM
1364 bool ConcurrentMarkSweepGeneration::should_collect(bool full,
1365 size_t size,
1366 bool tlab)
1367 {
1368 // We allow a STW collection only if a full
1369 // collection was requested.
1370 return full || should_allocate(size, tlab); // FIX ME !!!
1371 // This and promotion failure handling are connected at the
1372 // hip and should be fixed by untying them.
1373 }
1375 bool CMSCollector::shouldConcurrentCollect() {
1376 if (_full_gc_requested) {
1377 assert(ExplicitGCInvokesConcurrent, "Unexpected state");
1378 if (Verbose && PrintGCDetails) {
1379 gclog_or_tty->print_cr("CMSCollector: collect because of explicit "
1380 " gc request");
1381 }
1382 return true;
1383 }
1385 // For debugging purposes, change the type of collection.
1386 // If the rotation is not on the concurrent collection
1387 // type, don't start a concurrent collection.
1388 NOT_PRODUCT(
1389 if (RotateCMSCollectionTypes &&
1390 (_cmsGen->debug_collection_type() !=
1391 ConcurrentMarkSweepGeneration::Concurrent_collection_type)) {
1392 assert(_cmsGen->debug_collection_type() !=
1393 ConcurrentMarkSweepGeneration::Unknown_collection_type,
1394 "Bad cms collection type");
1395 return false;
1396 }
1397 )
1399 FreelistLocker x(this);
1400 // ------------------------------------------------------------------
1401 // Print out lots of information which affects the initiation of
1402 // a collection.
1403 if (PrintCMSInitiationStatistics && stats().valid()) {
1404 gclog_or_tty->print("CMSCollector shouldConcurrentCollect: ");
1405 gclog_or_tty->stamp();
1406 gclog_or_tty->print_cr("");
1407 stats().print_on(gclog_or_tty);
1408 gclog_or_tty->print_cr("time_until_cms_gen_full %3.7f",
1409 stats().time_until_cms_gen_full());
1410 gclog_or_tty->print_cr("free="SIZE_FORMAT, _cmsGen->free());
1411 gclog_or_tty->print_cr("contiguous_available="SIZE_FORMAT,
1412 _cmsGen->contiguous_available());
1413 gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate());
1414 gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate());
1415 gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy());
1416 gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", initiatingOccupancy());
1417 }
1418 // ------------------------------------------------------------------
1420 // If the estimated time to complete a cms collection (cms_duration())
1421 // is less than the estimated time remaining until the cms generation
1422 // is full, start a collection.
1423 if (!UseCMSInitiatingOccupancyOnly) {
1424 if (stats().valid()) {
1425 if (stats().time_until_cms_start() == 0.0) {
1426 return true;
1427 }
1428 } else {
1429 // We want to conservatively collect somewhat early in order
1430 // to try and "bootstrap" our CMS/promotion statistics;
1431 // this branch will not fire after the first successful CMS
1432 // collection because the stats should then be valid.
1433 if (_cmsGen->occupancy() >= _bootstrap_occupancy) {
1434 if (Verbose && PrintGCDetails) {
1435 gclog_or_tty->print_cr(
1436 " CMSCollector: collect for bootstrapping statistics:"
1437 " occupancy = %f, boot occupancy = %f", _cmsGen->occupancy(),
1438 _bootstrap_occupancy);
1439 }
1440 return true;
1441 }
1442 }
1443 }
1445 // Otherwise, we start a collection cycle if either the perm gen or
1446 // old gen want a collection cycle started. Each may use
1447 // an appropriate criterion for making this decision.
1448 // XXX We need to make sure that the gen expansion
1449 // criterion dovetails well with this.
1450 if (_cmsGen->shouldConcurrentCollect(initiatingOccupancy())) {
1451 if (Verbose && PrintGCDetails) {
1452 gclog_or_tty->print_cr("CMS old gen initiated");
1453 }
1454 return true;
1455 }
1457 if (cms_should_unload_classes() &&
1458 _permGen->shouldConcurrentCollect(initiatingOccupancy())) {
1459 if (Verbose && PrintGCDetails) {
1460 gclog_or_tty->print_cr("CMS perm gen initiated");
1461 }
1462 return true;
1463 }
1465 return false;
1466 }
1468 // Clear _expansion_cause fields of constituent generations
1469 void CMSCollector::clear_expansion_cause() {
1470 _cmsGen->clear_expansion_cause();
1471 _permGen->clear_expansion_cause();
1472 }
1474 bool ConcurrentMarkSweepGeneration::shouldConcurrentCollect(
1475 double initiatingOccupancy) {
1476 // We should be conservative in starting a collection cycle. To
1477 // start too eagerly runs the risk of collecting too often in the
1478 // extreme. To collect too rarely falls back on full collections,
1479 // which works, even if not optimum in terms of concurrent work.
1480 // As a workaround for collecting too eagerly, use the flag
1481 // UseCMSInitiatingOccupancyOnly. This also has the advantage of
1482 // giving the user an easily understandable way of controlling the
1483 // collections.
1484 // We want to start a new collection cycle if any of the following
1485 // conditions hold:
1486 // . our current occupancy exceeds the initiating occupancy, or
1487 // . we recently needed to expand this generation and have not
1488 // collected since that expansion, or
1489 // . we are not using adaptive free lists and linear allocation is
1490 // going to fail, or
1491 // . (for old gen) incremental collection has already failed or
1492 // may fail in the near future, as we may not be able to absorb
1493 // promotions.
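// As a minimal usage sketch (the exact flag spellings below are assumed):
// the occupancy-only policy mentioned above is typically selected on the
// command line, e.g.
//   java -XX:+UseConcMarkSweepGC \
//        -XX:CMSInitiatingOccupancyFraction=70 \
//        -XX:+UseCMSInitiatingOccupancyOnly ...
// in which case only the occupancy test below applies and the remaining
// heuristics are skipped.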
1494 assert_lock_strong(freelistLock());
1496 if (occupancy() > initiatingOccupancy) {
1497 if (PrintGCDetails && Verbose) {
1498 gclog_or_tty->print(" %s: collect because of occupancy %f / %f ",
1499 short_name(), occupancy(), initiatingOccupancy);
1500 }
1501 return true;
1502 }
1503 if (UseCMSInitiatingOccupancyOnly) {
1504 return false;
1505 }
1506 if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
1507 if (PrintGCDetails && Verbose) {
1508 gclog_or_tty->print(" %s: collect because expanded for allocation ",
1509 short_name());
1510 }
1511 return true;
1512 }
1513 GenCollectedHeap* gch = GenCollectedHeap::heap();
1514 assert(gch->collector_policy()->is_two_generation_policy(),
1515 "You may want to check the correctness of the following");
1516 if (gch->incremental_collection_will_fail()) {
1517 if (PrintGCDetails && Verbose) {
1518 gclog_or_tty->print(" %s: collect because incremental collection will fail ",
1519 short_name());
1520 }
1521 return true;
1522 }
1523 if (!_cmsSpace->adaptive_freelists() &&
1524 _cmsSpace->linearAllocationWouldFail()) {
1525 if (PrintGCDetails && Verbose) {
1526 gclog_or_tty->print(" %s: collect because of linAB ",
1527 short_name());
1528 }
1529 return true;
1530 }
1531 return false;
1532 }
1534 void ConcurrentMarkSweepGeneration::collect(bool full,
1535 bool clear_all_soft_refs,
1536 size_t size,
1537 bool tlab)
1538 {
1539 collector()->collect(full, clear_all_soft_refs, size, tlab);
1540 }
1542 void CMSCollector::collect(bool full,
1543 bool clear_all_soft_refs,
1544 size_t size,
1545 bool tlab)
1546 {
1547 if (!UseCMSCollectionPassing && _collectorState > Idling) {
1548 // For debugging purposes skip the collection if the state
1549 // is not currently idle
1550 if (TraceCMSState) {
1551 gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " skipped full:%d CMS state %d",
1552 Thread::current(), full, _collectorState);
1553 }
1554 return;
1555 }
1557 // The following "if" branch is present for defensive reasons.
1558 // In the current uses of this interface, it can be replaced with:
1559 // assert(!GC_locker::is_active(), "Can't be called otherwise");
1560 // But I am not placing that assert here to allow future
1561 // generality in invoking this interface.
1562 if (GC_locker::is_active()) {
1563 // A consistency test for GC_locker
1564 assert(GC_locker::needs_gc(), "Should have been set already");
1565 // Skip this foreground collection, instead
1566 // expanding the heap if necessary.
1567 // Need the free list locks for the call to free() in compute_new_size()
1568 compute_new_size();
1569 return;
1570 }
1571 acquire_control_and_collect(full, clear_all_soft_refs);
1572 _full_gcs_since_conc_gc++;
1574 }
1576 void CMSCollector::request_full_gc(unsigned int full_gc_count) {
1577 GenCollectedHeap* gch = GenCollectedHeap::heap();
1578 unsigned int gc_count = gch->total_full_collections();
1579 if (gc_count == full_gc_count) {
1580 MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
1581 _full_gc_requested = true;
1582 CGC_lock->notify(); // nudge CMS thread
1583 }
1584 }
1587 // The foreground and background collectors need to coordinate in order
1588 // to make sure that they do not mutually interfere with CMS collections.
1589 // When a background collection is active,
1590 // the foreground collector may need to take over (preempt) and
1591 // synchronously complete an ongoing collection. Depending on the
1592 // frequency of the background collections and the heap usage
1593 // of the application, this preemption can be rare or frequent.
1594 // There are only certain
1595 // points in the background collection at which the "collection-baton"
1596 // can be passed to the foreground collector.
1597 //
1598 // The foreground collector will wait for the baton before
1599 // starting any part of the collection. The foreground collector
1600 // will only wait at one location.
1601 //
1602 // The background collector will yield the baton before starting a new
1603 // phase of the collection (e.g., before initial marking, marking from roots,
1604 // precleaning, final re-mark, sweep etc.) This is normally done at the head
1605 // of the loop which switches the phases. The background collector does some
1606 // of the phases (initial mark, final re-mark) with the world stopped.
1607 // Because of locking involved in stopping the world,
1608 // the foreground collector should not block waiting for the background
1609 // collector when it is doing a stop-the-world phase. The background
1610 // collector will yield the baton at an additional point just before
1611 // it enters a stop-the-world phase. Once the world is stopped, the
1612 // background collector checks the phase of the collection. If the
1613 // phase has not changed, it proceeds with the collection. If the
1614 // phase has changed, it skips that phase of the collection. See
1615 // the comments on the use of the Heap_lock in collect_in_background().
1616 //
1617 // Variables used in baton passing.
1618 // _foregroundGCIsActive - Set to true by the foreground collector when
1619 // it wants the baton. The foreground clears it when it has finished
1620 // the collection.
1621 // _foregroundGCShouldWait - Set to true by the background collector
1622 // when it is running. The foreground collector waits while
1623 // _foregroundGCShouldWait is true.
1624 // CGC_lock - monitor used to protect access to the above variables
1625 // and to notify the foreground and background collectors.
1626 // _collectorState - current state of the CMS collection.
1627 //
1628 // The foreground collector
1629 // acquires the CGC_lock
1630 // sets _foregroundGCIsActive
1631 // waits on the CGC_lock for _foregroundGCShouldWait to be false
1632 // various locks acquired in preparation for the collection
1633 // are released so as not to block the background collector
1634 // that is in the midst of a collection
1635 // proceeds with the collection
1636 // clears _foregroundGCIsActive
1637 // returns
1638 //
1639 // The background collector in a loop iterating on the phases of the
1640 // collection
1641 // acquires the CGC_lock
1642 // sets _foregroundGCShouldWait
1643 // if _foregroundGCIsActive is set
1644 // clears _foregroundGCShouldWait, notifies CGC_lock
1645 // waits on CGC_lock for _foregroundGCIsActive to become false
1646 // and exits the loop.
1647 // otherwise
1648 // proceed with that phase of the collection
1649 // if the phase is a stop-the-world phase,
1650 // yield the baton once more just before enqueueing
1651 // the stop-world CMS operation (executed by the VM thread).
1652 // returns after all phases of the collection are done
1653 //
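// A minimal sketch of the handshake described above, in terms of a single
// generic monitor (names below are illustrative; the real code uses
// CGC_lock, _foregroundGCIsActive, _foregroundGCShouldWait and the CMS
// token, and also releases the free-list and bit-map locks around the wait):
//
//   foreground (VM thread):
//     lock(monitor); fg_active = true; notify(monitor);
//     while (fg_should_wait) wait(monitor);
//     unlock(monitor);
//     ... perform the foreground collection ...
//     fg_active = false;
//
//   background (CMS thread), at each phase boundary:
//     lock(monitor); fg_should_wait = true;
//     if (fg_active) {
//       fg_should_wait = false; notify(monitor);
//       while (fg_active) wait(monitor);  // cede this cycle to the VM thread
//     }
//     unlock(monitor);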
1655 void CMSCollector::acquire_control_and_collect(bool full,
1656 bool clear_all_soft_refs) {
1657 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
1658 assert(!Thread::current()->is_ConcurrentGC_thread(),
1659 "shouldn't try to acquire control from self!");
1661 // Start the protocol for acquiring control of the
1662 // collection from the background collector (aka CMS thread).
1663 assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1664 "VM thread should have CMS token");
1665 // Remember the possibly interrupted state of an ongoing
1666 // concurrent collection
1667 CollectorState first_state = _collectorState;
1669 // Signal to a possibly ongoing concurrent collection that
1670 // we want to do a foreground collection.
1671 _foregroundGCIsActive = true;
1673 // Disable incremental mode during a foreground collection.
1674 ICMSDisabler icms_disabler;
1676 // release locks and wait for a notify from the background collector
1677 // releasing the locks is only necessary for phases which
1678 // yield to improve the granularity of the collection.
1679 assert_lock_strong(bitMapLock());
1680 // We need to lock the Free list lock for the space that we are
1681 // currently collecting.
1682 assert(haveFreelistLocks(), "Must be holding free list locks");
1683 bitMapLock()->unlock();
1684 releaseFreelistLocks();
1685 {
1686 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1687 if (_foregroundGCShouldWait) {
1688 // We are going to be waiting for action from the CMS thread;
1689 // it had better not be gone (for instance at shutdown)!
1690 assert(ConcurrentMarkSweepThread::cmst() != NULL,
1691 "CMS thread must be running");
1692 // Wait here until the background collector gives us the go-ahead
1693 ConcurrentMarkSweepThread::clear_CMS_flag(
1694 ConcurrentMarkSweepThread::CMS_vm_has_token); // release token
1695 // Get a possibly blocked CMS thread going:
1696 // Note that we set _foregroundGCIsActive true above,
1697 // without protection of the CGC_lock.
1698 CGC_lock->notify();
1699 assert(!ConcurrentMarkSweepThread::vm_thread_wants_cms_token(),
1700 "Possible deadlock");
1701 while (_foregroundGCShouldWait) {
1702 // wait for notification
1703 CGC_lock->wait(Mutex::_no_safepoint_check_flag);
1704 // Possibility of delay/starvation here, since CMS token does
1705 // not know to give priority to VM thread? Actually, I think
1706 // there wouldn't be any delay/starvation, but the proof of
1707 // that "fact" (?) appears non-trivial. XXX 20011219YSR
1708 }
1709 ConcurrentMarkSweepThread::set_CMS_flag(
1710 ConcurrentMarkSweepThread::CMS_vm_has_token);
1711 }
1712 }
1713 // The CMS_token is already held. Get back the other locks.
1714 assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1715 "VM thread should have CMS token");
1716 getFreelistLocks();
1717 bitMapLock()->lock_without_safepoint_check();
1718 if (TraceCMSState) {
1719 gclog_or_tty->print_cr("CMS foreground collector has asked for control "
1720 INTPTR_FORMAT " with first state %d", Thread::current(), first_state);
1721 gclog_or_tty->print_cr(" gets control with state %d", _collectorState);
1722 }
1724 // Check if we need to do a compaction, or if not, whether
1725 // we need to start the mark-sweep from scratch.
1726 bool should_compact = false;
1727 bool should_start_over = false;
1728 decide_foreground_collection_type(clear_all_soft_refs,
1729 &should_compact, &should_start_over);
1731 NOT_PRODUCT(
1732 if (RotateCMSCollectionTypes) {
1733 if (_cmsGen->debug_collection_type() ==
1734 ConcurrentMarkSweepGeneration::MSC_foreground_collection_type) {
1735 should_compact = true;
1736 } else if (_cmsGen->debug_collection_type() ==
1737 ConcurrentMarkSweepGeneration::MS_foreground_collection_type) {
1738 should_compact = false;
1739 }
1740 }
1741 )
1743 if (PrintGCDetails && first_state > Idling) {
1744 GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause();
1745 if (GCCause::is_user_requested_gc(cause) ||
1746 GCCause::is_serviceability_requested_gc(cause)) {
1747 gclog_or_tty->print(" (concurrent mode interrupted)");
1748 } else {
1749 gclog_or_tty->print(" (concurrent mode failure)");
1750 }
1751 }
1753 if (should_compact) {
1754 // If the collection is being acquired from the background
1755 // collector, there may be references on the discovered
1756 // references lists that have NULL referents (being those
1757 // that were concurrently cleared by a mutator) or
1758 // that are no longer active (having been enqueued concurrently
1759 // by the mutator).
1760 // Scrub the list of those references because Mark-Sweep-Compact
1761 // code assumes referents are not NULL and that all discovered
1762 // Reference objects are active.
1763 ref_processor()->clean_up_discovered_references();
1765 do_compaction_work(clear_all_soft_refs);
1767 // Has the GC time limit been exceeded?
1768 check_gc_time_limit();
1770 } else {
1771 do_mark_sweep_work(clear_all_soft_refs, first_state,
1772 should_start_over);
1773 }
1774 // Reset the expansion cause, now that we just completed
1775 // a collection cycle.
1776 clear_expansion_cause();
1777 _foregroundGCIsActive = false;
1778 return;
1779 }
1781 void CMSCollector::check_gc_time_limit() {
1783 // Ignore explicit GC's. Exiting here does not set the flag and
1784 // does not reset the count. Updating of the averages for system
1785 // GC's is still controlled by UseAdaptiveSizePolicyWithSystemGC.
1786 GCCause::Cause gc_cause = GenCollectedHeap::heap()->gc_cause();
1787 if (GCCause::is_user_requested_gc(gc_cause) ||
1788 GCCause::is_serviceability_requested_gc(gc_cause)) {
1789 return;
1790 }
1792 // Calculate the fraction of the CMS generation that was freed during
1793 // the last collection.
1794 // Only consider the STW compacting cost for now.
1795 //
1796 // Note that the gc time limit test only works for the collections
1797 // of the young gen + tenured gen and not for collections of the
1798 // permanent gen. That is because the calculation of the space
1799 // freed by the collection is the free space in the young gen +
1800 // tenured gen.
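// For example (assuming the usual defaults GCTimeLimit=98 and
// GCHeapFreeLimit=2): if compacting_gc_cost() were 0.99 -- i.e. 99% of
// recent time spent in GC -- and only 1% of the CMS generation were free,
// both tests below would hold and the limit count would be bumped;
// gc_time_limit_exceeded is set only once the count passes
// AdaptiveSizePolicyGCTimeLimitThreshold (and UseGCOverheadLimit is on).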
1802 double fraction_free =
1803 ((double)_cmsGen->free())/((double)_cmsGen->max_capacity());
1804 if ((100.0 * size_policy()->compacting_gc_cost()) >
1805 ((double) GCTimeLimit) &&
1806 ((fraction_free * 100) < GCHeapFreeLimit)) {
1807 size_policy()->inc_gc_time_limit_count();
1808 if (UseGCOverheadLimit &&
1809 (size_policy()->gc_time_limit_count() >
1810 AdaptiveSizePolicyGCTimeLimitThreshold)) {
1811 size_policy()->set_gc_time_limit_exceeded(true);
1812 // Avoid consecutive OOM due to the gc time limit by resetting
1813 // the counter.
1814 size_policy()->reset_gc_time_limit_count();
1815 if (PrintGCDetails) {
1816 gclog_or_tty->print_cr(" GC is exceeding overhead limit "
1817 "of %d%%", GCTimeLimit);
1818 }
1819 } else {
1820 if (PrintGCDetails) {
1821 gclog_or_tty->print_cr(" GC would exceed overhead limit "
1822 "of %d%%", GCTimeLimit);
1823 }
1824 }
1825 } else {
1826 size_policy()->reset_gc_time_limit_count();
1827 }
1828 }
1830 // Resize the perm generation and the tenured generation
1831 // after obtaining the free list locks for the
1832 // two generations.
1833 void CMSCollector::compute_new_size() {
1834 assert_locked_or_safepoint(Heap_lock);
1835 FreelistLocker z(this);
1836 _permGen->compute_new_size();
1837 _cmsGen->compute_new_size();
1838 }
1840 // A work method used by foreground collection to determine
1841 // what type of collection (compacting or not, continuing or fresh)
1842 // it should do.
1843 // NOTE: the intent is to make UseCMSCompactAtFullCollection
1844 // and CMSCompactWhenClearAllSoftRefs the default in the future
1845 // and do away with the flags after a suitable period.
1846 void CMSCollector::decide_foreground_collection_type(
1847 bool clear_all_soft_refs, bool* should_compact,
1848 bool* should_start_over) {
1849 // Normally, we'll compact only if the UseCMSCompactAtFullCollection
1850 // flag is set, and we have either requested a System.gc() or
1851 // the number of full gc's since the last concurrent cycle
1852 // has exceeded the threshold set by CMSFullGCsBeforeCompaction,
1853 // or if an incremental collection has failed
1854 GenCollectedHeap* gch = GenCollectedHeap::heap();
1855 assert(gch->collector_policy()->is_two_generation_policy(),
1856 "You may want to check the correctness of the following");
1857 // Inform cms gen if this was due to a partial collection failing.
1858 // The CMS gen may use this fact to determine its expansion policy.
1859 if (gch->incremental_collection_will_fail()) {
1860 assert(!_cmsGen->incremental_collection_failed(),
1861 "Should have been noticed, reacted to and cleared");
1862 _cmsGen->set_incremental_collection_failed();
1863 }
1864 *should_compact =
1865 UseCMSCompactAtFullCollection &&
1866 ((_full_gcs_since_conc_gc >= CMSFullGCsBeforeCompaction) ||
1867 GCCause::is_user_requested_gc(gch->gc_cause()) ||
1868 gch->incremental_collection_will_fail());
1869 *should_start_over = false;
1870 if (clear_all_soft_refs && !*should_compact) {
1871 // We are about to do a last ditch collection attempt
1872 // so it would normally make sense to do a compaction
1873 // to reclaim as much space as possible.
1874 if (CMSCompactWhenClearAllSoftRefs) {
1875 // Default: The rationale is that in this case either
1876 // we are past the final marking phase, in which case
1877 // we'd have to start over, or so little has been done
1878 // that there's little point in saving that work. Compaction
1879 // appears to be the sensible choice in either case.
1880 *should_compact = true;
1881 } else {
1882 // We have been asked to clear all soft refs, but not to
1883 // compact. Make sure that we aren't past the final checkpoint
1884 // phase, for that is where we process soft refs. If we are already
1885 // past that phase, we'll need to redo the refs discovery phase and
1886 // if necessary clear soft refs that weren't previously
1887 // cleared. We do so by remembering the phase in which
1888 // we came in, and if we are past the refs processing
1889 // phase, we'll choose to just redo the mark-sweep
1890 // collection from scratch.
1891 if (_collectorState > FinalMarking) {
1892 // We are past the refs processing phase;
1893 // start over and do a fresh synchronous CMS cycle
1894 _collectorState = Resetting; // skip to reset to start new cycle
1895 reset(false /* == !asynch */);
1896 *should_start_over = true;
1897 } // else we can continue a possibly ongoing current cycle
1898 }
1899 }
1900 }
1902 // A work method used by the foreground collector to do
1903 // a mark-sweep-compact.
1904 void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
1905 GenCollectedHeap* gch = GenCollectedHeap::heap();
1906 TraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, gclog_or_tty);
1907 if (PrintGC && Verbose && !(GCCause::is_user_requested_gc(gch->gc_cause()))) {
1908 gclog_or_tty->print_cr("Compact ConcurrentMarkSweepGeneration after %d "
1909 "collections passed to foreground collector", _full_gcs_since_conc_gc);
1910 }
1912 // Sample collection interval time and reset for collection pause.
1913 if (UseAdaptiveSizePolicy) {
1914 size_policy()->msc_collection_begin();
1915 }
1917 // Temporarily widen the span of the weak reference processing to
1918 // the entire heap.
1919 MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
1920 ReferenceProcessorSpanMutator x(ref_processor(), new_span);
1922 // Temporarily, clear the "is_alive_non_header" field of the
1923 // reference processor.
1924 ReferenceProcessorIsAliveMutator y(ref_processor(), NULL);
1926 // Temporarily make reference _processing_ single threaded (non-MT).
1927 ReferenceProcessorMTProcMutator z(ref_processor(), false);
1929 // Temporarily make refs discovery atomic
1930 ReferenceProcessorAtomicMutator w(ref_processor(), true);
1932 ref_processor()->set_enqueuing_is_done(false);
1933 ref_processor()->enable_discovery();
1934 // If an asynchronous collection finishes, the _modUnionTable is
1935 // all clear. If we are taking over the collection from an ongoing
1936 // asynchronous collection, clear the _modUnionTable.
1937 assert(_collectorState != Idling || _modUnionTable.isAllClear(),
1938 "_modUnionTable should be clear if the baton was not passed");
1939 _modUnionTable.clear_all();
1941 // We must adjust the allocation statistics being maintained
1942 // in the free list space. We do so by reading and clearing
1943 // the sweep timer and updating the block flux rate estimates below.
1944 assert(_sweep_timer.is_active(), "We should never see the timer inactive");
1945 _sweep_timer.stop();
1946 // Note that we do not use this sample to update the _sweep_estimate.
1947 _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_sweep_timer.seconds()),
1948 _sweep_estimate.padded_average());
1950 GenMarkSweep::invoke_at_safepoint(_cmsGen->level(),
1951 ref_processor(), clear_all_soft_refs);
1952 #ifdef ASSERT
1953 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
1954 size_t free_size = cms_space->free();
1955 assert(free_size ==
1956 pointer_delta(cms_space->end(), cms_space->compaction_top())
1957 * HeapWordSize,
1958 "All the free space should be compacted into one chunk at top");
1959 assert(cms_space->dictionary()->totalChunkSize(
1960 debug_only(cms_space->freelistLock())) == 0 ||
1961 cms_space->totalSizeInIndexedFreeLists() == 0,
1962 "All the free space should be in a single chunk");
1963 size_t num = cms_space->totalCount();
1964 assert((free_size == 0 && num == 0) ||
1965 (free_size > 0 && (num == 1 || num == 2)),
1966 "There should be at most 2 free chunks after compaction");
1967 #endif // ASSERT
1968 _collectorState = Resetting;
1969 assert(_restart_addr == NULL,
1970 "Should have been NULL'd before baton was passed");
1971 reset(false /* == !asynch */);
1972 _cmsGen->reset_after_compaction();
1974 if (verifying() && !cms_should_unload_classes()) {
1975 perm_gen_verify_bit_map()->clear_all();
1976 }
1978 // Clear any data recorded in the PLAB chunk arrays.
1979 if (_survivor_plab_array != NULL) {
1980 reset_survivor_plab_arrays();
1981 }
1983 // Adjust the per-size allocation stats for the next epoch.
1984 _cmsGen->cmsSpace()->endSweepFLCensus(sweepCount() /* fake */);
1985 // Restart the "sweep timer" for next epoch.
1986 _sweep_timer.reset();
1987 _sweep_timer.start();
1989 // Sample collection pause time and reset for collection interval.
1990 if (UseAdaptiveSizePolicy) {
1991 size_policy()->msc_collection_end(gch->gc_cause());
1992 }
1994 // For a mark-sweep-compact, compute_new_size() will be called
1995 // in the heap's do_collection() method.
1996 }
1998 // A work method used by the foreground collector to do
1999 // a mark-sweep, after taking over from a possibly on-going
2000 // concurrent mark-sweep collection.
2001 void CMSCollector::do_mark_sweep_work(bool clear_all_soft_refs,
2002 CollectorState first_state, bool should_start_over) {
2003 if (PrintGC && Verbose) {
2004 gclog_or_tty->print_cr("Pass concurrent collection to foreground "
2005 "collector with count %d",
2006 _full_gcs_since_conc_gc);
2007 }
2008 switch (_collectorState) {
2009 case Idling:
2010 if (first_state == Idling || should_start_over) {
2011 // The background GC was not active, or should
2012 // be restarted from scratch; start the cycle.
2013 _collectorState = InitialMarking;
2014 }
2015 // If first_state was not Idling, then a background GC
2016 // was in progress and has now finished. No need to do it
2017 // again. Leave the state as Idling.
2018 break;
2019 case Precleaning:
2020 // In the foreground case don't do the precleaning since
2021 // it is not done concurrently and there is extra work
2022 // required.
2023 _collectorState = FinalMarking;
2024 }
2025 if (PrintGCDetails &&
2026 (_collectorState > Idling ||
2027 !GCCause::is_user_requested_gc(GenCollectedHeap::heap()->gc_cause()))) {
2028 gclog_or_tty->print(" (concurrent mode failure)");
2029 }
2030 collect_in_foreground(clear_all_soft_refs);
2032 // For a mark-sweep, compute_new_size() will be called
2033 // in the heap's do_collection() method.
2034 }
2037 void CMSCollector::getFreelistLocks() const {
2038 // Get locks for all free lists in all generations that this
2039 // collector is responsible for
2040 _cmsGen->freelistLock()->lock_without_safepoint_check();
2041 _permGen->freelistLock()->lock_without_safepoint_check();
2042 }
2044 void CMSCollector::releaseFreelistLocks() const {
2045 // Release locks for all free lists in all generations that this
2046 // collector is responsible for
2047 _cmsGen->freelistLock()->unlock();
2048 _permGen->freelistLock()->unlock();
2049 }
2051 bool CMSCollector::haveFreelistLocks() const {
2052 // Check locks for all free lists in all generations that this
2053 // collector is responsible for
2054 assert_lock_strong(_cmsGen->freelistLock());
2055 assert_lock_strong(_permGen->freelistLock());
2056 PRODUCT_ONLY(ShouldNotReachHere());
2057 return true;
2058 }
2060 // A utility class that is used by the CMS collector to
2061 // temporarily "release" the foreground collector from its
2062 // usual obligation to wait for the background collector to
2063 // complete an ongoing phase before proceeding.
2064 class ReleaseForegroundGC: public StackObj {
2065 private:
2066 CMSCollector* _c;
2067 public:
2068 ReleaseForegroundGC(CMSCollector* c) : _c(c) {
2069 assert(_c->_foregroundGCShouldWait, "Else should not need to call");
2070 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2071 // allow a potentially blocked foreground collector to proceed
2072 _c->_foregroundGCShouldWait = false;
2073 if (_c->_foregroundGCIsActive) {
2074 CGC_lock->notify();
2075 }
2076 assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2077 "Possible deadlock");
2078 }
2080 ~ReleaseForegroundGC() {
2081 assert(!_c->_foregroundGCShouldWait, "Usage protocol violation?");
2082 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2083 _c->_foregroundGCShouldWait = true;
2084 }
2085 };
2087 // There are separate collect_in_background and collect_in_foreground because of
2088 // the different locking requirements of the background collector and the
2089 // foreground collector. There was originally an attempt to share
2090 // one "collect" method between the background collector and the foreground
2091 // collector, but the if-then-else logic required made it cleaner to have
2092 // separate methods.
2093 void CMSCollector::collect_in_background(bool clear_all_soft_refs) {
2094 assert(Thread::current()->is_ConcurrentGC_thread(),
2095 "A CMS asynchronous collection is only allowed on a CMS thread.");
2097 GenCollectedHeap* gch = GenCollectedHeap::heap();
2098 {
2099 bool safepoint_check = Mutex::_no_safepoint_check_flag;
2100 MutexLockerEx hl(Heap_lock, safepoint_check);
2101 MutexLockerEx x(CGC_lock, safepoint_check);
2102 if (_foregroundGCIsActive || !UseAsyncConcMarkSweepGC) {
2103 // The foreground collector is active or we're
2104 // not using asynchronous collections. Skip this
2105 // background collection.
2106 assert(!_foregroundGCShouldWait, "Should be clear");
2107 return;
2108 } else {
2109 assert(_collectorState == Idling, "Should be idling before start.");
2110 _collectorState = InitialMarking;
2111 // Reset the expansion cause, now that we are about to begin
2112 // a new cycle.
2113 clear_expansion_cause();
2114 }
2115 _unloaded_classes_last_cycle = cms_should_unload_classes(); // ... from last cycle
2116 // This controls class unloading in response to an explicit gc request.
2117 // If ExplicitGCInvokesConcurrentAndUnloadsClasses is set, then
2118 // we will unload classes even if CMSClassUnloadingEnabled is not set.
2119 // See CR 6541037 and related CRs.
2120 _unload_classes = _full_gc_requested // ... for this cycle
2121 && ExplicitGCInvokesConcurrentAndUnloadsClasses;
2122 _full_gc_requested = false; // acks all outstanding full gc requests
2123 // Signal that we are about to start a collection
2124 gch->increment_total_full_collections(); // ... starting a collection cycle
2125 _collection_count_start = gch->total_full_collections();
2126 }
2128 // Used for PrintGC
2129 size_t prev_used;
2130 if (PrintGC && Verbose) {
2131 prev_used = _cmsGen->used(); // XXXPERM
2132 }
2134 // The change of the collection state is normally done at this level;
2135 // the exceptions are phases that are executed while the world is
2136 // stopped. For those phases the change of state is done while the
2137 // world is stopped. For baton passing purposes this allows the
2138 // background collector to finish the phase and change state atomically.
2139 // The foreground collector cannot wait on a phase that is done
2140 // while the world is stopped because the foreground collector already
2141 // has the world stopped and would deadlock.
2142 while (_collectorState != Idling) {
2143 if (TraceCMSState) {
2144 gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
2145 Thread::current(), _collectorState);
2146 }
2147 // The foreground collector
2148 // holds the Heap_lock throughout its collection.
2149 // holds the CMS token (but not the lock)
2150 // except while it is waiting for the background collector to yield.
2151 //
2152 // The foreground collector should be blocked (not for long)
2153 // if the background collector is about to start a phase
2154 // executed with world stopped. If the background
2155 // collector has already started such a phase, the
2156 // foreground collector is blocked waiting for the
2157 // Heap_lock. The stop-world phases (InitialMarking and FinalMarking)
2158 // are executed in the VM thread.
2159 //
2160 // The locking order is
2161 // PendingListLock (PLL) -- if applicable (FinalMarking)
2162 // Heap_lock (both this & PLL locked in VM_CMS_Operation::prologue())
2163 // CMS token (claimed in
2164 // stop_world_and_do() -->
2165 // safepoint_synchronize() -->
2166 // CMSThread::synchronize())
2168 {
2169 // Check if the FG collector wants us to yield.
2170 CMSTokenSync x(true); // is cms thread
2171 if (waitForForegroundGC()) {
2172 // We yielded to a foreground GC, nothing more to be
2173 // done this round.
2174 assert(_foregroundGCShouldWait == false, "We set it to false in "
2175 "waitForForegroundGC()");
2176 if (TraceCMSState) {
2177 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2178 " exiting collection CMS state %d",
2179 Thread::current(), _collectorState);
2180 }
2181 return;
2182 } else {
2183 // The background collector can run but check to see if the
2184 // foreground collector has done a collection while the
2185 // background collector was waiting to get the CGC_lock
2186 // above. If yes, break so that _foregroundGCShouldWait
2187 // is cleared before returning.
2188 if (_collectorState == Idling) {
2189 break;
2190 }
2191 }
2192 }
2194 assert(_foregroundGCShouldWait, "Foreground collector, if active, "
2195 "should be waiting");
2197 switch (_collectorState) {
2198 case InitialMarking:
2199 {
2200 ReleaseForegroundGC x(this);
2201 stats().record_cms_begin();
2203 VM_CMS_Initial_Mark initial_mark_op(this);
2204 VMThread::execute(&initial_mark_op);
2205 }
2206 // The collector state may be any legal state at this point
2207 // since the background collector may have yielded to the
2208 // foreground collector.
2209 break;
2210 case Marking:
2211 // initial marking in checkpointRootsInitialWork has been completed
2212 if (markFromRoots(true)) { // we were successful
2213 assert(_collectorState == Precleaning, "Collector state should "
2214 "have changed");
2215 } else {
2216 assert(_foregroundGCIsActive, "Internal state inconsistency");
2217 }
2218 break;
2219 case Precleaning:
2220 if (UseAdaptiveSizePolicy) {
2221 size_policy()->concurrent_precleaning_begin();
2222 }
2223 // marking from roots in markFromRoots has been completed
2224 preclean();
2225 if (UseAdaptiveSizePolicy) {
2226 size_policy()->concurrent_precleaning_end();
2227 }
2228 assert(_collectorState == AbortablePreclean ||
2229 _collectorState == FinalMarking,
2230 "Collector state should have changed");
2231 break;
2232 case AbortablePreclean:
2233 if (UseAdaptiveSizePolicy) {
2234 size_policy()->concurrent_phases_resume();
2235 }
2236 abortable_preclean();
2237 if (UseAdaptiveSizePolicy) {
2238 size_policy()->concurrent_precleaning_end();
2239 }
2240 assert(_collectorState == FinalMarking, "Collector state should "
2241 "have changed");
2242 break;
2243 case FinalMarking:
2244 {
2245 ReleaseForegroundGC x(this);
2247 VM_CMS_Final_Remark final_remark_op(this);
2248 VMThread::execute(&final_remark_op);
2249 }
2250 assert(_foregroundGCShouldWait, "block post-condition");
2251 break;
2252 case Sweeping:
2253 if (UseAdaptiveSizePolicy) {
2254 size_policy()->concurrent_sweeping_begin();
2255 }
2256 // final marking in checkpointRootsFinal has been completed
2257 sweep(true);
2258 assert(_collectorState == Resizing, "Collector state change "
2259 "to Resizing must be done under the free_list_lock");
2260 _full_gcs_since_conc_gc = 0;
2262 // Stop the timers for adaptive size policy for the concurrent phases
2263 if (UseAdaptiveSizePolicy) {
2264 size_policy()->concurrent_sweeping_end();
2265 size_policy()->concurrent_phases_end(gch->gc_cause(),
2266 gch->prev_gen(_cmsGen)->capacity(),
2267 _cmsGen->free());
2268 }
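// Note: no break here -- sweep() leaves the state at Resizing (asserted
// above), so we deliberately fall through into the Resizing case.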
2270 case Resizing: {
2271 // Sweeping has been completed...
2272 // At this point the background collection has completed.
2273 // Don't move the call to compute_new_size() down
2274 // into code that might be executed if the background
2275 // collection was preempted.
2276 {
2277 ReleaseForegroundGC x(this); // unblock FG collection
2278 MutexLockerEx y(Heap_lock, Mutex::_no_safepoint_check_flag);
2279 CMSTokenSync z(true); // not strictly needed.
2280 if (_collectorState == Resizing) {
2281 compute_new_size();
2282 _collectorState = Resetting;
2283 } else {
2284 assert(_collectorState == Idling, "The state should only change"
2285 " because the foreground collector has finished the collection");
2286 }
2287 }
2288 break;
2289 }
2290 case Resetting:
2291 // CMS heap resizing has been completed
2292 reset(true);
2293 assert(_collectorState == Idling, "Collector state should "
2294 "have changed");
2295 stats().record_cms_end();
2296 // Don't move the concurrent_phases_end() and compute_new_size()
2297 // calls to here because a preempted background collection
2298 // has its state set to "Resetting".
2299 break;
2300 case Idling:
2301 default:
2302 ShouldNotReachHere();
2303 break;
2304 }
2305 if (TraceCMSState) {
2306 gclog_or_tty->print_cr(" Thread " INTPTR_FORMAT " done - next CMS state %d",
2307 Thread::current(), _collectorState);
2308 }
2309 assert(_foregroundGCShouldWait, "block post-condition");
2310 }
2312 // Should this be in gc_epilogue?
2313 collector_policy()->counters()->update_counters();
2315 {
2316 // Clear _foregroundGCShouldWait and, in the event that the
2317 // foreground collector is waiting, notify it, before
2318 // returning.
2319 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2320 _foregroundGCShouldWait = false;
2321 if (_foregroundGCIsActive) {
2322 CGC_lock->notify();
2323 }
2324 assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2325 "Possible deadlock");
2326 }
2327 if (TraceCMSState) {
2328 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2329 " exiting collection CMS state %d",
2330 Thread::current(), _collectorState);
2331 }
2332 if (PrintGC && Verbose) {
2333 _cmsGen->print_heap_change(prev_used);
2334 }
2335 }
2337 void CMSCollector::collect_in_foreground(bool clear_all_soft_refs) {
2338 assert(_foregroundGCIsActive && !_foregroundGCShouldWait,
2339 "Foreground collector should be waiting, not executing");
2340 assert(Thread::current()->is_VM_thread(), "A foreground collection "
2341 "may only be done by the VM Thread with the world stopped");
2342 assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
2343 "VM thread should have CMS token");
2345 NOT_PRODUCT(TraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose,
2346 true, gclog_or_tty);)
2347 if (UseAdaptiveSizePolicy) {
2348 size_policy()->ms_collection_begin();
2349 }
2350 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
2352 HandleMark hm; // Discard invalid handles created during verification
2354 if (VerifyBeforeGC &&
2355 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2356 Universe::verify(true);
2357 }
2359 bool init_mark_was_synchronous = false; // until proven otherwise
2360 while (_collectorState != Idling) {
2361 if (TraceCMSState) {
2362 gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
2363 Thread::current(), _collectorState);
2364 }
2365 switch (_collectorState) {
2366 case InitialMarking:
2367 init_mark_was_synchronous = true; // fact to be exploited in re-mark
2368 checkpointRootsInitial(false);
2369 assert(_collectorState == Marking, "Collector state should have changed"
2370 " within checkpointRootsInitial()");
2371 break;
2372 case Marking:
2373 // initial marking in checkpointRootsInitialWork has been completed
2374 if (VerifyDuringGC &&
2375 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2376 gclog_or_tty->print("Verify before initial mark: ");
2377 Universe::verify(true);
2378 }
2379 {
2380 bool res = markFromRoots(false);
2381 assert(res && _collectorState == FinalMarking, "Collector state should "
2382 "have changed");
2383 break;
2384 }
2385 case FinalMarking:
2386 if (VerifyDuringGC &&
2387 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2388 gclog_or_tty->print("Verify before re-mark: ");
2389 Universe::verify(true);
2390 }
2391 checkpointRootsFinal(false, clear_all_soft_refs,
2392 init_mark_was_synchronous);
2393 assert(_collectorState == Sweeping, "Collector state should not "
2394 "have changed within checkpointRootsFinal()");
2395 break;
2396 case Sweeping:
2397 // final marking in checkpointRootsFinal has been completed
2398 if (VerifyDuringGC &&
2399 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2400 gclog_or_tty->print("Verify before sweep: ");
2401 Universe::verify(true);
2402 }
2403 sweep(false);
2404 assert(_collectorState == Resizing, "Incorrect state");
2405 break;
2406 case Resizing: {
2407 // Sweeping has been completed; the actual resize in this case
2408 // is done separately; nothing to be done in this state.
2409 _collectorState = Resetting;
2410 break;
2411 }
2412 case Resetting:
2413 // The heap has been resized.
2414 if (VerifyDuringGC &&
2415 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2416 gclog_or_tty->print("Verify before reset: ");
2417 Universe::verify(true);
2418 }
2419 reset(false);
2420 assert(_collectorState == Idling, "Collector state should "
2421 "have changed");
2422 break;
2423 case Precleaning:
2424 case AbortablePreclean:
2425 // Elide the preclean phase
2426 _collectorState = FinalMarking;
2427 break;
2428 default:
2429 ShouldNotReachHere();
2430 }
2431 if (TraceCMSState) {
2432 gclog_or_tty->print_cr(" Thread " INTPTR_FORMAT " done - next CMS state %d",
2433 Thread::current(), _collectorState);
2434 }
2435 }
2437 if (UseAdaptiveSizePolicy) {
2438 GenCollectedHeap* gch = GenCollectedHeap::heap();
2439 size_policy()->ms_collection_end(gch->gc_cause());
2440 }
2442 if (VerifyAfterGC &&
2443 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2444 Universe::verify(true);
2445 }
2446 if (TraceCMSState) {
2447 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2448 " exiting collection CMS state %d",
2449 Thread::current(), _collectorState);
2450 }
2451 }
2453 bool CMSCollector::waitForForegroundGC() {
2454 bool res = false;
2455 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2456 "CMS thread should have CMS token");
2457 // Block the foreground collector until the
2458 // background collector decides whether to
2459 // yield.
2460 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2461 _foregroundGCShouldWait = true;
2462 if (_foregroundGCIsActive) {
2463 // The background collector yields to the
2464 // foreground collector and returns a value
2465 // indicating that it has yielded. The foreground
2466 // collector can proceed.
2467 res = true;
2468 _foregroundGCShouldWait = false;
2469 ConcurrentMarkSweepThread::clear_CMS_flag(
2470 ConcurrentMarkSweepThread::CMS_cms_has_token);
2471 ConcurrentMarkSweepThread::set_CMS_flag(
2472 ConcurrentMarkSweepThread::CMS_cms_wants_token);
2473 // Get a possibly blocked foreground thread going
2474 CGC_lock->notify();
2475 if (TraceCMSState) {
2476 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " waiting at CMS state %d",
2477 Thread::current(), _collectorState);
2478 }
2479 while (_foregroundGCIsActive) {
2480 CGC_lock->wait(Mutex::_no_safepoint_check_flag);
2481 }
2482 ConcurrentMarkSweepThread::set_CMS_flag(
2483 ConcurrentMarkSweepThread::CMS_cms_has_token);
2484 ConcurrentMarkSweepThread::clear_CMS_flag(
2485 ConcurrentMarkSweepThread::CMS_cms_wants_token);
2486 }
2487 if (TraceCMSState) {
2488 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " continuing at CMS state %d",
2489 Thread::current(), _collectorState);
2490 }
2491 return res;
2492 }
2494 // Because of the need to lock the free lists and other structures in
2495 // the collector, common to all the generations that the collector is
2496 // collecting, we need the gc_prologues of individual CMS generations
2497 // to delegate to their collector. It may have been simpler had the
2498 // current infrastructure allowed one to call a prologue on a
2499 // collector. In the absence of that we have the generation's
2500 // prologue delegate to the collector, which delegates back
2501 // some "local" work to a worker method in the individual generations
2502 // that it's responsible for collecting, while itself doing any
2503 // work common to all generations it's responsible for. A similar
2504 // comment applies to the gc_epilogue()'s.
2505 // The role of the variable _between_prologue_and_epilogue is to
2506 // enforce the invocation protocol.
2507 void CMSCollector::gc_prologue(bool full) {
2508 // Call gc_prologue_work() for each CMSGen and PermGen that
2509 // we are responsible for.
2511 // The following locking discipline assumes that we are only called
2512 // when the world is stopped.
2513 assert(SafepointSynchronize::is_at_safepoint(), "world is stopped assumption");
2515 // The CMSCollector prologue must call the gc_prologues for the
2516 // "generations" (including PermGen if any) that it's responsible
2517 // for.
2519 assert( Thread::current()->is_VM_thread()
2520 || ( CMSScavengeBeforeRemark
2521 && Thread::current()->is_ConcurrentGC_thread()),
2522 "Incorrect thread type for prologue execution");
2524 if (_between_prologue_and_epilogue) {
2525 // We have already been invoked; this is a gc_prologue delegation
2526 // from yet another CMS generation that we are responsible for, just
2527 // ignore it since all relevant work has already been done.
2528 return;
2529 }
2531 // set a bit saying prologue has been called; cleared in epilogue
2532 _between_prologue_and_epilogue = true;
2533 // Claim locks for common data structures, then call gc_prologue_work()
2534 // for each CMSGen and PermGen that we are responsible for.
2536 getFreelistLocks(); // gets free list locks on constituent spaces
2537 bitMapLock()->lock_without_safepoint_check();
2539 // Should call gc_prologue_work() for all cms gens we are responsible for
2540 bool registerClosure = _collectorState >= Marking
2541 && _collectorState < Sweeping;
2542 ModUnionClosure* muc = ParallelGCThreads > 0 ? &_modUnionClosurePar
2543 : &_modUnionClosure;
2544 _cmsGen->gc_prologue_work(full, registerClosure, muc);
2545 _permGen->gc_prologue_work(full, registerClosure, muc);
2547 if (!full) {
2548 stats().record_gc0_begin();
2549 }
2550 }
2552 void ConcurrentMarkSweepGeneration::gc_prologue(bool full) {
2553 // Delegate to CMSCollector, which knows how to coordinate between
2554 // this and any other CMS generations that it is responsible for
2555 // collecting.
2556 collector()->gc_prologue(full);
2557 }
2559 // This is a "private" interface for use by this generation's CMSCollector.
2560 // Not to be called directly by any other entity (for instance,
2561 // GenCollectedHeap, which calls the "public" gc_prologue method above).
2562 void ConcurrentMarkSweepGeneration::gc_prologue_work(bool full,
2563 bool registerClosure, ModUnionClosure* modUnionClosure) {
2564 assert(!incremental_collection_failed(), "Shouldn't be set yet");
2565 assert(cmsSpace()->preconsumptionDirtyCardClosure() == NULL,
2566 "Should be NULL");
2567 if (registerClosure) {
2568 cmsSpace()->setPreconsumptionDirtyCardClosure(modUnionClosure);
2569 }
2570 cmsSpace()->gc_prologue();
2571 // Clear stat counters
2572 NOT_PRODUCT(
2573 assert(_numObjectsPromoted == 0, "check");
2574 assert(_numWordsPromoted == 0, "check");
2575 if (Verbose && PrintGC) {
2576 gclog_or_tty->print("Allocated "SIZE_FORMAT" objects, "
2577 SIZE_FORMAT" bytes concurrently",
2578 _numObjectsAllocated, _numWordsAllocated*sizeof(HeapWord));
2579 }
2580 _numObjectsAllocated = 0;
2581 _numWordsAllocated = 0;
2582 )
2583 }
2585 void CMSCollector::gc_epilogue(bool full) {
2586 // The following locking discipline assumes that we are only called
2587 // when the world is stopped.
2588 assert(SafepointSynchronize::is_at_safepoint(),
2589 "world is stopped assumption");
2591 // Currently the CMS epilogue (see CompactibleFreeListSpace) merely checks
2592 // if linear allocation blocks need to be appropriately marked to allow
2593 // the blocks to be parsable. We also check here whether we need to nudge the
2594 // CMS collector thread to start a new cycle (if it's not already active).
2595 assert( Thread::current()->is_VM_thread()
2596 || ( CMSScavengeBeforeRemark
2597 && Thread::current()->is_ConcurrentGC_thread()),
2598 "Incorrect thread type for epilogue execution");
2600 if (!_between_prologue_and_epilogue) {
2601 // We have already been invoked; this is a gc_epilogue delegation
2602 // from yet another CMS generation that we are responsible for, just
2603 // ignore it since all relevant work has already been done.
2604 return;
2605 }
2606 assert(haveFreelistLocks(), "must have freelist locks");
2607 assert_lock_strong(bitMapLock());
2609 _cmsGen->gc_epilogue_work(full);
2610 _permGen->gc_epilogue_work(full);
2612 if (_collectorState == AbortablePreclean || _collectorState == Precleaning) {
2613 // in case sampling was not already enabled, enable it
2614 _start_sampling = true;
2615 }
2616 // reset _eden_chunk_array so sampling starts afresh
2617 _eden_chunk_index = 0;
2619 size_t cms_used = _cmsGen->cmsSpace()->used();
2620 size_t perm_used = _permGen->cmsSpace()->used();
2622 // update performance counters - this uses a special version of
2623 // update_counters() that allows the utilization to be passed as a
2624 // parameter, avoiding multiple calls to used().
2625 //
2626 _cmsGen->update_counters(cms_used);
2627 _permGen->update_counters(perm_used);
2629 if (CMSIncrementalMode) {
2630 icms_update_allocation_limits();
2631 }
2633 bitMapLock()->unlock();
2634 releaseFreelistLocks();
2636 _between_prologue_and_epilogue = false; // ready for next cycle
2637 }
2639 void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) {
2640 collector()->gc_epilogue(full);
2642 // Also reset promotion tracking in par gc thread states.
2643 if (ParallelGCThreads > 0) {
2644 for (uint i = 0; i < ParallelGCThreads; i++) {
2645 _par_gc_thread_states[i]->promo.stopTrackingPromotions();
2646 }
2647 }
2648 }
2650 void ConcurrentMarkSweepGeneration::gc_epilogue_work(bool full) {
2651 assert(!incremental_collection_failed(), "Should have been cleared");
2652 cmsSpace()->setPreconsumptionDirtyCardClosure(NULL);
2653 cmsSpace()->gc_epilogue();
2654 // Print stat counters
2655 NOT_PRODUCT(
2656 assert(_numObjectsAllocated == 0, "check");
2657 assert(_numWordsAllocated == 0, "check");
2658 if (Verbose && PrintGC) {
2659 gclog_or_tty->print("Promoted "SIZE_FORMAT" objects, "
2660 SIZE_FORMAT" bytes",
2661 _numObjectsPromoted, _numWordsPromoted*sizeof(HeapWord));
2662 }
2663 _numObjectsPromoted = 0;
2664 _numWordsPromoted = 0;
2665 )
2667 if (PrintGC && Verbose) {
2668 // The call down the chain in contiguous_available() needs the freelistLock,
2669 // so print this out before releasing the freelistLock.
2670 gclog_or_tty->print(" Contiguous available "SIZE_FORMAT" bytes ",
2671 contiguous_available());
2672 }
2673 }
2675 #ifndef PRODUCT
2676 bool CMSCollector::have_cms_token() {
2677 Thread* thr = Thread::current();
2678 if (thr->is_VM_thread()) {
2679 return ConcurrentMarkSweepThread::vm_thread_has_cms_token();
2680 } else if (thr->is_ConcurrentGC_thread()) {
2681 return ConcurrentMarkSweepThread::cms_thread_has_cms_token();
2682 } else if (thr->is_GC_task_thread()) {
2683 return ConcurrentMarkSweepThread::vm_thread_has_cms_token() &&
2684 ParGCRareEvent_lock->owned_by_self();
2685 }
2686 return false;
2687 }
2688 #endif
2690 // Check reachability of the given heap address in CMS generation,
2691 // treating all other generations as roots.
2692 bool CMSCollector::is_cms_reachable(HeapWord* addr) {
2693 // We could "guarantee" below, rather than assert, but I'll
2694 // leave these as "asserts" so that an adventurous debugger
2695 // could try this in the product build provided some subset of
2696 // the conditions were met, and provided they were interested in the
2697 // results and knew that the computation below wouldn't interfere
2698 // with other concurrent computations mutating the structures
2699 // being read or written.
2700 assert(SafepointSynchronize::is_at_safepoint(),
2701 "Else mutations in object graph will make answer suspect");
2702 assert(have_cms_token(), "Should hold cms token");
2703 assert(haveFreelistLocks(), "must hold free list locks");
2704 assert_lock_strong(bitMapLock());
2706 // Clear the marking bit map array before starting, but, just
2707 // for kicks, first report if the given address is already marked
2708 gclog_or_tty->print_cr("Start: Address 0x%x is%s marked", addr,
2709 _markBitMap.isMarked(addr) ? "" : " not");
2711 if (verify_after_remark()) {
2712 MutexLockerEx x(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2713 bool result = verification_mark_bm()->isMarked(addr);
2714 gclog_or_tty->print_cr("TransitiveMark: Address 0x%x %s marked", addr,
2715 result ? "IS" : "is NOT");
2716 return result;
2717 } else {
2718 gclog_or_tty->print_cr("Could not compute result");
2719 return false;
2720 }
2721 }
2723 ////////////////////////////////////////////////////////
2724 // CMS Verification Support
2725 ////////////////////////////////////////////////////////
2726 // Following the remark phase, the following invariant
2727 // should hold -- each object in the CMS heap which is
2728 // marked in the verification_mark_bm() should also be marked in markBitMap().
2730 class VerifyMarkedClosure: public BitMapClosure {
2731 CMSBitMap* _marks;
2732 bool _failed;
2734 public:
2735 VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {}
2737 void do_bit(size_t offset) {
2738 HeapWord* addr = _marks->offsetToHeapWord(offset);
2739 if (!_marks->isMarked(addr)) {
2740 oop(addr)->print();
2741 gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr);
2742 _failed = true;
2743 }
2744 }
2746 bool failed() { return _failed; }
2747 };
2749 bool CMSCollector::verify_after_remark() {
2750 gclog_or_tty->print(" [Verifying CMS Marking... ");
2751 MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2752 static bool init = false;
2754 assert(SafepointSynchronize::is_at_safepoint(),
2755 "Else mutations in object graph will make answer suspect");
2756 assert(have_cms_token(),
2757 "Else there may be mutual interference in use of "
2758 " verification data structures");
2759 assert(_collectorState > Marking && _collectorState <= Sweeping,
2760 "Else marking info checked here may be obsolete");
2761 assert(haveFreelistLocks(), "must hold free list locks");
2762 assert_lock_strong(bitMapLock());
2765 // Allocate marking bit map if not already allocated
2766 if (!init) { // first time
2767 if (!verification_mark_bm()->allocate(_span)) {
2768 return false;
2769 }
2770 init = true;
2771 }
2773 assert(verification_mark_stack()->isEmpty(), "Should be empty");
2775 // Turn off refs discovery -- so we will be tracing through refs.
2776 // This is as intended, because by this time
2777 // GC must already have cleared any refs that need to be cleared,
2778 // and traced those that need to be marked; moreover,
2779 // the marking done here is not going to interfere in any
2780 // way with the marking information used by GC.
2781 NoRefDiscovery no_discovery(ref_processor());
2783 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
2785 // Clear any marks from a previous round
2786 verification_mark_bm()->clear_all();
2787 assert(verification_mark_stack()->isEmpty(), "markStack should be empty");
2788 assert(overflow_list_is_empty(), "overflow list should be empty");
2790 GenCollectedHeap* gch = GenCollectedHeap::heap();
2791 gch->ensure_parsability(false); // fill TLABs, but no need to retire them
2792 // Update the saved marks which may affect the root scans.
2793 gch->save_marks();
2795 if (CMSRemarkVerifyVariant == 1) {
2796 // In this first variant of verification, we complete
2797 // all marking, then check if the new marks-vector is
2798 // a subset of the CMS marks-vector.
2799 verify_after_remark_work_1();
2800 } else if (CMSRemarkVerifyVariant == 2) {
2801 // In this second variant of verification, we flag an error
2802 // (i.e. an object reachable in the new marks-vector not reachable
2803 // in the CMS marks-vector) immediately, also indicating the
2804 // identity of an object (A) that references the unmarked object (B) --
2805 // presumably, a mutation to A failed to be picked up by preclean/remark?
2806 verify_after_remark_work_2();
2807 } else {
2808 warning("Unrecognized value %d for CMSRemarkVerifyVariant",
2809 CMSRemarkVerifyVariant);
2810 }
2811 gclog_or_tty->print(" done] ");
2812 return true;
2813 }
2815 void CMSCollector::verify_after_remark_work_1() {
2816 ResourceMark rm;
2817 HandleMark hm;
2818 GenCollectedHeap* gch = GenCollectedHeap::heap();
2820 // Mark from roots one level into CMS
2821 MarkRefsIntoClosure notOlder(_span, verification_mark_bm(), true /* nmethods */);
2822 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2824 gch->gen_process_strong_roots(_cmsGen->level(),
2825 true, // younger gens are roots
2826 true, // collecting perm gen
2827 SharedHeap::ScanningOption(roots_scanning_options()),
2828 NULL, &notOlder);
2830 // Now mark from the roots
2831 assert(_revisitStack.isEmpty(), "Should be empty");
2832 MarkFromRootsClosure markFromRootsClosure(this, _span,
2833 verification_mark_bm(), verification_mark_stack(), &_revisitStack,
2834 false /* don't yield */, true /* verifying */);
2835 assert(_restart_addr == NULL, "Expected pre-condition");
2836 verification_mark_bm()->iterate(&markFromRootsClosure);
2837 while (_restart_addr != NULL) {
2838 // Deal with stack overflow by restarting at the indicated
2839 // address.
2840 HeapWord* ra = _restart_addr;
2841 markFromRootsClosure.reset(ra);
2842 _restart_addr = NULL;
2843 verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2844 }
2845 assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2846 verify_work_stacks_empty();
2847 // Should reset the revisit stack above, since no class tree
2848 // surgery is forthcoming.
2849 _revisitStack.reset(); // throwing away all contents
2851 // Marking completed -- now verify that each bit marked in
2852 // verification_mark_bm() is also marked in markBitMap(); flag all
2853 // errors by printing corresponding objects.
2854 VerifyMarkedClosure vcl(markBitMap());
2855 verification_mark_bm()->iterate(&vcl);
2856 if (vcl.failed()) {
2857 gclog_or_tty->print("Verification failed");
2858 Universe::heap()->print();
2859 fatal(" ... aborting");
2860 }
2861 }
2863 void CMSCollector::verify_after_remark_work_2() {
2864 ResourceMark rm;
2865 HandleMark hm;
2866 GenCollectedHeap* gch = GenCollectedHeap::heap();
2868 // Mark from roots one level into CMS
2869 MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
2870 markBitMap(), true /* nmethods */);
2871 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2872 gch->gen_process_strong_roots(_cmsGen->level(),
2873 true, // younger gens are roots
2874 true, // collecting perm gen
2875 SharedHeap::ScanningOption(roots_scanning_options()),
2876 NULL, &notOlder);
2878 // Now mark from the roots
2879 assert(_revisitStack.isEmpty(), "Should be empty");
2880 MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
2881 verification_mark_bm(), markBitMap(), verification_mark_stack());
2882 assert(_restart_addr == NULL, "Expected pre-condition");
2883 verification_mark_bm()->iterate(&markFromRootsClosure);
2884 while (_restart_addr != NULL) {
2885 // Deal with stack overflow by restarting at the indicated
2886 // address.
2887 HeapWord* ra = _restart_addr;
2888 markFromRootsClosure.reset(ra);
2889 _restart_addr = NULL;
2890 verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2891 }
2892 assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2893 verify_work_stacks_empty();
2894 // Should reset the revisit stack above, since no class tree
2895 // surgery is forthcoming.
2896 _revisitStack.reset(); // throwing away all contents
2898 // Marking completed -- now verify that each bit marked in
2899 // verification_mark_bm() is also marked in markBitMap(); flag all
2900 // errors by printing corresponding objects.
2901 VerifyMarkedClosure vcl(markBitMap());
2902 verification_mark_bm()->iterate(&vcl);
2903 assert(!vcl.failed(), "Else verification above should not have succeeded");
2904 }
2906 void ConcurrentMarkSweepGeneration::save_marks() {
2907 // delegate to CMS space
2908 cmsSpace()->save_marks();
2909 for (uint i = 0; i < ParallelGCThreads; i++) {
2910 _par_gc_thread_states[i]->promo.startTrackingPromotions();
2911 }
2912 }
2914 bool ConcurrentMarkSweepGeneration::no_allocs_since_save_marks() {
2915 return cmsSpace()->no_allocs_since_save_marks();
2916 }
2918 #define CMS_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
2919 \
2920 void ConcurrentMarkSweepGeneration:: \
2921 oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
2922 cl->set_generation(this); \
2923 cmsSpace()->oop_since_save_marks_iterate##nv_suffix(cl); \
2924 cl->reset_generation(); \
2925 save_marks(); \
2926 }
2928 ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DEFN)
2930 void
2931 ConcurrentMarkSweepGeneration::object_iterate_since_last_GC(ObjectClosure* blk)
2932 {
2933 // Not currently implemented; need to do the following. -- ysr.
2934 // dld -- I think that is used for some sort of allocation profiler. So it
2935 // really means the objects allocated by the mutator since the last
2936 // GC. We could potentially implement this cheaply by recording only
2937 // the direct allocations in a side data structure.
2938 //
2939 // I think we probably ought not to be required to support these
2940 // iterations at any arbitrary point; I think there ought to be some
2941 // call to enable/disable allocation profiling in a generation/space,
2942 // and the iterator ought to return the objects allocated in the
2943 // gen/space since the enable call, or the last iterator call (which
2944 // will probably be at a GC.) That way, for gens like CM&S that would
2945 // require some extra data structure to support this, we only pay the
2946 // cost when it's in use...
2947 cmsSpace()->object_iterate_since_last_GC(blk);
2948 }
2950 void
2951 ConcurrentMarkSweepGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
2952 cl->set_generation(this);
2953 younger_refs_in_space_iterate(_cmsSpace, cl);
2954 cl->reset_generation();
2955 }
2957 void
2958 ConcurrentMarkSweepGeneration::oop_iterate(MemRegion mr, OopClosure* cl) {
2959 if (freelistLock()->owned_by_self()) {
2960 Generation::oop_iterate(mr, cl);
2961 } else {
2962 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2963 Generation::oop_iterate(mr, cl);
2964 }
2965 }
2967 void
2968 ConcurrentMarkSweepGeneration::oop_iterate(OopClosure* cl) {
2969 if (freelistLock()->owned_by_self()) {
2970 Generation::oop_iterate(cl);
2971 } else {
2972 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2973 Generation::oop_iterate(cl);
2974 }
2975 }
2977 void
2978 ConcurrentMarkSweepGeneration::object_iterate(ObjectClosure* cl) {
2979 if (freelistLock()->owned_by_self()) {
2980 Generation::object_iterate(cl);
2981 } else {
2982 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2983 Generation::object_iterate(cl);
2984 }
2985 }
2987 void
2988 ConcurrentMarkSweepGeneration::pre_adjust_pointers() {
2989 }
2991 void
2992 ConcurrentMarkSweepGeneration::post_compact() {
2993 }
2995 void
2996 ConcurrentMarkSweepGeneration::prepare_for_verify() {
2997 // Fix the linear allocation blocks to look like free blocks.
2999 // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
3000 // are not called when the heap is verified during universe initialization and
3001 // at vm shutdown.
3002 if (freelistLock()->owned_by_self()) {
3003 cmsSpace()->prepare_for_verify();
3004 } else {
3005 MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
3006 cmsSpace()->prepare_for_verify();
3007 }
3008 }
3010 void
3011 ConcurrentMarkSweepGeneration::verify(bool allow_dirty /* ignored */) {
3012 // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
3013 // are not called when the heap is verified during universe initialization and
3014 // at vm shutdown.
3015 if (freelistLock()->owned_by_self()) {
3016 cmsSpace()->verify(false /* ignored */);
3017 } else {
3018 MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
3019 cmsSpace()->verify(false /* ignored */);
3020 }
3021 }
3023 void CMSCollector::verify(bool allow_dirty /* ignored */) {
3024 _cmsGen->verify(allow_dirty);
3025 _permGen->verify(allow_dirty);
3026 }
3028 #ifndef PRODUCT
3029 bool CMSCollector::overflow_list_is_empty() const {
3030 assert(_num_par_pushes >= 0, "Inconsistency");
3031 if (_overflow_list == NULL) {
3032 assert(_num_par_pushes == 0, "Inconsistency");
3033 }
3034 return _overflow_list == NULL;
3035 }
3037 // The methods verify_work_stacks_empty() and verify_overflow_empty()
3038 // merely consolidate assertion checks that appear to occur together frequently.
3039 void CMSCollector::verify_work_stacks_empty() const {
3040 assert(_markStack.isEmpty(), "Marking stack should be empty");
3041 assert(overflow_list_is_empty(), "Overflow list should be empty");
3042 }
3044 void CMSCollector::verify_overflow_empty() const {
3045 assert(overflow_list_is_empty(), "Overflow list should be empty");
3046 assert(no_preserved_marks(), "No preserved marks");
3047 }
3048 #endif // PRODUCT
3050 void CMSCollector::setup_cms_unloading_and_verification_state() {
3051 const bool should_verify = VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
3052 || VerifyBeforeExit;
3053 const int rso = SharedHeap::SO_Symbols | SharedHeap::SO_Strings
3054 | SharedHeap::SO_CodeCache;
3056 if (cms_should_unload_classes()) { // Should unload classes this cycle
3057 remove_root_scanning_option(rso); // Shrink the root set appropriately
3058 set_verifying(should_verify); // Set verification state for this cycle
3059 return; // Nothing else needs to be done at this time
3060 }
3062 // Not unloading classes this cycle
3063 assert(!cms_should_unload_classes(), "Inconsistency!");
3064 if ((!verifying() || cms_unloaded_classes_last_cycle()) && should_verify) {
3065 // We were not verifying, or we _were_ unloading classes in the last cycle,
3066 // AND some verification options are enabled this cycle; in this case,
3067 // we must make sure that the deadness map is allocated if not already so,
3068 // and cleared (if already allocated previously --
3069 // CMSBitMap::sizeInBits() is used to determine if it's allocated).
3070 if (perm_gen_verify_bit_map()->sizeInBits() == 0) {
3071 if (!perm_gen_verify_bit_map()->allocate(_permGen->reserved())) {
3072 warning("Failed to allocate permanent generation verification CMS Bit Map;\n"
3073 "permanent generation verification disabled");
3074 return; // Note that we leave verification disabled, so we'll retry this
3075 // allocation next cycle. We _could_ remember this failure
3076 // and skip further attempts and permanently disable verification
3077 // attempts if that is considered more desirable.
3078 }
3079 assert(perm_gen_verify_bit_map()->covers(_permGen->reserved()),
3080 "_perm_gen_ver_bit_map inconsistency?");
3081 } else {
3082 perm_gen_verify_bit_map()->clear_all();
3083 }
3084 // Include symbols, strings and code cache elements to prevent their resurrection.
3085 add_root_scanning_option(rso);
3086 set_verifying(true);
3087 } else if (verifying() && !should_verify) {
3088 // We were verifying, but some verification flags got disabled.
3089 set_verifying(false);
3090 // Exclude symbols, strings and code cache elements from root scanning to
3091 // reduce IM and RM pauses.
3092 remove_root_scanning_option(rso);
3093 }
3094 }
3097 #ifndef PRODUCT
3098 HeapWord* CMSCollector::block_start(const void* p) const {
3099 const HeapWord* addr = (HeapWord*)p;
3100 if (_span.contains(p)) {
3101 if (_cmsGen->cmsSpace()->is_in_reserved(addr)) {
3102 return _cmsGen->cmsSpace()->block_start(p);
3103 } else {
3104 assert(_permGen->cmsSpace()->is_in_reserved(addr),
3105 "Inconsistent _span?");
3106 return _permGen->cmsSpace()->block_start(p);
3107 }
3108 }
3109 return NULL;
3110 }
3111 #endif
3113 HeapWord*
3114 ConcurrentMarkSweepGeneration::expand_and_allocate(size_t word_size,
3115 bool tlab,
3116 bool parallel) {
3117 assert(!tlab, "Can't deal with TLAB allocation");
3118 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3119 expand(word_size*HeapWordSize, MinHeapDeltaBytes,
3120 CMSExpansionCause::_satisfy_allocation);
3121 if (GCExpandToAllocateDelayMillis > 0) {
3122 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3123 }
3124 return have_lock_and_allocate(word_size, tlab);
3125 }
3127 // YSR: All of this generation expansion/shrinking stuff is an exact copy of
3128 // OneContigSpaceCardGeneration, which makes me wonder if we should move this
3129 // to CardGeneration and share it...
3130 void ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes,
3131 CMSExpansionCause::Cause cause)
3132 {
3133 assert_locked_or_safepoint(Heap_lock);
3135 size_t aligned_bytes = ReservedSpace::page_align_size_up(bytes);
3136 size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
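 // Expansion policy: if the hinted expand_bytes exceeds the immediate need,
 // try to grow by the hint first; failing that, grow by the minimum needed
 // (bytes); as a last resort, grow by whatever uncommitted space remains
 // in the virtual space.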
3137 bool success = false;
3138 if (aligned_expand_bytes > aligned_bytes) {
3139 success = grow_by(aligned_expand_bytes);
3140 }
3141 if (!success) {
3142 success = grow_by(aligned_bytes);
3143 }
3144 if (!success) {
3145 size_t remaining_bytes = _virtual_space.uncommitted_size();
3146 if (remaining_bytes > 0) {
3147 success = grow_by(remaining_bytes);
3148 }
3149 }
3150 if (GC_locker::is_active()) {
3151 if (PrintGC && Verbose) {
3152 gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead");
3153 }
3154 }
3155 // remember why we expanded; this information is used
3156 // by shouldConcurrentCollect() when making decisions on whether to start
3157 // a new CMS cycle.
3158 if (success) {
3159 set_expansion_cause(cause);
3160 if (PrintGCDetails && Verbose) {
3161 gclog_or_tty->print_cr("Expanded CMS gen for %s",
3162 CMSExpansionCause::to_string(cause));
3163 }
3164 }
3165 }
3167 HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) {
3168 HeapWord* res = NULL;
3169 MutexLocker x(ParGCRareEvent_lock);
3170 while (true) {
3171 // Expansion by some other thread might make alloc OK now:
3172 res = ps->lab.alloc(word_sz);
3173 if (res != NULL) return res;
3174 // If there's not enough expansion space available, give up.
3175 if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) {
3176 return NULL;
3177 }
3178 // Otherwise, we try expansion.
3179 expand(word_sz*HeapWordSize, MinHeapDeltaBytes,
3180 CMSExpansionCause::_allocate_par_lab);
3181 // Now go around the loop and try alloc again;
3182 // A competing par_promote might beat us to the expansion space,
3183 // so we may go around the loop again if promotion fails again.
3184 if (GCExpandToAllocateDelayMillis > 0) {
3185 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3186 }
3187 }
3188 }
3191 bool ConcurrentMarkSweepGeneration::expand_and_ensure_spooling_space(
3192 PromotionInfo* promo) {
3193 MutexLocker x(ParGCRareEvent_lock);
3194 size_t refill_size_bytes = promo->refillSize() * HeapWordSize;
3195 while (true) {
3196 // Expansion by some other thread might make alloc OK now:
3197 if (promo->ensure_spooling_space()) {
3198 assert(promo->has_spooling_space(),
3199 "Post-condition of successful ensure_spooling_space()");
3200 return true;
3201 }
3202 // If there's not enough expansion space available, give up.
3203 if (_virtual_space.uncommitted_size() < refill_size_bytes) {
3204 return false;
3205 }
3206 // Otherwise, we try expansion.
3207 expand(refill_size_bytes, MinHeapDeltaBytes,
3208 CMSExpansionCause::_allocate_par_spooling_space);
3209 // Now go around the loop and try alloc again;
3210 // A competing allocation might beat us to the expansion space,
3211 // so we may go around the loop again if allocation fails again.
3212 if (GCExpandToAllocateDelayMillis > 0) {
3213 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3214 }
3215 }
3216 }
3220 void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
3221 assert_locked_or_safepoint(Heap_lock);
3222 size_t size = ReservedSpace::page_align_size_down(bytes);
3223 if (size > 0) {
3224 shrink_by(size);
3225 }
3226 }
3228 bool ConcurrentMarkSweepGeneration::grow_by(size_t bytes) {
3229 assert_locked_or_safepoint(Heap_lock);
3230 bool result = _virtual_space.expand_by(bytes);
3231 if (result) {
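 // Resize the supporting metadata (block offset shared array, card-table
 // covered region) before publishing the new end of the space, so that
 // every address the space can subsequently hand out is already covered.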
3232 HeapWord* old_end = _cmsSpace->end();
3233 size_t new_word_size =
3234 heap_word_size(_virtual_space.committed_size());
3235 MemRegion mr(_cmsSpace->bottom(), new_word_size);
3236 _bts->resize(new_word_size); // resize the block offset shared array
3237 Universe::heap()->barrier_set()->resize_covered_region(mr);
3238 // Hmmmm... why doesn't CFLS::set_end verify locking?
3239 // This is quite ugly; FIX ME XXX
3240 _cmsSpace->assert_locked();
3241 _cmsSpace->set_end((HeapWord*)_virtual_space.high());
3243 // update the space and generation capacity counters
3244 if (UsePerfData) {
3245 _space_counters->update_capacity();
3246 _gen_counters->update_all();
3247 }
3249 if (Verbose && PrintGC) {
3250 size_t new_mem_size = _virtual_space.committed_size();
3251 size_t old_mem_size = new_mem_size - bytes;
3252 gclog_or_tty->print_cr("Expanding %s from %ldK by %ldK to %ldK",
3253 name(), old_mem_size/K, bytes/K, new_mem_size/K);
3254 }
3255 }
3256 return result;
3257 }
3259 bool ConcurrentMarkSweepGeneration::grow_to_reserved() {
3260 assert_locked_or_safepoint(Heap_lock);
3261 bool success = true;
3262 const size_t remaining_bytes = _virtual_space.uncommitted_size();
3263 if (remaining_bytes > 0) {
3264 success = grow_by(remaining_bytes);
3265 DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
3266 }
3267 return success;
3268 }
3270 void ConcurrentMarkSweepGeneration::shrink_by(size_t bytes) {
3271 assert_locked_or_safepoint(Heap_lock);
3272 assert_lock_strong(freelistLock());
3273 // XXX Fix when compaction is implemented.
3274 warning("Shrinking of CMS not yet implemented");
3275 return;
3276 }
3279 // Simple ctor/dtor wrapper for accounting & timer chores around concurrent
3280 // phases.
3281 class CMSPhaseAccounting: public StackObj {
3282 public:
3283 CMSPhaseAccounting(CMSCollector *collector,
3284 const char *phase,
3285 bool print_cr = true);
3286 ~CMSPhaseAccounting();
3288 private:
3289 CMSCollector *_collector;
3290 const char *_phase;
3291 elapsedTimer _wallclock;
3292 bool _print_cr;
3294 public:
3295 // Not MT-safe; so do not pass around these StackObj's
3296 // where they may be accessed by other threads.
3297 jlong wallclock_millis() {
3298 assert(_wallclock.is_active(), "Wall clock should not stop");
3299 _wallclock.stop(); // to record time
3300 jlong ret = _wallclock.milliseconds();
3301 _wallclock.start(); // restart
3302 return ret;
3303 }
3304 };
3306 CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
3307 const char *phase,
3308 bool print_cr) :
3309 _collector(collector), _phase(phase), _print_cr(print_cr) {
3311 if (PrintCMSStatistics != 0) {
3312 _collector->resetYields();
3313 }
3314 if (PrintGCDetails && PrintGCTimeStamps) {
3315 gclog_or_tty->date_stamp(PrintGCDateStamps);
3316 gclog_or_tty->stamp();
3317 gclog_or_tty->print_cr(": [%s-concurrent-%s-start]",
3318 _collector->cmsGen()->short_name(), _phase);
3319 }
3320 _collector->resetTimer();
3321 _wallclock.start();
3322 _collector->startTimer();
3323 }
3325 CMSPhaseAccounting::~CMSPhaseAccounting() {
3326 assert(_wallclock.is_active(), "Wall clock should not have stopped");
3327 _collector->stopTimer();
3328 _wallclock.stop();
3329 if (PrintGCDetails) {
3330 gclog_or_tty->date_stamp(PrintGCDateStamps);
3331 if (PrintGCTimeStamps) {
3332 gclog_or_tty->stamp();
3333 gclog_or_tty->print(": ");
3334 }
3335 gclog_or_tty->print("[%s-concurrent-%s: %3.3f/%3.3f secs]",
3336 _collector->cmsGen()->short_name(),
3337 _phase, _collector->timerValue(), _wallclock.seconds());
3338 if (_print_cr) {
3339 gclog_or_tty->print_cr("");
3340 }
3341 if (PrintCMSStatistics != 0) {
3342 gclog_or_tty->print_cr(" (CMS-concurrent-%s yielded %d times)", _phase,
3343 _collector->yields());
3344 }
3345 }
3346 }
3348 // CMS work
3350 // Checkpoint the roots into this generation from outside
3351 // this generation. [Note this initial checkpoint need only
3352 // be approximate -- we'll do a catch up phase subsequently.]
3353 void CMSCollector::checkpointRootsInitial(bool asynch) {
3354 assert(_collectorState == InitialMarking, "Wrong collector state");
3355 check_correct_thread_executing();
3356 ReferenceProcessor* rp = ref_processor();
3357 SpecializationStats::clear();
3358 assert(_restart_addr == NULL, "Control point invariant");
3359 if (asynch) {
3360 // acquire locks for subsequent manipulations
3361 MutexLockerEx x(bitMapLock(),
3362 Mutex::_no_safepoint_check_flag);
3363 checkpointRootsInitialWork(asynch);
3364 rp->verify_no_references_recorded();
3365 rp->enable_discovery(); // enable ("weak") refs discovery
3366 _collectorState = Marking;
3367 } else {
3368 // (Weak) Refs discovery: this is controlled from GenCollectedHeap::do_collection
3369 // which recognizes if we are a CMS generation, and doesn't try to turn on
3370 // discovery; verify that they aren't meddling.
3371 assert(!rp->discovery_is_atomic(),
3372 "incorrect setting of discovery predicate");
3373 assert(!rp->discovery_enabled(), "genCollectedHeap shouldn't control "
3374 "ref discovery for this generation kind");
3375 // already have locks
3376 checkpointRootsInitialWork(asynch);
3377 rp->enable_discovery(); // now enable ("weak") refs discovery
3378 _collectorState = Marking;
3379 }
3380 SpecializationStats::print();
3381 }
3383 void CMSCollector::checkpointRootsInitialWork(bool asynch) {
3384 assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
3385 assert(_collectorState == InitialMarking, "just checking");
3387 // If there has not been a GC[n-1] since last GC[n] cycle completed,
3388 // precede our marking with a collection of all
3389 // younger generations to keep floating garbage to a minimum.
3390 // XXX: we won't do this for now -- it's an optimization to be done later.
3392 // already have locks
3393 assert_lock_strong(bitMapLock());
3394 assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");
3396 // Setup the verification and class unloading state for this
3397 // CMS collection cycle.
3398 setup_cms_unloading_and_verification_state();
3400 NOT_PRODUCT(TraceTime t("\ncheckpointRootsInitialWork",
3401 PrintGCDetails && Verbose, true, gclog_or_tty);)
3402 if (UseAdaptiveSizePolicy) {
3403 size_policy()->checkpoint_roots_initial_begin();
3404 }
3406 // Reset all the PLAB chunk arrays if necessary.
3407 if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
3408 reset_survivor_plab_arrays();
3409 }
3411 ResourceMark rm;
3412 HandleMark hm;
3414 FalseClosure falseClosure;
3415 // In the case of a synchronous collection, we will elide the
3416 // remark step, so it's important to catch all the nmethod oops
3417 // in this step; hence the last argument to the constructor below.
3418 MarkRefsIntoClosure notOlder(_span, &_markBitMap, !asynch /* nmethods */);
3419 GenCollectedHeap* gch = GenCollectedHeap::heap();
3421 verify_work_stacks_empty();
3422 verify_overflow_empty();
3424 gch->ensure_parsability(false); // fill TLABs, but no need to retire them
3425 // Update the saved marks which may affect the root scans.
3426 gch->save_marks();
3428 // weak reference processing has not started yet.
3429 ref_processor()->set_enqueuing_is_done(false);
3431 {
3432 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
3433 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3434 gch->gen_process_strong_roots(_cmsGen->level(),
3435 true, // younger gens are roots
3436 true, // collecting perm gen
3437 SharedHeap::ScanningOption(roots_scanning_options()),
3438 NULL, &notOlder);
3439 }
3441 // Clear mod-union table; it will be dirtied in the prologue of
3442 // CMS generation per each younger generation collection.
3444 assert(_modUnionTable.isAllClear(),
3445 "Was cleared in most recent final checkpoint phase"
3446 " or no bits are set in the gc_prologue before the start of the "
3447 "subsequent marking phase.");
3449 // Temporarily disabled, since pre/post-consumption closures don't
3450 // care about precleaned cards
3451 #if 0
3452 {
3453 MemRegion mr = MemRegion((HeapWord*)_virtual_space.low(),
3454 (HeapWord*)_virtual_space.high());
3455 _ct->ct_bs()->preclean_dirty_cards(mr);
3456 }
3457 #endif
3459 // Save the end of the used_region of the constituent generations
3460 // to be used to limit the extent of sweep in each generation.
3461 save_sweep_limits();
3462 if (UseAdaptiveSizePolicy) {
3463 size_policy()->checkpoint_roots_initial_end(gch->gc_cause());
3464 }
3465 verify_overflow_empty();
3466 }
3468 bool CMSCollector::markFromRoots(bool asynch) {
3469 // we might be tempted to assert that:
3470 // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
3471 // "inconsistent argument?");
3472 // However that wouldn't be right, because it's possible that
3473 // a safepoint is indeed in progress as a younger generation
3474 // stop-the-world GC happens even as we mark in this generation.
3475 assert(_collectorState == Marking, "inconsistent state?");
3476 check_correct_thread_executing();
3477 verify_overflow_empty();
3479 bool res;
3480 if (asynch) {
3482 // Start the timers for adaptive size policy for the concurrent phases
3483 // Do it here so that the foreground MS can use the concurrent
3484 // timer since a foreground MS might have the sweep done concurrently
3485 // or STW.
3486 if (UseAdaptiveSizePolicy) {
3487 size_policy()->concurrent_marking_begin();
3488 }
3490 // Weak ref discovery note: We may be discovering weak
3491 // refs in this generation concurrent (but interleaved) with
3492 // weak ref discovery by a younger generation collector.
3494 CMSTokenSyncWithLocks ts(true, bitMapLock());
3495 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
3496 CMSPhaseAccounting pa(this, "mark", !PrintGCDetails);
3497 res = markFromRootsWork(asynch);
3498 if (res) {
3499 _collectorState = Precleaning;
3500 } else { // We failed and a foreground collection wants to take over
3501 assert(_foregroundGCIsActive, "internal state inconsistency");
3502 assert(_restart_addr == NULL, "foreground will restart from scratch");
3503 if (PrintGCDetails) {
3504 gclog_or_tty->print_cr("bailing out to foreground collection");
3505 }
3506 }
3507 if (UseAdaptiveSizePolicy) {
3508 size_policy()->concurrent_marking_end();
3509 }
3510 } else {
3511 assert(SafepointSynchronize::is_at_safepoint(),
3512 "inconsistent with asynch == false");
3513 if (UseAdaptiveSizePolicy) {
3514 size_policy()->ms_collection_marking_begin();
3515 }
3516 // already have locks
3517 res = markFromRootsWork(asynch);
3518 _collectorState = FinalMarking;
3519 if (UseAdaptiveSizePolicy) {
3520 GenCollectedHeap* gch = GenCollectedHeap::heap();
3521 size_policy()->ms_collection_marking_end(gch->gc_cause());
3522 }
3523 }
3524 verify_overflow_empty();
3525 return res;
3526 }
3528 bool CMSCollector::markFromRootsWork(bool asynch) {
3529 // iterate over marked bits in bit map, doing a full scan and mark
3530 // from these roots using the following algorithm:
3531 // . if oop is to the right of the current scan pointer,
3532 // mark corresponding bit (we'll process it later)
3533 // . else (oop is to left of current scan pointer)
3534 // push oop on marking stack
3535 // . drain the marking stack
3537 // Note that when we do a marking step we need to hold the
3538 // bit map lock -- recall that direct allocation (by mutators)
3539 // and promotion (by younger generation collectors) are also
3540 // marking the bit map. [the so-called allocate live policy.]
3541 // Because the implementation of bit map marking is not
3542 // robust wrt simultaneous marking of bits in the same word,
3543 // we need to make sure that there is no such interference
3544 // between concurrent such updates.
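 // Illustrative sketch only (the marking closures below are the actual
 // implementation); "finger" denotes the current scan pointer:
 //   for each marked bit at address addr >= finger, in address order:
 //     scan the object at addr; for each reference q it contains:
 //       if q is in the span and not yet marked: mark q;
 //         if q < finger: push q on the marking stack  // finger already past q
 //         else: nothing more to do -- the finger will reach q's bit later
 //     drain the marking stack before advancing the finger past addr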
3546 // already have locks
3547 assert_lock_strong(bitMapLock());
3549 // Clear the revisit stack, just in case there are any
3550 // obsolete contents from a short-circuited previous CMS cycle.
3551 _revisitStack.reset();
3552 verify_work_stacks_empty();
3553 verify_overflow_empty();
3554 assert(_revisitStack.isEmpty(), "tabula rasa");
3556 bool result = false;
3557 if (CMSConcurrentMTEnabled && ParallelCMSThreads > 0) {
3558 result = do_marking_mt(asynch);
3559 } else {
3560 result = do_marking_st(asynch);
3561 }
3562 return result;
3563 }
3565 // Forward decl
3566 class CMSConcMarkingTask;
3568 class CMSConcMarkingTerminator: public ParallelTaskTerminator {
3569 CMSCollector* _collector;
3570 CMSConcMarkingTask* _task;
3571 bool _yield;
3572 protected:
3573 virtual void yield();
3574 public:
3575 // "n_threads" is the number of threads to be terminated.
3576 // "queue_set" is a set of work queues of other threads.
3577 // "collector" is the CMS collector associated with this task terminator.
3578 // "yield" indicates whether we need the gang as a whole to yield.
3579 CMSConcMarkingTerminator(int n_threads, TaskQueueSetSuper* queue_set,
3580 CMSCollector* collector, bool yield) :
3581 ParallelTaskTerminator(n_threads, queue_set),
3582 _collector(collector),
3583 _yield(yield) { }
3585 void set_task(CMSConcMarkingTask* task) {
3586 _task = task;
3587 }
3588 };
3590 // MT Concurrent Marking Task
3591 class CMSConcMarkingTask: public YieldingFlexibleGangTask {
3592 CMSCollector* _collector;
3593 YieldingFlexibleWorkGang* _workers; // the whole gang
3594 int _n_workers; // requested/desired # workers
3595 bool _asynch;
3596 bool _result;
3597 CompactibleFreeListSpace* _cms_space;
3598 CompactibleFreeListSpace* _perm_space;
3599 HeapWord* _global_finger;
3601 // Exposed here for yielding support
3602 Mutex* const _bit_map_lock;
3604 // The per thread work queues, available here for stealing
3605 OopTaskQueueSet* _task_queues;
3606 CMSConcMarkingTerminator _term;
3608 public:
3609 CMSConcMarkingTask(CMSCollector* collector,
3610 CompactibleFreeListSpace* cms_space,
3611 CompactibleFreeListSpace* perm_space,
3612 bool asynch, int n_workers,
3613 YieldingFlexibleWorkGang* workers,
3614 OopTaskQueueSet* task_queues):
3615 YieldingFlexibleGangTask("Concurrent marking done multi-threaded"),
3616 _collector(collector),
3617 _cms_space(cms_space),
3618 _perm_space(perm_space),
3619 _asynch(asynch), _n_workers(n_workers), _result(true),
3620 _workers(workers), _task_queues(task_queues),
3621 _term(n_workers, task_queues, _collector, asynch),
3622 _bit_map_lock(collector->bitMapLock())
3623 {
3624 assert(n_workers <= workers->total_workers(),
3625 "Else termination won't work correctly today"); // XXX FIX ME!
3626 _requested_size = n_workers;
3627 _term.set_task(this);
3628 assert(_cms_space->bottom() < _perm_space->bottom(),
3629 "Finger incorrectly initialized below");
3630 _global_finger = _cms_space->bottom();
3631 }
3634 OopTaskQueueSet* task_queues() { return _task_queues; }
3636 OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
3638 HeapWord** global_finger_addr() { return &_global_finger; }
3640 CMSConcMarkingTerminator* terminator() { return &_term; }
3642 void work(int i);
3644 virtual void coordinator_yield(); // stuff done by coordinator
3645 bool result() { return _result; }
3647 void reset(HeapWord* ra) {
3648 _term.reset_for_reuse();
3649 }
3651 static bool get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3652 OopTaskQueue* work_q);
3654 private:
3655 void do_scan_and_mark(int i, CompactibleFreeListSpace* sp);
3656 void do_work_steal(int i);
3657 void bump_global_finger(HeapWord* f);
3658 };
3660 void CMSConcMarkingTerminator::yield() {
3661 if (ConcurrentMarkSweepThread::should_yield() &&
3662 !_collector->foregroundGCIsActive() &&
3663 _yield) {
3664 _task->yield();
3665 } else {
3666 ParallelTaskTerminator::yield();
3667 }
3668 }
3670 ////////////////////////////////////////////////////////////////
3671 // Concurrent Marking Algorithm Sketch
3672 ////////////////////////////////////////////////////////////////
3673 // Until all tasks exhausted (both spaces):
3674 // -- claim next available chunk
3675 // -- bump global finger via CAS
3676 // -- find first object that starts in this chunk
3677 // and start scanning bitmap from that position
3678 // -- scan marked objects for oops
3679 // -- CAS-mark target, and if successful:
3680 // . if target oop is above global finger (volatile read)
3681 // nothing to do
3682 // . if target oop is in chunk and above local finger
3683 // then nothing to do
3684 // . else push on work-queue
3685 // -- Deal with possible overflow issues:
3686 // . local work-queue overflow causes stuff to be pushed on
3687 // global (common) overflow queue
3688 // . always first empty local work queue
3689 // . then get a batch of oops from global work queue if any
3690 // . then do work stealing
3691 // -- When all tasks claimed (both spaces)
3692 // and local work queue empty,
3693 // then in a loop do:
3694 // . check global overflow stack; steal a batch of oops and trace
3695 // . try to steal from other threads if GOS is empty
3696 // . if neither is available, offer termination
3697 // -- Terminate and return result
3698 //
3699 void CMSConcMarkingTask::work(int i) {
3700 elapsedTimer _timer;
3701 ResourceMark rm;
3702 HandleMark hm;
3704 DEBUG_ONLY(_collector->verify_overflow_empty();)
3706 // Before we begin work, our work queue should be empty
3707 assert(work_queue(i)->size() == 0, "Expected to be empty");
3708 // Scan the bitmap covering _cms_space, tracing through grey objects.
3709 _timer.start();
3710 do_scan_and_mark(i, _cms_space);
3711 _timer.stop();
3712 if (PrintCMSStatistics != 0) {
3713 gclog_or_tty->print_cr("Finished cms space scanning in %dth thread: %3.3f sec",
3714 i, _timer.seconds()); // XXX: need xxx/xxx type of notation, two timers
3715 }
3717 // ... do the same for the _perm_space
3718 _timer.reset();
3719 _timer.start();
3720 do_scan_and_mark(i, _perm_space);
3721 _timer.stop();
3722 if (PrintCMSStatistics != 0) {
3723 gclog_or_tty->print_cr("Finished perm space scanning in %dth thread: %3.3f sec",
3724 i, _timer.seconds()); // XXX: need xxx/xxx type of notation, two timers
3725 }
3727 // ... do work stealing
3728 _timer.reset();
3729 _timer.start();
3730 do_work_steal(i);
3731 _timer.stop();
3732 if (PrintCMSStatistics != 0) {
3733 gclog_or_tty->print_cr("Finished work stealing in %dth thread: %3.3f sec",
3734 i, _timer.seconds()); // XXX: need xxx/xxx type of notation, two timers
3735 }
3736 assert(_collector->_markStack.isEmpty(), "Should have been emptied");
3737 assert(work_queue(i)->size() == 0, "Should have been emptied");
3738 // Note that under the current task protocol, the
3739 // following assertion is true even if the spaces
3740 // expanded since the completion of the concurrent
3741 // marking. XXX This will likely change under a strict
3742 // ABORT semantics.
3743 assert(_global_finger > _cms_space->end() &&
3744 _global_finger >= _perm_space->end(),
3745 "All tasks have been completed");
3746 DEBUG_ONLY(_collector->verify_overflow_empty();)
3747 }
3749 void CMSConcMarkingTask::bump_global_finger(HeapWord* f) {
3750 HeapWord* read = _global_finger;
3751 HeapWord* cur = read;
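 // Monotonically advance _global_finger to f: retry the CAS while the
 // observed finger is still behind f. Atomic::cmpxchg_ptr returns the value
 // seen at &_global_finger before the exchange, so equality with 'cur'
 // means our update was installed; otherwise another thread moved the
 // finger and we re-test against the new value.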
3752 while (f > read) {
3753 cur = read;
3754 read = (HeapWord*) Atomic::cmpxchg_ptr(f, &_global_finger, cur);
3755 if (cur == read) {
3756 // our cas succeeded
3757 assert(_global_finger >= f, "protocol consistency");
3758 break;
3759 }
3760 }
3761 }
3763 // This is really inefficient, and should be redone by
3764 // using (not yet available) block-read and -write interfaces to the
3765 // stack and the work_queue. XXX FIX ME !!!
3766 bool CMSConcMarkingTask::get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3767 OopTaskQueue* work_q) {
3768 // Fast lock-free check
3769 if (ovflw_stk->length() == 0) {
3770 return false;
3771 }
3772 assert(work_q->size() == 0, "Shouldn't steal");
3773 MutexLockerEx ml(ovflw_stk->par_lock(),
3774 Mutex::_no_safepoint_check_flag);
3775 // Grab up to 1/4 the size of the work queue
3776 size_t num = MIN2((size_t)work_q->max_elems()/4,
3777 (size_t)ParGCDesiredObjsFromOverflowList);
3778 num = MIN2(num, ovflw_stk->length());
3779 for (int i = (int) num; i > 0; i--) {
3780 oop cur = ovflw_stk->pop();
3781 assert(cur != NULL, "Counted wrong?");
3782 work_q->push(cur);
3783 }
3784 return num > 0;
3785 }
3787 void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
3788 SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
3789 int n_tasks = pst->n_tasks();
3790 // We allow that there may be no tasks to do here because
3791 // we are restarting after a stack overflow.
3792 assert(pst->valid() || n_tasks == 0, "Uninitialized use?");
3793 int nth_task = 0;
3795 HeapWord* start = sp->bottom();
3796 size_t chunk_size = sp->marking_task_size();
3797 while (!pst->is_task_claimed(/* reference */ nth_task)) {
3798 // Having claimed the nth task in this space,
3799 // compute the chunk that it corresponds to:
3800 MemRegion span = MemRegion(start + nth_task*chunk_size,
3801 start + (nth_task+1)*chunk_size);
3802 // Try and bump the global finger via a CAS;
3803 // note that we need to do the global finger bump
3804 // _before_ taking the intersection below, because
3805 // the task corresponding to that region will be
3806 // deemed done even if the used_region() expands
3807 // because of allocation -- as it almost certainly will
3808 // during start-up while the threads yield in the
3809 // closure below.
3810 HeapWord* finger = span.end();
3811 bump_global_finger(finger); // atomically
3812 // There are null tasks here corresponding to chunks
3813 // beyond the "top" address of the space.
3814 span = span.intersection(sp->used_region());
3815 if (!span.is_empty()) { // Non-null task
3816 // We want to skip the first object because
3817 // the protocol is to scan any object in its entirety
3818 // that _starts_ in this span; a fortiori, any
3819 // object starting in an earlier span is scanned
3820 // as part of an earlier claimed task.
3821 // Below we use the "careful" version of block_start
3822 // so we do not try to navigate uninitialized objects.
3823 HeapWord* prev_obj = sp->block_start_careful(span.start());
3824 // Below we use a variant of block_size that uses the
3825 // Printezis bits to avoid waiting for allocated
3826 // objects to become initialized/parsable.
3827 while (prev_obj < span.start()) {
3828 size_t sz = sp->block_size_no_stall(prev_obj, _collector);
3829 if (sz > 0) {
3830 prev_obj += sz;
3831 } else {
3832 // In this case we may end up doing a bit of redundant
3833 // scanning, but that appears unavoidable, short of
3834 // locking the free list locks; see bug 6324141.
3835 break;
3836 }
3837 }
3838 if (prev_obj < span.end()) {
3839 MemRegion my_span = MemRegion(prev_obj, span.end());
3840 // Do the marking work within a non-empty span --
3841 // the last argument to the constructor indicates whether the
3842 // iteration should be incremental with periodic yields.
3843 Par_MarkFromRootsClosure cl(this, _collector, my_span,
3844 &_collector->_markBitMap,
3845 work_queue(i),
3846 &_collector->_markStack,
3847 &_collector->_revisitStack,
3848 _asynch);
3849 _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end());
3850 } // else nothing to do for this task
3851 } // else nothing to do for this task
3852 }
3853 // We'd be tempted to assert here that since there are no
3854 // more tasks left to claim in this space, the global_finger
3855 // must exceed space->top() and a fortiori space->end(). However,
3856 // that would not quite be correct because the bumping of
3857 // global_finger occurs strictly after the claiming of a task,
3858 // so by the time we reach here the global finger may not yet
3859 // have been bumped up by the thread that claimed the last
3860 // task.
3861 pst->all_tasks_completed();
3862 }
3864 class Par_ConcMarkingClosure: public OopClosure {
3865 CMSCollector* _collector;
3866 MemRegion _span;
3867 CMSBitMap* _bit_map;
3868 CMSMarkStack* _overflow_stack;
3869 CMSMarkStack* _revisit_stack; // XXXXXX Check proper use
3870 OopTaskQueue* _work_queue;
3872 public:
3873 Par_ConcMarkingClosure(CMSCollector* collector, OopTaskQueue* work_queue,
3874 CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
3875 _collector(collector),
3876 _span(_collector->_span),
3877 _work_queue(work_queue),
3878 _bit_map(bit_map),
3879 _overflow_stack(overflow_stack) { } // need to initialize revisit stack etc.
3881 void do_oop(oop* p);
3882 void trim_queue(size_t max);
3883 void handle_stack_overflow(HeapWord* lost);
3884 };
3886 // Grey object rescan during work stealing phase --
3887 // the salient assumption here is that stolen oops must
3888 // always be initialized, so we do not need to check for
3889 // uninitialized objects before scanning here.
3890 void Par_ConcMarkingClosure::do_oop(oop* p) {
3891 oop this_oop = *p;
3892 assert(this_oop->is_oop_or_null(),
3893 "expected an oop or NULL");
3894 HeapWord* addr = (HeapWord*)this_oop;
3895 // Check if oop points into the CMS generation
3896 // and is not marked
3897 if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
3898 // a white object ...
3899 // If we manage to "claim" the object, by being the
3900 // first thread to mark it, then we push it on our
3901 // marking stack
3902 if (_bit_map->par_mark(addr)) { // ... now grey
3903 // push on work queue (grey set)
3904 bool simulate_overflow = false;
3905 NOT_PRODUCT(
3906 if (CMSMarkStackOverflowALot &&
3907 _collector->simulate_overflow()) {
3908 // simulate a stack overflow
3909 simulate_overflow = true;
3910 }
3911 )
3912 if (simulate_overflow ||
3913 !(_work_queue->push(this_oop) || _overflow_stack->par_push(this_oop))) {
3914 // stack overflow
3915 if (PrintCMSStatistics != 0) {
3916 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
3917 SIZE_FORMAT, _overflow_stack->capacity());
3918 }
3919 // We cannot assert that the overflow stack is full because
3920 // it may have been emptied since.
3921 assert(simulate_overflow ||
3922 _work_queue->size() == _work_queue->max_elems(),
3923 "Else push should have succeeded");
3924 handle_stack_overflow(addr);
3925 }
3926 } // Else, some other thread got there first
3927 }
3928 }
3930 void Par_ConcMarkingClosure::trim_queue(size_t max) {
3931 while (_work_queue->size() > max) {
3932 oop new_oop;
3933 if (_work_queue->pop_local(new_oop)) {
3934 assert(new_oop->is_oop(), "Should be an oop");
3935 assert(_bit_map->isMarked((HeapWord*)new_oop), "Grey object");
3936 assert(_span.contains((HeapWord*)new_oop), "Not in span");
3937 assert(new_oop->is_parsable(), "Should be parsable");
3938 new_oop->oop_iterate(this); // do_oop() above
3939 }
3940 }
3941 }
3943 // Upon stack overflow, we discard (part of) the stack,
3944 // remembering the least address amongst those discarded
3945 // in CMSCollector's _restart_address.
3946 void Par_ConcMarkingClosure::handle_stack_overflow(HeapWord* lost) {
3947 // We need to do this under a mutex to prevent other
3948 // workers from interfering with the expansion below.
3949 MutexLockerEx ml(_overflow_stack->par_lock(),
3950 Mutex::_no_safepoint_check_flag);
3951 // Remember the least grey address discarded
3952 HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
3953 _collector->lower_restart_addr(ra);
3954 _overflow_stack->reset(); // discard stack contents
3955 _overflow_stack->expand(); // expand the stack if possible
3956 }
3959 void CMSConcMarkingTask::do_work_steal(int i) {
3960 OopTaskQueue* work_q = work_queue(i);
3961 oop obj_to_scan;
3962 CMSBitMap* bm = &(_collector->_markBitMap);
3963 CMSMarkStack* ovflw = &(_collector->_markStack);
3964 int* seed = _collector->hash_seed(i);
3965 Par_ConcMarkingClosure cl(_collector, work_q, bm, ovflw);
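 // Work loop: drain the local queue completely, then try to refill it from
 // the shared overflow stack, then try to steal from a peer's queue; only
 // when none of these yields any work do we offer termination.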
3966 while (true) {
3967 cl.trim_queue(0);
3968 assert(work_q->size() == 0, "Should have been emptied above");
3969 if (get_work_from_overflow_stack(ovflw, work_q)) {
3970 // Can't assert below because the work obtained from the
3971 // overflow stack may already have been stolen from us.
3972 // assert(work_q->size() > 0, "Work from overflow stack");
3973 continue;
3974 } else if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
3975 assert(obj_to_scan->is_oop(), "Should be an oop");
3976 assert(bm->isMarked((HeapWord*)obj_to_scan), "Grey object");
3977 obj_to_scan->oop_iterate(&cl);
3978 } else if (terminator()->offer_termination()) {
3979 assert(work_q->size() == 0, "Impossible!");
3980 break;
3981 }
3982 }
3983 }
3985 // This is run by the CMS (coordinator) thread.
3986 void CMSConcMarkingTask::coordinator_yield() {
3987 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
3988 "CMS thread should hold CMS token");
3990 // First give up the locks, then yield, then re-lock
3991 // We should probably use a constructor/destructor idiom to
3992 // do this unlock/lock or modify the MutexUnlocker class to
3993 // serve our purpose. XXX
3994 assert_lock_strong(_bit_map_lock);
3995 _bit_map_lock->unlock();
3996 ConcurrentMarkSweepThread::desynchronize(true);
3997 ConcurrentMarkSweepThread::acknowledge_yield_request();
3998 _collector->stopTimer();
3999 if (PrintCMSStatistics != 0) {
4000 _collector->incrementYields();
4001 }
4002 _collector->icms_wait();
4004 // It is possible for whichever thread initiated the yield request
4005 // not to get a chance to wake up and take the bitmap lock between
4006 // this thread releasing it and reacquiring it. So, while the
4007 // should_yield() flag is on, let's sleep for a bit to give the
4008 // other thread a chance to wake up. The limit imposed on the number
4009 // of iterations is defensive, to avoid any unforeseen circumstances
4010 // putting us into an infinite loop. Since it's always been this
4011 // (coordinator_yield()) method that was observed to cause the
4012 // problem, we are using a parameter (CMSCoordinatorYieldSleepCount)
4013 // which is by default non-zero. For the other seven methods that
4014 // also perform the yield operation, we are using a different
4015 // parameter (CMSYieldSleepCount) which is by default zero. This way we
4016 // can enable the sleeping for those methods too, if necessary.
4017 // See 6442774.
4018 //
4019 // We really need to reconsider the synchronization between the GC
4020 // thread and the yield-requesting threads in the future and we
4021 // should really use wait/notify, which is the recommended
4022 // way of doing this type of interaction. Additionally, we should
4023 // consolidate the eight almost-identical methods that perform the yield
4024 // operation into one for better maintainability and
4025 // readability. See 6445193.
4026 //
4027 // Tony 2006.06.29
4028 for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
4029 ConcurrentMarkSweepThread::should_yield() &&
4030 !CMSCollector::foregroundGCIsActive(); ++i) {
4031 os::sleep(Thread::current(), 1, false);
4032 ConcurrentMarkSweepThread::acknowledge_yield_request();
4033 }
4035 ConcurrentMarkSweepThread::synchronize(true);
4036 _bit_map_lock->lock_without_safepoint_check();
4037 _collector->startTimer();
4038 }
4040 bool CMSCollector::do_marking_mt(bool asynch) {
4041 assert(ParallelCMSThreads > 0 && conc_workers() != NULL, "precondition");
4042 // In the future this would be determined ergonomically, based
4043 // on #cpu's, # active mutator threads (and load), and mutation rate.
4044 int num_workers = ParallelCMSThreads;
4046 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
4047 CompactibleFreeListSpace* perm_space = _permGen->cmsSpace();
4049 CMSConcMarkingTask tsk(this, cms_space, perm_space,
4050 asynch, num_workers /* number requested XXX */,
4051 conc_workers(), task_queues());
4053 // Since the actual number of workers we get may be different
4054 // from the number we requested above, do we need to do anything different
4055 // below? In particular, maybe we need to subclass the SequentialSubTasksDone
4056 // class?? XXX
4057 cms_space ->initialize_sequential_subtasks_for_marking(num_workers);
4058 perm_space->initialize_sequential_subtasks_for_marking(num_workers);
4060 // Refs discovery is already non-atomic.
4061 assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic");
4062 // Mutate the Refs discovery so it is MT during the
4063 // multi-threaded marking phase.
4064 ReferenceProcessorMTMutator mt(ref_processor(), num_workers > 1);
4066 conc_workers()->start_task(&tsk);
4067 while (tsk.yielded()) {
4068 tsk.coordinator_yield();
4069 conc_workers()->continue_task(&tsk);
4070 }
4071 // If the task was aborted, _restart_addr will be non-NULL
4072 assert(tsk.completed() || _restart_addr != NULL, "Inconsistency");
4073 while (_restart_addr != NULL) {
4074 // XXX For now we do not make use of ABORTED state and have not
4075 // yet implemented the right abort semantics (even in the original
4076 // single-threaded CMS case). That needs some more investigation
4077 // and is deferred for now; see CR# TBF. 07252005YSR. XXX
4078 assert(!CMSAbortSemantics || tsk.aborted(), "Inconsistency");
4079 // If _restart_addr is non-NULL, a marking stack overflow
4080 // occurred; we need to do a fresh marking iteration from the
4081 // indicated restart address.
4082 if (_foregroundGCIsActive && asynch) {
4083 // We may be running into repeated stack overflows, having
4084 // reached the limit of the stack size, while making very
4085 // slow forward progress. It may be best to bail out and
4086 // let the foreground collector do its job.
4087 // Clear _restart_addr, so that foreground GC
4088 // works from scratch. This avoids the headache of
4089 // a "rescan" which would otherwise be needed because
4090 // of the dirty mod union table & card table.
4091 _restart_addr = NULL;
4092 return false;
4093 }
4094 // Adjust the task to restart from _restart_addr
4095 tsk.reset(_restart_addr);
4096 cms_space ->initialize_sequential_subtasks_for_marking(num_workers,
4097 _restart_addr);
4098 perm_space->initialize_sequential_subtasks_for_marking(num_workers,
4099 _restart_addr);
4100 _restart_addr = NULL;
4101 // Get the workers going again
4102 conc_workers()->start_task(&tsk);
4103 while (tsk.yielded()) {
4104 tsk.coordinator_yield();
4105 conc_workers()->continue_task(&tsk);
4106 }
4107 }
4108 assert(tsk.completed(), "Inconsistency");
4109 assert(tsk.result() == true, "Inconsistency");
4110 return true;
4111 }
4113 bool CMSCollector::do_marking_st(bool asynch) {
4114 ResourceMark rm;
4115 HandleMark hm;
4117 MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap,
4118 &_markStack, &_revisitStack, CMSYield && asynch);
4119 // the last argument to the closure's constructor indicates whether the iteration
4120 // should be incremental with periodic yields.
4121 _markBitMap.iterate(&markFromRootsClosure);
4122 // If _restart_addr is non-NULL, a marking stack overflow
4123 // occurred; we need to do a fresh iteration from the
4124 // indicated restart address.
4125 while (_restart_addr != NULL) {
4126 if (_foregroundGCIsActive && asynch) {
4127 // We may be running into repeated stack overflows, having
4128 // reached the limit of the stack size, while making very
4129 // slow forward progress. It may be best to bail out and
4130 // let the foreground collector do its job.
4131 // Clear _restart_addr, so that foreground GC
4132 // works from scratch. This avoids the headache of
4133 // a "rescan" which would otherwise be needed because
4134 // of the dirty mod union table & card table.
4135 _restart_addr = NULL;
4136 return false; // indicating failure to complete marking
4137 }
4138 // Deal with stack overflow:
4139 // we restart marking from _restart_addr
4140 HeapWord* ra = _restart_addr;
4141 markFromRootsClosure.reset(ra);
4142 _restart_addr = NULL;
4143 _markBitMap.iterate(&markFromRootsClosure, ra, _span.end());
4144 }
4145 return true;
4146 }
4148 void CMSCollector::preclean() {
4149 check_correct_thread_executing();
4150 assert(Thread::current()->is_ConcurrentGC_thread(), "Wrong thread");
4151 verify_work_stacks_empty();
4152 verify_overflow_empty();
4153 _abort_preclean = false;
4154 if (CMSPrecleaningEnabled) {
4155 _eden_chunk_index = 0;
4156 size_t used = get_eden_used();
4157 size_t capacity = get_eden_capacity();
4158 // Don't start sampling unless we will get sufficiently
4159 // many samples.
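 // Roughly: sample only while eden occupancy is still below
 // (CMSScheduleRemarkEdenPenetration% of capacity) / CMSScheduleRemarkSamplingRatio,
 // i.e. no more than 1/SamplingRatio of the way to the occupancy at which
 // the remark is to be scheduled.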
4160 if (used < (capacity/(CMSScheduleRemarkSamplingRatio * 100)
4161 * CMSScheduleRemarkEdenPenetration)) {
4162 _start_sampling = true;
4163 } else {
4164 _start_sampling = false;
4165 }
4166 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
4167 CMSPhaseAccounting pa(this, "preclean", !PrintGCDetails);
4168 preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
4169 }
4170 CMSTokenSync x(true); // is cms thread
4171 if (CMSPrecleaningEnabled) {
4172 sample_eden();
4173 _collectorState = AbortablePreclean;
4174 } else {
4175 _collectorState = FinalMarking;
4176 }
4177 verify_work_stacks_empty();
4178 verify_overflow_empty();
4179 }
4181 // Try and schedule the remark such that young gen
4182 // occupancy is CMSScheduleRemarkEdenPenetration %.
4183 void CMSCollector::abortable_preclean() {
4184 check_correct_thread_executing();
4185 assert(CMSPrecleaningEnabled, "Inconsistent control state");
4186 assert(_collectorState == AbortablePreclean, "Inconsistent control state");
4188 // If Eden's current occupancy is below this threshold,
4189 // immediately schedule the remark; else preclean
4190 // past the next scavenge in an effort to
4191 // schedule the pause as described above. By choosing
4192 // CMSScheduleRemarkEdenSizeThreshold >= max eden size
4193 // we will never do an actual abortable preclean cycle.
4194 if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
4195 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
4196 CMSPhaseAccounting pa(this, "abortable-preclean", !PrintGCDetails);
4197 // We need more smarts in the abortable preclean
4198 // loop below to deal with cases where allocation
4199 // in young gen is very very slow, and our precleaning
4200 // is running a losing race against a horde of
4201 // mutators intent on flooding us with CMS updates
4202 // (dirty cards).
4203 // One, admittedly dumb, strategy is to give up
4204 // after a certain number of abortable precleaning loops
4205 // or after a certain maximum time. We want to make
4206 // this smarter in the next iteration.
4207 // XXX FIX ME!!! YSR
4208 size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0;
4209 while (!(should_abort_preclean() ||
4210 ConcurrentMarkSweepThread::should_terminate())) {
4211 workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2);
4212 cumworkdone += workdone;
4213 loops++;
4214 // Voluntarily terminate abortable preclean phase if we have
4215 // been at it for too long.
4216 if ((CMSMaxAbortablePrecleanLoops != 0) &&
4217 loops >= CMSMaxAbortablePrecleanLoops) {
4218 if (PrintGCDetails) {
4219 gclog_or_tty->print(" CMS: abort preclean due to loops ");
4220 }
4221 break;
4222 }
4223 if (pa.wallclock_millis() > CMSMaxAbortablePrecleanTime) {
4224 if (PrintGCDetails) {
4225 gclog_or_tty->print(" CMS: abort preclean due to time ");
4226 }
4227 break;
4228 }
4229 // If we are doing little work each iteration, we should
4230 // take a short break.
4231 if (workdone < CMSAbortablePrecleanMinWorkPerIteration) {
4232 // Sleep for some time, waiting for work to accumulate
4233 stopTimer();
4234 cmsThread()->wait_on_cms_lock(CMSAbortablePrecleanWaitMillis);
4235 startTimer();
4236 waited++;
4237 }
4238 }
4239 if (PrintCMSStatistics > 0) {
4240 gclog_or_tty->print(" [%d iterations, %d waits, %d cards] ",
4241 loops, waited, cumworkdone);
4242 }
4243 }
4244 CMSTokenSync x(true); // is cms thread
4245 if (_collectorState != Idling) {
4246 assert(_collectorState == AbortablePreclean,
4247 "Spontaneous state transition?");
4248 _collectorState = FinalMarking;
4249 } // Else, a foreground collection completed this CMS cycle.
4250 return;
4251 }
4253 // Respond to an Eden sampling opportunity
4254 void CMSCollector::sample_eden() {
4255 // Make sure a young gc cannot sneak in between our
4256 // reading and recording of a sample.
4257 assert(Thread::current()->is_ConcurrentGC_thread(),
4258 "Only the cms thread may collect Eden samples");
4259 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
4260 "Should collect samples while holding CMS token");
4261 if (!_start_sampling) {
4262 return;
4263 }
4264 if (_eden_chunk_array) {
4265 if (_eden_chunk_index < _eden_chunk_capacity) {
4266 _eden_chunk_array[_eden_chunk_index] = *_top_addr; // take sample
4267 assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
4268 "Unexpected state of Eden");
4269 // We'd like to check that what we just sampled is an oop-start address;
4270 // however, we cannot do that here since the object may not yet have been
4271 // initialized. So we'll instead do the check when we _use_ this sample
4272 // later.
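 // Commit this sample only if it is the first one, or if it lies at
 // least CMSSamplingGrain heap words beyond the previously committed
 // sample, so that successive samples are at least a grain apart.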
4273 if (_eden_chunk_index == 0 ||
4274 (pointer_delta(_eden_chunk_array[_eden_chunk_index],
4275 _eden_chunk_array[_eden_chunk_index-1])
4276 >= CMSSamplingGrain)) {
4277 _eden_chunk_index++; // commit sample
4278 }
4279 }
4280 }
4281 if ((_collectorState == AbortablePreclean) && !_abort_preclean) {
4282 size_t used = get_eden_used();
4283 size_t capacity = get_eden_capacity();
4284 assert(used <= capacity, "Unexpected state of Eden");
4285 if (used > (capacity/100 * CMSScheduleRemarkEdenPenetration)) {
4286 _abort_preclean = true;
4287 }
4288 }
4289 }
4292 size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
4293 assert(_collectorState == Precleaning ||
4294 _collectorState == AbortablePreclean, "incorrect state");
4295 ResourceMark rm;
4296 HandleMark hm;
4297 // Do one pass of scrubbing the discovered reference lists
4298 // to remove any reference objects with strongly-reachable
4299 // referents.
4300 if (clean_refs) {
4301 ReferenceProcessor* rp = ref_processor();
4302 CMSPrecleanRefsYieldClosure yield_cl(this);
4303 assert(rp->span().equals(_span), "Spans should be equal");
4304 CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
4305 &_markStack);
4306 CMSDrainMarkingStackClosure complete_trace(this,
4307 _span, &_markBitMap, &_markStack,
4308 &keep_alive);
4310 // We don't want this step to interfere with a young
4311 // collection because we don't want to take CPU
4312 // or memory bandwidth away from the young GC threads
4313 // (which may be as many as there are CPUs).
4314 // Note that we don't need to protect ourselves from
4315 // interference with mutators because they can't
4316 // manipulate the discovered reference lists nor affect
4317 // the computed reachability of the referents, the
4318 // only properties manipulated by the precleaning
4319 // of these reference lists.
4320 stopTimer();
4321 CMSTokenSyncWithLocks x(true /* is cms thread */,
4322 bitMapLock());
4323 startTimer();
4324 sample_eden();
4325 // The following will yield to allow foreground
4326 // collection to proceed promptly. XXX YSR:
4327 // The code in this method may need further
4328 // tweaking for better performance and some restructuring
4329 // for cleaner interfaces.
4330 rp->preclean_discovered_references(
4331 rp->is_alive_non_header(), &keep_alive, &complete_trace,
4332 &yield_cl);
4333 }
4335 if (clean_survivor) { // preclean the active survivor space(s)
4336 assert(_young_gen->kind() == Generation::DefNew ||
4337 _young_gen->kind() == Generation::ParNew ||
4338 _young_gen->kind() == Generation::ASParNew,
4339 "incorrect type for cast");
4340 DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
4341 PushAndMarkClosure pam_cl(this, _span, ref_processor(),
4342 &_markBitMap, &_modUnionTable,
4343 &_markStack, &_revisitStack,
4344 true /* precleaning phase */);
4345 stopTimer();
4346 CMSTokenSyncWithLocks ts(true /* is cms thread */,
4347 bitMapLock());
4348 startTimer();
4349 unsigned int before_count =
4350 GenCollectedHeap::heap()->total_collections();
4351 SurvivorSpacePrecleanClosure
4352 sss_cl(this, _span, &_markBitMap, &_markStack,
4353 &pam_cl, before_count, CMSYield);
4354 dng->from()->object_iterate_careful(&sss_cl);
4355 dng->to()->object_iterate_careful(&sss_cl);
4356 }
4357 MarkRefsIntoAndScanClosure
4358 mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
4359 &_markStack, &_revisitStack, this, CMSYield,
4360 true /* precleaning phase */);
4361 // CAUTION: The following closure has persistent state that may need to
4362 // be reset upon a decrease in the sequence of addresses it
4363 // processes.
4364 ScanMarkedObjectsAgainCarefullyClosure
4365 smoac_cl(this, _span,
4366 &_markBitMap, &_markStack, &_revisitStack, &mrias_cl, CMSYield);
4368 // Preclean dirty cards in ModUnionTable and CardTable using
4369 // appropriate convergence criterion;
4370 // repeat CMSPrecleanIter times unless we find that
4371 // we are losing.
4372 assert(CMSPrecleanIter < 10, "CMSPrecleanIter is too large");
4373 assert(CMSPrecleanNumerator < CMSPrecleanDenominator,
4374 "Bad convergence multiplier");
4375 assert(CMSPrecleanThreshold >= 100,
4376 "Unreasonably low CMSPrecleanThreshold");
4378 size_t numIter, cumNumCards, lastNumCards, curNumCards;
4379 for (numIter = 0, cumNumCards = lastNumCards = curNumCards = 0;
4380 numIter < CMSPrecleanIter;
4381 numIter++, lastNumCards = curNumCards, cumNumCards += curNumCards) {
4382 curNumCards = preclean_mod_union_table(_cmsGen, &smoac_cl);
4383 if (CMSPermGenPrecleaningEnabled) {
4384 curNumCards += preclean_mod_union_table(_permGen, &smoac_cl);
4385 }
4386 if (Verbose && PrintGCDetails) {
4387 gclog_or_tty->print(" (modUnionTable: %d cards)", curNumCards);
4388 }
4389 // Either there are very few dirty cards, so re-mark
4390 // pause will be small anyway, or our pre-cleaning isn't
4391 // that much faster than the rate at which cards are being
4392 // dirtied, so we might as well stop and re-mark since
4393 // precleaning won't improve our re-mark time by much.
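    // For instance, if CMSPrecleanNumerator/CMSPrecleanDenominator were
    // 2/3, the second condition below terminates the loop as soon as a
    // pass processes more than two-thirds of the dirty cards that the
    // previous pass processed, i.e. as soon as the per-pass dirty-card
    // count stops dropping by at least a third.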
4394 if (curNumCards <= CMSPrecleanThreshold ||
4395 (numIter > 0 &&
4396 (curNumCards * CMSPrecleanDenominator >
4397 lastNumCards * CMSPrecleanNumerator))) {
4398 numIter++;
4399 cumNumCards += curNumCards;
4400 break;
4401 }
4402 }
4403 curNumCards = preclean_card_table(_cmsGen, &smoac_cl);
4404 if (CMSPermGenPrecleaningEnabled) {
4405 curNumCards += preclean_card_table(_permGen, &smoac_cl);
4406 }
4407 cumNumCards += curNumCards;
4408 if (PrintGCDetails && PrintCMSStatistics != 0) {
4409 gclog_or_tty->print_cr(" (cardTable: %d cards, re-scanned %d cards, %d iterations)",
4410 curNumCards, cumNumCards, numIter);
4411 }
4412 return cumNumCards; // as a measure of useful work done
4413 }
4415 // PRECLEANING NOTES:
4416 // Precleaning involves:
4417 // . reading the bits of the modUnionTable and clearing the set bits.
4418 // . For the cards corresponding to the set bits, we scan the
4419 // objects on those cards. This means we need the free_list_lock
4420 // so that we can safely iterate over the CMS space when scanning
4421 // for oops.
4422 // . When we scan the objects, we'll be both reading and setting
4423 // marks in the marking bit map, so we'll need the marking bit map.
4424 // . For protecting _collector_state transitions, we take the CGC_lock.
 4425 // Note that any races in the reading of card table entries by the
 4426 // CMS thread on the one hand and the clearing of those entries by the
 4427 // VM thread or the setting of those entries by the mutator threads on the
 4428 // other are quite benign. However, for efficiency it makes sense to keep
 4429 // the VM thread from racing with the CMS thread while the latter is
 4430 // reading or updating dirty card info in the modUnionTable. We therefore
 4431 // also use the CGC_lock to protect the reading of the card table and the
 4432 // mod union table by the CMS thread.
4433 // . We run concurrently with mutator updates, so scanning
4434 // needs to be done carefully -- we should not try to scan
4435 // potentially uninitialized objects.
4436 //
4437 // Locking strategy: While holding the CGC_lock, we scan over and
4438 // reset a maximal dirty range of the mod union / card tables, then lock
4439 // the free_list_lock and bitmap lock to do a full marking, then
4440 // release these locks; and repeat the cycle. This allows for a
4441 // certain amount of fairness in the sharing of these locks between
4442 // the CMS collector on the one hand, and the VM thread and the
4443 // mutators on the other.
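// As a schematic sketch (names abbreviated; see preclean_mod_union_table()
// below for the real code), one round of the cycle looks like:
//
//   {
//     CMSTokenSync ts(true);                     // CMS token / CGC_lock only
//     dirtyRegion = getAndClearMarkedRegion(nextAddr, endAddr);
//   }
//   {
//     CMSTokenSyncWithLocks ts(true, freelistLock(), bitMapLock());
//     object_iterate_careful_m(dirtyRegion, cl);  // scan/mark the region
//   }
//   nextAddr = dirtyRegion.end();                 // locks dropped; repeat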
4445 // NOTE: preclean_mod_union_table() and preclean_card_table()
4446 // further below are largely identical; if you need to modify
4447 // one of these methods, please check the other method too.
4449 size_t CMSCollector::preclean_mod_union_table(
4450 ConcurrentMarkSweepGeneration* gen,
4451 ScanMarkedObjectsAgainCarefullyClosure* cl) {
4452 verify_work_stacks_empty();
4453 verify_overflow_empty();
4455 // strategy: starting with the first card, accumulate contiguous
4456 // ranges of dirty cards; clear these cards, then scan the region
4457 // covered by these cards.
4459 // Since all of the MUT is committed ahead, we can just use
4460 // that, in case the generations expand while we are precleaning.
4461 // It might also be fine to just use the committed part of the
4462 // generation, but we might potentially miss cards when the
4463 // generation is rapidly expanding while we are in the midst
4464 // of precleaning.
4465 HeapWord* startAddr = gen->reserved().start();
4466 HeapWord* endAddr = gen->reserved().end();
4468 cl->setFreelistLock(gen->freelistLock()); // needed for yielding
4470 size_t numDirtyCards, cumNumDirtyCards;
4471 HeapWord *nextAddr, *lastAddr;
4472 for (cumNumDirtyCards = numDirtyCards = 0,
4473 nextAddr = lastAddr = startAddr;
4474 nextAddr < endAddr;
4475 nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
4477 ResourceMark rm;
4478 HandleMark hm;
4480 MemRegion dirtyRegion;
4481 {
4482 stopTimer();
4483 CMSTokenSync ts(true);
4484 startTimer();
4485 sample_eden();
 4486       // Get dirty region starting at nextAddr (inclusive),
4487 // simultaneously clearing it.
4488 dirtyRegion =
4489 _modUnionTable.getAndClearMarkedRegion(nextAddr, endAddr);
4490 assert(dirtyRegion.start() >= nextAddr,
4491 "returned region inconsistent?");
4492 }
4493 // Remember where the next search should begin.
4494 // The returned region (if non-empty) is a right open interval,
 4495     // so lastAddr is obtained from the right end of that
4496 // interval.
4497 lastAddr = dirtyRegion.end();
4498 // Should do something more transparent and less hacky XXX
4499 numDirtyCards =
4500 _modUnionTable.heapWordDiffToOffsetDiff(dirtyRegion.word_size());
4502 // We'll scan the cards in the dirty region (with periodic
4503 // yields for foreground GC as needed).
4504 if (!dirtyRegion.is_empty()) {
4505 assert(numDirtyCards > 0, "consistency check");
4506 HeapWord* stop_point = NULL;
4507 {
4508 stopTimer();
4509 CMSTokenSyncWithLocks ts(true, gen->freelistLock(),
4510 bitMapLock());
4511 startTimer();
4512 verify_work_stacks_empty();
4513 verify_overflow_empty();
4514 sample_eden();
4515 stop_point =
4516 gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4517 }
4518 if (stop_point != NULL) {
4519 // The careful iteration stopped early either because it found an
4520 // uninitialized object, or because we were in the midst of an
4521 // "abortable preclean", which should now be aborted. Redirty
4522 // the bits corresponding to the partially-scanned or unscanned
4523 // cards. We'll either restart at the next block boundary or
4524 // abort the preclean.
4525 assert((CMSPermGenPrecleaningEnabled && (gen == _permGen)) ||
4526 (_collectorState == AbortablePreclean && should_abort_preclean()),
4527 "Unparsable objects should only be in perm gen.");
4529 stopTimer();
4530 CMSTokenSyncWithLocks ts(true, bitMapLock());
4531 startTimer();
4532 _modUnionTable.mark_range(MemRegion(stop_point, dirtyRegion.end()));
4533 if (should_abort_preclean()) {
4534 break; // out of preclean loop
4535 } else {
4536 // Compute the next address at which preclean should pick up;
4537 // might need bitMapLock in order to read P-bits.
4538 lastAddr = next_card_start_after_block(stop_point);
4539 }
4540 }
4541 } else {
4542 assert(lastAddr == endAddr, "consistency check");
4543 assert(numDirtyCards == 0, "consistency check");
4544 break;
4545 }
4546 }
4547 verify_work_stacks_empty();
4548 verify_overflow_empty();
4549 return cumNumDirtyCards;
4550 }
4552 // NOTE: preclean_mod_union_table() above and preclean_card_table()
4553 // below are largely identical; if you need to modify
4554 // one of these methods, please check the other method too.
4556 size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* gen,
4557 ScanMarkedObjectsAgainCarefullyClosure* cl) {
 4558   // strategy: it's similar to preclean_mod_union_table() above, in that
4559 // we accumulate contiguous ranges of dirty cards, mark these cards
4560 // precleaned, then scan the region covered by these cards.
4561 HeapWord* endAddr = (HeapWord*)(gen->_virtual_space.high());
4562 HeapWord* startAddr = (HeapWord*)(gen->_virtual_space.low());
4564 cl->setFreelistLock(gen->freelistLock()); // needed for yielding
4566 size_t numDirtyCards, cumNumDirtyCards;
4567 HeapWord *lastAddr, *nextAddr;
4569 for (cumNumDirtyCards = numDirtyCards = 0,
4570 nextAddr = lastAddr = startAddr;
4571 nextAddr < endAddr;
4572 nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
4574 ResourceMark rm;
4575 HandleMark hm;
4577 MemRegion dirtyRegion;
4578 {
4579 // See comments in "Precleaning notes" above on why we
4580 // do this locking. XXX Could the locking overheads be
4581 // too high when dirty cards are sparse? [I don't think so.]
4582 stopTimer();
4583 CMSTokenSync x(true); // is cms thread
4584 startTimer();
4585 sample_eden();
4586 // Get and clear dirty region from card table
4587 dirtyRegion = _ct->ct_bs()->dirty_card_range_after_preclean(
4588 MemRegion(nextAddr, endAddr));
4589 assert(dirtyRegion.start() >= nextAddr,
4590 "returned region inconsistent?");
4591 }
4592 lastAddr = dirtyRegion.end();
4593 numDirtyCards =
4594 dirtyRegion.word_size()/CardTableModRefBS::card_size_in_words;
4596 if (!dirtyRegion.is_empty()) {
4597 stopTimer();
4598 CMSTokenSyncWithLocks ts(true, gen->freelistLock(), bitMapLock());
4599 startTimer();
4600 sample_eden();
4601 verify_work_stacks_empty();
4602 verify_overflow_empty();
4603 HeapWord* stop_point =
4604 gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4605 if (stop_point != NULL) {
4606 // The careful iteration stopped early because it found an
4607 // uninitialized object. Redirty the bits corresponding to the
4608 // partially-scanned or unscanned cards, and start again at the
4609 // next block boundary.
4610 assert(CMSPermGenPrecleaningEnabled ||
4611 (_collectorState == AbortablePreclean && should_abort_preclean()),
4612 "Unparsable objects should only be in perm gen.");
4613 _ct->ct_bs()->invalidate(MemRegion(stop_point, dirtyRegion.end()));
4614 if (should_abort_preclean()) {
4615 break; // out of preclean loop
4616 } else {
4617 // Compute the next address at which preclean should pick up.
4618 lastAddr = next_card_start_after_block(stop_point);
4619 }
4620 }
4621 } else {
4622 break;
4623 }
4624 }
4625 verify_work_stacks_empty();
4626 verify_overflow_empty();
4627 return cumNumDirtyCards;
4628 }
4630 void CMSCollector::checkpointRootsFinal(bool asynch,
4631 bool clear_all_soft_refs, bool init_mark_was_synchronous) {
4632 assert(_collectorState == FinalMarking, "incorrect state transition?");
4633 check_correct_thread_executing();
4634 // world is stopped at this checkpoint
4635 assert(SafepointSynchronize::is_at_safepoint(),
4636 "world should be stopped");
4637 verify_work_stacks_empty();
4638 verify_overflow_empty();
4640 SpecializationStats::clear();
4641 if (PrintGCDetails) {
4642 gclog_or_tty->print("[YG occupancy: "SIZE_FORMAT" K ("SIZE_FORMAT" K)]",
4643 _young_gen->used() / K,
4644 _young_gen->capacity() / K);
4645 }
4646 if (asynch) {
4647 if (CMSScavengeBeforeRemark) {
4648 GenCollectedHeap* gch = GenCollectedHeap::heap();
 4649       // Temporarily set the flag to false; GCH->do_collection() expects
 4650       // it to be false and will set it to true itself.
4651 FlagSetting fl(gch->_is_gc_active, false);
4652 NOT_PRODUCT(TraceTime t("Scavenge-Before-Remark",
4653 PrintGCDetails && Verbose, true, gclog_or_tty);)
4654 int level = _cmsGen->level() - 1;
4655 if (level >= 0) {
4656 gch->do_collection(true, // full (i.e. force, see below)
4657 false, // !clear_all_soft_refs
4658 0, // size
4659 false, // is_tlab
4660 level // max_level
4661 );
4662 }
4663 }
4664 FreelistLocker x(this);
4665 MutexLockerEx y(bitMapLock(),
4666 Mutex::_no_safepoint_check_flag);
4667 assert(!init_mark_was_synchronous, "but that's impossible!");
4668 checkpointRootsFinalWork(asynch, clear_all_soft_refs, false);
4669 } else {
4670 // already have all the locks
4671 checkpointRootsFinalWork(asynch, clear_all_soft_refs,
4672 init_mark_was_synchronous);
4673 }
4674 verify_work_stacks_empty();
4675 verify_overflow_empty();
4676 SpecializationStats::print();
4677 }
4679 void CMSCollector::checkpointRootsFinalWork(bool asynch,
4680 bool clear_all_soft_refs, bool init_mark_was_synchronous) {
4682 NOT_PRODUCT(TraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, gclog_or_tty);)
4684 assert(haveFreelistLocks(), "must have free list locks");
4685 assert_lock_strong(bitMapLock());
4687 if (UseAdaptiveSizePolicy) {
4688 size_policy()->checkpoint_roots_final_begin();
4689 }
4691 ResourceMark rm;
4692 HandleMark hm;
4694 GenCollectedHeap* gch = GenCollectedHeap::heap();
4696 if (cms_should_unload_classes()) {
4697 CodeCache::gc_prologue();
4698 }
4699 assert(haveFreelistLocks(), "must have free list locks");
4700 assert_lock_strong(bitMapLock());
4702 if (!init_mark_was_synchronous) {
4703 // We might assume that we need not fill TLAB's when
4704 // CMSScavengeBeforeRemark is set, because we may have just done
4705 // a scavenge which would have filled all TLAB's -- and besides
4706 // Eden would be empty. This however may not always be the case --
4707 // for instance although we asked for a scavenge, it may not have
4708 // happened because of a JNI critical section. We probably need
4709 // a policy for deciding whether we can in that case wait until
4710 // the critical section releases and then do the remark following
4711 // the scavenge, and skip it here. In the absence of that policy,
4712 // or of an indication of whether the scavenge did indeed occur,
4713 // we cannot rely on TLAB's having been filled and must do
4714 // so here just in case a scavenge did not happen.
4715 gch->ensure_parsability(false); // fill TLAB's, but no need to retire them
4716 // Update the saved marks which may affect the root scans.
4717 gch->save_marks();
4719 {
4720 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
4722 // Note on the role of the mod union table:
4723 // Since the marker in "markFromRoots" marks concurrently with
4724 // mutators, it is possible for some reachable objects not to have been
4725 // scanned. For instance, an only reference to an object A was
4726 // placed in object B after the marker scanned B. Unless B is rescanned,
4727 // A would be collected. Such updates to references in marked objects
4728 // are detected via the mod union table which is the set of all cards
4729 // dirtied since the first checkpoint in this GC cycle and prior to
4730 // the most recent young generation GC, minus those cleaned up by the
4731 // concurrent precleaning.
4732 if (CMSParallelRemarkEnabled && ParallelGCThreads > 0) {
4733 TraceTime t("Rescan (parallel) ", PrintGCDetails, false, gclog_or_tty);
4734 do_remark_parallel();
4735 } else {
4736 TraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
4737 gclog_or_tty);
4738 do_remark_non_parallel();
4739 }
4740 }
4741 } else {
4742 assert(!asynch, "Can't have init_mark_was_synchronous in asynch mode");
4743 // The initial mark was stop-world, so there's no rescanning to
4744 // do; go straight on to the next step below.
4745 }
4746 verify_work_stacks_empty();
4747 verify_overflow_empty();
4749 {
4750 NOT_PRODUCT(TraceTime ts("refProcessingWork", PrintGCDetails, false, gclog_or_tty);)
4751 refProcessingWork(asynch, clear_all_soft_refs);
4752 }
4753 verify_work_stacks_empty();
4754 verify_overflow_empty();
4756 if (cms_should_unload_classes()) {
4757 CodeCache::gc_epilogue();
4758 }
4760 // If we encountered any (marking stack / work queue) overflow
4761 // events during the current CMS cycle, take appropriate
4762 // remedial measures, where possible, so as to try and avoid
4763 // recurrence of that condition.
4764 assert(_markStack.isEmpty(), "No grey objects");
4765 size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
4766 _ser_kac_ovflw;
4767 if (ser_ovflw > 0) {
4768 if (PrintCMSStatistics != 0) {
4769 gclog_or_tty->print_cr("Marking stack overflow (benign) "
4770 "(pmc_pc="SIZE_FORMAT", pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT")",
4771 _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw,
4772 _ser_kac_ovflw);
4773 }
4774 _markStack.expand();
4775 _ser_pmc_remark_ovflw = 0;
4776 _ser_pmc_preclean_ovflw = 0;
4777 _ser_kac_ovflw = 0;
4778 }
4779 if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) {
4780 if (PrintCMSStatistics != 0) {
4781 gclog_or_tty->print_cr("Work queue overflow (benign) "
4782 "(pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT")",
4783 _par_pmc_remark_ovflw, _par_kac_ovflw);
4784 }
4785 _par_pmc_remark_ovflw = 0;
4786 _par_kac_ovflw = 0;
4787 }
4788 if (PrintCMSStatistics != 0) {
4789 if (_markStack._hit_limit > 0) {
4790 gclog_or_tty->print_cr(" (benign) Hit max stack size limit ("SIZE_FORMAT")",
4791 _markStack._hit_limit);
4792 }
4793 if (_markStack._failed_double > 0) {
4794 gclog_or_tty->print_cr(" (benign) Failed stack doubling ("SIZE_FORMAT"),"
4795 " current capacity "SIZE_FORMAT,
4796 _markStack._failed_double,
4797 _markStack.capacity());
4798 }
4799 }
4800 _markStack._hit_limit = 0;
4801 _markStack._failed_double = 0;
4803 if ((VerifyAfterGC || VerifyDuringGC) &&
4804 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
4805 verify_after_remark();
4806 }
4808 // Change under the freelistLocks.
4809 _collectorState = Sweeping;
4810 // Call isAllClear() under bitMapLock
4811 assert(_modUnionTable.isAllClear(), "Should be clear by end of the"
4812 " final marking");
4813 if (UseAdaptiveSizePolicy) {
4814 size_policy()->checkpoint_roots_final_end(gch->gc_cause());
4815 }
4816 }
4818 // Parallel remark task
4819 class CMSParRemarkTask: public AbstractGangTask {
4820 CMSCollector* _collector;
4821 WorkGang* _workers;
4822 int _n_workers;
4823 CompactibleFreeListSpace* _cms_space;
4824 CompactibleFreeListSpace* _perm_space;
4826 // The per-thread work queues, available here for stealing.
4827 OopTaskQueueSet* _task_queues;
4828 ParallelTaskTerminator _term;
4830 public:
4831 CMSParRemarkTask(CMSCollector* collector,
4832 CompactibleFreeListSpace* cms_space,
4833 CompactibleFreeListSpace* perm_space,
4834 int n_workers, WorkGang* workers,
4835 OopTaskQueueSet* task_queues):
4836 AbstractGangTask("Rescan roots and grey objects in parallel"),
4837 _collector(collector),
4838 _cms_space(cms_space), _perm_space(perm_space),
4839 _n_workers(n_workers),
4840 _workers(workers),
4841 _task_queues(task_queues),
4842 _term(workers->total_workers(), task_queues) { }
4844 OopTaskQueueSet* task_queues() { return _task_queues; }
4846 OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
4848 ParallelTaskTerminator* terminator() { return &_term; }
4850 void work(int i);
4852 private:
4853 // Work method in support of parallel rescan ... of young gen spaces
4854 void do_young_space_rescan(int i, Par_MarkRefsIntoAndScanClosure* cl,
4855 ContiguousSpace* space,
4856 HeapWord** chunk_array, size_t chunk_top);
4858 // ... of dirty cards in old space
4859 void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i,
4860 Par_MarkRefsIntoAndScanClosure* cl);
4862 // ... work stealing for the above
4863 void do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl, int* seed);
4864 };
4866 void CMSParRemarkTask::work(int i) {
4867 elapsedTimer _timer;
4868 ResourceMark rm;
4869 HandleMark hm;
4871 // ---------- rescan from roots --------------
4872 _timer.start();
4873 GenCollectedHeap* gch = GenCollectedHeap::heap();
4874 Par_MarkRefsIntoAndScanClosure par_mrias_cl(_collector,
4875 _collector->_span, _collector->ref_processor(),
4876 &(_collector->_markBitMap),
4877 work_queue(i), &(_collector->_revisitStack));
4879 // Rescan young gen roots first since these are likely
4880 // coarsely partitioned and may, on that account, constitute
4881 // the critical path; thus, it's best to start off that
4882 // work first.
4883 // ---------- young gen roots --------------
4884 {
4885 DefNewGeneration* dng = _collector->_young_gen->as_DefNewGeneration();
4886 EdenSpace* eden_space = dng->eden();
4887 ContiguousSpace* from_space = dng->from();
4888 ContiguousSpace* to_space = dng->to();
4890 HeapWord** eca = _collector->_eden_chunk_array;
4891 size_t ect = _collector->_eden_chunk_index;
4892 HeapWord** sca = _collector->_survivor_chunk_array;
4893 size_t sct = _collector->_survivor_chunk_index;
4895 assert(ect <= _collector->_eden_chunk_capacity, "out of bounds");
4896 assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds");
4898 do_young_space_rescan(i, &par_mrias_cl, to_space, NULL, 0);
4899 do_young_space_rescan(i, &par_mrias_cl, from_space, sca, sct);
4900 do_young_space_rescan(i, &par_mrias_cl, eden_space, eca, ect);
4902 _timer.stop();
4903 if (PrintCMSStatistics != 0) {
4904 gclog_or_tty->print_cr(
4905 "Finished young gen rescan work in %dth thread: %3.3f sec",
4906 i, _timer.seconds());
4907 }
4908 }
4910 // ---------- remaining roots --------------
4911 _timer.reset();
4912 _timer.start();
4913 gch->gen_process_strong_roots(_collector->_cmsGen->level(),
4914 false, // yg was scanned above
4915 true, // collecting perm gen
4916 SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
4917 NULL, &par_mrias_cl);
4918 _timer.stop();
4919 if (PrintCMSStatistics != 0) {
4920 gclog_or_tty->print_cr(
4921 "Finished remaining root rescan work in %dth thread: %3.3f sec",
4922 i, _timer.seconds());
4923 }
4925 // ---------- rescan dirty cards ------------
4926 _timer.reset();
4927 _timer.start();
4929 // Do the rescan tasks for each of the two spaces
4930 // (cms_space and perm_space) in turn.
4931 do_dirty_card_rescan_tasks(_cms_space, i, &par_mrias_cl);
4932 do_dirty_card_rescan_tasks(_perm_space, i, &par_mrias_cl);
4933 _timer.stop();
4934 if (PrintCMSStatistics != 0) {
4935 gclog_or_tty->print_cr(
4936 "Finished dirty card rescan work in %dth thread: %3.3f sec",
4937 i, _timer.seconds());
4938 }
4940 // ---------- steal work from other threads ...
4941 // ---------- ... and drain overflow list.
4942 _timer.reset();
4943 _timer.start();
4944 do_work_steal(i, &par_mrias_cl, _collector->hash_seed(i));
4945 _timer.stop();
4946 if (PrintCMSStatistics != 0) {
4947 gclog_or_tty->print_cr(
4948 "Finished work stealing in %dth thread: %3.3f sec",
4949 i, _timer.seconds());
4950 }
4951 }
4953 void
4954 CMSParRemarkTask::do_young_space_rescan(int i,
4955 Par_MarkRefsIntoAndScanClosure* cl, ContiguousSpace* space,
4956 HeapWord** chunk_array, size_t chunk_top) {
4957 // Until all tasks completed:
4958 // . claim an unclaimed task
4959 // . compute region boundaries corresponding to task claimed
4960 // using chunk_array
4961 // . par_oop_iterate(cl) over that region
4963 ResourceMark rm;
4964 HandleMark hm;
4966 SequentialSubTasksDone* pst = space->par_seq_tasks();
4967 assert(pst->valid(), "Uninitialized use?");
4969 int nth_task = 0;
4970 int n_tasks = pst->n_tasks();
4972 HeapWord *start, *end;
4973 while (!pst->is_task_claimed(/* reference */ nth_task)) {
4974 // We claimed task # nth_task; compute its boundaries.
4975 if (chunk_top == 0) { // no samples were taken
4976 assert(nth_task == 0 && n_tasks == 1, "Can have only 1 EdenSpace task");
4977 start = space->bottom();
4978 end = space->top();
4979 } else if (nth_task == 0) {
4980 start = space->bottom();
4981 end = chunk_array[nth_task];
4982 } else if (nth_task < (jint)chunk_top) {
4983 assert(nth_task >= 1, "Control point invariant");
4984 start = chunk_array[nth_task - 1];
4985 end = chunk_array[nth_task];
4986 } else {
4987 assert(nth_task == (jint)chunk_top, "Control point invariant");
4988 start = chunk_array[chunk_top - 1];
4989 end = space->top();
4990 }
4991 MemRegion mr(start, end);
4992 // Verify that mr is in space
4993 assert(mr.is_empty() || space->used_region().contains(mr),
4994 "Should be in space");
4995 // Verify that "start" is an object boundary
4996 assert(mr.is_empty() || oop(mr.start())->is_oop(),
4997 "Should be an oop");
4998 space->par_oop_iterate(mr, cl);
4999 }
5000 pst->all_tasks_completed();
5001 }
5003 void
5004 CMSParRemarkTask::do_dirty_card_rescan_tasks(
5005 CompactibleFreeListSpace* sp, int i,
5006 Par_MarkRefsIntoAndScanClosure* cl) {
5007 // Until all tasks completed:
5008 // . claim an unclaimed task
5009 // . compute region boundaries corresponding to task claimed
5010 // . transfer dirty bits ct->mut for that region
5011 // . apply rescanclosure to dirty mut bits for that region
5013 ResourceMark rm;
5014 HandleMark hm;
5016 OopTaskQueue* work_q = work_queue(i);
5017 ModUnionClosure modUnionClosure(&(_collector->_modUnionTable));
5018 // CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION!
5019 // CAUTION: This closure has state that persists across calls to
5020 // the work method dirty_range_iterate_clear() in that it has
 5021   // embedded in it a (subtype of) UpwardsObjectClosure. The
 5022   // use of that state in the embedded UpwardsObjectClosure instance
5023 // assumes that the cards are always iterated (even if in parallel
5024 // by several threads) in monotonically increasing order per each
5025 // thread. This is true of the implementation below which picks
5026 // card ranges (chunks) in monotonically increasing order globally
5027 // and, a-fortiori, in monotonically increasing order per thread
5028 // (the latter order being a subsequence of the former).
5029 // If the work code below is ever reorganized into a more chaotic
5030 // work-partitioning form than the current "sequential tasks"
5031 // paradigm, the use of that persistent state will have to be
5032 // revisited and modified appropriately. See also related
5033 // bug 4756801 work on which should examine this code to make
5034 // sure that the changes there do not run counter to the
5035 // assumptions made here and necessary for correctness and
5036 // efficiency. Note also that this code might yield inefficient
5037 // behaviour in the case of very large objects that span one or
5038 // more work chunks. Such objects would potentially be scanned
5039 // several times redundantly. Work on 4756801 should try and
5040 // address that performance anomaly if at all possible. XXX
5041 MemRegion full_span = _collector->_span;
5042 CMSBitMap* bm = &(_collector->_markBitMap); // shared
5043 CMSMarkStack* rs = &(_collector->_revisitStack); // shared
5044 MarkFromDirtyCardsClosure
5045 greyRescanClosure(_collector, full_span, // entire span of interest
5046 sp, bm, work_q, rs, cl);
5048 SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
5049 assert(pst->valid(), "Uninitialized use?");
5050 int nth_task = 0;
5051 const int alignment = CardTableModRefBS::card_size * BitsPerWord;
5052 MemRegion span = sp->used_region();
5053 HeapWord* start_addr = span.start();
5054 HeapWord* end_addr = (HeapWord*)round_to((intptr_t)span.end(),
5055 alignment);
5056 const size_t chunk_size = sp->rescan_task_size(); // in HeapWord units
5057 assert((HeapWord*)round_to((intptr_t)start_addr, alignment) ==
5058 start_addr, "Check alignment");
5059 assert((size_t)round_to((intptr_t)chunk_size, alignment) ==
5060 chunk_size, "Check alignment");
5062 while (!pst->is_task_claimed(/* reference */ nth_task)) {
5063 // Having claimed the nth_task, compute corresponding mem-region,
 5064     // which is a-fortiori aligned correctly (i.e. at a MUT boundary).
 5065     // The alignment restriction ensures that we do not need any
 5066     // synchronization with other gang-workers while setting or
 5067     // clearing bits in this chunk of the MUT.
5068 MemRegion this_span = MemRegion(start_addr + nth_task*chunk_size,
5069 start_addr + (nth_task+1)*chunk_size);
 5070     // The last chunk's end might be way beyond the end of the
 5071     // used region. In that case, pull back appropriately.
5072 if (this_span.end() > end_addr) {
5073 this_span.set_end(end_addr);
5074 assert(!this_span.is_empty(), "Program logic (calculation of n_tasks)");
5075 }
5076 // Iterate over the dirty cards covering this chunk, marking them
5077 // precleaned, and setting the corresponding bits in the mod union
5078 // table. Since we have been careful to partition at Card and MUT-word
5079 // boundaries no synchronization is needed between parallel threads.
5080 _collector->_ct->ct_bs()->dirty_card_iterate(this_span,
5081 &modUnionClosure);
5083 // Having transferred these marks into the modUnionTable,
5084 // rescan the marked objects on the dirty cards in the modUnionTable.
5085 // Even if this is at a synchronous collection, the initial marking
5086 // may have been done during an asynchronous collection so there
5087 // may be dirty bits in the mod-union table.
5088 _collector->_modUnionTable.dirty_range_iterate_clear(
5089 this_span, &greyRescanClosure);
5090 _collector->_modUnionTable.verifyNoOneBitsInRange(
5091 this_span.start(),
5092 this_span.end());
5093 }
5094 pst->all_tasks_completed(); // declare that i am done
5095 }
5097 // . see if we can share work_queues with ParNew? XXX
5098 void
5099 CMSParRemarkTask::do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl,
5100 int* seed) {
5101 OopTaskQueue* work_q = work_queue(i);
5102 NOT_PRODUCT(int num_steals = 0;)
5103 oop obj_to_scan;
5104 CMSBitMap* bm = &(_collector->_markBitMap);
5105 size_t num_from_overflow_list =
5106 MIN2((size_t)work_q->max_elems()/4,
5107 (size_t)ParGCDesiredObjsFromOverflowList);
5109 while (true) {
5110 // Completely finish any left over work from (an) earlier round(s)
5111 cl->trim_queue(0);
5112 // Now check if there's any work in the overflow list
5113 if (_collector->par_take_from_overflow_list(num_from_overflow_list,
5114 work_q)) {
5115 // found something in global overflow list;
5116 // not yet ready to go stealing work from others.
5117 // We'd like to assert(work_q->size() != 0, ...)
5118 // because we just took work from the overflow list,
5119 // but of course we can't since all of that could have
5120 // been already stolen from us.
5121 // "He giveth and He taketh away."
5122 continue;
5123 }
5124 // Verify that we have no work before we resort to stealing
5125 assert(work_q->size() == 0, "Have work, shouldn't steal");
5126 // Try to steal from other queues that have work
5127 if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
5128 NOT_PRODUCT(num_steals++;)
5129 assert(obj_to_scan->is_oop(), "Oops, not an oop!");
5130 assert(bm->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
5131 // Do scanning work
5132 obj_to_scan->oop_iterate(cl);
5133 // Loop around, finish this work, and try to steal some more
5134 } else if (terminator()->offer_termination()) {
5135 break; // nirvana from the infinite cycle
5136 }
5137 }
5138 NOT_PRODUCT(
5139 if (PrintCMSStatistics != 0) {
5140 gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
5141 }
5142 )
5143 assert(work_q->size() == 0 && _collector->overflow_list_is_empty(),
5144 "Else our work is not yet done");
5145 }
5147 // Return a thread-local PLAB recording array, as appropriate.
5148 void* CMSCollector::get_data_recorder(int thr_num) {
5149 if (_survivor_plab_array != NULL &&
5150 (CMSPLABRecordAlways ||
5151 (_collectorState > Marking && _collectorState < FinalMarking))) {
5152 assert(thr_num < (int)ParallelGCThreads, "thr_num is out of bounds");
5153 ChunkArray* ca = &_survivor_plab_array[thr_num];
5154 ca->reset(); // clear it so that fresh data is recorded
5155 return (void*) ca;
5156 } else {
5157 return NULL;
5158 }
5159 }
5161 // Reset all the thread-local PLAB recording arrays
5162 void CMSCollector::reset_survivor_plab_arrays() {
5163 for (uint i = 0; i < ParallelGCThreads; i++) {
5164 _survivor_plab_array[i].reset();
5165 }
5166 }
5168 // Merge the per-thread plab arrays into the global survivor chunk
5169 // array which will provide the partitioning of the survivor space
5170 // for CMS rescan.
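// For example, if thread 0 had recorded survivor PLAB boundaries
// {0x1000, 0x3000} and thread 1 had recorded {0x2000, 0x4000}, the merge
// below would produce _survivor_chunk_array = {0x1000, 0x2000, 0x3000,
// 0x4000} and set _survivor_chunk_index to 4.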
5171 void CMSCollector::merge_survivor_plab_arrays(ContiguousSpace* surv) {
5172 assert(_survivor_plab_array != NULL, "Error");
5173 assert(_survivor_chunk_array != NULL, "Error");
5174 assert(_collectorState == FinalMarking, "Error");
5175 for (uint j = 0; j < ParallelGCThreads; j++) {
5176 _cursor[j] = 0;
5177 }
5178 HeapWord* top = surv->top();
5179 size_t i;
5180 for (i = 0; i < _survivor_chunk_capacity; i++) { // all sca entries
5181 HeapWord* min_val = top; // Higher than any PLAB address
5182 uint min_tid = 0; // position of min_val this round
5183 for (uint j = 0; j < ParallelGCThreads; j++) {
5184 ChunkArray* cur_sca = &_survivor_plab_array[j];
5185 if (_cursor[j] == cur_sca->end()) {
5186 continue;
5187 }
5188 assert(_cursor[j] < cur_sca->end(), "ctl pt invariant");
5189 HeapWord* cur_val = cur_sca->nth(_cursor[j]);
5190 assert(surv->used_region().contains(cur_val), "Out of bounds value");
5191 if (cur_val < min_val) {
5192 min_tid = j;
5193 min_val = cur_val;
5194 } else {
5195 assert(cur_val < top, "All recorded addresses should be less");
5196 }
5197 }
5198 // At this point min_val and min_tid are respectively
5199 // the least address in _survivor_plab_array[j]->nth(_cursor[j])
5200 // and the thread (j) that witnesses that address.
5201 // We record this address in the _survivor_chunk_array[i]
5202 // and increment _cursor[min_tid] prior to the next round i.
5203 if (min_val == top) {
5204 break;
5205 }
5206 _survivor_chunk_array[i] = min_val;
5207 _cursor[min_tid]++;
5208 }
5209 // We are all done; record the size of the _survivor_chunk_array
5210 _survivor_chunk_index = i; // exclusive: [0, i)
5211 if (PrintCMSStatistics > 0) {
5212 gclog_or_tty->print(" (Survivor:" SIZE_FORMAT "chunks) ", i);
5213 }
5214 // Verify that we used up all the recorded entries
5215 #ifdef ASSERT
5216 size_t total = 0;
5217 for (uint j = 0; j < ParallelGCThreads; j++) {
5218 assert(_cursor[j] == _survivor_plab_array[j].end(), "Ctl pt invariant");
5219 total += _cursor[j];
5220 }
5221 assert(total == _survivor_chunk_index, "Ctl Pt Invariant");
5222 // Check that the merged array is in sorted order
5223 if (total > 0) {
5224 for (size_t i = 0; i < total - 1; i++) {
5225 if (PrintCMSStatistics > 0) {
5226 gclog_or_tty->print(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ",
5227 i, _survivor_chunk_array[i]);
5228 }
5229 assert(_survivor_chunk_array[i] < _survivor_chunk_array[i+1],
5230 "Not sorted");
5231 }
5232 }
5233 #endif // ASSERT
5234 }
5236 // Set up the space's par_seq_tasks structure for work claiming
5237 // for parallel rescan of young gen.
5238 // See ParRescanTask where this is currently used.
5239 void
5240 CMSCollector::
5241 initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
5242 assert(n_threads > 0, "Unexpected n_threads argument");
5243 DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
5245 // Eden space
5246 {
5247 SequentialSubTasksDone* pst = dng->eden()->par_seq_tasks();
5248 assert(!pst->valid(), "Clobbering existing data?");
5249 // Each valid entry in [0, _eden_chunk_index) represents a task.
5250 size_t n_tasks = _eden_chunk_index + 1;
5251 assert(n_tasks == 1 || _eden_chunk_array != NULL, "Error");
5252 pst->set_par_threads(n_threads);
5253 pst->set_n_tasks((int)n_tasks);
5254 }
5256 // Merge the survivor plab arrays into _survivor_chunk_array
5257 if (_survivor_plab_array != NULL) {
5258 merge_survivor_plab_arrays(dng->from());
5259 } else {
5260 assert(_survivor_chunk_index == 0, "Error");
5261 }
5263 // To space
5264 {
5265 SequentialSubTasksDone* pst = dng->to()->par_seq_tasks();
5266 assert(!pst->valid(), "Clobbering existing data?");
5267 pst->set_par_threads(n_threads);
5268 pst->set_n_tasks(1);
5269 assert(pst->valid(), "Error");
5270 }
5272 // From space
5273 {
5274 SequentialSubTasksDone* pst = dng->from()->par_seq_tasks();
5275 assert(!pst->valid(), "Clobbering existing data?");
5276 size_t n_tasks = _survivor_chunk_index + 1;
5277 assert(n_tasks == 1 || _survivor_chunk_array != NULL, "Error");
5278 pst->set_par_threads(n_threads);
5279 pst->set_n_tasks((int)n_tasks);
5280 assert(pst->valid(), "Error");
5281 }
5282 }
5284 // Parallel version of remark
5285 void CMSCollector::do_remark_parallel() {
5286 GenCollectedHeap* gch = GenCollectedHeap::heap();
5287 WorkGang* workers = gch->workers();
5288 assert(workers != NULL, "Need parallel worker threads.");
5289 int n_workers = workers->total_workers();
5290 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
5291 CompactibleFreeListSpace* perm_space = _permGen->cmsSpace();
5293 CMSParRemarkTask tsk(this,
5294 cms_space, perm_space,
5295 n_workers, workers, task_queues());
5297 // Set up for parallel process_strong_roots work.
5298 gch->set_par_threads(n_workers);
5299 gch->change_strong_roots_parity();
5300 // We won't be iterating over the cards in the card table updating
 5301   // the younger_gen cards, so we shouldn't call the following; else
 5302   // the verification code as well as subsequent younger_refs_iterate
 5303   // code would get confused. XXX
5304 // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
5306 // The young gen rescan work will not be done as part of
 5307   // process_strong_roots (which currently doesn't know how to
 5308   // parallelize such a scan), but rather will be broken up into
 5309   // a set of parallel tasks (via the sampling that the [abortable]
 5310   // preclean phase did of EdenSpace, plus the [two] tasks of
 5311   // scanning the [two] survivor spaces). Further fine-grain
5312 // parallelization of the scanning of the survivor spaces
5313 // themselves, and of precleaning of the younger gen itself
5314 // is deferred to the future.
5315 initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
5317 // The dirty card rescan work is broken up into a "sequence"
5318 // of parallel tasks (per constituent space) that are dynamically
5319 // claimed by the parallel threads.
5320 cms_space->initialize_sequential_subtasks_for_rescan(n_workers);
5321 perm_space->initialize_sequential_subtasks_for_rescan(n_workers);
5323 // It turns out that even when we're using 1 thread, doing the work in a
5324 // separate thread causes wide variance in run times. We can't help this
5325 // in the multi-threaded case, but we special-case n=1 here to get
5326 // repeatable measurements of the 1-thread overhead of the parallel code.
5327 if (n_workers > 1) {
5328 // Make refs discovery MT-safe
5329 ReferenceProcessorMTMutator mt(ref_processor(), true);
5330 workers->run_task(&tsk);
5331 } else {
5332 tsk.work(0);
5333 }
5334 gch->set_par_threads(0); // 0 ==> non-parallel.
5335 // restore, single-threaded for now, any preserved marks
5336 // as a result of work_q overflow
5337 restore_preserved_marks_if_any();
5338 }
5340 // Non-parallel version of remark
5341 void CMSCollector::do_remark_non_parallel() {
5342 ResourceMark rm;
5343 HandleMark hm;
5344 GenCollectedHeap* gch = GenCollectedHeap::heap();
5345 MarkRefsIntoAndScanClosure
5346 mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
5347 &_markStack, &_revisitStack, this,
5348 false /* should_yield */, false /* not precleaning */);
5349 MarkFromDirtyCardsClosure
5350 markFromDirtyCardsClosure(this, _span,
5351 NULL, // space is set further below
5352 &_markBitMap, &_markStack, &_revisitStack,
5353 &mrias_cl);
5354 {
5355 TraceTime t("grey object rescan", PrintGCDetails, false, gclog_or_tty);
5356 // Iterate over the dirty cards, marking them precleaned, and
5357 // setting the corresponding bits in the mod union table.
5358 {
5359 ModUnionClosure modUnionClosure(&_modUnionTable);
5360 _ct->ct_bs()->dirty_card_iterate(
5361 _cmsGen->used_region(),
5362 &modUnionClosure);
5363 _ct->ct_bs()->dirty_card_iterate(
5364 _permGen->used_region(),
5365 &modUnionClosure);
5366 }
5367 // Having transferred these marks into the modUnionTable, we just need
5368 // to rescan the marked objects on the dirty cards in the modUnionTable.
5369 // The initial marking may have been done during an asynchronous
5370 // collection so there may be dirty bits in the mod-union table.
5371 const int alignment =
5372 CardTableModRefBS::card_size * BitsPerWord;
5373 {
5374 // ... First handle dirty cards in CMS gen
5375 markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace());
5376 MemRegion ur = _cmsGen->used_region();
5377 HeapWord* lb = ur.start();
5378 HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
5379 MemRegion cms_span(lb, ub);
5380 _modUnionTable.dirty_range_iterate_clear(cms_span,
5381 &markFromDirtyCardsClosure);
5382 verify_work_stacks_empty();
5383 if (PrintCMSStatistics != 0) {
5384 gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in cms gen) ",
5385 markFromDirtyCardsClosure.num_dirty_cards());
5386 }
5387 }
5388 {
5389 // .. and then repeat for dirty cards in perm gen
5390 markFromDirtyCardsClosure.set_space(_permGen->cmsSpace());
5391 MemRegion ur = _permGen->used_region();
5392 HeapWord* lb = ur.start();
5393 HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
5394 MemRegion perm_span(lb, ub);
5395 _modUnionTable.dirty_range_iterate_clear(perm_span,
5396 &markFromDirtyCardsClosure);
5397 verify_work_stacks_empty();
5398 if (PrintCMSStatistics != 0) {
5399 gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in perm gen) ",
5400 markFromDirtyCardsClosure.num_dirty_cards());
5401 }
5402 }
5403 }
5404 if (VerifyDuringGC &&
5405 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5406 HandleMark hm; // Discard invalid handles created during verification
5407 Universe::verify(true);
5408 }
5409 {
5410 TraceTime t("root rescan", PrintGCDetails, false, gclog_or_tty);
5412 verify_work_stacks_empty();
5414 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
5415 gch->gen_process_strong_roots(_cmsGen->level(),
5416 true, // younger gens as roots
5417 true, // collecting perm gen
5418 SharedHeap::ScanningOption(roots_scanning_options()),
5419 NULL, &mrias_cl);
5420 }
5421 verify_work_stacks_empty();
5422 // Restore evacuated mark words, if any, used for overflow list links
5423 if (!CMSOverflowEarlyRestoration) {
5424 restore_preserved_marks_if_any();
5425 }
5426 verify_overflow_empty();
5427 }
5429 ////////////////////////////////////////////////////////
5430 // Parallel Reference Processing Task Proxy Class
5431 ////////////////////////////////////////////////////////
5432 class CMSRefProcTaskProxy: public AbstractGangTask {
5433 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
5434 CMSCollector* _collector;
5435 CMSBitMap* _mark_bit_map;
5436 MemRegion _span;
5437 OopTaskQueueSet* _task_queues;
5438 ParallelTaskTerminator _term;
5439 ProcessTask& _task;
5441 public:
5442 CMSRefProcTaskProxy(ProcessTask& task,
5443 CMSCollector* collector,
5444 const MemRegion& span,
5445 CMSBitMap* mark_bit_map,
5446 int total_workers,
5447 OopTaskQueueSet* task_queues):
5448 AbstractGangTask("Process referents by policy in parallel"),
5449 _task(task),
5450 _collector(collector), _span(span), _mark_bit_map(mark_bit_map),
5451 _task_queues(task_queues),
5452 _term(total_workers, task_queues)
5453 { }
5455 OopTaskQueueSet* task_queues() { return _task_queues; }
5457 OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
5459 ParallelTaskTerminator* terminator() { return &_term; }
5461 void do_work_steal(int i,
5462 CMSParDrainMarkingStackClosure* drain,
5463 CMSParKeepAliveClosure* keep_alive,
5464 int* seed);
5466 virtual void work(int i);
5467 };
5469 void CMSRefProcTaskProxy::work(int i) {
5470 CMSParKeepAliveClosure par_keep_alive(_collector, _span,
5471 _mark_bit_map, work_queue(i));
5472 CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span,
5473 _mark_bit_map, work_queue(i));
5474 CMSIsAliveClosure is_alive_closure(_mark_bit_map);
5475 _task.work(i, is_alive_closure, par_keep_alive, par_drain_stack);
5476 if (_task.marks_oops_alive()) {
5477 do_work_steal(i, &par_drain_stack, &par_keep_alive,
5478 _collector->hash_seed(i));
5479 }
5480 assert(work_queue(i)->size() == 0, "work_queue should be empty");
5481 assert(_collector->_overflow_list == NULL, "non-empty _overflow_list");
5482 }
5484 class CMSRefEnqueueTaskProxy: public AbstractGangTask {
5485 typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
5486 EnqueueTask& _task;
5488 public:
5489 CMSRefEnqueueTaskProxy(EnqueueTask& task)
5490 : AbstractGangTask("Enqueue reference objects in parallel"),
5491 _task(task)
5492 { }
5494 virtual void work(int i)
5495 {
5496 _task.work(i);
5497 }
5498 };
5500 CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector,
5501 MemRegion span, CMSBitMap* bit_map, OopTaskQueue* work_queue):
5502 _collector(collector),
5503 _span(span),
5504 _bit_map(bit_map),
5505 _work_queue(work_queue),
5506 _mark_and_push(collector, span, bit_map, work_queue),
5507 _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
5508 (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads)))
5509 { }
5511 // . see if we can share work_queues with ParNew? XXX
5512 void CMSRefProcTaskProxy::do_work_steal(int i,
5513 CMSParDrainMarkingStackClosure* drain,
5514 CMSParKeepAliveClosure* keep_alive,
5515 int* seed) {
5516 OopTaskQueue* work_q = work_queue(i);
5517 NOT_PRODUCT(int num_steals = 0;)
5518 oop obj_to_scan;
5519 size_t num_from_overflow_list =
5520 MIN2((size_t)work_q->max_elems()/4,
5521 (size_t)ParGCDesiredObjsFromOverflowList);
5523 while (true) {
5524 // Completely finish any left over work from (an) earlier round(s)
5525 drain->trim_queue(0);
5526 // Now check if there's any work in the overflow list
5527 if (_collector->par_take_from_overflow_list(num_from_overflow_list,
5528 work_q)) {
5529 // Found something in global overflow list;
5530 // not yet ready to go stealing work from others.
5531 // We'd like to assert(work_q->size() != 0, ...)
5532 // because we just took work from the overflow list,
5533 // but of course we can't, since all of that might have
5534 // been already stolen from us.
5535 continue;
5536 }
5537 // Verify that we have no work before we resort to stealing
5538 assert(work_q->size() == 0, "Have work, shouldn't steal");
5539 // Try to steal from other queues that have work
5540 if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
5541 NOT_PRODUCT(num_steals++;)
5542 assert(obj_to_scan->is_oop(), "Oops, not an oop!");
5543 assert(_mark_bit_map->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
5544 // Do scanning work
5545 obj_to_scan->oop_iterate(keep_alive);
5546 // Loop around, finish this work, and try to steal some more
5547 } else if (terminator()->offer_termination()) {
5548 break; // nirvana from the infinite cycle
5549 }
5550 }
5551 NOT_PRODUCT(
5552 if (PrintCMSStatistics != 0) {
5553 gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
5554 }
5555 )
5556 }
5558 void CMSRefProcTaskExecutor::execute(ProcessTask& task)
5559 {
5560 GenCollectedHeap* gch = GenCollectedHeap::heap();
5561 WorkGang* workers = gch->workers();
5562 assert(workers != NULL, "Need parallel worker threads.");
5563 int n_workers = workers->total_workers();
5564 CMSRefProcTaskProxy rp_task(task, &_collector,
5565 _collector.ref_processor()->span(),
5566 _collector.markBitMap(),
5567 n_workers, _collector.task_queues());
5568 workers->run_task(&rp_task);
5569 }
5571 void CMSRefProcTaskExecutor::execute(EnqueueTask& task)
5572 {
5574 GenCollectedHeap* gch = GenCollectedHeap::heap();
5575 WorkGang* workers = gch->workers();
5576 assert(workers != NULL, "Need parallel worker threads.");
5577 CMSRefEnqueueTaskProxy enq_task(task);
5578 workers->run_task(&enq_task);
5579 }
5581 void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
5583 ResourceMark rm;
5584 HandleMark hm;
5585 ReferencePolicy* soft_ref_policy;
5587 assert(!ref_processor()->enqueuing_is_done(), "Enqueuing should not be complete");
5588 // Process weak references.
5589 if (clear_all_soft_refs) {
5590 soft_ref_policy = new AlwaysClearPolicy();
5591 } else {
5592 #ifdef COMPILER2
5593 soft_ref_policy = new LRUMaxHeapPolicy();
5594 #else
5595 soft_ref_policy = new LRUCurrentHeapPolicy();
5596 #endif // COMPILER2
5597 }
5598 verify_work_stacks_empty();
5600 ReferenceProcessor* rp = ref_processor();
5601 assert(rp->span().equals(_span), "Spans should be equal");
5602 CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
5603 &_markStack);
5604 CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
5605 _span, &_markBitMap, &_markStack,
5606 &cmsKeepAliveClosure);
5607 {
5608 TraceTime t("weak refs processing", PrintGCDetails, false, gclog_or_tty);
5609 if (rp->processing_is_mt()) {
5610 CMSRefProcTaskExecutor task_executor(*this);
5611 rp->process_discovered_references(soft_ref_policy,
5612 &_is_alive_closure,
5613 &cmsKeepAliveClosure,
5614 &cmsDrainMarkingStackClosure,
5615 &task_executor);
5616 } else {
5617 rp->process_discovered_references(soft_ref_policy,
5618 &_is_alive_closure,
5619 &cmsKeepAliveClosure,
5620 &cmsDrainMarkingStackClosure,
5621 NULL);
5622 }
5623 verify_work_stacks_empty();
5624 }
5626 if (cms_should_unload_classes()) {
5627 {
5628 TraceTime t("class unloading", PrintGCDetails, false, gclog_or_tty);
5630 // Follow SystemDictionary roots and unload classes
5631 bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
5633 // Follow CodeCache roots and unload any methods marked for unloading
5634 CodeCache::do_unloading(&_is_alive_closure,
5635 &cmsKeepAliveClosure,
5636 purged_class);
5638 cmsDrainMarkingStackClosure.do_void();
5639 verify_work_stacks_empty();
5641 // Update subklass/sibling/implementor links in KlassKlass descendants
5642 assert(!_revisitStack.isEmpty(), "revisit stack should not be empty");
5643 oop k;
5644 while ((k = _revisitStack.pop()) != NULL) {
5645 ((Klass*)(oopDesc*)k)->follow_weak_klass_links(
5646 &_is_alive_closure,
5647 &cmsKeepAliveClosure);
5648 }
5649 assert(!ClassUnloading ||
5650 (_markStack.isEmpty() && overflow_list_is_empty()),
5651 "Should not have found new reachable objects");
5652 assert(_revisitStack.isEmpty(), "revisit stack should have been drained");
5653 cmsDrainMarkingStackClosure.do_void();
5654 verify_work_stacks_empty();
5655 }
5657 {
5658 TraceTime t("scrub symbol & string tables", PrintGCDetails, false, gclog_or_tty);
5659 // Now clean up stale oops in SymbolTable and StringTable
5660 SymbolTable::unlink(&_is_alive_closure);
5661 StringTable::unlink(&_is_alive_closure);
5662 }
5663 }
5665 verify_work_stacks_empty();
5666 // Restore any preserved marks as a result of mark stack or
5667 // work queue overflow
5668 restore_preserved_marks_if_any(); // done single-threaded for now
5670 rp->set_enqueuing_is_done(true);
5671 if (rp->processing_is_mt()) {
5672 CMSRefProcTaskExecutor task_executor(*this);
5673 rp->enqueue_discovered_references(&task_executor);
5674 } else {
5675 rp->enqueue_discovered_references(NULL);
5676 }
5677 rp->verify_no_references_recorded();
5678 assert(!rp->discovery_enabled(), "should have been disabled");
5680 // JVMTI object tagging is based on JNI weak refs. If any of these
5681 // refs were cleared then JVMTI needs to update its maps and
5682 // maybe post ObjectFrees to agents.
5683 JvmtiExport::cms_ref_processing_epilogue();
5684 }
5686 #ifndef PRODUCT
5687 void CMSCollector::check_correct_thread_executing() {
5688 Thread* t = Thread::current();
5689 // Only the VM thread or the CMS thread should be here.
5690 assert(t->is_ConcurrentGC_thread() || t->is_VM_thread(),
5691 "Unexpected thread type");
5692 // If this is the vm thread, the foreground process
5693 // should not be waiting. Note that _foregroundGCIsActive is
5694 // true while the foreground collector is waiting.
5695 if (_foregroundGCShouldWait) {
5696 // We cannot be the VM thread
5697 assert(t->is_ConcurrentGC_thread(),
5698 "Should be CMS thread");
5699 } else {
5700 // We can be the CMS thread only if we are in a stop-world
5701 // phase of CMS collection.
5702 if (t->is_ConcurrentGC_thread()) {
5703 assert(_collectorState == InitialMarking ||
5704 _collectorState == FinalMarking,
5705 "Should be a stop-world phase");
5706 // The CMS thread should be holding the CMS_token.
5707 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5708 "Potential interference with concurrently "
5709 "executing VM thread");
5710 }
5711 }
5712 }
5713 #endif
5715 void CMSCollector::sweep(bool asynch) {
5716 assert(_collectorState == Sweeping, "just checking");
5717 check_correct_thread_executing();
5718 verify_work_stacks_empty();
5719 verify_overflow_empty();
5720 incrementSweepCount();
5721 _sweep_timer.stop();
5722 _sweep_estimate.sample(_sweep_timer.seconds());
5723 size_policy()->avg_cms_free_at_sweep()->sample(_cmsGen->free());
5725 // PermGen verification support: If perm gen sweeping is disabled in
5726 // this cycle, we preserve the perm gen object "deadness" information
5727 // in the perm_gen_verify_bit_map. In order to do that we traverse
5728 // all blocks in perm gen and mark all dead objects.
5729 if (verifying() && !cms_should_unload_classes()) {
5730 assert(perm_gen_verify_bit_map()->sizeInBits() != 0,
5731 "Should have already been allocated");
5732 MarkDeadObjectsClosure mdo(this, _permGen->cmsSpace(),
5733 markBitMap(), perm_gen_verify_bit_map());
5734 if (asynch) {
5735 CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
5736 bitMapLock());
5737 _permGen->cmsSpace()->blk_iterate(&mdo);
5738 } else {
5739 // In the case of synchronous sweep, we already have
5740 // the requisite locks/tokens.
5741 _permGen->cmsSpace()->blk_iterate(&mdo);
5742 }
5743 }
5745 if (asynch) {
5746 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
5747 CMSPhaseAccounting pa(this, "sweep", !PrintGCDetails);
5748 // First sweep the old gen then the perm gen
5749 {
5750 CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
5751 bitMapLock());
5752 sweepWork(_cmsGen, asynch);
5753 }
5755 // Now repeat for perm gen
5756 if (cms_should_unload_classes()) {
5757 CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
5758 bitMapLock());
5759 sweepWork(_permGen, asynch);
5760 }
5762 // Update Universe::_heap_*_at_gc figures.
5763 // We need all the free list locks to make the abstract state
5764 // transition from Sweeping to Resetting. See detailed note
5765 // further below.
5766 {
5767 CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
5768 _permGen->freelistLock());
5769 // Update heap occupancy information which is used as
5770 // input to soft ref clearing policy at the next gc.
5771 Universe::update_heap_info_at_gc();
5772 _collectorState = Resizing;
5773 }
5774 } else {
5775 // already have needed locks
5776 sweepWork(_cmsGen, asynch);
5778 if (cms_should_unload_classes()) {
5779 sweepWork(_permGen, asynch);
5780 }
5781 // Update heap occupancy information which is used as
5782 // input to soft ref clearing policy at the next gc.
5783 Universe::update_heap_info_at_gc();
5784 _collectorState = Resizing;
5785 }
5786 verify_work_stacks_empty();
5787 verify_overflow_empty();
5789 _sweep_timer.reset();
5790 _sweep_timer.start();
5792 update_time_of_last_gc(os::javaTimeMillis());
5794 // NOTE on abstract state transitions:
5795 // Mutators allocate-live and/or mark the mod-union table dirty
5796 // based on the state of the collection. The former is done in
5797 // the interval [Marking, Sweeping] and the latter in the interval
5798 // [Marking, Sweeping). Thus the transitions into the Marking state
5799 // and out of the Sweeping state must be synchronously visible
5800 // globally to the mutators.
5801 // The transition into the Marking state happens with the world
5802 // stopped so the mutators will globally see it. Sweeping is
5803 // done asynchronously by the background collector so the transition
5804 // from the Sweeping state to the Resizing state must be done
5805 // under the freelistLock (as is the check for whether to
5806 // allocate-live and whether to dirty the mod-union table).
5807 assert(_collectorState == Resizing, "Change of collector state to"
5808 " Resizing must be done under the freelistLocks (plural)");
5810 // Now that sweeping has been completed, if the GCH's
5811 // incremental_collection_will_fail flag is set, clear it,
5812 // thus inviting a younger gen collection to promote into
5813 // this generation. If such a promotion may still fail,
5814 // the flag will be set again when a young collection is
5815 // attempted.
5816 // I think the incremental_collection_will_fail flag's use
 5817   // is specific to a 2-generation collection policy, so I'll
5818 // assert that that's the configuration we are operating within.
5819 // The use of the flag can and should be generalized appropriately
5820 // in the future to deal with a general n-generation system.
5822 GenCollectedHeap* gch = GenCollectedHeap::heap();
5823 assert(gch->collector_policy()->is_two_generation_policy(),
5824 "Resetting of incremental_collection_will_fail flag"
5825 " may be incorrect otherwise");
5826 gch->clear_incremental_collection_will_fail();
5827 gch->update_full_collections_completed(_collection_count_start);
5828 }
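// A compressed view of the collector-state sequence driven by this phase
// (a sketch; the Resizing -> Resetting transition happens in the caller of
// sweep() and is not shown in this excerpt):
//
//   Sweeping --(under both freelistLocks)--------> Resizing
//            --(generation resizing, in caller)--> Resetting
//            --(reset(), further below)----------> Idling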
5830 // FIX ME!!! Looks like this belongs in CFLSpace, with
5831 // CMSGen merely delegating to it.
5832 void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
5833 double nearLargestPercent = 0.999;
5834 HeapWord* minAddr = _cmsSpace->bottom();
5835 HeapWord* largestAddr =
5836 (HeapWord*) _cmsSpace->dictionary()->findLargestDict();
5837 if (largestAddr == 0) {
5838 // The dictionary appears to be empty. In this case
5839 // try to coalesce at the end of the heap.
5840 largestAddr = _cmsSpace->end();
5841 }
5842 size_t largestOffset = pointer_delta(largestAddr, minAddr);
5843 size_t nearLargestOffset =
5844 (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
5845 _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
5846 }
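// Worked example of the computation above (figures illustrative only):
// if minAddr is the bottom of the space and the largest free chunk begins
// 0x4000000 HeapWords above it, then largestOffset == 0x4000000 and
//   nearLargestChunk = minAddr + (size_t)(0.999 * 0x4000000) - MinChunkSize
// i.e. a point just below the start of the largest chunk, so that
// isNearLargestChunk() (below) answers true for the tail of the space where
// end-of-sweep coalescing is most profitable.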
5848 bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) {
5849 return addr >= _cmsSpace->nearLargestChunk();
5850 }
5852 FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
5853 return _cmsSpace->find_chunk_at_end();
5854 }
5856 void ConcurrentMarkSweepGeneration::update_gc_stats(int current_level,
5857 bool full) {
5858 // The next lower level has been collected. Gather any statistics
5859 // that are of interest at this point.
5860 if (!full && (current_level + 1) == level()) {
5861 // Gather statistics on the young generation collection.
5862 collector()->stats().record_gc0_end(used());
5863 }
5864 }
5866 CMSAdaptiveSizePolicy* ConcurrentMarkSweepGeneration::size_policy() {
5867 GenCollectedHeap* gch = GenCollectedHeap::heap();
5868 assert(gch->kind() == CollectedHeap::GenCollectedHeap,
5869 "Wrong type of heap");
5870 CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
5871 gch->gen_policy()->size_policy();
5872 assert(sp->is_gc_cms_adaptive_size_policy(),
5873 "Wrong type of size policy");
5874 return sp;
5875 }
5877 void ConcurrentMarkSweepGeneration::rotate_debug_collection_type() {
5878 if (PrintGCDetails && Verbose) {
5879 gclog_or_tty->print("Rotate from %d ", _debug_collection_type);
5880 }
5881 _debug_collection_type = (CollectionTypes) (_debug_collection_type + 1);
5882 _debug_collection_type =
5883 (CollectionTypes) (_debug_collection_type % Unknown_collection_type);
5884 if (PrintGCDetails && Verbose) {
5885 gclog_or_tty->print_cr("to %d ", _debug_collection_type);
5886 }
5887 }
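// Rotation example (enumerator values illustrative only): with
// Unknown_collection_type == 3 acting as the sentinel/count, successive
// calls step the debug collection type through
//   (0 + 1) % 3 == 1,  (1 + 1) % 3 == 2,  (2 + 1) % 3 == 0,  ...
// cycling over the concrete collection types and never landing on
// Unknown_collection_type itself.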
5889 void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen,
5890 bool asynch) {
5891 // We iterate over the space(s) underlying this generation,
5892 // checking the mark bit map to see if the bits corresponding
5893 // to specific blocks are marked or not. Blocks that are
5894 // marked are live and are not swept up. All remaining blocks
5895 // are swept up, with coalescing on-the-fly as we sweep up
5896 // contiguous free and/or garbage blocks:
5897 // We need to ensure that the sweeper synchronizes with allocators
5898 // and stop-the-world collectors. In particular, the following
5899 // locks are used:
5900 // . CMS token: if this is held, a stop the world collection cannot occur
5901 // . freelistLock: if this is held no allocation can occur from this
5902 // generation by another thread
5903 // . bitMapLock: if this is held, no other thread can access or update
5904 //                  the marking bit map
5906 // Note that we need to hold the freelistLock if we use
5907 // block iterate below; else the iterator might go awry if
5908 // a mutator (or promotion) causes block contents to change
5909 // (for instance if the allocator divvies up a block).
5910 // If we hold the free list lock, for all practical purposes
5911 // young generation GC's can't occur (they'll usually need to
5912 // promote), so we might as well prevent all young generation
5913 // GC's while we do a sweeping step. For the same reason, we might
5914 // as well take the bit map lock for the entire duration
5916 // check that we hold the requisite locks
5917 assert(have_cms_token(), "Should hold cms token");
5918 assert( (asynch && ConcurrentMarkSweepThread::cms_thread_has_cms_token())
5919 || (!asynch && ConcurrentMarkSweepThread::vm_thread_has_cms_token()),
5920 "Should possess CMS token to sweep");
5921 assert_lock_strong(gen->freelistLock());
5922 assert_lock_strong(bitMapLock());
5924 assert(!_sweep_timer.is_active(), "Was switched off in an outer context");
5925 gen->cmsSpace()->beginSweepFLCensus((float)(_sweep_timer.seconds()),
5926 _sweep_estimate.padded_average());
5927 gen->setNearLargestChunk();
5929 {
5930 SweepClosure sweepClosure(this, gen, &_markBitMap,
5931 CMSYield && asynch);
5932 gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
5933 // We need to free-up/coalesce garbage/blocks from a
5934 // co-terminal free run. This is done in the SweepClosure
5935 // destructor; so, do not remove this scope, else the
5936 // end-of-sweep-census below will be off by a little bit.
5937 }
5938 gen->cmsSpace()->sweep_completed();
5939 gen->cmsSpace()->endSweepFLCensus(sweepCount());
5940 }
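// Caller-side sketch (cf. sweep() above): an asynchronous caller is expected
// to bracket the call roughly as follows, which is just a schematic
// restatement of the protocol asserted on entry to sweepWork():
//
//   {
//     CMSTokenSyncWithLocks ts(true, gen->freelistLock(), bitMapLock());
//     sweepWork(gen, true /* asynch */);
//   } // CMS token and both locks are released on scope exit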
5942 // Reset CMS data structures (for now just the marking bit map)
5943 // preparatory for the next cycle.
5944 void CMSCollector::reset(bool asynch) {
5945 GenCollectedHeap* gch = GenCollectedHeap::heap();
5946 CMSAdaptiveSizePolicy* sp = size_policy();
5947 AdaptiveSizePolicyOutput(sp, gch->total_collections());
5948 if (asynch) {
5949 CMSTokenSyncWithLocks ts(true, bitMapLock());
5951 // If the state is not "Resetting", the foreground thread
5952 // has done a collection and the resetting.
5953 if (_collectorState != Resetting) {
5954 assert(_collectorState == Idling, "The state should only change"
5955 " because the foreground collector has finished the collection");
5956 return;
5957 }
5959 // Clear the mark bitmap (no grey objects to start with)
5960 // for the next cycle.
5961 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
5962 CMSPhaseAccounting cmspa(this, "reset", !PrintGCDetails);
5964 HeapWord* curAddr = _markBitMap.startWord();
5965 while (curAddr < _markBitMap.endWord()) {
5966 size_t remaining = pointer_delta(_markBitMap.endWord(), curAddr);
5967 MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
5968 _markBitMap.clear_large_range(chunk);
5969 if (ConcurrentMarkSweepThread::should_yield() &&
5970 !foregroundGCIsActive() &&
5971 CMSYield) {
5972 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5973 "CMS thread should hold CMS token");
5974 assert_lock_strong(bitMapLock());
5975 bitMapLock()->unlock();
5976 ConcurrentMarkSweepThread::desynchronize(true);
5977 ConcurrentMarkSweepThread::acknowledge_yield_request();
5978 stopTimer();
5979 if (PrintCMSStatistics != 0) {
5980 incrementYields();
5981 }
5982 icms_wait();
5984 // See the comment in coordinator_yield()
5985 for (unsigned i = 0; i < CMSYieldSleepCount &&
5986 ConcurrentMarkSweepThread::should_yield() &&
5987 !CMSCollector::foregroundGCIsActive(); ++i) {
5988 os::sleep(Thread::current(), 1, false);
5989 ConcurrentMarkSweepThread::acknowledge_yield_request();
5990 }
5992 ConcurrentMarkSweepThread::synchronize(true);
5993 bitMapLock()->lock_without_safepoint_check();
5994 startTimer();
5995 }
5996 curAddr = chunk.end();
5997 }
5998 _collectorState = Idling;
5999 } else {
6000 // already have the lock
6001 assert(_collectorState == Resetting, "just checking");
6002 assert_lock_strong(bitMapLock());
6003 _markBitMap.clear_all();
6004 _collectorState = Idling;
6005 }
6007 // Stop incremental mode after a cycle completes, so that any future cycles
6008 // are triggered by allocation.
6009 stop_icms();
6011 NOT_PRODUCT(
6012 if (RotateCMSCollectionTypes) {
6013 _cmsGen->rotate_debug_collection_type();
6014 }
6015 )
6016 }
6018 void CMSCollector::do_CMS_operation(CMS_op_type op) {
6019 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
6020 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
6021 TraceTime t("GC", PrintGC, !PrintGCDetails, gclog_or_tty);
6022 TraceCollectorStats tcs(counters());
6024 switch (op) {
6025 case CMS_op_checkpointRootsInitial: {
6026 checkpointRootsInitial(true); // asynch
6027 if (PrintGC) {
6028 _cmsGen->printOccupancy("initial-mark");
6029 }
6030 break;
6031 }
6032 case CMS_op_checkpointRootsFinal: {
6033 checkpointRootsFinal(true, // asynch
6034 false, // !clear_all_soft_refs
6035 false); // !init_mark_was_synchronous
6036 if (PrintGC) {
6037 _cmsGen->printOccupancy("remark");
6038 }
6039 break;
6040 }
6041 default:
6042 fatal("No such CMS_op");
6043 }
6044 }
6046 #ifndef PRODUCT
6047 size_t const CMSCollector::skip_header_HeapWords() {
6048 return FreeChunk::header_size();
6049 }
6051 // Try to collect here the conditions that should hold when
6052 // CMS thread is exiting. The idea is that the foreground GC
6053 // thread should not be blocked if it wants to terminate
6054 // the CMS thread and yet continue to run the VM for a while
6055 // after that.
6056 void CMSCollector::verify_ok_to_terminate() const {
6057 assert(Thread::current()->is_ConcurrentGC_thread(),
6058 "should be called by CMS thread");
6059 assert(!_foregroundGCShouldWait, "should be false");
6060 // We could check here that all the various low-level locks
6061 // are not held by the CMS thread, but that is overkill; see
6062 // also CMSThread::verify_ok_to_terminate() where the CGC_lock
6063 // is checked.
6064 }
6065 #endif
6067 size_t CMSCollector::block_size_using_printezis_bits(HeapWord* addr) const {
6068 assert(_markBitMap.isMarked(addr) && _markBitMap.isMarked(addr + 1),
6069 "missing Printezis mark?");
6070 HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
6071 size_t size = pointer_delta(nextOneAddr + 1, addr);
6072 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6073 "alignment problem");
6074 assert(size >= 3, "Necessary for Printezis marks to work");
6075 return size;
6076 }
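// Worked example (addresses in HeapWords; block size chosen for
// illustration): for an allocated-but-uninitialized block of 8 words at
// addr, the allocator sets the two leading "Printezis" bits at addr and
// addr+1 and a trailing bit at addr+7 (the last word).  Then
//   getNextMarkedWordAddress(addr + 2) == addr + 7
//   size = pointer_delta(addr + 7 + 1, addr) == 8
// which also shows why size >= 3 is required: for a 2-word block the
// trailing bit would coincide with the bit at addr+1 and be ambiguous.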
6078 // A variant of the above (block_size_using_printezis_bits()) except
6079 // that we return 0 if the P-bits are not yet set.
6080 size_t CMSCollector::block_size_if_printezis_bits(HeapWord* addr) const {
6081 if (_markBitMap.isMarked(addr)) {
6082 assert(_markBitMap.isMarked(addr + 1), "Missing Printezis bit?");
6083 HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
6084 size_t size = pointer_delta(nextOneAddr + 1, addr);
6085 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6086 "alignment problem");
6087 assert(size >= 3, "Necessary for Printezis marks to work");
6088 return size;
6089 } else {
6090 assert(!_markBitMap.isMarked(addr + 1), "Bit map inconsistency?");
6091 return 0;
6092 }
6093 }
6095 HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const {
6096 size_t sz = 0;
6097 oop p = (oop)addr;
6098 if (p->klass() != NULL && p->is_parsable()) {
6099 sz = CompactibleFreeListSpace::adjustObjectSize(p->size());
6100 } else {
6101 sz = block_size_using_printezis_bits(addr);
6102 }
6103 assert(sz > 0, "size must be nonzero");
6104 HeapWord* next_block = addr + sz;
6105 HeapWord* next_card = (HeapWord*)round_to((uintptr_t)next_block,
6106 CardTableModRefBS::card_size);
6107 assert(round_down((uintptr_t)addr, CardTableModRefBS::card_size) <
6108 round_down((uintptr_t)next_card, CardTableModRefBS::card_size),
6109 "must be different cards");
6110 return next_card;
6111 }
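// Worked example (illustrative; assumes an 8-byte HeapWord and a 512-byte
// card, i.e. CardTableModRefBS::card_size == 512): for a block of sz == 10
// words starting at byte address 0x1000, next_block is 0x1000 + 80 == 0x1050
// and next_card == round_to(0x1050, 512) == 0x1200 -- the start of the
// following card, which the assert above checks is a different card from
// the one containing addr.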
6114 // CMS Bit Map Wrapper /////////////////////////////////////////
6116 // Construct a CMS bit map infrastructure, but don't create the
6117 // bit vector itself. That is done by a separate call CMSBitMap::allocate()
6118 // further below.
6119 CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name):
6120 _bm(NULL,0),
6121 _shifter(shifter),
6122 _lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true) : NULL)
6123 {
6124 _bmStartWord = 0;
6125 _bmWordSize = 0;
6126 }
6128 bool CMSBitMap::allocate(MemRegion mr) {
6129 _bmStartWord = mr.start();
6130 _bmWordSize = mr.word_size();
6131 ReservedSpace brs(ReservedSpace::allocation_align_size_up(
6132 (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
6133 if (!brs.is_reserved()) {
6134 warning("CMS bit map allocation failure");
6135 return false;
6136 }
6137 // For now we'll just commit all of the bit map up front.
6138 // Later on we'll try to be more parsimonious with swap.
6139 if (!_virtual_space.initialize(brs, brs.size())) {
6140 warning("CMS bit map backing store failure");
6141 return false;
6142 }
6143 assert(_virtual_space.committed_size() == brs.size(),
6144 "didn't reserve backing store for all of CMS bit map?");
6145 _bm.set_map((uintptr_t*)_virtual_space.low());
6146 assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
6147 _bmWordSize, "inconsistency in bit map sizing");
6148 _bm.set_size(_bmWordSize >> _shifter);
6150 // bm.clear(); // can we rely on getting zero'd memory? verify below
6151 assert(isAllClear(),
6152 "Expected zero'd memory from ReservedSpace constructor");
6153 assert(_bm.size() == heapWordDiffToOffsetDiff(sizeInWords()),
6154 "consistency check");
6155 return true;
6156 }
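// Sizing example (illustrative; assumes an 8-byte HeapWord and _shifter == 0,
// i.e. one bit per heap word as for the CMS marking bit map): to cover a
// 1 GB region, _bmWordSize == 2^30 / 8 == 2^27 words, and the reservation is
//   (2^27 >> (0 + LogBitsPerByte)) + 1  ==  (2^27 >> 3) + 1  ~=  16 MB
// rounded up to the platform allocation alignment.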
6158 void CMSBitMap::dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl) {
6159 HeapWord *next_addr, *end_addr, *last_addr;
6160 assert_locked();
6161 assert(covers(mr), "out-of-range error");
6162 // XXX assert that start and end are appropriately aligned
6163 for (next_addr = mr.start(), end_addr = mr.end();
6164 next_addr < end_addr; next_addr = last_addr) {
6165 MemRegion dirty_region = getAndClearMarkedRegion(next_addr, end_addr);
6166 last_addr = dirty_region.end();
6167 if (!dirty_region.is_empty()) {
6168 cl->do_MemRegion(dirty_region);
6169 } else {
6170 assert(last_addr == end_addr, "program logic");
6171 return;
6172 }
6173 }
6174 }
6176 #ifndef PRODUCT
6177 void CMSBitMap::assert_locked() const {
6178 CMSLockVerifier::assert_locked(lock());
6179 }
6181 bool CMSBitMap::covers(MemRegion mr) const {
6182 // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
6183 assert((size_t)_bm.size() == (_bmWordSize >> _shifter),
6184 "size inconsistency");
6185 return (mr.start() >= _bmStartWord) &&
6186 (mr.end() <= endWord());
6187 }
6189 bool CMSBitMap::covers(HeapWord* start, size_t size) const {
6190 return (start >= _bmStartWord && (start + size) <= endWord());
6191 }
6193 void CMSBitMap::verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) {
6194 // verify that there are no 1 bits in the interval [left, right)
6195 FalseBitMapClosure falseBitMapClosure;
6196 iterate(&falseBitMapClosure, left, right);
6197 }
6199 void CMSBitMap::region_invariant(MemRegion mr)
6200 {
6201 assert_locked();
6202 // mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
6203 assert(!mr.is_empty(), "unexpected empty region");
6204 assert(covers(mr), "mr should be covered by bit map");
6205 // convert address range into offset range
6206 size_t start_ofs = heapWordToOffset(mr.start());
6207 // Make sure that end() is appropriately aligned
6208 assert(mr.end() == (HeapWord*)round_to((intptr_t)mr.end(),
6209 (1 << (_shifter+LogHeapWordSize))),
6210 "Misaligned mr.end()");
6211 size_t end_ofs = heapWordToOffset(mr.end());
6212 assert(end_ofs > start_ofs, "Should mark at least one bit");
6213 }
6215 #endif
6217 bool CMSMarkStack::allocate(size_t size) {
6218 // allocate a stack of the requisite depth
6219 ReservedSpace rs(ReservedSpace::allocation_align_size_up(
6220 size * sizeof(oop)));
6221 if (!rs.is_reserved()) {
6222 warning("CMSMarkStack allocation failure");
6223 return false;
6224 }
6225 if (!_virtual_space.initialize(rs, rs.size())) {
6226 warning("CMSMarkStack backing store failure");
6227 return false;
6228 }
6229 assert(_virtual_space.committed_size() == rs.size(),
6230 "didn't reserve backing store for all of CMS stack?");
6231 _base = (oop*)(_virtual_space.low());
6232 _index = 0;
6233 _capacity = size;
6234 NOT_PRODUCT(_max_depth = 0);
6235 return true;
6236 }
6238 // XXX FIX ME !!! In the MT case we come in here holding a
6239 // leaf lock. For printing we need to take a further lock
6240 // which has lower rank. We need to recalibrate the two
6241 // lock-ranks involved in order to be able to print the
6242 // messages below. (Or defer the printing to the caller.
6243 // For now we take the expedient path of just disabling the
6244 // messages for the problematic case.)
6245 void CMSMarkStack::expand() {
6246 assert(_capacity <= CMSMarkStackSizeMax, "stack bigger than permitted");
6247 if (_capacity == CMSMarkStackSizeMax) {
6248 if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
6249 // We print a warning message only once per CMS cycle.
6250 gclog_or_tty->print_cr(" (benign) Hit CMSMarkStack max size limit");
6251 }
6252 return;
6253 }
6254 // Double capacity if possible
6255 size_t new_capacity = MIN2(_capacity*2, CMSMarkStackSizeMax);
6256 // Do not give up existing stack until we have managed to
6257 // get the double capacity that we desired.
6258 ReservedSpace rs(ReservedSpace::allocation_align_size_up(
6259 new_capacity * sizeof(oop)));
6260 if (rs.is_reserved()) {
6261 // Release the backing store associated with old stack
6262 _virtual_space.release();
6263 // Reinitialize virtual space for new stack
6264 if (!_virtual_space.initialize(rs, rs.size())) {
6265 fatal("Not enough swap for expanded marking stack");
6266 }
6267 _base = (oop*)(_virtual_space.low());
6268 _index = 0;
6269 _capacity = new_capacity;
6270 } else if (_failed_double++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
6271 // Failed to double capacity; continue.
6272 // We print a detail message only once per CMS cycle.
6273 gclog_or_tty->print(" (benign) Failed to expand marking stack from "SIZE_FORMAT"K to "
6274 SIZE_FORMAT"K",
6275 _capacity / K, new_capacity / K);
6276 }
6277 }
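// Expansion example (capacities illustrative): with _capacity == 64K oops
// and CMSMarkStackSizeMax == 4M, new_capacity == MIN2(128K, 4M) == 128K and
// a fresh 128K-oop reservation replaces the old backing store.  Note that
// _index is reset to 0; this is safe because, in this file, expand() is only
// invoked from the handle_stack_overflow() paths, which discard the stack
// contents (via reset()) before expanding.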
6280 // Closures
6281 // XXX: there seems to be a lot of code duplication here;
6282 // should refactor and consolidate common code.
6284 // This closure is used to mark refs into the CMS generation in
6285 // the CMS bit map. Called at the first checkpoint. This closure
6286 // assumes that we do not need to re-mark dirty cards; if the CMS
6287 // generation on which this is used is not the oldest (modulo perm gen)
6288 // generation, then this will lose younger_gen cards!
6290 MarkRefsIntoClosure::MarkRefsIntoClosure(
6291 MemRegion span, CMSBitMap* bitMap, bool should_do_nmethods):
6292 _span(span),
6293 _bitMap(bitMap),
6294 _should_do_nmethods(should_do_nmethods)
6295 {
6296 assert(_ref_processor == NULL, "deliberately left NULL");
6297 assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
6298 }
6300 void MarkRefsIntoClosure::do_oop(oop* p) {
6301 // if p points into _span, then mark corresponding bit in _markBitMap
6302 oop thisOop = *p;
6303 if (thisOop != NULL) {
6304 assert(thisOop->is_oop(), "expected an oop");
6305 HeapWord* addr = (HeapWord*)thisOop;
6306 if (_span.contains(addr)) {
6307 // this should be made more efficient
6308 _bitMap->mark(addr);
6309 }
6310 }
6311 }
6313 // A variant of the above, used for CMS marking verification.
6314 MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
6315 MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm,
6316 bool should_do_nmethods):
6317 _span(span),
6318 _verification_bm(verification_bm),
6319 _cms_bm(cms_bm),
6320 _should_do_nmethods(should_do_nmethods) {
6321 assert(_ref_processor == NULL, "deliberately left NULL");
6322 assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
6323 }
6325 void MarkRefsIntoVerifyClosure::do_oop(oop* p) {
6326 // if p points into _span, then mark corresponding bit in _markBitMap
6327 oop this_oop = *p;
6328 if (this_oop != NULL) {
6329 assert(this_oop->is_oop(), "expected an oop");
6330 HeapWord* addr = (HeapWord*)this_oop;
6331 if (_span.contains(addr)) {
6332 _verification_bm->mark(addr);
6333 if (!_cms_bm->isMarked(addr)) {
6334 oop(addr)->print();
6335 gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr);
6336 fatal("... aborting");
6337 }
6338 }
6339 }
6340 }
6342 //////////////////////////////////////////////////
6343 // MarkRefsIntoAndScanClosure
6344 //////////////////////////////////////////////////
6346 MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
6347 ReferenceProcessor* rp,
6348 CMSBitMap* bit_map,
6349 CMSBitMap* mod_union_table,
6350 CMSMarkStack* mark_stack,
6351 CMSMarkStack* revisit_stack,
6352 CMSCollector* collector,
6353 bool should_yield,
6354 bool concurrent_precleaning):
6355 _collector(collector),
6356 _span(span),
6357 _bit_map(bit_map),
6358 _mark_stack(mark_stack),
6359 _pushAndMarkClosure(collector, span, rp, bit_map, mod_union_table,
6360 mark_stack, revisit_stack, concurrent_precleaning),
6361 _yield(should_yield),
6362 _concurrent_precleaning(concurrent_precleaning),
6363 _freelistLock(NULL)
6364 {
6365 _ref_processor = rp;
6366 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
6367 }
6369 // This closure is used to mark refs into the CMS generation at the
6370 // second (final) checkpoint, and to scan and transitively follow
6371 // the unmarked oops. It is also used during the concurrent precleaning
6372 // phase while scanning objects on dirty cards in the CMS generation.
6373 // The marks are made in the marking bit map and the marking stack is
6374 // used for keeping the (newly) grey objects during the scan.
6375 // The parallel version (Par_...) appears further below.
6376 void MarkRefsIntoAndScanClosure::do_oop(oop* p) {
6377 oop this_oop = *p;
6378 if (this_oop != NULL) {
6379 assert(this_oop->is_oop(), "expected an oop");
6380 HeapWord* addr = (HeapWord*)this_oop;
6381 assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
6382 assert(_collector->overflow_list_is_empty(), "should be empty");
6383 if (_span.contains(addr) &&
6384 !_bit_map->isMarked(addr)) {
6385 // mark bit map (object is now grey)
6386 _bit_map->mark(addr);
6387 // push on marking stack (stack should be empty), and drain the
6388 // stack by applying this closure to the oops in the oops popped
6389 // from the stack (i.e. blacken the grey objects)
6390 bool res = _mark_stack->push(this_oop);
6391 assert(res, "Should have space to push on empty stack");
6392 do {
6393 oop new_oop = _mark_stack->pop();
6394 assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
6395 assert(new_oop->is_parsable(), "Found unparsable oop");
6396 assert(_bit_map->isMarked((HeapWord*)new_oop),
6397 "only grey objects on this stack");
6398 // iterate over the oops in this oop, marking and pushing
6399 // the ones in CMS heap (i.e. in _span).
6400 new_oop->oop_iterate(&_pushAndMarkClosure);
6401 // check if it's time to yield
6402 do_yield_check();
6403 } while (!_mark_stack->isEmpty() ||
6404 (!_concurrent_precleaning && take_from_overflow_list()));
6405 // if marking stack is empty, and we are not doing this
6406 // during precleaning, then check the overflow list
6407 }
6408 assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
6409 assert(_collector->overflow_list_is_empty(),
6410 "overflow list was drained above");
6411 // We could restore evacuated mark words, if any, used for
6412 // overflow list links here because the overflow list is
6413 // provably empty here. That would reduce the maximum
6414 // size requirements for preserved_{oop,mark}_stack.
6415 // But we'll just postpone it until we are all done
6416 // so we can just stream through.
6417 if (!_concurrent_precleaning && CMSOverflowEarlyRestoration) {
6418 _collector->restore_preserved_marks_if_any();
6419 assert(_collector->no_preserved_marks(), "No preserved marks");
6420 }
6421 assert(!CMSOverflowEarlyRestoration || _collector->no_preserved_marks(),
6422 "All preserved marks should have been restored above");
6423 }
6424 }
6426 void MarkRefsIntoAndScanClosure::do_yield_work() {
6427 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6428 "CMS thread should hold CMS token");
6429 assert_lock_strong(_freelistLock);
6430 assert_lock_strong(_bit_map->lock());
6431 // relinquish the freelist lock and the bit map lock()
6432 _bit_map->lock()->unlock();
6433 _freelistLock->unlock();
6434 ConcurrentMarkSweepThread::desynchronize(true);
6435 ConcurrentMarkSweepThread::acknowledge_yield_request();
6436 _collector->stopTimer();
6437 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
6438 if (PrintCMSStatistics != 0) {
6439 _collector->incrementYields();
6440 }
6441 _collector->icms_wait();
6443 // See the comment in coordinator_yield()
6444 for (unsigned i = 0; i < CMSYieldSleepCount &&
6445 ConcurrentMarkSweepThread::should_yield() &&
6446 !CMSCollector::foregroundGCIsActive(); ++i) {
6447 os::sleep(Thread::current(), 1, false);
6448 ConcurrentMarkSweepThread::acknowledge_yield_request();
6449 }
6451 ConcurrentMarkSweepThread::synchronize(true);
6452 _freelistLock->lock_without_safepoint_check();
6453 _bit_map->lock()->lock_without_safepoint_check();
6454 _collector->startTimer();
6455 }
6457 ///////////////////////////////////////////////////////////
6458 // Par_MarkRefsIntoAndScanClosure: a parallel version of
6459 // MarkRefsIntoAndScanClosure
6460 ///////////////////////////////////////////////////////////
6461 Par_MarkRefsIntoAndScanClosure::Par_MarkRefsIntoAndScanClosure(
6462 CMSCollector* collector, MemRegion span, ReferenceProcessor* rp,
6463 CMSBitMap* bit_map, OopTaskQueue* work_queue, CMSMarkStack* revisit_stack):
6464 _span(span),
6465 _bit_map(bit_map),
6466 _work_queue(work_queue),
6467 _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
6468 (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads))),
6469 _par_pushAndMarkClosure(collector, span, rp, bit_map, work_queue,
6470 revisit_stack)
6471 {
6472 _ref_processor = rp;
6473 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
6474 }
6476 // This closure is used to mark refs into the CMS generation at the
6477 // second (final) checkpoint, and to scan and transitively follow
6478 // the unmarked oops. The marks are made in the marking bit map and
6479 // the work_queue is used for keeping the (newly) grey objects during
6480 // the scan phase whence they are also available for stealing by parallel
6481 // threads. Since the marking bit map is shared, updates are
6482 // synchronized (via CAS).
6483 void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p) {
6484 oop this_oop = *p;
6485 if (this_oop != NULL) {
6486 // Ignore mark word because this could be an already marked oop
6487 // that may be chained at the end of the overflow list.
6488 assert(this_oop->is_oop(true /* ignore mark word */), "expected an oop");
6489 HeapWord* addr = (HeapWord*)this_oop;
6490 if (_span.contains(addr) &&
6491 !_bit_map->isMarked(addr)) {
6492 // mark bit map (object will become grey):
6493 // It is possible for several threads to be
6494 // trying to "claim" this object concurrently;
6495 // the unique thread that succeeds in marking the
6496 // object first will do the subsequent push on
6497 // to the work queue (or overflow list).
6498 if (_bit_map->par_mark(addr)) {
6499 // push on work_queue (which may not be empty), and trim the
6500 // queue to an appropriate length by applying this closure to
6501 // the oops in the oops popped from the stack (i.e. blacken the
6502 // grey objects)
6503 bool res = _work_queue->push(this_oop);
6504 assert(res, "Low water mark should be less than capacity?");
6505 trim_queue(_low_water_mark);
6506 } // Else, another thread claimed the object
6507 }
6508 }
6509 }
6511 // This closure is used to rescan the marked objects on the dirty cards
6512 // in the mod union table and the card table proper.
6513 size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
6514 oop p, MemRegion mr) {
6516 size_t size = 0;
6517 HeapWord* addr = (HeapWord*)p;
6518 DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6519 assert(_span.contains(addr), "we are scanning the CMS generation");
6520 // check if it's time to yield
6521 if (do_yield_check()) {
6522 // We yielded for some foreground stop-world work,
6523 // and we have been asked to abort this ongoing preclean cycle.
6524 return 0;
6525 }
6526 if (_bitMap->isMarked(addr)) {
6527 // it's marked; is it potentially uninitialized?
6528 if (p->klass() != NULL) {
6529 if (CMSPermGenPrecleaningEnabled && !p->is_parsable()) {
6530 // Signal precleaning to redirty the card since
6531 // the klass pointer is already installed.
6532 assert(size == 0, "Initial value");
6533 } else {
6534 assert(p->is_parsable(), "must be parsable.");
6535 // an initialized object; ignore mark word in verification below
6536 // since we are running concurrent with mutators
6537 assert(p->is_oop(true), "should be an oop");
6538 if (p->is_objArray()) {
6539 // objArrays are precisely marked; restrict scanning
6540 // to dirty cards only.
6541 size = p->oop_iterate(_scanningClosure, mr);
6542 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6543 "adjustObjectSize should be the identity for array sizes, "
6544 "which are necessarily larger than minimum object size of "
6545 "two heap words");
6546 } else {
6547 // A non-array may have been imprecisely marked; we need
6548 // to scan object in its entirety.
6549 size = CompactibleFreeListSpace::adjustObjectSize(
6550 p->oop_iterate(_scanningClosure));
6551 }
6552 #ifdef DEBUG
6553 size_t direct_size =
6554 CompactibleFreeListSpace::adjustObjectSize(p->size());
6555 assert(size == direct_size, "Inconsistency in size");
6556 assert(size >= 3, "Necessary for Printezis marks to work");
6557 if (!_bitMap->isMarked(addr+1)) {
6558 _bitMap->verifyNoOneBitsInRange(addr+2, addr+size);
6559 } else {
6560 _bitMap->verifyNoOneBitsInRange(addr+2, addr+size-1);
6561 assert(_bitMap->isMarked(addr+size-1),
6562 "inconsistent Printezis mark");
6563 }
6564 #endif // DEBUG
6565 }
6566 } else {
6567 // an uninitialized object
6568 assert(_bitMap->isMarked(addr+1), "missing Printezis mark?");
6569 HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
6570 size = pointer_delta(nextOneAddr + 1, addr);
6571 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6572 "alignment problem");
6573 // Note that pre-cleaning needn't redirty the card. OopDesc::set_klass()
6574 // will dirty the card when the klass pointer is installed in the
6575 // object (signalling the completion of initialization).
6576 }
6577 } else {
6578 // Either a not yet marked object or an uninitialized object
6579 if (p->klass() == NULL || !p->is_parsable()) {
6580 // An uninitialized object, skip to the next card, since
6581 // we may not be able to read its P-bits yet.
6582 assert(size == 0, "Initial value");
6583 } else {
6584 // An object not (yet) reached by marking: we merely need to
6585 // compute its size so as to go look at the next block.
6586 assert(p->is_oop(true), "should be an oop");
6587 size = CompactibleFreeListSpace::adjustObjectSize(p->size());
6588 }
6589 }
6590 DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6591 return size;
6592 }
6594 void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
6595 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6596 "CMS thread should hold CMS token");
6597 assert_lock_strong(_freelistLock);
6598 assert_lock_strong(_bitMap->lock());
6600 // relinquish the freelist lock and the bit map lock()
6600 _bitMap->lock()->unlock();
6601 _freelistLock->unlock();
6602 ConcurrentMarkSweepThread::desynchronize(true);
6603 ConcurrentMarkSweepThread::acknowledge_yield_request();
6604 _collector->stopTimer();
6605 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
6606 if (PrintCMSStatistics != 0) {
6607 _collector->incrementYields();
6608 }
6609 _collector->icms_wait();
6611 // See the comment in coordinator_yield()
6612 for (unsigned i = 0; i < CMSYieldSleepCount &&
6613 ConcurrentMarkSweepThread::should_yield() &&
6614 !CMSCollector::foregroundGCIsActive(); ++i) {
6615 os::sleep(Thread::current(), 1, false);
6616 ConcurrentMarkSweepThread::acknowledge_yield_request();
6617 }
6619 ConcurrentMarkSweepThread::synchronize(true);
6620 _freelistLock->lock_without_safepoint_check();
6621 _bitMap->lock()->lock_without_safepoint_check();
6622 _collector->startTimer();
6623 }
6626 //////////////////////////////////////////////////////////////////
6627 // SurvivorSpacePrecleanClosure
6628 //////////////////////////////////////////////////////////////////
6629 // This (single-threaded) closure is used to preclean the oops in
6630 // the survivor spaces.
6631 size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {
6633 HeapWord* addr = (HeapWord*)p;
6634 DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6635 assert(!_span.contains(addr), "we are scanning the survivor spaces");
6636 assert(p->klass() != NULL, "object should be initialized");
6637 assert(p->is_parsable(), "must be parsable.");
6638 // an initialized object; ignore mark word in verification below
6639 // since we are running concurrent with mutators
6640 assert(p->is_oop(true), "should be an oop");
6641 // Note that we do not yield while we iterate over
6642 // the interior oops of p, pushing the relevant ones
6643 // on our marking stack.
6644 size_t size = p->oop_iterate(_scanning_closure);
6645 do_yield_check();
6646 // Observe that below, we do not abandon the preclean
6647 // phase as soon as we should; rather we empty the
6648 // marking stack before returning. This is to satisfy
6649 // some existing assertions. In general, it may be a
6650 // good idea to abort immediately and complete the marking
6651 // from the grey objects at a later time.
6652 while (!_mark_stack->isEmpty()) {
6653 oop new_oop = _mark_stack->pop();
6654 assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
6655 assert(new_oop->is_parsable(), "Found unparsable oop");
6656 assert(_bit_map->isMarked((HeapWord*)new_oop),
6657 "only grey objects on this stack");
6658 // iterate over the oops in this oop, marking and pushing
6659 // the ones in CMS heap (i.e. in _span).
6660 new_oop->oop_iterate(_scanning_closure);
6661 // check if it's time to yield
6662 do_yield_check();
6663 }
6664 unsigned int after_count =
6665 GenCollectedHeap::heap()->total_collections();
6666 bool abort = (_before_count != after_count) ||
6667 _collector->should_abort_preclean();
6668 return abort ? 0 : size;
6669 }
6671 void SurvivorSpacePrecleanClosure::do_yield_work() {
6672 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6673 "CMS thread should hold CMS token");
6674 assert_lock_strong(_bit_map->lock());
6675 // Relinquish the bit map lock
6676 _bit_map->lock()->unlock();
6677 ConcurrentMarkSweepThread::desynchronize(true);
6678 ConcurrentMarkSweepThread::acknowledge_yield_request();
6679 _collector->stopTimer();
6680 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
6681 if (PrintCMSStatistics != 0) {
6682 _collector->incrementYields();
6683 }
6684 _collector->icms_wait();
6686 // See the comment in coordinator_yield()
6687 for (unsigned i = 0; i < CMSYieldSleepCount &&
6688 ConcurrentMarkSweepThread::should_yield() &&
6689 !CMSCollector::foregroundGCIsActive(); ++i) {
6690 os::sleep(Thread::current(), 1, false);
6691 ConcurrentMarkSweepThread::acknowledge_yield_request();
6692 }
6694 ConcurrentMarkSweepThread::synchronize(true);
6695 _bit_map->lock()->lock_without_safepoint_check();
6696 _collector->startTimer();
6697 }
6699 // This closure is used to rescan the marked objects on the dirty cards
6700 // in the mod union table and the card table proper. In the parallel
6701 // case, although the bitMap is shared, we do a single read so the
6702 // isMarked() query is "safe".
6703 bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
6704 // Ignore mark word because we are running concurrent with mutators
6705 assert(p->is_oop_or_null(true), "expected an oop or null");
6706 HeapWord* addr = (HeapWord*)p;
6707 assert(_span.contains(addr), "we are scanning the CMS generation");
6708 bool is_obj_array = false;
6709 #ifdef DEBUG
6710 if (!_parallel) {
6711 assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
6712 assert(_collector->overflow_list_is_empty(),
6713 "overflow list should be empty");
6715 }
6716 #endif // DEBUG
6717 if (_bit_map->isMarked(addr)) {
6718 // Obj arrays are precisely marked, non-arrays are not;
6719 // so we scan objArrays precisely and non-arrays in their
6720 // entirety.
6721 if (p->is_objArray()) {
6722 is_obj_array = true;
6723 if (_parallel) {
6724 p->oop_iterate(_par_scan_closure, mr);
6725 } else {
6726 p->oop_iterate(_scan_closure, mr);
6727 }
6728 } else {
6729 if (_parallel) {
6730 p->oop_iterate(_par_scan_closure);
6731 } else {
6732 p->oop_iterate(_scan_closure);
6733 }
6734 }
6735 }
6736 #ifdef DEBUG
6737 if (!_parallel) {
6738 assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
6739 assert(_collector->overflow_list_is_empty(),
6740 "overflow list should be empty");
6742 }
6743 #endif // DEBUG
6744 return is_obj_array;
6745 }
6747 MarkFromRootsClosure::MarkFromRootsClosure(CMSCollector* collector,
6748 MemRegion span,
6749 CMSBitMap* bitMap, CMSMarkStack* markStack,
6750 CMSMarkStack* revisitStack,
6751 bool should_yield, bool verifying):
6752 _collector(collector),
6753 _span(span),
6754 _bitMap(bitMap),
6755 _mut(&collector->_modUnionTable),
6756 _markStack(markStack),
6757 _revisitStack(revisitStack),
6758 _yield(should_yield),
6759 _skipBits(0)
6760 {
6761 assert(_markStack->isEmpty(), "stack should be empty");
6762 _finger = _bitMap->startWord();
6763 _threshold = _finger;
6764 assert(_collector->_restart_addr == NULL, "Sanity check");
6765 assert(_span.contains(_finger), "Out of bounds _finger?");
6766 DEBUG_ONLY(_verifying = verifying;)
6767 }
6769 void MarkFromRootsClosure::reset(HeapWord* addr) {
6770 assert(_markStack->isEmpty(), "would cause duplicates on stack");
6771 assert(_span.contains(addr), "Out of bounds _finger?");
6772 _finger = addr;
6773 _threshold = (HeapWord*)round_to(
6774 (intptr_t)_finger, CardTableModRefBS::card_size);
6775 }
6777 // Should revisit to see if this should be restructured for
6778 // greater efficiency.
6779 void MarkFromRootsClosure::do_bit(size_t offset) {
6780 if (_skipBits > 0) {
6781 _skipBits--;
6782 return;
6783 }
6784 // convert offset into a HeapWord*
6785 HeapWord* addr = _bitMap->startWord() + offset;
6786 assert(_bitMap->endWord() && addr < _bitMap->endWord(),
6787 "address out of range");
6788 assert(_bitMap->isMarked(addr), "tautology");
6789 if (_bitMap->isMarked(addr+1)) {
6790 // this is an allocated but not yet initialized object
6791 assert(_skipBits == 0, "tautology");
6792 _skipBits = 2; // skip next two marked bits ("Printezis-marks")
6793 oop p = oop(addr);
6794 if (p->klass() == NULL || !p->is_parsable()) {
6795 DEBUG_ONLY(if (!_verifying) {)
6796 // We re-dirty the cards on which this object lies and increase
6797 // the _threshold so that we'll come back to scan this object
6798 // during the preclean or remark phase. (CMSCleanOnEnter)
6799 if (CMSCleanOnEnter) {
6800 size_t sz = _collector->block_size_using_printezis_bits(addr);
6801 HeapWord* start_card_addr = (HeapWord*)round_down(
6802 (intptr_t)addr, CardTableModRefBS::card_size);
6803 HeapWord* end_card_addr = (HeapWord*)round_to(
6804 (intptr_t)(addr+sz), CardTableModRefBS::card_size);
6805 MemRegion redirty_range = MemRegion(start_card_addr, end_card_addr);
6806 assert(!redirty_range.is_empty(), "Arithmetical tautology");
6807 // Bump _threshold to end_card_addr; note that
6808 // _threshold cannot possibly exceed end_card_addr, anyhow.
6809 // This prevents future clearing of the card as the scan proceeds
6810 // to the right.
6811 assert(_threshold <= end_card_addr,
6812 "Because we are just scanning into this object");
6813 if (_threshold < end_card_addr) {
6814 _threshold = end_card_addr;
6815 }
6816 if (p->klass() != NULL) {
6817 // Redirty the range of cards...
6818 _mut->mark_range(redirty_range);
6819 } // ...else the setting of klass will dirty the card anyway.
6820 }
6821 DEBUG_ONLY(})
6822 return;
6823 }
6824 }
6825 scanOopsInOop(addr);
6826 }
6828 // We take a break if we've been at this for a while,
6829 // so as to avoid monopolizing the locks involved.
6830 void MarkFromRootsClosure::do_yield_work() {
6831 // First give up the locks, then yield, then re-lock
6832 // We should probably use a constructor/destructor idiom to
6833 // do this unlock/lock or modify the MutexUnlocker class to
6834 // serve our purpose. XXX
6835 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6836 "CMS thread should hold CMS token");
6837 assert_lock_strong(_bitMap->lock());
6838 _bitMap->lock()->unlock();
6839 ConcurrentMarkSweepThread::desynchronize(true);
6840 ConcurrentMarkSweepThread::acknowledge_yield_request();
6841 _collector->stopTimer();
6842 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
6843 if (PrintCMSStatistics != 0) {
6844 _collector->incrementYields();
6845 }
6846 _collector->icms_wait();
6848 // See the comment in coordinator_yield()
6849 for (unsigned i = 0; i < CMSYieldSleepCount &&
6850 ConcurrentMarkSweepThread::should_yield() &&
6851 !CMSCollector::foregroundGCIsActive(); ++i) {
6852 os::sleep(Thread::current(), 1, false);
6853 ConcurrentMarkSweepThread::acknowledge_yield_request();
6854 }
6856 ConcurrentMarkSweepThread::synchronize(true);
6857 _bitMap->lock()->lock_without_safepoint_check();
6858 _collector->startTimer();
6859 }
6861 void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
6862 assert(_bitMap->isMarked(ptr), "expected bit to be set");
6863 assert(_markStack->isEmpty(),
6864 "should drain stack to limit stack usage");
6865 // convert ptr to an oop preparatory to scanning
6866 oop this_oop = oop(ptr);
6867 // Ignore mark word in verification below, since we
6868 // may be running concurrent with mutators.
6869 assert(this_oop->is_oop(true), "should be an oop");
6870 assert(_finger <= ptr, "_finger runneth ahead");
6871 // advance the finger to right end of this object
6872 _finger = ptr + this_oop->size();
6873 assert(_finger > ptr, "we just incremented it above");
6874 // On large heaps, it may take us some time to get through
6875 // the marking phase (especially if running iCMS). During
6876 // this time it's possible that a lot of mutations have
6877 // accumulated in the card table and the mod union table --
6878 // these mutation records are redundant until we have
6879 // actually traced into the corresponding card.
6880 // Here, we check whether advancing the finger would make
6881 // us cross into a new card, and if so clear corresponding
6882 // cards in the MUT (preclean them in the card-table in the
6883 // future).
6885 DEBUG_ONLY(if (!_verifying) {)
6886 // The clean-on-enter optimization is disabled by default,
6887 // until we fix 6178663.
6888 if (CMSCleanOnEnter && (_finger > _threshold)) {
6889 // [_threshold, _finger) represents the interval
6890 // of cards to be cleared in MUT (or precleaned in card table).
6891 // The set of cards to be cleared is all those that overlap
6892 // with the interval [_threshold, _finger); note that
6893 // _threshold is always kept card-aligned but _finger isn't
6894 // always card-aligned.
6895 HeapWord* old_threshold = _threshold;
6896 assert(old_threshold == (HeapWord*)round_to(
6897 (intptr_t)old_threshold, CardTableModRefBS::card_size),
6898 "_threshold should always be card-aligned");
6899 _threshold = (HeapWord*)round_to(
6900 (intptr_t)_finger, CardTableModRefBS::card_size);
6901 MemRegion mr(old_threshold, _threshold);
6902 assert(!mr.is_empty(), "Control point invariant");
6903 assert(_span.contains(mr), "Should clear within span");
6904 // XXX When _finger crosses from old gen into perm gen
6905 // we may be doing unnecessary cleaning; do better in the
6906 // future by detecting that condition and clearing fewer
6907 // MUT/CT entries.
6908 _mut->clear_range(mr);
6909 }
6910 DEBUG_ONLY(})
6912 // Note: the finger doesn't advance while we drain
6913 // the stack below.
6914 PushOrMarkClosure pushOrMarkClosure(_collector,
6915 _span, _bitMap, _markStack,
6916 _revisitStack,
6917 _finger, this);
6918 bool res = _markStack->push(this_oop);
6919 assert(res, "Empty non-zero size stack should have space for single push");
6920 while (!_markStack->isEmpty()) {
6921 oop new_oop = _markStack->pop();
6922 // Skip verifying header mark word below because we are
6923 // running concurrent with mutators.
6924 assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
6925 // now scan this oop's oops
6926 new_oop->oop_iterate(&pushOrMarkClosure);
6927 do_yield_check();
6928 }
6929 assert(_markStack->isEmpty(), "tautology, emphasizing post-condition");
6930 }
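// Worked example of the clean-on-enter bookkeeping above (illustrative;
// assumes a 512-byte card and CMSCleanOnEnter enabled): if _threshold is
// card-aligned at byte address 0x2000 and scanning an object advances
// _finger to 0x2350, the new _threshold becomes round_to(0x2350, 512) ==
// 0x2400 and the MUT range cleared is [0x2000, 0x2400) -- exactly the cards
// overlapped by [old _threshold, _finger).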
6932 Par_MarkFromRootsClosure::Par_MarkFromRootsClosure(CMSConcMarkingTask* task,
6933 CMSCollector* collector, MemRegion span,
6934 CMSBitMap* bit_map,
6935 OopTaskQueue* work_queue,
6936 CMSMarkStack* overflow_stack,
6937 CMSMarkStack* revisit_stack,
6938 bool should_yield):
6939 _collector(collector),
6940 _whole_span(collector->_span),
6941 _span(span),
6942 _bit_map(bit_map),
6943 _mut(&collector->_modUnionTable),
6944 _work_queue(work_queue),
6945 _overflow_stack(overflow_stack),
6946 _revisit_stack(revisit_stack),
6947 _yield(should_yield),
6948 _skip_bits(0),
6949 _task(task)
6950 {
6951 assert(_work_queue->size() == 0, "work_queue should be empty");
6952 _finger = span.start();
6953 _threshold = _finger; // XXX Defer clear-on-enter optimization for now
6954 assert(_span.contains(_finger), "Out of bounds _finger?");
6955 }
6957 // Should revisit to see if this should be restructured for
6958 // greater efficiency.
6959 void Par_MarkFromRootsClosure::do_bit(size_t offset) {
6960 if (_skip_bits > 0) {
6961 _skip_bits--;
6962 return;
6963 }
6964 // convert offset into a HeapWord*
6965 HeapWord* addr = _bit_map->startWord() + offset;
6966 assert(_bit_map->endWord() && addr < _bit_map->endWord(),
6967 "address out of range");
6968 assert(_bit_map->isMarked(addr), "tautology");
6969 if (_bit_map->isMarked(addr+1)) {
6970 // this is an allocated object that might not yet be initialized
6971 assert(_skip_bits == 0, "tautology");
6972 _skip_bits = 2; // skip next two marked bits ("Printezis-marks")
6973 oop p = oop(addr);
6974 if (p->klass() == NULL || !p->is_parsable()) {
6975 // in the case of Clean-on-Enter optimization, redirty card
6976 // and avoid clearing card by increasing the threshold.
6977 return;
6978 }
6979 }
6980 scan_oops_in_oop(addr);
6981 }
6983 void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
6984 assert(_bit_map->isMarked(ptr), "expected bit to be set");
6985 // Should we assert that our work queue is empty or
6986 // below some drain limit?
6987 assert(_work_queue->size() == 0,
6988 "should drain stack to limit stack usage");
6989 // convert ptr to an oop preparatory to scanning
6990 oop this_oop = oop(ptr);
6991 // Ignore mark word in verification below, since we
6992 // may be running concurrent with mutators.
6993 assert(this_oop->is_oop(true), "should be an oop");
6994 assert(_finger <= ptr, "_finger runneth ahead");
6995 // advance the finger to right end of this object
6996 _finger = ptr + this_oop->size();
6997 assert(_finger > ptr, "we just incremented it above");
6998 // On large heaps, it may take us some time to get through
6999 // the marking phase (especially if running iCMS). During
7000 // this time it's possible that a lot of mutations have
7001 // accumulated in the card table and the mod union table --
7002 // these mutation records are redundant until we have
7003 // actually traced into the corresponding card.
7004 // Here, we check whether advancing the finger would make
7005 // us cross into a new card, and if so clear corresponding
7006 // cards in the MUT (preclean them in the card-table in the
7007 // future).
7009 // The clean-on-enter optimization is disabled by default,
7010 // until we fix 6178663.
7011 if (CMSCleanOnEnter && (_finger > _threshold)) {
7012 // [_threshold, _finger) represents the interval
7013 // of cards to be cleared in MUT (or precleaned in card table).
7014 // The set of cards to be cleared is all those that overlap
7015 // with the interval [_threshold, _finger); note that
7016 // _threshold is always kept card-aligned but _finger isn't
7017 // always card-aligned.
7018 HeapWord* old_threshold = _threshold;
7019 assert(old_threshold == (HeapWord*)round_to(
7020 (intptr_t)old_threshold, CardTableModRefBS::card_size),
7021 "_threshold should always be card-aligned");
7022 _threshold = (HeapWord*)round_to(
7023 (intptr_t)_finger, CardTableModRefBS::card_size);
7024 MemRegion mr(old_threshold, _threshold);
7025 assert(!mr.is_empty(), "Control point invariant");
7026 assert(_span.contains(mr), "Should clear within span"); // _whole_span ??
7027 // XXX When _finger crosses from old gen into perm gen
7028 // we may be doing unnecessary cleaning; do better in the
7029 // future by detecting that condition and clearing fewer
7030 // MUT/CT entries.
7031 _mut->clear_range(mr);
7032 }
7034 // Note: the local finger doesn't advance while we drain
7035 // the stack below, but the global finger sure can and will.
7036 HeapWord** gfa = _task->global_finger_addr();
7037 Par_PushOrMarkClosure pushOrMarkClosure(_collector,
7038 _span, _bit_map,
7039 _work_queue,
7040 _overflow_stack,
7041 _revisit_stack,
7042 _finger,
7043 gfa, this);
7044 bool res = _work_queue->push(this_oop); // overflow could occur here
7045 assert(res, "Will hold once we use workqueues");
7046 while (true) {
7047 oop new_oop;
7048 if (!_work_queue->pop_local(new_oop)) {
7049 // We emptied our work_queue; check if there's stuff that can
7050 // be gotten from the overflow stack.
7051 if (CMSConcMarkingTask::get_work_from_overflow_stack(
7052 _overflow_stack, _work_queue)) {
7053 do_yield_check();
7054 continue;
7055 } else { // done
7056 break;
7057 }
7058 }
7059 // Skip verifying header mark word below because we are
7060 // running concurrent with mutators.
7061 assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
7062 // now scan this oop's oops
7063 new_oop->oop_iterate(&pushOrMarkClosure);
7064 do_yield_check();
7065 }
7066 assert(_work_queue->size() == 0, "tautology, emphasizing post-condition");
7067 }
7069 // Yield in response to a request from VM Thread or
7070 // from mutators.
7071 void Par_MarkFromRootsClosure::do_yield_work() {
7072 assert(_task != NULL, "sanity");
7073 _task->yield();
7074 }
7076 // A variant of the above used for verifying CMS marking work.
7077 MarkFromRootsVerifyClosure::MarkFromRootsVerifyClosure(CMSCollector* collector,
7078 MemRegion span,
7079 CMSBitMap* verification_bm, CMSBitMap* cms_bm,
7080 CMSMarkStack* mark_stack):
7081 _collector(collector),
7082 _span(span),
7083 _verification_bm(verification_bm),
7084 _cms_bm(cms_bm),
7085 _mark_stack(mark_stack),
7086 _pam_verify_closure(collector, span, verification_bm, cms_bm,
7087 mark_stack)
7088 {
7089 assert(_mark_stack->isEmpty(), "stack should be empty");
7090 _finger = _verification_bm->startWord();
7091 assert(_collector->_restart_addr == NULL, "Sanity check");
7092 assert(_span.contains(_finger), "Out of bounds _finger?");
7093 }
7095 void MarkFromRootsVerifyClosure::reset(HeapWord* addr) {
7096 assert(_mark_stack->isEmpty(), "would cause duplicates on stack");
7097 assert(_span.contains(addr), "Out of bounds _finger?");
7098 _finger = addr;
7099 }
7101 // Should revisit to see if this should be restructured for
7102 // greater efficiency.
7103 void MarkFromRootsVerifyClosure::do_bit(size_t offset) {
7104 // convert offset into a HeapWord*
7105 HeapWord* addr = _verification_bm->startWord() + offset;
7106 assert(_verification_bm->endWord() && addr < _verification_bm->endWord(),
7107 "address out of range");
7108 assert(_verification_bm->isMarked(addr), "tautology");
7109 assert(_cms_bm->isMarked(addr), "tautology");
7111 assert(_mark_stack->isEmpty(),
7112 "should drain stack to limit stack usage");
7113 // convert addr to an oop preparatory to scanning
7114 oop this_oop = oop(addr);
7115 assert(this_oop->is_oop(), "should be an oop");
7116 assert(_finger <= addr, "_finger runneth ahead");
7117 // advance the finger to right end of this object
7118 _finger = addr + this_oop->size();
7119 assert(_finger > addr, "we just incremented it above");
7120 // Note: the finger doesn't advance while we drain
7121 // the stack below.
7122 bool res = _mark_stack->push(this_oop);
7123 assert(res, "Empty non-zero size stack should have space for single push");
7124 while (!_mark_stack->isEmpty()) {
7125 oop new_oop = _mark_stack->pop();
7126 assert(new_oop->is_oop(), "Oops! expected to pop an oop");
7127 // now scan this oop's oops
7128 new_oop->oop_iterate(&_pam_verify_closure);
7129 }
7130 assert(_mark_stack->isEmpty(), "tautology, emphasizing post-condition");
7131 }
7133 PushAndMarkVerifyClosure::PushAndMarkVerifyClosure(
7134 CMSCollector* collector, MemRegion span,
7135 CMSBitMap* verification_bm, CMSBitMap* cms_bm,
7136 CMSMarkStack* mark_stack):
7137 OopClosure(collector->ref_processor()),
7138 _collector(collector),
7139 _span(span),
7140 _verification_bm(verification_bm),
7141 _cms_bm(cms_bm),
7142 _mark_stack(mark_stack)
7143 { }
7146 // Upon stack overflow, we discard (part of) the stack,
7147 // remembering the least address amongst those discarded
7148 // in CMSCollector's _restart_address.
7149 void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
7150 // Remember the least grey address discarded
7151 HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost);
7152 _collector->lower_restart_addr(ra);
7153 _mark_stack->reset(); // discard stack contents
7154 _mark_stack->expand(); // expand the stack if possible
7155 }
7157 void PushAndMarkVerifyClosure::do_oop(oop* p) {
7158 oop this_oop = *p;
7159 assert(this_oop->is_oop_or_null(), "expected an oop or NULL");
7160 HeapWord* addr = (HeapWord*)this_oop;
7161 if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
7162 // Oop lies in _span and isn't yet grey or black
7163 _verification_bm->mark(addr); // now grey
7164 if (!_cms_bm->isMarked(addr)) {
7165 oop(addr)->print();
7166 gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr);
7167 fatal("... aborting");
7168 }
7170 if (!_mark_stack->push(this_oop)) { // stack overflow
7171 if (PrintCMSStatistics != 0) {
7172 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7173 SIZE_FORMAT, _mark_stack->capacity());
7174 }
7175 assert(_mark_stack->isFull(), "Else push should have succeeded");
7176 handle_stack_overflow(addr);
7177 }
7178 // anything including and to the right of _finger
7179 // will be scanned as we iterate over the remainder of the
7180 // bit map
7181 }
7182 }
7184 PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
7185 MemRegion span,
7186 CMSBitMap* bitMap, CMSMarkStack* markStack,
7187 CMSMarkStack* revisitStack,
7188 HeapWord* finger, MarkFromRootsClosure* parent) :
7189 OopClosure(collector->ref_processor()),
7190 _collector(collector),
7191 _span(span),
7192 _bitMap(bitMap),
7193 _markStack(markStack),
7194 _revisitStack(revisitStack),
7195 _finger(finger),
7196 _parent(parent),
7197 _should_remember_klasses(collector->cms_should_unload_classes())
7198 { }
7200 Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
7201 MemRegion span,
7202 CMSBitMap* bit_map,
7203 OopTaskQueue* work_queue,
7204 CMSMarkStack* overflow_stack,
7205 CMSMarkStack* revisit_stack,
7206 HeapWord* finger,
7207 HeapWord** global_finger_addr,
7208 Par_MarkFromRootsClosure* parent) :
7209 OopClosure(collector->ref_processor()),
7210 _collector(collector),
7211 _whole_span(collector->_span),
7212 _span(span),
7213 _bit_map(bit_map),
7214 _work_queue(work_queue),
7215 _overflow_stack(overflow_stack),
7216 _revisit_stack(revisit_stack),
7217 _finger(finger),
7218 _global_finger_addr(global_finger_addr),
7219 _parent(parent),
7220 _should_remember_klasses(collector->cms_should_unload_classes())
7221 { }
7224 void CMSCollector::lower_restart_addr(HeapWord* low) {
7225 assert(_span.contains(low), "Out of bounds addr");
7226 if (_restart_addr == NULL) {
7227 _restart_addr = low;
7228 } else {
7229 _restart_addr = MIN2(_restart_addr, low);
7230 }
7231 }
7233 // Upon stack overflow, we discard (part of) the stack,
7234 // remembering the least address amongst those discarded
7235 // in CMSCollector's _restart_address.
7236 void PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
7237 // Remember the least grey address discarded
7238 HeapWord* ra = (HeapWord*)_markStack->least_value(lost);
7239 _collector->lower_restart_addr(ra);
7240 _markStack->reset(); // discard stack contents
7241 _markStack->expand(); // expand the stack if possible
7242 }
7244 // Upon stack overflow, we discard (part of) the stack,
7245 // remembering the least address amongst those discarded
7246 // in CMSCollector's _restart_addr.
7247 void Par_PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
7248 // We need to do this under a mutex to prevent other
7249 // workers from interfering with the expansion below.
7250 MutexLockerEx ml(_overflow_stack->par_lock(),
7251 Mutex::_no_safepoint_check_flag);
7252 // Remember the least grey address discarded
7253 HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
7254 _collector->lower_restart_addr(ra);
7255 _overflow_stack->reset(); // discard stack contents
7256 _overflow_stack->expand(); // expand the stack if possible
7257 }
7260 void PushOrMarkClosure::do_oop(oop* p) {
7261 oop thisOop = *p;
7262 // Ignore mark word because we are running concurrent with mutators.
7263 assert(thisOop->is_oop_or_null(true), "expected an oop or NULL");
7264 HeapWord* addr = (HeapWord*)thisOop;
7265 if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
7266 // Oop lies in _span and isn't yet grey or black
7267 _bitMap->mark(addr); // now grey
7268 if (addr < _finger) {
7269 // the bit map iteration has already either passed, or
7270 // sampled, this bit in the bit map; we'll need to
7271 // use the marking stack to scan this oop's oops.
7272 bool simulate_overflow = false;
7273 NOT_PRODUCT(
7274 if (CMSMarkStackOverflowALot &&
7275 _collector->simulate_overflow()) {
7276 // simulate a stack overflow
7277 simulate_overflow = true;
7278 }
7279 )
7280 if (simulate_overflow || !_markStack->push(thisOop)) { // stack overflow
7281 if (PrintCMSStatistics != 0) {
7282 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7283 SIZE_FORMAT, _markStack->capacity());
7284 }
7285 assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded");
7286 handle_stack_overflow(addr);
7287 }
7288 }
7289 // anything including and to the right of _finger
7290 // will be scanned as we iterate over the remainder of the
7291 // bit map
7292 do_yield_check();
7293 }
7294 }
7296 void Par_PushOrMarkClosure::do_oop(oop* p) {
7297 oop this_oop = *p;
7298 // Ignore mark word because we are running concurrent with mutators.
7299 assert(this_oop->is_oop_or_null(true), "expected an oop or NULL");
7300 HeapWord* addr = (HeapWord*)this_oop;
7301 if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
7302 // Oop lies in _span and isn't yet grey or black
7303 // We read the global_finger (volatile read) strictly after marking oop
7304 bool res = _bit_map->par_mark(addr); // now grey
7305 volatile HeapWord** gfa = (volatile HeapWord**)_global_finger_addr;
7306 // Should we push this marked oop on our stack?
7307 // -- if someone else marked it, nothing to do
7308 // -- if target oop is above global finger nothing to do
7309 // -- if target oop is in chunk and above local finger
7310 // then nothing to do
7311 // -- else push on work queue
7312 if ( !res // someone else marked it, they will deal with it
7313 || (addr >= *gfa) // will be scanned in a later task
7314 || (_span.contains(addr) && addr >= _finger)) { // later in this chunk
7315 return;
7316 }
7317 // the bit map iteration has already either passed, or
7318 // sampled, this bit in the bit map; we'll need to
7319 // use the marking stack to scan this oop's oops.
7320 bool simulate_overflow = false;
7321 NOT_PRODUCT(
7322 if (CMSMarkStackOverflowALot &&
7323 _collector->simulate_overflow()) {
7324 // simulate a stack overflow
7325 simulate_overflow = true;
7326 }
7327 )
7328 if (simulate_overflow ||
7329 !(_work_queue->push(this_oop) || _overflow_stack->par_push(this_oop))) {
7330 // stack overflow
7331 if (PrintCMSStatistics != 0) {
7332 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7333 SIZE_FORMAT, _overflow_stack->capacity());
7334 }
7335 // We cannot assert that the overflow stack is full because
7336 // it may have been emptied since.
7337 assert(simulate_overflow ||
7338 _work_queue->size() == _work_queue->max_elems(),
7339 "Else push should have succeeded");
7340 handle_stack_overflow(addr);
7341 }
7342 do_yield_check();
7343 }
7344 }
7347 PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
7348 MemRegion span,
7349 ReferenceProcessor* rp,
7350 CMSBitMap* bit_map,
7351 CMSBitMap* mod_union_table,
7352 CMSMarkStack* mark_stack,
7353 CMSMarkStack* revisit_stack,
7354 bool concurrent_precleaning):
7355 OopClosure(rp),
7356 _collector(collector),
7357 _span(span),
7358 _bit_map(bit_map),
7359 _mod_union_table(mod_union_table),
7360 _mark_stack(mark_stack),
7361 _revisit_stack(revisit_stack),
7362 _concurrent_precleaning(concurrent_precleaning),
7363 _should_remember_klasses(collector->cms_should_unload_classes())
7364 {
7365 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7366 }
7368 // Grey object rescan during pre-cleaning and second checkpoint phases --
7369 // the non-parallel version (the parallel version appears further below.)
7370 void PushAndMarkClosure::do_oop(oop* p) {
7371 oop this_oop = *p;
7372 // Ignore mark word verification. During concurrent precleaning,
7373 // the object monitor may be locked. During the checkpoint
7374 // phases, the object may already have been reached by a different
7375 // path and may be at the end of the global overflow list (so
7376 // the mark word may be NULL).
7377 assert(this_oop->is_oop_or_null(true/* ignore mark word */),
7378 "expected an oop or NULL");
7379 HeapWord* addr = (HeapWord*)this_oop;
7380 // Check if oop points into the CMS generation
7381 // and is not marked
7382 if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
7383 // a white object ...
7384 _bit_map->mark(addr); // ... now grey
7385 // push on the marking stack (grey set)
7386 bool simulate_overflow = false;
7387 NOT_PRODUCT(
7388 if (CMSMarkStackOverflowALot &&
7389 _collector->simulate_overflow()) {
7390 // simulate a stack overflow
7391 simulate_overflow = true;
7392 }
7393 )
7394 if (simulate_overflow || !_mark_stack->push(this_oop)) {
7395 if (_concurrent_precleaning) {
7396 // During precleaning we can just dirty the appropriate card
7397 // in the mod union table, thus ensuring that the object remains
7398 // in the grey set and continue. Note that no one can be interfering
7399 // with us in this action of dirtying the mod union table, so
7400 // no locking is required.
7401 _mod_union_table->mark(addr);
7402 _collector->_ser_pmc_preclean_ovflw++;
7403 } else {
7404 // During the remark phase, we need to remember this oop
7405 // in the overflow list.
7406 _collector->push_on_overflow_list(this_oop);
7407 _collector->_ser_pmc_remark_ovflw++;
7408 }
7409 }
7410 }
7411 }
7413 Par_PushAndMarkClosure::Par_PushAndMarkClosure(CMSCollector* collector,
7414 MemRegion span,
7415 ReferenceProcessor* rp,
7416 CMSBitMap* bit_map,
7417 OopTaskQueue* work_queue,
7418 CMSMarkStack* revisit_stack):
7419 OopClosure(rp),
7420 _collector(collector),
7421 _span(span),
7422 _bit_map(bit_map),
7423 _work_queue(work_queue),
7424 _revisit_stack(revisit_stack),
7425 _should_remember_klasses(collector->cms_should_unload_classes())
7426 {
7427 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7428 }
7430 // Grey object rescan during second checkpoint phase --
7431 // the parallel version.
7432 void Par_PushAndMarkClosure::do_oop(oop* p) {
7433 oop this_oop = *p;
7434 // In the assert below, we ignore the mark word because
7435 // this oop may point to an already visited object that is
7436 // on the overflow stack (in which case the mark word has
7437 // been hijacked for chaining into the overflow stack --
7438 // if this is the last object in the overflow stack then
7439 // its mark word will be NULL). Because this object may
7440 // have been subsequently popped off the global overflow
7441 // stack, and the mark word possibly restored to the prototypical
7442 // value, by the time we get to examine this failing assert in
7443 // the debugger, is_oop_or_null(false) may subsequently start
7444 // to hold.
7445 assert(this_oop->is_oop_or_null(true),
7446 "expected an oop or NULL");
7447 HeapWord* addr = (HeapWord*)this_oop;
7448 // Check if oop points into the CMS generation
7449 // and is not marked
7450 if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
7451 // a white object ...
7452 // If we manage to "claim" the object, by being the
7453 // first thread to mark it, then we push it on our
7454 // marking stack
7455 if (_bit_map->par_mark(addr)) { // ... now grey
7456 // push on work queue (grey set)
7457 bool simulate_overflow = false;
7458 NOT_PRODUCT(
7459 if (CMSMarkStackOverflowALot &&
7460 _collector->par_simulate_overflow()) {
7461 // simulate a stack overflow
7462 simulate_overflow = true;
7463 }
7464 )
7465 if (simulate_overflow || !_work_queue->push(this_oop)) {
7466 _collector->par_push_on_overflow_list(this_oop);
7467 _collector->_par_pmc_remark_ovflw++; // imprecise OK: no need to CAS
7468 }
7469 } // Else, some other thread got there first
7470 }
7471 }
7473 void PushAndMarkClosure::remember_klass(Klass* k) {
7474 if (!_revisit_stack->push(oop(k))) {
7475 fatal("Revisit stack overflowed in PushAndMarkClosure");
7476 }
7477 }
7479 void Par_PushAndMarkClosure::remember_klass(Klass* k) {
7480 if (!_revisit_stack->par_push(oop(k))) {
7481 fatal("Revist stack overflowed in Par_PushAndMarkClosure");
7482 }
7483 }
7485 void CMSPrecleanRefsYieldClosure::do_yield_work() {
7486 Mutex* bml = _collector->bitMapLock();
7487 assert_lock_strong(bml);
7488 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7489 "CMS thread should hold CMS token");
7491 bml->unlock();
7492 ConcurrentMarkSweepThread::desynchronize(true);
7494 ConcurrentMarkSweepThread::acknowledge_yield_request();
7496 _collector->stopTimer();
7497 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
7498 if (PrintCMSStatistics != 0) {
7499 _collector->incrementYields();
7500 }
7501 _collector->icms_wait();
7503 // See the comment in coordinator_yield()
7504 for (unsigned i = 0; i < CMSYieldSleepCount &&
7505 ConcurrentMarkSweepThread::should_yield() &&
7506 !CMSCollector::foregroundGCIsActive(); ++i) {
7507 os::sleep(Thread::current(), 1, false);
7508 ConcurrentMarkSweepThread::acknowledge_yield_request();
7509 }
7511 ConcurrentMarkSweepThread::synchronize(true);
7512 bml->lock();
7514 _collector->startTimer();
7515 }
7517 bool CMSPrecleanRefsYieldClosure::should_return() {
7518 if (ConcurrentMarkSweepThread::should_yield()) {
7519 do_yield_work();
7520 }
7521 return _collector->foregroundGCIsActive();
7522 }
7524 void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) {
7525 assert(((size_t)mr.start())%CardTableModRefBS::card_size_in_words == 0,
7526 "mr should be aligned to start at a card boundary");
7527 // We'd like to assert:
7528 // assert(mr.word_size()%CardTableModRefBS::card_size_in_words == 0,
7529 // "mr should be a range of cards");
7530 // However, that would be too strong in one case -- the last
7531 // partition ends at _unallocated_block which, in general, can be
7532 // an arbitrary boundary, not necessarily card aligned.
7533 if (PrintCMSStatistics != 0) {
7534 _num_dirty_cards +=
7535 mr.word_size()/CardTableModRefBS::card_size_in_words;
7536 }
7537 _space->object_iterate_mem(mr, &_scan_cl);
7538 }
7540 SweepClosure::SweepClosure(CMSCollector* collector,
7541 ConcurrentMarkSweepGeneration* g,
7542 CMSBitMap* bitMap, bool should_yield) :
7543 _collector(collector),
7544 _g(g),
7545 _sp(g->cmsSpace()),
7546 _limit(_sp->sweep_limit()),
7547 _freelistLock(_sp->freelistLock()),
7548 _bitMap(bitMap),
7549 _yield(should_yield),
7550 _inFreeRange(false), // No free range at beginning of sweep
7551 _freeRangeInFreeLists(false), // No free range at beginning of sweep
7552 _lastFreeRangeCoalesced(false),
7553 _freeFinger(g->used_region().start())
7554 {
7555 NOT_PRODUCT(
7556 _numObjectsFreed = 0;
7557 _numWordsFreed = 0;
7558 _numObjectsLive = 0;
7559 _numWordsLive = 0;
7560 _numObjectsAlreadyFree = 0;
7561 _numWordsAlreadyFree = 0;
7562 _last_fc = NULL;
7564 _sp->initializeIndexedFreeListArrayReturnedBytes();
7565 _sp->dictionary()->initializeDictReturnedBytes();
7566 )
7567 assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7568 "sweep _limit out of bounds");
7569 if (CMSTraceSweeper) {
7570 gclog_or_tty->print("\n====================\nStarting new sweep\n");
7571 }
7572 }
7574 // We need this destructor to reclaim any space at the end
7575 // of the space, which do_blk below may not have added back to
7576 // the free lists. [basically dealing with the "fringe effect"]
7577 SweepClosure::~SweepClosure() {
7578 assert_lock_strong(_freelistLock);
7579 // this should be treated as the end of a free run if any
7580 // The current free range should be returned to the free lists
7581 // as one coalesced chunk.
7582 if (inFreeRange()) {
7583 flushCurFreeChunk(freeFinger(),
7584 pointer_delta(_limit, freeFinger()));
7585 assert(freeFinger() < _limit, "the finger pointeth off base");
7586 if (CMSTraceSweeper) {
7587 gclog_or_tty->print("destructor:");
7588 gclog_or_tty->print("Sweep:put_free_blk 0x%x ("SIZE_FORMAT") "
7589 "[coalesced:"SIZE_FORMAT"]\n",
7590 freeFinger(), pointer_delta(_limit, freeFinger()),
7591 lastFreeRangeCoalesced());
7592 }
7593 }
7594 NOT_PRODUCT(
7595 if (Verbose && PrintGC) {
7596 gclog_or_tty->print("Collected "SIZE_FORMAT" objects, "
7597 SIZE_FORMAT " bytes",
7598 _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
7599 gclog_or_tty->print_cr("\nLive "SIZE_FORMAT" objects, "
7600 SIZE_FORMAT" bytes "
7601 "Already free "SIZE_FORMAT" objects, "SIZE_FORMAT" bytes",
7602 _numObjectsLive, _numWordsLive*sizeof(HeapWord),
7603 _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
7604 size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree) *
7605 sizeof(HeapWord);
7606 gclog_or_tty->print_cr("Total sweep: "SIZE_FORMAT" bytes", totalBytes);
7608 if (PrintCMSStatistics && CMSVerifyReturnedBytes) {
7609 size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
7610 size_t dictReturnedBytes = _sp->dictionary()->sumDictReturnedBytes();
7611 size_t returnedBytes = indexListReturnedBytes + dictReturnedBytes;
7612 gclog_or_tty->print("Returned "SIZE_FORMAT" bytes", returnedBytes);
7613 gclog_or_tty->print(" Indexed List Returned "SIZE_FORMAT" bytes",
7614 indexListReturnedBytes);
7615 gclog_or_tty->print_cr(" Dictionary Returned "SIZE_FORMAT" bytes",
7616 dictReturnedBytes);
7617 }
7618 }
7619 )
7620 // Now, in debug mode, just null out the sweep_limit
7621 NOT_PRODUCT(_sp->clear_sweep_limit();)
7622 if (CMSTraceSweeper) {
7623 gclog_or_tty->print("end of sweep\n================\n");
7624 }
7625 }
7627 void SweepClosure::initialize_free_range(HeapWord* freeFinger,
7628 bool freeRangeInFreeLists) {
7629 if (CMSTraceSweeper) {
7630 gclog_or_tty->print("---- Start free range 0x%x with free block [%d] (%d)\n",
7631 freeFinger, _sp->block_size(freeFinger),
7632 freeRangeInFreeLists);
7633 }
7634 assert(!inFreeRange(), "Trampling existing free range");
7635 set_inFreeRange(true);
7636 set_lastFreeRangeCoalesced(false);
7638 set_freeFinger(freeFinger);
7639 set_freeRangeInFreeLists(freeRangeInFreeLists);
7640 if (CMSTestInFreeList) {
7641 if (freeRangeInFreeLists) {
7642 FreeChunk* fc = (FreeChunk*) freeFinger;
7643 assert(fc->isFree(), "A chunk on the free list should be free.");
7644 assert(fc->size() > 0, "Free range should have a size");
7645 assert(_sp->verifyChunkInFreeLists(fc), "Chunk is not in free lists");
7646 }
7647 }
7648 }
7650 // Note that the sweeper runs concurrently with mutators. Thus,
7651 // it is possible for direct allocation in this generation to happen
7652 // in the middle of the sweep. Note that the sweeper also coalesces
7653 // contiguous free blocks. Thus, unless the sweeper and the allocator
7654 // synchronize appropriately, freshly allocated blocks may get swept up.
7655 // This is accomplished by the sweeper locking the free lists while
7656 // it is sweeping. Thus blocks that are determined to be free are
7657 // indeed free. There is however one additional complication:
7658 // blocks that have been allocated since the final checkpoint and
7659 // mark, will not have been marked and so would be treated as
7660 // unreachable and swept up. To prevent this, the allocator marks
7661 // the bit map when allocating during the sweep phase. This leads,
7662 // however, to a further complication -- objects may have been allocated
7663 // but not yet initialized -- in the sense that the header isn't yet
7664 // installed. The sweeper cannot then determine the size of the block
7665 // in order to skip over it. To deal with this case, we use a technique
7666 // (due to Printezis) to encode such uninitialized block sizes in the
7667 // bit map. Since the bit map uses a bit per every HeapWord, but the
7668 // CMS generation has a minimum object size of 3 HeapWords, it follows
7669 // that "normal marks" won't be adjacent in the bit map (there will
7670 // always be at least two 0 bits between successive 1 bits). We make use
7671 // of these "unused" bits to represent uninitialized blocks -- the bit
7672 // corresponding to the start of the uninitialized object and the next
7673 // bit are both set. Finally, a 1 bit marks the end of the object that
7674 // started with the two consecutive 1 bits to indicate its potentially
7675 // uninitialized state.
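// As a small illustration of the encoding just described (illustrative
// offsets only, not from the original comment): an uninitialized block of
// 5 HeapWords starting at bit i would appear in the bit map as
//
//     bit:    i   i+1  i+2  i+3  i+4
//     value:  1    1    0    0    1
//
// whereas an ordinary live object of the same size carries only the single
// mark at bit i. doLiveChunk() below recovers the size of such a block
// directly from the bit map, roughly as:
//
//     HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
//     size_t    size        = pointer_delta(nextOneAddr + 1, addr);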
7677 size_t SweepClosure::do_blk_careful(HeapWord* addr) {
7678 FreeChunk* fc = (FreeChunk*)addr;
7679 size_t res;
7681 // check if we are done sweeping
7682 if (addr == _limit) { // we have swept up to the limit, do nothing more
7683 assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7684 "sweep _limit out of bounds");
7685 // help the closure application finish
7686 return pointer_delta(_sp->end(), _limit);
7687 }
7688 assert(addr <= _limit, "sweep invariant");
7690 // check if we should yield
7691 do_yield_check(addr);
7692 if (fc->isFree()) {
7693 // Chunk that is already free
7694 res = fc->size();
7695 doAlreadyFreeChunk(fc);
7696 debug_only(_sp->verifyFreeLists());
7697 assert(res == fc->size(), "Don't expect the size to change");
7698 NOT_PRODUCT(
7699 _numObjectsAlreadyFree++;
7700 _numWordsAlreadyFree += res;
7701 )
7702 NOT_PRODUCT(_last_fc = fc;)
7703 } else if (!_bitMap->isMarked(addr)) {
7704 // Chunk is fresh garbage
7705 res = doGarbageChunk(fc);
7706 debug_only(_sp->verifyFreeLists());
7707 NOT_PRODUCT(
7708 _numObjectsFreed++;
7709 _numWordsFreed += res;
7710 )
7711 } else {
7712 // Chunk that is alive.
7713 res = doLiveChunk(fc);
7714 debug_only(_sp->verifyFreeLists());
7715 NOT_PRODUCT(
7716 _numObjectsLive++;
7717 _numWordsLive += res;
7718 )
7719 }
7720 return res;
7721 }
7723 // For the smart allocation, record the following:
7724 // split deaths - a free chunk is removed from its free list because
7725 // it is being split into two or more chunks.
7726 // split birth - a free chunk is being added to its free list because
7727 // a larger free chunk has been split and resulted in this free chunk.
7728 // coal death - a free chunk is being removed from its free list because
7729 // it is being coalesced into a large free chunk.
7730 // coal birth - a free chunk is being added to its free list because
7731 // it was created when two or more free chunks were coalesced into
7732 // this free chunk.
7733 //
7734 // These statistics are used to determine the desired number of free
7735 // chunks of a given size. The desired number is chosen to be relative
7736 // to the end of a CMS sweep. The desired number at the end of a sweep
7737 // is the
7738 // count-at-end-of-previous-sweep (an amount that was enough)
7739 // - count-at-beginning-of-current-sweep (the excess)
7740 // + split-births (gains in this size during interval)
7741 // - split-deaths (demands on this size during interval)
7742 // where the interval is from the end of one sweep to the end of the
7743 // next.
7744 //
7745 // When sweeping, the sweeper maintains an accumulated chunk which is
7746 // the chunk that is made up of chunks that have been coalesced. That
7747 // will be termed the left-hand chunk. A new chunk of garbage that
7748 // is being considered for coalescing will be referred to as the
7749 // right-hand chunk.
7750 //
7751 // When making a decision on whether to coalesce a right-hand chunk with
7752 // the current left-hand chunk, the current count vs. the desired count
7753 // of the left-hand chunk is considered. Also if the right-hand chunk
7754 // is near the large chunk at the end of the heap (see
7755 // ConcurrentMarkSweepGeneration::isNearLargestChunk()), then the
7756 // left-hand chunk is coalesced.
7757 //
7758 // When making a decision about whether to split a chunk, the desired count
7759 // vs. the current count of the candidate to be split is also considered.
7760 // If the candidate is underpopulated (currently fewer chunks than desired)
7761 // a chunk of an overpopulated (currently more chunks than desired) size may
7762 // be chosen. The "hint" associated with a free list, if non-null, points
7763 // to a free list which may be overpopulated.
7764 //
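// A worked example with illustrative numbers only: if a given chunk size
// had 100 free chunks at the end of the previous sweep, 40 remain at the
// beginning of the current sweep, and the interval saw 25 split births and
// 10 split deaths, then the desired count at the end of this sweep is
//
//     100 - 40 + 25 - 10 = 75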
7766 void SweepClosure::doAlreadyFreeChunk(FreeChunk* fc) {
7767 size_t size = fc->size();
7768 // Chunks that cannot be coalesced are not in the
7769 // free lists.
7770 if (CMSTestInFreeList && !fc->cantCoalesce()) {
7771 assert(_sp->verifyChunkInFreeLists(fc),
7772 "free chunk should be in free lists");
7773 }
7774 // a chunk that is already free should not have been
7775 // marked in the bit map
7776 HeapWord* addr = (HeapWord*) fc;
7777 assert(!_bitMap->isMarked(addr), "free chunk should be unmarked");
7778 // Verify that the bit map has no bits marked between
7779 // addr and purported end of this block.
7780 _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
7782 // Some chunks cannot be coalesced under any circumstances.
7783 // See the definition of cantCoalesce().
7784 if (!fc->cantCoalesce()) {
7785 // This chunk can potentially be coalesced.
7786 if (_sp->adaptive_freelists()) {
7787 // All the work is done in
7788 doPostIsFreeOrGarbageChunk(fc, size);
7789 } else { // Not adaptive free lists
7790 // this is a free chunk that can potentially be coalesced by the sweeper;
7791 if (!inFreeRange()) {
7792 // if the next chunk is a free block that can't be coalesced
7793 // it doesn't make sense to remove this chunk from the free lists
7794 FreeChunk* nextChunk = (FreeChunk*)(addr + size);
7795 assert((HeapWord*)nextChunk <= _limit, "sweep invariant");
7796 if ((HeapWord*)nextChunk < _limit && // there's a next chunk...
7797 nextChunk->isFree() && // which is free...
7798 nextChunk->cantCoalesce()) { // ... but cant be coalesced
7799 // nothing to do
7800 } else {
7801 // Potentially the start of a new free range:
7802 // Don't eagerly remove it from the free lists.
7803 // No need to remove it if it will just be put
7804 // back again. (Also from a pragmatic point of view
7805 // if it is a free block in a region that is beyond
7806 // any allocated blocks, an assertion will fail)
7807 // Remember the start of a free run.
7808 initialize_free_range(addr, true);
7809 // end - can coalesce with next chunk
7810 }
7811 } else {
7812 // the midst of a free range, we are coalescing
7813 debug_only(record_free_block_coalesced(fc);)
7814 if (CMSTraceSweeper) {
7815 gclog_or_tty->print(" -- pick up free block 0x%x (%d)\n", fc, size);
7816 }
7817 // remove it from the free lists
7818 _sp->removeFreeChunkFromFreeLists(fc);
7819 set_lastFreeRangeCoalesced(true);
7820 // If the chunk is being coalesced and the current free range is
7821 // in the free lists, remove the current free range so that it
7822 // will be returned to the free lists in its entirety - all
7823 // the coalesced pieces included.
7824 if (freeRangeInFreeLists()) {
7825 FreeChunk* ffc = (FreeChunk*) freeFinger();
7826 assert(ffc->size() == pointer_delta(addr, freeFinger()),
7827 "Size of free range is inconsistent with chunk size.");
7828 if (CMSTestInFreeList) {
7829 assert(_sp->verifyChunkInFreeLists(ffc),
7830 "free range is not in free lists");
7831 }
7832 _sp->removeFreeChunkFromFreeLists(ffc);
7833 set_freeRangeInFreeLists(false);
7834 }
7835 }
7836 }
7837 } else {
7838 // Code path common to both original and adaptive free lists.
7840 // can't coalesce with the previous block; this should be treated
7841 // as the end of a free run if any
7842 if (inFreeRange()) {
7843 // we kicked some butt; time to pick up the garbage
7844 assert(freeFinger() < addr, "the finger pointeth off base");
7845 flushCurFreeChunk(freeFinger(), pointer_delta(addr, freeFinger()));
7846 }
7847 // else, nothing to do, just continue
7848 }
7849 }
7851 size_t SweepClosure::doGarbageChunk(FreeChunk* fc) {
7852 // This is a chunk of garbage. It is not in any free list.
7853 // Add it to a free list or let it possibly be coalesced into
7854 // a larger chunk.
7855 HeapWord* addr = (HeapWord*) fc;
7856 size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
7858 if (_sp->adaptive_freelists()) {
7859 // Verify that the bit map has no bits marked between
7860 // addr and purported end of just dead object.
7861 _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
7863 doPostIsFreeOrGarbageChunk(fc, size);
7864 } else {
7865 if (!inFreeRange()) {
7866 // start of a new free range
7867 assert(size > 0, "A free range should have a size");
7868 initialize_free_range(addr, false);
7870 } else {
7871 // this will be swept up when we hit the end of the
7872 // free range
7873 if (CMSTraceSweeper) {
7874 gclog_or_tty->print(" -- pick up garbage 0x%x (%d) \n", fc, size);
7875 }
7876 // If the chunk is being coalesced and the current free range is
7877 // in the free lists, remove the current free range so that it
7878 // will be returned to the free lists in its entirety - all
7879 // the coalesced pieces included.
7880 if (freeRangeInFreeLists()) {
7881 FreeChunk* ffc = (FreeChunk*)freeFinger();
7882 assert(ffc->size() == pointer_delta(addr, freeFinger()),
7883 "Size of free range is inconsistent with chunk size.");
7884 if (CMSTestInFreeList) {
7885 assert(_sp->verifyChunkInFreeLists(ffc),
7886 "free range is not in free lists");
7887 }
7888 _sp->removeFreeChunkFromFreeLists(ffc);
7889 set_freeRangeInFreeLists(false);
7890 }
7891 set_lastFreeRangeCoalesced(true);
7892 }
7893 // this will be swept up when we hit the end of the free range
7895 // Verify that the bit map has no bits marked between
7896 // addr and purported end of just dead object.
7897 _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
7898 }
7899 return size;
7900 }
7902 size_t SweepClosure::doLiveChunk(FreeChunk* fc) {
7903 HeapWord* addr = (HeapWord*) fc;
7904 // The sweeper has just found a live object. Return any accumulated
7905 // left hand chunk to the free lists.
7906 if (inFreeRange()) {
7907 if (_sp->adaptive_freelists()) {
7908 flushCurFreeChunk(freeFinger(),
7909 pointer_delta(addr, freeFinger()));
7910 } else { // not adaptive freelists
7911 set_inFreeRange(false);
7912 // Add the free range back to the free list if it is not already
7913 // there.
7914 if (!freeRangeInFreeLists()) {
7915 assert(freeFinger() < addr, "the finger pointeth off base");
7916 if (CMSTraceSweeper) {
7917 gclog_or_tty->print("Sweep:put_free_blk 0x%x (%d) "
7918 "[coalesced:%d]\n",
7919 freeFinger(), pointer_delta(addr, freeFinger()),
7920 lastFreeRangeCoalesced());
7921 }
7922 _sp->addChunkAndRepairOffsetTable(freeFinger(),
7923 pointer_delta(addr, freeFinger()), lastFreeRangeCoalesced());
7924 }
7925 }
7926 }
7928 // Common code path for original and adaptive free lists.
7930 // this object is live: we'd normally expect this to be
7931 // an oop, and like to assert the following:
7932 // assert(oop(addr)->is_oop(), "live block should be an oop");
7933 // However, as we commented above, this may be an object whose
7934 // header hasn't yet been initialized.
7935 size_t size;
7936 assert(_bitMap->isMarked(addr), "Tautology for this control point");
7937 if (_bitMap->isMarked(addr + 1)) {
7938 // Determine the size from the bit map, rather than trying to
7939 // compute it from the object header.
7940 HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
7941 size = pointer_delta(nextOneAddr + 1, addr);
7942 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
7943 "alignment problem");
7945 #ifdef DEBUG
7946 if (oop(addr)->klass() != NULL &&
7947 ( !_collector->cms_should_unload_classes()
7948 || oop(addr)->is_parsable())) {
7949 // Ignore mark word because we are running concurrent with mutators
7950 assert(oop(addr)->is_oop(true), "live block should be an oop");
7951 assert(size ==
7952 CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
7953 "P-mark and computed size do not agree");
7954 }
7955 #endif
7957 } else {
7958 // This should be an initialized object that's alive.
7959 assert(oop(addr)->klass() != NULL &&
7960 (!_collector->cms_should_unload_classes()
7961 || oop(addr)->is_parsable()),
7962 "Should be an initialized object");
7963 // Ignore mark word because we are running concurrent with mutators
7964 assert(oop(addr)->is_oop(true), "live block should be an oop");
7965 // Verify that the bit map has no bits marked between
7966 // addr and purported end of this block.
7967 size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
7968 assert(size >= 3, "Necessary for Printezis marks to work");
7969 assert(!_bitMap->isMarked(addr+1), "Tautology for this control point");
7970 DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);)
7971 }
7972 return size;
7973 }
7975 void SweepClosure::doPostIsFreeOrGarbageChunk(FreeChunk* fc,
7976 size_t chunkSize) {
7977 // doPostIsFreeOrGarbageChunk() should only be called in the smart allocation
7978 // scheme.
7979 bool fcInFreeLists = fc->isFree();
7980 assert(_sp->adaptive_freelists(), "Should only be used in this case.");
7981 assert((HeapWord*)fc <= _limit, "sweep invariant");
7982 if (CMSTestInFreeList && fcInFreeLists) {
7983 assert(_sp->verifyChunkInFreeLists(fc),
7984 "free chunk is not in free lists");
7985 }
7988 if (CMSTraceSweeper) {
7989 gclog_or_tty->print_cr(" -- pick up another chunk at 0x%x (%d)", fc, chunkSize);
7990 }
7992 HeapWord* addr = (HeapWord*) fc;
7994 bool coalesce;
7995 size_t left = pointer_delta(addr, freeFinger());
7996 size_t right = chunkSize;
7997 switch (FLSCoalescePolicy) {
7998 // numeric value forms a coalescing aggressiveness metric
7999 case 0: { // never coalesce
8000 coalesce = false;
8001 break;
8002 }
8003 case 1: { // coalesce if left & right chunks on overpopulated lists
8004 coalesce = _sp->coalOverPopulated(left) &&
8005 _sp->coalOverPopulated(right);
8006 break;
8007 }
8008 case 2: { // coalesce if left chunk on overpopulated list (default)
8009 coalesce = _sp->coalOverPopulated(left);
8010 break;
8011 }
8012 case 3: { // coalesce if left OR right chunk on overpopulated list
8013 coalesce = _sp->coalOverPopulated(left) ||
8014 _sp->coalOverPopulated(right);
8015 break;
8016 }
8017 case 4: { // always coalesce
8018 coalesce = true;
8019 break;
8020 }
8021 default:
8022 ShouldNotReachHere();
8023 }
8025 // Should the current free range be coalesced?
8026 // If the chunk is in a free range and either we decided to coalesce above
8027 // or the chunk is near the large block at the end of the heap
8028 // (isNearLargestChunk() returns true), then coalesce this chunk.
8029 bool doCoalesce = inFreeRange() &&
8030 (coalesce || _g->isNearLargestChunk((HeapWord*)fc));
8031 if (doCoalesce) {
8032 // Coalesce the current free range on the left with the new
8033 // chunk on the right. If either is on a free list,
8034 // it must be removed from the list and stashed in the closure.
8035 if (freeRangeInFreeLists()) {
8036 FreeChunk* ffc = (FreeChunk*)freeFinger();
8037 assert(ffc->size() == pointer_delta(addr, freeFinger()),
8038 "Size of free range is inconsistent with chunk size.");
8039 if (CMSTestInFreeList) {
8040 assert(_sp->verifyChunkInFreeLists(ffc),
8041 "Chunk is not in free lists");
8042 }
8043 _sp->coalDeath(ffc->size());
8044 _sp->removeFreeChunkFromFreeLists(ffc);
8045 set_freeRangeInFreeLists(false);
8046 }
8047 if (fcInFreeLists) {
8048 _sp->coalDeath(chunkSize);
8049 assert(fc->size() == chunkSize,
8050 "The chunk has the wrong size or is not in the free lists");
8051 _sp->removeFreeChunkFromFreeLists(fc);
8052 }
8053 set_lastFreeRangeCoalesced(true);
8054 } else { // not in a free range and/or should not coalesce
8055 // Return the current free range and start a new one.
8056 if (inFreeRange()) {
8057 // In a free range but cannot coalesce with the right hand chunk.
8058 // Put the current free range into the free lists.
8059 flushCurFreeChunk(freeFinger(),
8060 pointer_delta(addr, freeFinger()));
8061 }
8062 // Set up for new free range. Pass along whether the right hand
8063 // chunk is in the free lists.
8064 initialize_free_range((HeapWord*)fc, fcInFreeLists);
8065 }
8066 }
8067 void SweepClosure::flushCurFreeChunk(HeapWord* chunk, size_t size) {
8068 assert(inFreeRange(), "Should only be called if currently in a free range.");
8069 assert(size > 0,
8070 "A zero sized chunk cannot be added to the free lists.");
8071 if (!freeRangeInFreeLists()) {
8072 if (CMSTestInFreeList) {
8073 FreeChunk* fc = (FreeChunk*) chunk;
8074 fc->setSize(size);
8075 assert(!_sp->verifyChunkInFreeLists(fc),
8076 "chunk should not be in free lists yet");
8077 }
8078 if (CMSTraceSweeper) {
8079 gclog_or_tty->print_cr(" -- add free block 0x%x (%d) to free lists",
8080 chunk, size);
8081 }
8082 // A new free range is going to be starting. The current
8083 // free range has not been added to the free lists yet or
8084 // was removed so add it back.
8085 // If the current free range was coalesced, then the death
8086 // of the free range was recorded. Record a birth now.
8087 if (lastFreeRangeCoalesced()) {
8088 _sp->coalBirth(size);
8089 }
8090 _sp->addChunkAndRepairOffsetTable(chunk, size,
8091 lastFreeRangeCoalesced());
8092 }
8093 set_inFreeRange(false);
8094 set_freeRangeInFreeLists(false);
8095 }
8097 // We take a break if we've been at this for a while,
8098 // so as to avoid monopolizing the locks involved.
8099 void SweepClosure::do_yield_work(HeapWord* addr) {
8100 // Return current free chunk being used for coalescing (if any)
8101 // to the appropriate freelist. After yielding, the next
8102 // free block encountered will start a coalescing range of
8103 // free blocks. If the next free block is adjacent to the
8104 // chunk just flushed, they will need to wait for the next
8105 // sweep to be coalesced.
8106 if (inFreeRange()) {
8107 flushCurFreeChunk(freeFinger(), pointer_delta(addr, freeFinger()));
8108 }
8110 // First give up the locks, then yield, then re-lock.
8111 // We should probably use a constructor/destructor idiom to
8112 // do this unlock/lock or modify the MutexUnlocker class to
8113 // serve our purpose. XXX
8114 assert_lock_strong(_bitMap->lock());
8115 assert_lock_strong(_freelistLock);
8116 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
8117 "CMS thread should hold CMS token");
8118 _bitMap->lock()->unlock();
8119 _freelistLock->unlock();
8120 ConcurrentMarkSweepThread::desynchronize(true);
8121 ConcurrentMarkSweepThread::acknowledge_yield_request();
8122 _collector->stopTimer();
8123 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
8124 if (PrintCMSStatistics != 0) {
8125 _collector->incrementYields();
8126 }
8127 _collector->icms_wait();
8129 // See the comment in coordinator_yield()
8130 for (unsigned i = 0; i < CMSYieldSleepCount &&
8131 ConcurrentMarkSweepThread::should_yield() &&
8132 !CMSCollector::foregroundGCIsActive(); ++i) {
8133 os::sleep(Thread::current(), 1, false);
8134 ConcurrentMarkSweepThread::acknowledge_yield_request();
8135 }
8137 ConcurrentMarkSweepThread::synchronize(true);
8138 _freelistLock->lock();
8139 _bitMap->lock()->lock_without_safepoint_check();
8140 _collector->startTimer();
8141 }
8143 #ifndef PRODUCT
8144 // This is actually very useful in a product build if it can
8145 // be called from the debugger. Compile it into the product
8146 // as needed.
8147 bool debug_verifyChunkInFreeLists(FreeChunk* fc) {
8148 return debug_cms_space->verifyChunkInFreeLists(fc);
8149 }
8151 void SweepClosure::record_free_block_coalesced(FreeChunk* fc) const {
8152 if (CMSTraceSweeper) {
8153 gclog_or_tty->print("Sweep:coal_free_blk 0x%x (%d)\n", fc, fc->size());
8154 }
8155 }
8156 #endif
8158 // CMSIsAliveClosure
8159 bool CMSIsAliveClosure::do_object_b(oop obj) {
8160 HeapWord* addr = (HeapWord*)obj;
8161 return addr != NULL &&
8162 (!_span.contains(addr) || _bit_map->isMarked(addr));
8163 }
8165 // CMSKeepAliveClosure: the serial version
8166 void CMSKeepAliveClosure::do_oop(oop* p) {
8167 oop this_oop = *p;
8168 HeapWord* addr = (HeapWord*)this_oop;
8169 if (_span.contains(addr) &&
8170 !_bit_map->isMarked(addr)) {
8171 _bit_map->mark(addr);
8172 bool simulate_overflow = false;
8173 NOT_PRODUCT(
8174 if (CMSMarkStackOverflowALot &&
8175 _collector->simulate_overflow()) {
8176 // simulate a stack overflow
8177 simulate_overflow = true;
8178 }
8179 )
8180 if (simulate_overflow || !_mark_stack->push(this_oop)) {
8181 _collector->push_on_overflow_list(this_oop);
8182 _collector->_ser_kac_ovflw++;
8183 }
8184 }
8185 }
8187 // CMSParKeepAliveClosure: a parallel version of the above.
8188 // The work queues are private to each closure (thread),
8189 // but (may be) available for stealing by other threads.
8190 void CMSParKeepAliveClosure::do_oop(oop* p) {
8191 oop this_oop = *p;
8192 HeapWord* addr = (HeapWord*)this_oop;
8193 if (_span.contains(addr) &&
8194 !_bit_map->isMarked(addr)) {
8195 // In general, during recursive tracing, several threads
8196 // may be concurrently getting here; the first one to
8197 // "tag" it, claims it.
8198 if (_bit_map->par_mark(addr)) {
8199 bool res = _work_queue->push(this_oop);
8200 assert(res, "Low water mark should be much less than capacity");
8201 // Do a recursive trim in the hope that this will keep
8202 // stack usage lower, but leave some oops for potential stealers
8203 trim_queue(_low_water_mark);
8204 } // Else, another thread got there first
8205 }
8206 }
8208 void CMSParKeepAliveClosure::trim_queue(uint max) {
8209 while (_work_queue->size() > max) {
8210 oop new_oop;
8211 if (_work_queue->pop_local(new_oop)) {
8212 assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
8213 assert(_bit_map->isMarked((HeapWord*)new_oop),
8214 "no white objects on this stack!");
8215 assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
8216 // iterate over the oops in this oop, marking and pushing
8217 // the ones in CMS heap (i.e. in _span).
8218 new_oop->oop_iterate(&_mark_and_push);
8219 }
8220 }
8221 }
8223 void CMSInnerParMarkAndPushClosure::do_oop(oop* p) {
8224 oop this_oop = *p;
8225 HeapWord* addr = (HeapWord*)this_oop;
8226 if (_span.contains(addr) &&
8227 !_bit_map->isMarked(addr)) {
8228 if (_bit_map->par_mark(addr)) {
8229 bool simulate_overflow = false;
8230 NOT_PRODUCT(
8231 if (CMSMarkStackOverflowALot &&
8232 _collector->par_simulate_overflow()) {
8233 // simulate a stack overflow
8234 simulate_overflow = true;
8235 }
8236 )
8237 if (simulate_overflow || !_work_queue->push(this_oop)) {
8238 _collector->par_push_on_overflow_list(this_oop);
8239 _collector->_par_kac_ovflw++;
8240 }
8241 } // Else another thread got there already
8242 }
8243 }
8245 //////////////////////////////////////////////////////////////////
8246 // CMSExpansionCause /////////////////////////////
8247 //////////////////////////////////////////////////////////////////
8248 const char* CMSExpansionCause::to_string(CMSExpansionCause::Cause cause) {
8249 switch (cause) {
8250 case _no_expansion:
8251 return "No expansion";
8252 case _satisfy_free_ratio:
8253 return "Free ratio";
8254 case _satisfy_promotion:
8255 return "Satisfy promotion";
8256 case _satisfy_allocation:
8257 return "allocation";
8258 case _allocate_par_lab:
8259 return "Par LAB";
8260 case _allocate_par_spooling_space:
8261 return "Par Spooling Space";
8262 case _adaptive_size_policy:
8263 return "Ergonomics";
8264 default:
8265 return "unknown";
8266 }
8267 }
8269 void CMSDrainMarkingStackClosure::do_void() {
8270 // the max number to take from overflow list at a time
8271 const size_t num = _mark_stack->capacity()/4;
8272 while (!_mark_stack->isEmpty() ||
8273 // if stack is empty, check the overflow list
8274 _collector->take_from_overflow_list(num, _mark_stack)) {
8275 oop this_oop = _mark_stack->pop();
8276 HeapWord* addr = (HeapWord*)this_oop;
8277 assert(_span.contains(addr), "Should be within span");
8278 assert(_bit_map->isMarked(addr), "Should be marked");
8279 assert(this_oop->is_oop(), "Should be an oop");
8280 this_oop->oop_iterate(_keep_alive);
8281 }
8282 }
8284 void CMSParDrainMarkingStackClosure::do_void() {
8285 // drain queue
8286 trim_queue(0);
8287 }
8289 // Trim our work_queue so its length is below max at return
8290 void CMSParDrainMarkingStackClosure::trim_queue(uint max) {
8291 while (_work_queue->size() > max) {
8292 oop new_oop;
8293 if (_work_queue->pop_local(new_oop)) {
8294 assert(new_oop->is_oop(), "Expected an oop");
8295 assert(_bit_map->isMarked((HeapWord*)new_oop),
8296 "no white objects on this stack!");
8297 assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
8298 // iterate over the oops in this oop, marking and pushing
8299 // the ones in CMS heap (i.e. in _span).
8300 new_oop->oop_iterate(&_mark_and_push);
8301 }
8302 }
8303 }
8305 ////////////////////////////////////////////////////////////////////
8306 // Support for Marking Stack Overflow list handling and related code
8307 ////////////////////////////////////////////////////////////////////
8308 // Much of the following code is similar in shape and spirit to the
8309 // code used in ParNewGC. We should try and share that code
8310 // as much as possible in the future.
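// For orientation, a brief sketch of the representation used below: the
// overflow list is an intrusive, singly-linked list threaded through the
// objects' mark words, so pushing an object p (single-threaded case)
// prepends it by stashing the old head in p's mark word:
//
//     p->set_mark((markOop)_overflow_list);   // p now links to the old head
//     _overflow_list = p;                     // p becomes the new head
//
// Mark words that must survive this hijacking are saved beforehand via
// preserve_mark_if_necessary() and restored at a safepoint by
// restore_preserved_marks_if_any().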
8312 #ifndef PRODUCT
8313 // Debugging support for CMSStackOverflowALot
8315 // It's OK to call this multi-threaded; the worst thing
8316 // that can happen is that we'll get a bunch of closely
8317 // spaced simulated overflows, but that's OK, in fact
8318 // probably good as it would exercise the overflow code
8319 // under contention.
8320 bool CMSCollector::simulate_overflow() {
8321 if (_overflow_counter-- <= 0) { // just being defensive
8322 _overflow_counter = CMSMarkStackOverflowInterval;
8323 return true;
8324 } else {
8325 return false;
8326 }
8327 }
8329 bool CMSCollector::par_simulate_overflow() {
8330 return simulate_overflow();
8331 }
8332 #endif
8334 // Single-threaded
8335 bool CMSCollector::take_from_overflow_list(size_t num, CMSMarkStack* stack) {
8336 assert(stack->isEmpty(), "Expected precondition");
8337 assert(stack->capacity() > num, "Shouldn't bite more than can chew");
8338 size_t i = num;
8339 oop cur = _overflow_list;
8340 const markOop proto = markOopDesc::prototype();
8341 NOT_PRODUCT(size_t n = 0;)
8342 for (oop next; i > 0 && cur != NULL; cur = next, i--) {
8343 next = oop(cur->mark());
8344 cur->set_mark(proto); // until proven otherwise
8345 assert(cur->is_oop(), "Should be an oop");
8346 bool res = stack->push(cur);
8347 assert(res, "Bit off more than can chew?");
8348 NOT_PRODUCT(n++;)
8349 }
8350 _overflow_list = cur;
8351 #ifndef PRODUCT
8352 assert(_num_par_pushes >= n, "Too many pops?");
8353 _num_par_pushes -= n;
8354 #endif
8355 return !stack->isEmpty();
8356 }
8358 // Multi-threaded; use CAS to break off a prefix
8359 bool CMSCollector::par_take_from_overflow_list(size_t num,
8360 OopTaskQueue* work_q) {
8361 assert(work_q->size() == 0, "That's the current policy");
8362 assert(num < work_q->max_elems(), "Can't bite more than we can chew");
8363 if (_overflow_list == NULL) {
8364 return false;
8365 }
8366 // Grab the entire list; we'll put back a suffix
8367 oop prefix = (oop)Atomic::xchg_ptr(NULL, &_overflow_list);
8368 if (prefix == NULL) { // someone grabbed it before we did ...
8369 // ... we could spin for a short while, but for now we don't
8370 return false;
8371 }
8372 size_t i = num;
8373 oop cur = prefix;
8374 for (; i > 1 && cur->mark() != NULL; cur = oop(cur->mark()), i--);
8375 if (cur->mark() != NULL) {
8376 oop suffix_head = cur->mark(); // suffix will be put back on global list
8377 cur->set_mark(NULL); // break off suffix
8378 // Find tail of suffix so we can prepend suffix to global list
8379 for (cur = suffix_head; cur->mark() != NULL; cur = (oop)(cur->mark()));
8380 oop suffix_tail = cur;
8381 assert(suffix_tail != NULL && suffix_tail->mark() == NULL,
8382 "Tautology");
8383 oop observed_overflow_list = _overflow_list;
8384 do {
8385 cur = observed_overflow_list;
8386 suffix_tail->set_mark(markOop(cur));
8387 observed_overflow_list =
8388 (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur);
8389 } while (cur != observed_overflow_list);
8390 }
8392 // Push the prefix elements on work_q
8393 assert(prefix != NULL, "control point invariant");
8394 const markOop proto = markOopDesc::prototype();
8395 oop next;
8396 NOT_PRODUCT(size_t n = 0;)
8397 for (cur = prefix; cur != NULL; cur = next) {
8398 next = oop(cur->mark());
8399 cur->set_mark(proto); // until proven otherwise
8400 assert(cur->is_oop(), "Should be an oop");
8401 bool res = work_q->push(cur);
8402 assert(res, "Bit off more than we can chew?");
8403 NOT_PRODUCT(n++;)
8404 }
8405 #ifndef PRODUCT
8406 assert(_num_par_pushes >= n, "Too many pops?");
8407 Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
8408 #endif
8409 return true;
8410 }
8412 // Single-threaded
8413 void CMSCollector::push_on_overflow_list(oop p) {
8414 NOT_PRODUCT(_num_par_pushes++;)
8415 assert(p->is_oop(), "Not an oop");
8416 preserve_mark_if_necessary(p);
8417 p->set_mark((markOop)_overflow_list);
8418 _overflow_list = p;
8419 }
8421 // Multi-threaded; use CAS to prepend to overflow list
8422 void CMSCollector::par_push_on_overflow_list(oop p) {
8423 NOT_PRODUCT(Atomic::inc_ptr(&_num_par_pushes);)
8424 assert(p->is_oop(), "Not an oop");
8425 par_preserve_mark_if_necessary(p);
8426 oop observed_overflow_list = _overflow_list;
8427 oop cur_overflow_list;
8428 do {
8429 cur_overflow_list = observed_overflow_list;
8430 p->set_mark(markOop(cur_overflow_list));
8431 observed_overflow_list =
8432 (oop) Atomic::cmpxchg_ptr(p, &_overflow_list, cur_overflow_list);
8433 } while (cur_overflow_list != observed_overflow_list);
8434 }
8436 // Single threaded
8437 // General Note on GrowableArray: pushes may silently fail
8438 // because we are (temporarily) out of C-heap for expanding
8439 // the stack. The problem is quite ubiquitous and affects
8440 // a lot of code in the JVM. The prudent thing for GrowableArray
8441 // to do (for now) is to exit with an error. However, that may
8442 // be too draconian in some cases because the caller may be
8443 // able to recover without much harm. For such cases, we
8444 // should probably introduce a "soft_push" method which returns
8445 // an indication of success or failure with the assumption that
8446 // the caller may be able to recover from a failure; code in
8447 // the VM can then be changed, incrementally, to deal with such
8448 // failures where possible, thus, incrementally hardening the VM
8449 // in such low resource situations.
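// A hypothetical soft_push along those lines might look roughly like the
// sketch below (illustrative only; the helper names are placeholders, not
// existing GrowableArray members):
//
//     bool soft_push(const E& elem) {
//       if (is_full() && !try_expand()) {   // try_expand() may fail softly
//         return false;                     // let the caller recover
//       }
//       append(elem);
//       return true;
//     }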
8450 void CMSCollector::preserve_mark_work(oop p, markOop m) {
8451 int PreserveMarkStackSize = 128;
8453 if (_preserved_oop_stack == NULL) {
8454 assert(_preserved_mark_stack == NULL,
8455 "bijection with preserved_oop_stack");
8456 // Allocate the stacks
8457 _preserved_oop_stack = new (ResourceObj::C_HEAP)
8458 GrowableArray<oop>(PreserveMarkStackSize, true);
8459 _preserved_mark_stack = new (ResourceObj::C_HEAP)
8460 GrowableArray<markOop>(PreserveMarkStackSize, true);
8461 if (_preserved_oop_stack == NULL || _preserved_mark_stack == NULL) {
8462 vm_exit_out_of_memory(2 * PreserveMarkStackSize * sizeof(oop) /* punt */,
8463 "Preserved Mark/Oop Stack for CMS (C-heap)");
8464 }
8465 }
8466 _preserved_oop_stack->push(p);
8467 _preserved_mark_stack->push(m);
8468 assert(m == p->mark(), "Mark word changed");
8469 assert(_preserved_oop_stack->length() == _preserved_mark_stack->length(),
8470 "bijection");
8471 }
8473 // Single threaded
8474 void CMSCollector::preserve_mark_if_necessary(oop p) {
8475 markOop m = p->mark();
8476 if (m->must_be_preserved(p)) {
8477 preserve_mark_work(p, m);
8478 }
8479 }
8481 void CMSCollector::par_preserve_mark_if_necessary(oop p) {
8482 markOop m = p->mark();
8483 if (m->must_be_preserved(p)) {
8484 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
8485 // Even though we read the mark word without holding
8486 // the lock, we are assured that it will not change
8487 // because we "own" this oop, so no other thread can
8488 // be trying to push it on the overflow list; see
8489 // the assertion in preserve_mark_work() that checks
8490 // that m == p->mark().
8491 preserve_mark_work(p, m);
8492 }
8493 }
8495 // We should be able to do this multi-threaded,
8496 // a chunk of stack being a task (this is
8497 // correct because each oop only ever appears
8498 // once in the overflow list). However, it's
8499 // not very easy to completely overlap this with
8500 // other operations, so will generally not be done
8501 // until all work's been completed. Because we
8502 // expect the preserved oop stack (set) to be small,
8503 // it's probably fine to do this single-threaded.
8504 // We can explore cleverer concurrent/overlapped/parallel
8505 // processing of preserved marks if we feel the
8506 // need for this in the future. Stack overflow should
8507 // be so rare in practice and, when it happens, its
8508 // effect on performance so great that this will
8509 // likely just be in the noise anyway.
8510 void CMSCollector::restore_preserved_marks_if_any() {
8511 if (_preserved_oop_stack == NULL) {
8512 assert(_preserved_mark_stack == NULL,
8513 "bijection with preserved_oop_stack");
8514 return;
8515 }
8517 assert(SafepointSynchronize::is_at_safepoint(),
8518 "world should be stopped");
8519 assert(Thread::current()->is_ConcurrentGC_thread() ||
8520 Thread::current()->is_VM_thread(),
8521 "should be single-threaded");
8523 int length = _preserved_oop_stack->length();
8524 assert(_preserved_mark_stack->length() == length, "bijection");
8525 for (int i = 0; i < length; i++) {
8526 oop p = _preserved_oop_stack->at(i);
8527 assert(p->is_oop(), "Should be an oop");
8528 assert(_span.contains(p), "oop should be in _span");
8529 assert(p->mark() == markOopDesc::prototype(),
8530 "Set when taken from overflow list");
8531 markOop m = _preserved_mark_stack->at(i);
8532 p->set_mark(m);
8533 }
8534 _preserved_mark_stack->clear();
8535 _preserved_oop_stack->clear();
8536 assert(_preserved_mark_stack->is_empty() &&
8537 _preserved_oop_stack->is_empty(),
8538 "stacks were cleared above");
8539 }
8541 #ifndef PRODUCT
8542 bool CMSCollector::no_preserved_marks() const {
8543 return ( ( _preserved_mark_stack == NULL
8544 && _preserved_oop_stack == NULL)
8545 || ( _preserved_mark_stack->is_empty()
8546 && _preserved_oop_stack->is_empty()));
8547 }
8548 #endif
8550 CMSAdaptiveSizePolicy* ASConcurrentMarkSweepGeneration::cms_size_policy() const
8551 {
8552 GenCollectedHeap* gch = (GenCollectedHeap*) GenCollectedHeap::heap();
8553 CMSAdaptiveSizePolicy* size_policy =
8554 (CMSAdaptiveSizePolicy*) gch->gen_policy()->size_policy();
8555 assert(size_policy->is_gc_cms_adaptive_size_policy(),
8556 "Wrong type for size policy");
8557 return size_policy;
8558 }
8560 void ASConcurrentMarkSweepGeneration::resize(size_t cur_promo_size,
8561 size_t desired_promo_size) {
8562 if (cur_promo_size < desired_promo_size) {
8563 size_t expand_bytes = desired_promo_size - cur_promo_size;
8564 if (PrintAdaptiveSizePolicy && Verbose) {
8565 gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
8566 "Expanding tenured generation by " SIZE_FORMAT " (bytes)",
8567 expand_bytes);
8568 }
8569 expand(expand_bytes,
8570 MinHeapDeltaBytes,
8571 CMSExpansionCause::_adaptive_size_policy);
8572 } else if (desired_promo_size < cur_promo_size) {
8573 size_t shrink_bytes = cur_promo_size - desired_promo_size;
8574 if (PrintAdaptiveSizePolicy && Verbose) {
8575 gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
8576 "Shrinking tenured generation by " SIZE_FORMAT " (bytes)",
8577 shrink_bytes);
8578 }
8579 shrink(shrink_bytes);
8580 }
8581 }
8583 CMSGCAdaptivePolicyCounters* ASConcurrentMarkSweepGeneration::gc_adaptive_policy_counters() {
8584 GenCollectedHeap* gch = GenCollectedHeap::heap();
8585 CMSGCAdaptivePolicyCounters* counters =
8586 (CMSGCAdaptivePolicyCounters*) gch->collector_policy()->counters();
8587 assert(counters->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
8588 "Wrong kind of counters");
8589 return counters;
8590 }
8593 void ASConcurrentMarkSweepGeneration::update_counters() {
8594 if (UsePerfData) {
8595 _space_counters->update_all();
8596 _gen_counters->update_all();
8597 CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
8598 GenCollectedHeap* gch = GenCollectedHeap::heap();
8599 CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
8600 assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
8601 "Wrong gc statistics type");
8602 counters->update_counters(gc_stats_l);
8603 }
8604 }
8606 void ASConcurrentMarkSweepGeneration::update_counters(size_t used) {
8607 if (UsePerfData) {
8608 _space_counters->update_used(used);
8609 _space_counters->update_capacity();
8610 _gen_counters->update_all();
8612 CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
8613 GenCollectedHeap* gch = GenCollectedHeap::heap();
8614 CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
8615 assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
8616 "Wrong gc statistics type");
8617 counters->update_counters(gc_stats_l);
8618 }
8619 }
8621 // The desired expansion delta is computed so that:
8622 // . desired free percentage or greater is used
8623 void ASConcurrentMarkSweepGeneration::compute_new_size() {
8624 assert_locked_or_safepoint(Heap_lock);
8626 GenCollectedHeap* gch = (GenCollectedHeap*) GenCollectedHeap::heap();
8628 // If incremental collection failed, we just want to expand
8629 // to the limit.
8630 if (incremental_collection_failed()) {
8631 clear_incremental_collection_failed();
8632 grow_to_reserved();
8633 return;
8634 }
8636 assert(UseAdaptiveSizePolicy, "Should be using adaptive sizing");
8638 assert(gch->kind() == CollectedHeap::GenCollectedHeap,
8639 "Wrong type of heap");
8640 int prev_level = level() - 1;
8641 assert(prev_level >= 0, "The cms generation is the lowest generation");
8642 Generation* prev_gen = gch->get_gen(prev_level);
8643 assert(prev_gen->kind() == Generation::ASParNew,
8644 "Wrong type of young generation");
8645 ParNewGeneration* younger_gen = (ParNewGeneration*) prev_gen;
8646 size_t cur_eden = younger_gen->eden()->capacity();
8647 CMSAdaptiveSizePolicy* size_policy = cms_size_policy();
8648 size_t cur_promo = free();
8649 size_policy->compute_tenured_generation_free_space(cur_promo,
8650 max_available(),
8651 cur_eden);
8652 resize(cur_promo, size_policy->promo_size());
8654 // Record the new size of the space in the cms generation
8655 // that is available for promotions. This is temporary.
8656 // It should be the desired promo size.
8657 size_policy->avg_cms_promo()->sample(free());
8658 size_policy->avg_old_live()->sample(used());
8660 if (UsePerfData) {
8661 CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
8662 counters->update_cms_capacity_counter(capacity());
8663 }
8664 }
8666 void ASConcurrentMarkSweepGeneration::shrink_by(size_t desired_bytes) {
8667 assert_locked_or_safepoint(Heap_lock);
8668 assert_lock_strong(freelistLock());
8669 HeapWord* old_end = _cmsSpace->end();
8670 HeapWord* unallocated_start = _cmsSpace->unallocated_block();
8671 assert(old_end >= unallocated_start, "Miscalculation of unallocated_start");
8672 FreeChunk* chunk_at_end = find_chunk_at_end();
8673 if (chunk_at_end == NULL) {
8674 // No room to shrink
8675 if (PrintGCDetails && Verbose) {
8676 gclog_or_tty->print_cr("No room to shrink: old_end "
8677 PTR_FORMAT " unallocated_start " PTR_FORMAT
8678 " chunk_at_end " PTR_FORMAT,
8679 old_end, unallocated_start, chunk_at_end);
8680 }
8681 return;
8682 } else {
8684 // Find the chunk at the end of the space and determine
8685 // how much it can be shrunk.
8686 size_t shrinkable_size_in_bytes = chunk_at_end->size();
8687 size_t aligned_shrinkable_size_in_bytes =
8688 align_size_down(shrinkable_size_in_bytes, os::vm_page_size());
8689 assert(unallocated_start <= chunk_at_end->end(),
8690 "Inconsistent chunk at end of space");
8691 size_t bytes = MIN2(desired_bytes, aligned_shrinkable_size_in_bytes);
8692 size_t word_size_before = heap_word_size(_virtual_space.committed_size());
8694 // Shrink the underlying space
8695 _virtual_space.shrink_by(bytes);
8696 if (PrintGCDetails && Verbose) {
8697 gclog_or_tty->print_cr("ConcurrentMarkSweepGeneration::shrink_by:"
8698 " desired_bytes " SIZE_FORMAT
8699 " shrinkable_size_in_bytes " SIZE_FORMAT
8700 " aligned_shrinkable_size_in_bytes " SIZE_FORMAT
8701 " bytes " SIZE_FORMAT,
8702 desired_bytes, shrinkable_size_in_bytes,
8703 aligned_shrinkable_size_in_bytes, bytes);
8704 gclog_or_tty->print_cr(" old_end " SIZE_FORMAT
8705 " unallocated_start " SIZE_FORMAT,
8706 old_end, unallocated_start);
8707 }
8709 // If the space did shrink (shrinking is not guaranteed),
8710 // shrink the chunk at the end by the appropriate amount.
8711 if (((HeapWord*)_virtual_space.high()) < old_end) {
8712 size_t new_word_size =
8713 heap_word_size(_virtual_space.committed_size());
8715 // The chunk must be removed from the dictionary because its size is
8716 // changing and, once resized, it may belong in a different place in the dictionary.
8718 // Take the chunk at the end out, shrink it, and put it
8719 // back.
8720 _cmsSpace->removeChunkFromDictionary(chunk_at_end);
8721 size_t word_size_change = word_size_before - new_word_size;
8722 size_t chunk_at_end_old_size = chunk_at_end->size();
8723 assert(chunk_at_end_old_size >= word_size_change,
8724 "Shrink is too large");
8725 chunk_at_end->setSize(chunk_at_end_old_size -
8726 word_size_change);
8727 _cmsSpace->freed((HeapWord*) chunk_at_end->end(),
8728 word_size_change);
8730 _cmsSpace->returnChunkToDictionary(chunk_at_end);
8732 MemRegion mr(_cmsSpace->bottom(), new_word_size);
8733 _bts->resize(new_word_size); // resize the block offset shared array
8734 Universe::heap()->barrier_set()->resize_covered_region(mr);
8735 _cmsSpace->assert_locked();
8736 _cmsSpace->set_end((HeapWord*)_virtual_space.high());
8738 NOT_PRODUCT(_cmsSpace->dictionary()->verify());
8740 // update the space and generation capacity counters
8741 if (UsePerfData) {
8742 _space_counters->update_capacity();
8743 _gen_counters->update_all();
8744 }
8746 if (Verbose && PrintGCDetails) {
8747 size_t new_mem_size = _virtual_space.committed_size();
8748 size_t old_mem_size = new_mem_size + bytes;
8749 gclog_or_tty->print_cr("Shrinking %s from %ldK by %ldK to %ldK",
8750 name(), old_mem_size/K, bytes/K, new_mem_size/K);
8751 }
8752 }
8754 assert(_cmsSpace->unallocated_block() <= _cmsSpace->end(),
8755 "Inconsistency at end of space");
8756 assert(chunk_at_end->end() == _cmsSpace->end(),
8757 "Shrinking is inconsistent");
8758 return;
8759 }
8760 }
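// Worked example of the shrink arithmetic above (assumed values, 4K VM pages):
//   desired_bytes                     = 1M
//   chunk_at_end->size()              = 600K + 100      (tail chunk of the space)
//   aligned_shrinkable_size_in_bytes  = align_size_down(600K + 100, 4K) = 600K
//   bytes                             = MIN2(1M, 600K)  = 600K
// The virtual space is asked to give back 600K; only if it actually shrinks
// (its committed high() drops below the old end) is the tail chunk pulled from
// the dictionary, trimmed by the change in committed size, returned, and the
// space end, block offset table and barrier set's covered region resized to match.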
8762 // Transfer some number of overflown objects to usual marking
8763 // stack. Return true if some objects were transferred.
8764 bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
8765 size_t num = MIN2((size_t)_mark_stack->capacity()/4,
8766 (size_t)ParGCDesiredObjsFromOverflowList);
8768 bool res = _collector->take_from_overflow_list(num, _mark_stack);
8769 assert(_collector->overflow_list_is_empty() || res,
8770 "If list is not empty, we should have taken something");
8771 assert(!res || !_mark_stack->isEmpty(),
8772 "If we took something, it should now be on our stack");
8773 return res;
8774 }
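// Hedged sketch of the transfer size above: with a mark stack capacity of 4096
// entries and ParGCDesiredObjsFromOverflowList at an assumed value of 20,
//   num = MIN2(4096/4, 20) = 20
// objects are moved per call, so at most a quarter of the stack is ever refilled
// at once; callers are expected to invoke this repeatedly until
// overflow_list_is_empty() returns true or the stack fills up again.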
8776 size_t MarkDeadObjectsClosure::do_blk(HeapWord* addr) {
8777 size_t res = _sp->block_size_no_stall(addr, _collector);
8778 assert(res != 0, "Should always be able to compute a size");
8779 if (_sp->block_is_obj(addr)) {
8780 if (_live_bit_map->isMarked(addr)) {
8781 // It can't have been dead in a previous cycle
8782 guarantee(!_dead_bit_map->isMarked(addr), "No resurrection!");
8783 } else {
8784 _dead_bit_map->mark(addr); // mark the dead object
8785 }
8786 }
8787 return res;
8788 }
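// Hedged usage sketch for MarkDeadObjectsClosure (not a verbatim caller): a driver
// typically walks a MemRegion block by block, e.g.
//
//   HeapWord* cur = mr.start();
//   while (cur < mr.end()) {
//     cur += cl.do_blk(cur);   // do_blk() returns the block size in words, so
//   }                          // each block is visited exactly once
//
// Objects whose start address is not set in the live bit map end up marked in the
// dead bit map, which later passes can then consult.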