Mon, 03 May 2010 10:24:51 -0700
6948537: CMS: BOT walkers observe out-of-thin-air zeros on sun4v sparc/CMT
Summary: On sun4v/CMT avoid use of memset() in BOT updates so as to prevent concurrent BOT readers from seeing the phantom zeros arising from memset()'s use of BIS.
Reviewed-by: jmasa, johnc, minqi, poonam, tonyp

/*
 * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_concurrentMarkSweepGeneration.cpp.incl"

// statics
CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
bool CMSCollector::_full_gc_requested = false;

//////////////////////////////////////////////////////////////////
// In support of CMS/VM thread synchronization
//////////////////////////////////////////////////////////////////
// We split use of the CGC_lock into 2 "levels".
// The low-level locking is of the usual CGC_lock monitor. We introduce
// a higher level "token" (hereafter "CMS token") built on top of the
// low level monitor (hereafter "CGC lock").
// The token-passing protocol gives priority to the VM thread. The
// CMS-lock doesn't provide any fairness guarantees, but clients
// should ensure that it is only held for very short, bounded
// durations.
//
// When either of the CMS thread or the VM thread is involved in
// collection operations during which it does not want the other
// thread to interfere, it obtains the CMS token.
//
// If either thread tries to get the token while the other has
// it, that thread waits. However, if the VM thread and CMS thread
// both want the token, then the VM thread gets priority while the
// CMS thread waits. This ensures, for instance, that the "concurrent"
// phases of the CMS thread's work do not block out the VM thread
// for long periods of time as the CMS thread continues to hog
// the token. (See bug 4616232).
//
// The baton-passing functions are, however, controlled by the
// flags _foregroundGCShouldWait and _foregroundGCIsActive,
// and here the low-level CMS lock, not the high level token,
// ensures mutual exclusion.
//
// Two important conditions that we have to satisfy:
// 1. if a thread does a low-level wait on the CMS lock, then it
//    relinquishes the CMS token if it were holding that token
//    when it acquired the low-level CMS lock.
// 2. any low-level notifications on the low-level lock
//    should only be sent when a thread has relinquished the token.
//
// In the absence of either property, we'd have potential deadlock.
//
// We protect each of the CMS (concurrent and sequential) phases
// with the CMS _token_, not the CMS _lock_.
//
// The only code protected by CMS lock is the token acquisition code
// itself, see ConcurrentMarkSweepThread::[de]synchronize(), and the
// baton-passing code.
//
// Unfortunately, I couldn't come up with a good abstraction to factor and
// hide the naked CGC_lock manipulation in the baton-passing code
// further below. That's something we should try to do. Also, the proof
// of correctness of this 2-level locking scheme is far from obvious,
// and potentially quite slippery. We have an uneasy suspicion, for instance,
// that there may be a theoretical possibility of delay/starvation in the
// low-level lock/wait/notify scheme used for the baton-passing because of
// potential interference with the priority scheme embodied in the
// CMS-token-passing protocol. See related comments at a CGC_lock->wait()
// invocation further below and marked with "XXX 20011219YSR".
// Indeed, as we note elsewhere, this may become yet more slippery
// in the presence of multiple CMS and/or multiple VM threads. XXX

class CMSTokenSync: public StackObj {
 private:
  bool _is_cms_thread;
 public:
  CMSTokenSync(bool is_cms_thread):
    _is_cms_thread(is_cms_thread) {
    assert(is_cms_thread == Thread::current()->is_ConcurrentGC_thread(),
           "Incorrect argument to constructor");
    ConcurrentMarkSweepThread::synchronize(_is_cms_thread);
  }

  ~CMSTokenSync() {
    assert(_is_cms_thread ?
             ConcurrentMarkSweepThread::cms_thread_has_cms_token() :
             ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
           "Incorrect state");
    ConcurrentMarkSweepThread::desynchronize(_is_cms_thread);
  }
};
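
// A minimal standalone sketch (hypothetical; not HotSpot code) of the
// priority baton described above: the "VM thread" announces interest in the
// token via a flag that the "CMS thread" defers to, so the VM thread wins
// whenever both want the token. Built on std::mutex/std::condition_variable
// purely for illustration; all names below are invented.
//
//   #include <condition_variable>
//   #include <mutex>
//
//   class PriorityToken {
//     std::mutex _m;                 // plays the role of the low-level CGC lock
//     std::condition_variable _cv;
//     bool _held = false;            // is the token currently held?
//     bool _vm_wants = false;        // set while the VM thread is waiting
//   public:
//     void acquire_vm() {
//       std::unique_lock<std::mutex> l(_m);
//       _vm_wants = true;
//       _cv.wait(l, [this] { return !_held; });
//       _held = true;
//       _vm_wants = false;
//     }
//     void acquire_cms() {
//       std::unique_lock<std::mutex> l(_m);
//       // The CMS thread additionally yields to a waiting VM thread.
//       _cv.wait(l, [this] { return !_held && !_vm_wants; });
//       _held = true;
//     }
//     void release() {
//       { std::lock_guard<std::mutex> l(_m); _held = false; }
//       _cv.notify_all();
//     }
//   };
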
// Convenience class that does a CMSTokenSync, and then acquires
// up to three locks.
class CMSTokenSyncWithLocks: public CMSTokenSync {
 private:
  // Note: locks are acquired in textual declaration order
  // and released in the opposite order
  MutexLockerEx _locker1, _locker2, _locker3;
 public:
  CMSTokenSyncWithLocks(bool is_cms_thread, Mutex* mutex1,
                        Mutex* mutex2 = NULL, Mutex* mutex3 = NULL):
    CMSTokenSync(is_cms_thread),
    _locker1(mutex1, Mutex::_no_safepoint_check_flag),
    _locker2(mutex2, Mutex::_no_safepoint_check_flag),
    _locker3(mutex3, Mutex::_no_safepoint_check_flag)
  { }
};
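
// Typical (hypothetical) usage: a phase that must exclude the other thread
// and also needs the bitmap lock might be bracketed as
//
//   CMSTokenSyncWithLocks ts(true /* is_cms_thread */, bitMapLock());
//
// The token is acquired first, then the listed mutexes in declaration
// order; destruction releases the mutexes in the opposite order before the
// base-class destructor relinquishes the token.
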
// Wrapper class to temporarily disable icms during a foreground cms collection.
class ICMSDisabler: public StackObj {
 public:
  // The ctor disables icms and wakes up the thread so it notices the change;
  // the dtor re-enables icms. Note that the CMSCollector methods will check
  // CMSIncrementalMode.
  ICMSDisabler()  { CMSCollector::disable_icms(); CMSCollector::start_icms(); }
  ~ICMSDisabler() { CMSCollector::enable_icms(); }
};

//////////////////////////////////////////////////////////////////
// Concurrent Mark-Sweep Generation /////////////////////////////
//////////////////////////////////////////////////////////////////

NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;)

// This struct contains per-thread things necessary to support parallel
// young-gen collection.
class CMSParGCThreadState: public CHeapObj {
 public:
  CFLS_LAB lab;
  PromotionInfo promo;

  // Constructor.
  CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
    promo.setSpace(cfls);
  }
};

ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
     ReservedSpace rs, size_t initial_byte_size, int level,
     CardTableRS* ct, bool use_adaptive_freelists,
     FreeBlockDictionary::DictionaryChoice dictionaryChoice) :
  CardGeneration(rs, initial_byte_size, level, ct),
  _dilatation_factor(((double)MinChunkSize)/((double)(oopDesc::header_size()))),
  _debug_collection_type(Concurrent_collection_type)
{
  HeapWord* bottom = (HeapWord*) _virtual_space.low();
  HeapWord* end    = (HeapWord*) _virtual_space.high();

  _direct_allocated_words = 0;
  NOT_PRODUCT(
    _numObjectsPromoted = 0;
    _numWordsPromoted = 0;
    _numObjectsAllocated = 0;
    _numWordsAllocated = 0;
  )

  _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end),
                                           use_adaptive_freelists,
                                           dictionaryChoice);
  NOT_PRODUCT(debug_cms_space = _cmsSpace;)
  if (_cmsSpace == NULL) {
    vm_exit_during_initialization(
      "CompactibleFreeListSpace allocation failure");
  }
  _cmsSpace->_gen = this;

  _gc_stats = new CMSGCStats();

  // Verify the assumption that FreeChunk::_prev and OopDesc::_klass
  // offsets match. The ability to tell free chunks from objects
  // depends on this property.
  debug_only(
    FreeChunk* junk = NULL;
    assert(UseCompressedOops ||
           junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
           "Offset of FreeChunk::_prev within FreeChunk must match"
           " that of OopDesc::_klass within OopDesc");
  )
  if (ParallelGCThreads > 0) {
    typedef CMSParGCThreadState* CMSParGCThreadStatePtr;
    _par_gc_thread_states =
      NEW_C_HEAP_ARRAY(CMSParGCThreadStatePtr, ParallelGCThreads);
    if (_par_gc_thread_states == NULL) {
      vm_exit_during_initialization("Could not allocate par gc structs");
    }
    for (uint i = 0; i < ParallelGCThreads; i++) {
      _par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace());
      if (_par_gc_thread_states[i] == NULL) {
        vm_exit_during_initialization("Could not allocate par gc structs");
      }
    }
  } else {
    _par_gc_thread_states = NULL;
  }
  _incremental_collection_failed = false;
  // The "dilatation_factor" is the expansion that can occur on
  // account of the fact that the minimum object size in the CMS
  // generation may be larger than that in, say, a contiguous young
  // generation.
  // Ideally, in the calculation below, we'd compute the dilatation
  // factor as: MinChunkSize/(promoting_gen's min object size)
  // Since we do not have such a general query interface for the
  // promoting generation, we'll instead just use the minimum
  // object size (which today is a header's worth of space);
  // note that all arithmetic is in units of HeapWords.
  assert(MinChunkSize >= oopDesc::header_size(), "just checking");
  assert(_dilatation_factor >= 1.0, "from previous assert");
}

// The field "_initiating_occupancy" represents the occupancy percentage
// at which we trigger a new collection cycle. Unless explicitly specified
// via CMSInitiating[Perm]OccupancyFraction (argument "io" below), it
// is calculated by:
//
//   Let "f" be MinHeapFreeRatio in
//
//    _initiating_occupancy = 100-f +
//                            f * (CMSTrigger[Perm]Ratio/100)
//   where CMSTrigger[Perm]Ratio is the argument "tr" below.
//
// That is, if we assume the heap is at its desired maximum occupancy at the
// end of a collection, we let CMSTrigger[Perm]Ratio of the (purported) free
// space be allocated before initiating a new collection cycle.
//
void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, intx tr) {
  assert(io <= 100 && tr >= 0 && tr <= 100, "Check the arguments");
  if (io >= 0) {
    _initiating_occupancy = (double)io / 100.0;
  } else {
    _initiating_occupancy = ((100 - MinHeapFreeRatio) +
                             (double)(tr * MinHeapFreeRatio) / 100.0)
                            / 100.0;
  }
}
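
// Worked example of the formula above (hypothetical values): with io
// unset (io < 0), MinHeapFreeRatio f = 40 and CMSTriggerRatio tr = 80,
//
//   _initiating_occupancy = ((100 - 40) + 80 * 40 / 100) / 100
//                         = (60 + 32) / 100
//                         = 0.92
//
// i.e. a new cycle is initiated once the generation is 92% occupied:
// the heap may fall to 60% occupancy after a collection, and we let
// 80% of the remaining 40% free space be consumed before starting over.
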
void ConcurrentMarkSweepGeneration::ref_processor_init() {
  assert(collector() != NULL, "no collector");
  collector()->ref_processor_init();
}

void CMSCollector::ref_processor_init() {
  if (_ref_processor == NULL) {
    // Allocate and initialize a reference processor
    _ref_processor = ReferenceProcessor::create_ref_processor(
        _span,                               // span
        _cmsGen->refs_discovery_is_atomic(), // atomic_discovery
        _cmsGen->refs_discovery_is_mt(),     // mt_discovery
        &_is_alive_closure,
        ParallelGCThreads,
        ParallelRefProcEnabled);
    // Initialize the _ref_processor field of CMSGen
    _cmsGen->set_ref_processor(_ref_processor);

    // Allocate a dummy ref processor for perm gen.
    ReferenceProcessor* rp2 = new ReferenceProcessor();
    if (rp2 == NULL) {
      vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
    }
    _permGen->set_ref_processor(rp2);
  }
}

CMSAdaptiveSizePolicy* CMSCollector::size_policy() {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
         "Wrong type of heap");
  CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
    gch->gen_policy()->size_policy();
  assert(sp->is_gc_cms_adaptive_size_policy(),
         "Wrong type of size policy");
  return sp;
}

CMSGCAdaptivePolicyCounters* CMSCollector::gc_adaptive_policy_counters() {
  CMSGCAdaptivePolicyCounters* results =
    (CMSGCAdaptivePolicyCounters*) collector_policy()->counters();
  assert(
    results->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
    "Wrong gc policy counter kind");
  return results;
}

void ConcurrentMarkSweepGeneration::initialize_performance_counters() {

  const char* gen_name = "old";

  // Generation Counters - generation 1, 1 subspace
  _gen_counters = new GenerationCounters(gen_name, 1, 1, &_virtual_space);

  _space_counters = new GSpaceCounters(gen_name, 0,
                                       _virtual_space.reserved_size(),
                                       this, _gen_counters);
}

CMSStats::CMSStats(ConcurrentMarkSweepGeneration* cms_gen, unsigned int alpha):
  _cms_gen(cms_gen)
{
  assert(alpha <= 100, "bad value");
  _saved_alpha = alpha;

  // Initialize the alphas to the bootstrap value of 100.
  _gc0_alpha = _cms_alpha = 100;

  _cms_begin_time.update();
  _cms_end_time.update();

  _gc0_duration = 0.0;
  _gc0_period = 0.0;
  _gc0_promoted = 0;

  _cms_duration = 0.0;
  _cms_period = 0.0;
  _cms_allocated = 0;

  _cms_used_at_gc0_begin = 0;
  _cms_used_at_gc0_end = 0;
  _allow_duty_cycle_reduction = false;
  _valid_bits = 0;
  _icms_duty_cycle = CMSIncrementalDutyCycle;
}

double CMSStats::cms_free_adjustment_factor(size_t free) const {
  // TBD: CR 6909490
  return 1.0;
}

void CMSStats::adjust_cms_free_adjustment_factor(bool fail, size_t free) {
}

// If promotion failure handling is on, use
// the padded average size of the promotion for each
// young generation collection.
double CMSStats::time_until_cms_gen_full() const {
  size_t cms_free = _cms_gen->cmsSpace()->free();
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  size_t expected_promotion = gch->get_gen(0)->capacity();
  if (HandlePromotionFailure) {
    expected_promotion = MIN2(
        (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average(),
        expected_promotion);
  }
  if (cms_free > expected_promotion) {
    // Start a cms collection if there isn't enough space to promote
    // for the next minor collection. Use the padded average as
    // a safety factor.
    cms_free -= expected_promotion;

    // Adjust by the safety factor.
    double cms_free_dbl = (double)cms_free;
    double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor)/100.0;
    // Apply a further correction factor which tries to adjust
    // for recent occurrence of concurrent mode failures.
    cms_adjustment = cms_adjustment * cms_free_adjustment_factor(cms_free);
    cms_free_dbl = cms_free_dbl * cms_adjustment;

    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr("CMSStats::time_until_cms_gen_full: cms_free "
        SIZE_FORMAT " expected_promotion " SIZE_FORMAT,
        cms_free, expected_promotion);
      gclog_or_tty->print_cr("  cms_free_dbl %f cms_consumption_rate %f",
        cms_free_dbl, cms_consumption_rate() + 1.0);
    }
    // Add 1 in case the consumption rate goes to zero.
    return cms_free_dbl / (cms_consumption_rate() + 1.0);
  }
  return 0.0;
}
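
// Worked example (hypothetical numbers): with 600M of cms free space, an
// expected promotion of 100M, CMSIncrementalSafetyFactor = 10 and an
// adjustment factor of 1.0, the usable headroom is
// (600M - 100M) * 0.90 = 450M; at a measured consumption rate of 50M/s
// this predicts roughly 450M / (50M/s + 1) ~= 9 seconds until the
// generation fills.
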
// Compare the duration of the cms collection to the
// time remaining before the cms generation is full.
// Note that the time from the start of the cms collection
// to the start of the cms sweep (less than the total
// duration of the cms collection) can be used. This
// has been tried and some applications experienced
// promotion failures early in execution. This was
// possibly because the averages were not accurate
// enough at the beginning.
double CMSStats::time_until_cms_start() const {
  // We add "gc0_period" to the "work" calculation
  // below because this query is done (mostly) at the
  // end of a scavenge, so we need to conservatively
  // account for that much possible delay
  // in the query so as to avoid concurrent mode failures
  // due to starting the collection just a wee bit too
  // late.
  double work = cms_duration() + gc0_period();
  double deadline = time_until_cms_gen_full();
  // If a concurrent mode failure occurred recently, we want to be
  // more conservative and halve our expected time_until_cms_gen_full()
  if (work > deadline) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(
        " CMSCollector: collect because of anticipated promotion "
        "before full %3.7f + %3.7f > %3.7f ", cms_duration(),
        gc0_period(), time_until_cms_gen_full());
    }
    return 0.0;
  }
  return deadline - work;
}
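
// Worked example (hypothetical numbers): with an average cms collection
// duration of 0.8s and a 2.0s scavenge period, work = 2.8s; if the
// generation is predicted to fill in 9.0s, this returns 9.0 - 2.8 = 6.2s,
// and a new cycle is initiated once the result reaches 0.
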
// Return a duty cycle based on old_duty_cycle and new_duty_cycle, limiting the
// amount of change to prevent wild oscillation.
unsigned int CMSStats::icms_damped_duty_cycle(unsigned int old_duty_cycle,
                                              unsigned int new_duty_cycle) {
  assert(old_duty_cycle <= 100, "bad input value");
  assert(new_duty_cycle <= 100, "bad input value");

  // Note: use subtraction with caution since it may underflow (values are
  // unsigned). Addition is safe since we're in the range 0-100.
  unsigned int damped_duty_cycle = new_duty_cycle;
  if (new_duty_cycle < old_duty_cycle) {
    const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 5U);
    if (new_duty_cycle + largest_delta < old_duty_cycle) {
      damped_duty_cycle = old_duty_cycle - largest_delta;
    }
  } else if (new_duty_cycle > old_duty_cycle) {
    const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 15U);
    if (new_duty_cycle > old_duty_cycle + largest_delta) {
      damped_duty_cycle = MIN2(old_duty_cycle + largest_delta, 100U);
    }
  }
  assert(damped_duty_cycle <= 100, "invalid duty cycle computed");

  if (CMSTraceIncrementalPacing) {
    gclog_or_tty->print(" [icms_damped_duty_cycle(%d,%d) = %d] ",
                        old_duty_cycle, new_duty_cycle, damped_duty_cycle);
  }
  return damped_duty_cycle;
}
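
// Worked examples of the damping above (hypothetical inputs):
//
//   old = 60, new = 10: largest_delta = MAX2(60/4, 5) = 15;
//     10 + 15 < 60, so the result is clipped to 60 - 15 = 45.
//   old = 20, new = 80: largest_delta = MAX2(20/4, 15) = 15;
//     80 > 20 + 15, so the result is clipped to MIN2(35, 100) = 35.
//   old = 50, new = 55: within the allowed delta, so 55 is used as-is.
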
unsigned int CMSStats::icms_update_duty_cycle_impl() {
  assert(CMSIncrementalPacing && valid(),
         "should be handled in icms_update_duty_cycle()");

  double cms_time_so_far = cms_timer().seconds();
  double scaled_duration = cms_duration_per_mb() * _cms_used_at_gc0_end / M;
  double scaled_duration_remaining = fabsd(scaled_duration - cms_time_so_far);

  // Avoid division by 0.
  double time_until_full = MAX2(time_until_cms_gen_full(), 0.01);
  double duty_cycle_dbl = 100.0 * scaled_duration_remaining / time_until_full;

  unsigned int new_duty_cycle = MIN2((unsigned int)duty_cycle_dbl, 100U);
  if (new_duty_cycle > _icms_duty_cycle) {
    // Avoid very small duty cycles (1 or 2); 0 is allowed.
    if (new_duty_cycle > 2) {
      _icms_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle,
                                                new_duty_cycle);
    }
  } else if (_allow_duty_cycle_reduction) {
    // The duty cycle is reduced only once per cms cycle (see record_cms_end()).
    new_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle, new_duty_cycle);
    // Respect the minimum duty cycle.
    unsigned int min_duty_cycle = (unsigned int)CMSIncrementalDutyCycleMin;
    _icms_duty_cycle = MAX2(new_duty_cycle, min_duty_cycle);
  }

  if (PrintGCDetails || CMSTraceIncrementalPacing) {
    gclog_or_tty->print(" icms_dc=%d ", _icms_duty_cycle);
  }

  _allow_duty_cycle_reduction = false;
  return _icms_duty_cycle;
}

#ifndef PRODUCT
void CMSStats::print_on(outputStream *st) const {
  st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
  st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
            gc0_duration(), gc0_period(), gc0_promoted());
  st->print(",cms_dur=%g,cms_dur_per_mb=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
            cms_duration(), cms_duration_per_mb(),
            cms_period(), cms_allocated());
  st->print(",cms_since_beg=%g,cms_since_end=%g",
            cms_time_since_begin(), cms_time_since_end());
  st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
            _cms_used_at_gc0_begin, _cms_used_at_gc0_end);
  if (CMSIncrementalMode) {
    st->print(",dc=%d", icms_duty_cycle());
  }

  if (valid()) {
    st->print(",promo_rate=%g,cms_alloc_rate=%g",
              promotion_rate(), cms_allocation_rate());
    st->print(",cms_consumption_rate=%g,time_until_full=%g",
              cms_consumption_rate(), time_until_cms_gen_full());
  }
  st->print(" ");
}
#endif // #ifndef PRODUCT

CMSCollector::CollectorState CMSCollector::_collectorState =
             CMSCollector::Idling;
bool CMSCollector::_foregroundGCIsActive = false;
bool CMSCollector::_foregroundGCShouldWait = false;

CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
                           ConcurrentMarkSweepGeneration* permGen,
                           CardTableRS*                   ct,
                           ConcurrentMarkSweepPolicy*     cp):
  _cmsGen(cmsGen),
  _permGen(permGen),
  _ct(ct),
  _ref_processor(NULL),    // will be set later
  _conc_workers(NULL),     // may be set later
  _abort_preclean(false),
  _start_sampling(false),
  _between_prologue_and_epilogue(false),
  _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
  _perm_gen_verify_bit_map(0, -1 /* no mutex */, "No_lock"),
  _modUnionTable((CardTableModRefBS::card_shift - LogHeapWordSize),
                 -1 /* lock-free */, "No_lock" /* dummy */),
  _modUnionClosure(&_modUnionTable),
  _modUnionClosurePar(&_modUnionTable),
  // Adjust my span to cover old (cms) gen and perm gen
  _span(cmsGen->reserved()._union(permGen->reserved())),
  // Construct the is_alive_closure with _span & markBitMap
  _is_alive_closure(_span, &_markBitMap),
  _restart_addr(NULL),
  _overflow_list(NULL),
  _preserved_oop_stack(NULL),
  _preserved_mark_stack(NULL),
  _stats(cmsGen),
  _eden_chunk_array(NULL),     // may be set in ctor body
  _eden_chunk_capacity(0),     // -- ditto --
  _eden_chunk_index(0),        // -- ditto --
  _survivor_plab_array(NULL),  // -- ditto --
  _survivor_chunk_array(NULL), // -- ditto --
  _survivor_chunk_capacity(0), // -- ditto --
  _survivor_chunk_index(0),    // -- ditto --
  _ser_pmc_preclean_ovflw(0),
  _ser_kac_preclean_ovflw(0),
  _ser_pmc_remark_ovflw(0),
  _par_pmc_remark_ovflw(0),
  _ser_kac_ovflw(0),
  _par_kac_ovflw(0),
#ifndef PRODUCT
  _num_par_pushes(0),
#endif
  _collection_count_start(0),
  _verifying(false),
  _icms_start_limit(NULL),
  _icms_stop_limit(NULL),
  _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
  _completed_initialization(false),
  _collector_policy(cp),
  _should_unload_classes(false),
  _concurrent_cycles_since_last_unload(0),
  _roots_scanning_options(0),
  _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
  _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding)
{
  if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
    ExplicitGCInvokesConcurrent = true;
  }
  // Now expand the span and allocate the collection support structures
  // (MUT, marking bit map etc.) to cover both generations subject to
  // collection.

  // First check that _permGen is adjacent to _cmsGen and above it.
  assert(   _cmsGen->reserved().word_size()  > 0
         && _permGen->reserved().word_size() > 0,
         "generations should not be of zero size");
  assert(_cmsGen->reserved().intersection(_permGen->reserved()).is_empty(),
         "_cmsGen and _permGen should not overlap");
  assert(_cmsGen->reserved().end() == _permGen->reserved().start(),
         "_cmsGen->end() different from _permGen->start()");

  // For use by dirty card to oop closures.
  _cmsGen->cmsSpace()->set_collector(this);
  _permGen->cmsSpace()->set_collector(this);

  // Allocate MUT and marking bit map
  {
    MutexLockerEx x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag);
    if (!_markBitMap.allocate(_span)) {
      warning("Failed to allocate CMS Bit Map");
      return;
    }
    assert(_markBitMap.covers(_span), "_markBitMap inconsistency?");
  }
  {
    _modUnionTable.allocate(_span);
    assert(_modUnionTable.covers(_span), "_modUnionTable inconsistency?");
  }

  if (!_markStack.allocate(MarkStackSize)) {
    warning("Failed to allocate CMS Marking Stack");
    return;
  }
  if (!_revisitStack.allocate(CMSRevisitStackSize)) {
    warning("Failed to allocate CMS Revisit Stack");
    return;
  }

  // Support for multi-threaded concurrent phases
  if (ParallelGCThreads > 0 && CMSConcurrentMTEnabled) {
    if (FLAG_IS_DEFAULT(ConcGCThreads)) {
      // just for now
      FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3)/4);
    }
    if (ConcGCThreads > 1) {
      _conc_workers = new YieldingFlexibleWorkGang("Parallel CMS Threads",
                                                   ConcGCThreads, true);
      if (_conc_workers == NULL) {
        warning("GC/CMS: _conc_workers allocation failure: "
                "forcing -CMSConcurrentMTEnabled");
        CMSConcurrentMTEnabled = false;
      }
    } else {
      CMSConcurrentMTEnabled = false;
    }
  }
  if (!CMSConcurrentMTEnabled) {
    ConcGCThreads = 0;
  } else {
    // Turn off CMSCleanOnEnter optimization temporarily for
    // the MT case where it's not fixed yet; see 6178663.
    CMSCleanOnEnter = false;
  }
  assert((_conc_workers != NULL) == (ConcGCThreads > 1),
         "Inconsistency");

  // Parallel task queues; these are shared for the
  // concurrent and stop-world phases of CMS, but
  // are not shared with parallel scavenge (ParNew).
  {
    uint i;
    uint num_queues = (uint) MAX2(ParallelGCThreads, ConcGCThreads);

    if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled
         || ParallelRefProcEnabled)
        && num_queues > 0) {
      _task_queues = new OopTaskQueueSet(num_queues);
      if (_task_queues == NULL) {
        warning("task_queues allocation failure.");
        return;
      }
      _hash_seed = NEW_C_HEAP_ARRAY(int, num_queues);
      if (_hash_seed == NULL) {
        warning("_hash_seed array allocation failure");
        return;
      }

      // XXX use a global constant instead of 64!
      typedef struct OopTaskQueuePadded {
        OopTaskQueue work_queue;
        char pad[64 - sizeof(OopTaskQueue)];  // prevent false sharing
      } OopTaskQueuePadded;

      for (i = 0; i < num_queues; i++) {
        OopTaskQueuePadded *q_padded = new OopTaskQueuePadded();
        if (q_padded == NULL) {
          warning("work_queue allocation failure.");
          return;
        }
        _task_queues->register_queue(i, &q_padded->work_queue);
      }
      for (i = 0; i < num_queues; i++) {
        _task_queues->queue(i)->initialize();
        _hash_seed[i] = 17;  // copied from ParNew
      }
    }
  }

  _cmsGen ->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);
  _permGen->init_initiating_occupancy(CMSInitiatingPermOccupancyFraction, CMSTriggerPermRatio);

  // Clip CMSBootstrapOccupancy between 0 and 100.
  _bootstrap_occupancy = ((double)MIN2((uintx)100, MAX2((uintx)0, CMSBootstrapOccupancy)))
                         /(double)100;

  _full_gcs_since_conc_gc = 0;

  // Now tell CMS generations the identity of their collector
  ConcurrentMarkSweepGeneration::set_collector(this);

  // Create & start a CMS thread for this CMS collector
  _cmsThread = ConcurrentMarkSweepThread::start(this);
  assert(cmsThread() != NULL, "CMS Thread should have been created");
  assert(cmsThread()->collector() == this,
         "CMS Thread should refer to this gen");
  assert(CGC_lock != NULL, "Where's the CGC_lock?");

  // Support for parallelizing young gen rescan
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  _young_gen = gch->prev_gen(_cmsGen);
  if (gch->supports_inline_contig_alloc()) {
    _top_addr = gch->top_addr();
    _end_addr = gch->end_addr();
    assert(_young_gen != NULL, "no _young_gen");
    _eden_chunk_index = 0;
    _eden_chunk_capacity = (_young_gen->max_capacity()+CMSSamplingGrain)/CMSSamplingGrain;
    _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity);
    if (_eden_chunk_array == NULL) {
      _eden_chunk_capacity = 0;
      warning("GC/CMS: _eden_chunk_array allocation failure");
    }
  }
  assert(_eden_chunk_array != NULL || _eden_chunk_capacity == 0, "Error");

  // Support for parallelizing survivor space rescan
  if (CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) {
    const size_t max_plab_samples =
      ((DefNewGeneration*)_young_gen)->max_survivor_size()/MinTLABSize;

    _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads);
    _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, 2*max_plab_samples);
    _cursor               = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads);
    if (_survivor_plab_array == NULL || _survivor_chunk_array == NULL
        || _cursor == NULL) {
      warning("Failed to allocate survivor plab/chunk array");
      if (_survivor_plab_array != NULL) {
        FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array);
        _survivor_plab_array = NULL;
      }
      if (_survivor_chunk_array != NULL) {
        FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array);
        _survivor_chunk_array = NULL;
      }
      if (_cursor != NULL) {
        FREE_C_HEAP_ARRAY(size_t, _cursor);
        _cursor = NULL;
      }
    } else {
      _survivor_chunk_capacity = 2*max_plab_samples;
      for (uint i = 0; i < ParallelGCThreads; i++) {
        HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples);
        if (vec == NULL) {
          warning("Failed to allocate survivor plab array");
          for (int j = i; j > 0; j--) {
            FREE_C_HEAP_ARRAY(HeapWord*, _survivor_plab_array[j-1].array());
          }
          FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array);
          FREE_C_HEAP_ARRAY(HeapWord*,  _survivor_chunk_array);
          _survivor_plab_array = NULL;
          _survivor_chunk_array = NULL;
          _survivor_chunk_capacity = 0;
          break;
        } else {
          ChunkArray* cur =
            ::new (&_survivor_plab_array[i]) ChunkArray(vec,
                                                        max_plab_samples);
          assert(cur->end() == 0, "Should be 0");
          assert(cur->array() == vec, "Should be vec");
          assert(cur->capacity() == max_plab_samples, "Error");
        }
      }
    }
  }
  assert(   (   _survivor_plab_array  != NULL
             && _survivor_chunk_array != NULL)
         || (   _survivor_chunk_capacity == 0
             && _survivor_chunk_index == 0),
         "Error");

  // Choose what strong roots should be scanned depending on verification options
  // and perm gen collection mode.
  if (!CMSClassUnloadingEnabled) {
    // If class unloading is disabled we want to include all classes into the root set.
    add_root_scanning_option(SharedHeap::SO_AllClasses);
  } else {
    add_root_scanning_option(SharedHeap::SO_SystemClasses);
  }

  NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
  _gc_counters = new CollectorCounters("CMS", 1);
  _completed_initialization = true;
  _inter_sweep_timer.start();  // start of time
#ifdef SPARC
  // Issue a stern warning, but allow use for experimentation and debugging.
  if (VM_Version::is_sun4v() && UseMemSetInBOT) {
    assert(!FLAG_IS_DEFAULT(UseMemSetInBOT), "Error");
    warning("Experimental flag -XX:+UseMemSetInBOT is known to cause instability"
            " on sun4v; please understand that you are using at your own risk!");
  }
#endif
}
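
// Note on the warning above (CR 6948537, this changeset): on sun4v/CMT,
// memset() may use block-initializing stores (BIS) that can briefly make a
// target cache line visible as all-zero before the intended bytes arrive.
// A concurrent BOT walker can then observe "out-of-thin-air" zeros, which
// is why BOT updates avoid memset() on these platforms unless
// UseMemSetInBOT is set explicitly.
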
const char* ConcurrentMarkSweepGeneration::name() const {
  return "concurrent mark-sweep generation";
}
void ConcurrentMarkSweepGeneration::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}

// this is an optimized version of update_counters(). it takes the
// used value as a parameter rather than computing it.
//
void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
  if (UsePerfData) {
    _space_counters->update_used(used);
    _space_counters->update_capacity();
    _gen_counters->update_all();
  }
}

void ConcurrentMarkSweepGeneration::print() const {
  Generation::print();
  cmsSpace()->print();
}

#ifndef PRODUCT
void ConcurrentMarkSweepGeneration::print_statistics() {
  cmsSpace()->printFLCensus(0);
}
#endif

void ConcurrentMarkSweepGeneration::printOccupancy(const char *s) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (PrintGCDetails) {
    if (Verbose) {
      gclog_or_tty->print(" [%d %s-%s: "SIZE_FORMAT"("SIZE_FORMAT")]",
        level(), short_name(), s, used(), capacity());
    } else {
      gclog_or_tty->print(" [%d %s-%s: "SIZE_FORMAT"K("SIZE_FORMAT"K)]",
        level(), short_name(), s, used() / K, capacity() / K);
    }
  }
  if (Verbose) {
    gclog_or_tty->print(" "SIZE_FORMAT"("SIZE_FORMAT")",
      gch->used(), gch->capacity());
  } else {
    gclog_or_tty->print(" "SIZE_FORMAT"K("SIZE_FORMAT"K)",
      gch->used() / K, gch->capacity() / K);
  }
}

size_t
ConcurrentMarkSweepGeneration::contiguous_available() const {
  // dld proposes an improvement in precision here. If the committed
  // part of the space ends in a free block we should add that to
  // uncommitted size in the calculation below. Will make this
  // change later, staying with the approximation below for the
  // time being. -- ysr.
  return MAX2(_virtual_space.uncommitted_size(), unsafe_max_alloc_nogc());
}

size_t
ConcurrentMarkSweepGeneration::unsafe_max_alloc_nogc() const {
  return _cmsSpace->max_alloc_in_words() * HeapWordSize;
}

size_t ConcurrentMarkSweepGeneration::max_available() const {
  return free() + _virtual_space.uncommitted_size();
}

bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(
    size_t max_promotion_in_bytes,
    bool younger_handles_promotion_failure) const {

  // This is the most conservative test. Full promotion is
  // guaranteed if this is used. The multiplicative factor is to
  // account for the worst case "dilatation".
  double adjusted_max_promo_bytes = _dilatation_factor * max_promotion_in_bytes;
  if (adjusted_max_promo_bytes > (double)max_uintx) { // larger than size_t
    adjusted_max_promo_bytes = (double)max_uintx;
  }
  bool result = (max_contiguous_available() >= (size_t)adjusted_max_promo_bytes);

  if (younger_handles_promotion_failure && !result) {
    // Full promotion is not guaranteed because fragmentation
    // of the cms generation can prevent the full promotion.
    result = (max_available() >= (size_t)adjusted_max_promo_bytes);

    if (!result) {
      // With promotion failure handling the test for the ability
      // to support the promotion does not have to be guaranteed.
      // Use an average of the amount promoted.
      result = max_available() >= (size_t)
        gc_stats()->avg_promoted()->padded_average();
      if (PrintGC && Verbose && result) {
        gclog_or_tty->print_cr(
          "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe"
          " max_available: " SIZE_FORMAT
          " avg_promoted: " SIZE_FORMAT,
          max_available(), (size_t)
          gc_stats()->avg_promoted()->padded_average());
      }
    } else {
      if (PrintGC && Verbose) {
        gclog_or_tty->print_cr(
          "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe"
          " max_available: " SIZE_FORMAT
          " adj_max_promo_bytes: " SIZE_FORMAT,
          max_available(), (size_t)adjusted_max_promo_bytes);
      }
    }
  } else {
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr(
        "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe"
        " contiguous_available: " SIZE_FORMAT
        " adj_max_promo_bytes: " SIZE_FORMAT,
        max_contiguous_available(), (size_t)adjusted_max_promo_bytes);
    }
  }
  return result;
}
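
// Worked example of the "dilatation" adjustment above (hypothetical
// numbers): with a 2-word object header and MinChunkSize = 4 words,
// _dilatation_factor = 4/2 = 2.0, so a young gen that might promote up to
// 10MB requires 20MB of contiguous cms space for the promotion to be
// considered unconditionally safe, since every promoted object occupies
// at least a minimum-sized chunk in this generation.
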
// At a promotion failure dump information on block layout in heap
// (cms old generation).
void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
  if (CMSDumpAtPromotionFailure) {
    cmsSpace()->dump_at_safepoint_with_locks(collector(), gclog_or_tty);
  }
}

CompactibleSpace*
ConcurrentMarkSweepGeneration::first_compaction_space() const {
  return _cmsSpace;
}

void ConcurrentMarkSweepGeneration::reset_after_compaction() {
  // Clear the promotion information. These pointers can be adjusted
  // along with all the other pointers into the heap but
  // compaction is expected to be a rare event with
  // a heap using cms so don't do it without seeing the need.
  if (ParallelGCThreads > 0) {
    for (uint i = 0; i < ParallelGCThreads; i++) {
      _par_gc_thread_states[i]->promo.reset();
    }
  }
}

void ConcurrentMarkSweepGeneration::space_iterate(SpaceClosure* blk, bool usedOnly) {
  blk->do_space(_cmsSpace);
}

void ConcurrentMarkSweepGeneration::compute_new_size() {
  assert_locked_or_safepoint(Heap_lock);

  // If incremental collection failed, we just want to expand
  // to the limit.
  if (incremental_collection_failed()) {
    clear_incremental_collection_failed();
    grow_to_reserved();
    return;
  }

  size_t expand_bytes = 0;
  double free_percentage = ((double) free()) / capacity();
  double desired_free_percentage = (double) MinHeapFreeRatio / 100;
  double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;

  // compute expansion delta needed for reaching desired free percentage
  if (free_percentage < desired_free_percentage) {
    size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
    assert(desired_capacity >= capacity(), "invalid expansion size");
    expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
  }
  if (expand_bytes > 0) {
    if (PrintGCDetails && Verbose) {
      size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
      gclog_or_tty->print_cr("\nFrom compute_new_size: ");
      gclog_or_tty->print_cr("  Free fraction %f", free_percentage);
      gclog_or_tty->print_cr("  Desired free fraction %f",
        desired_free_percentage);
      gclog_or_tty->print_cr("  Maximum free fraction %f",
        maximum_free_percentage);
      gclog_or_tty->print_cr("  Capacity "SIZE_FORMAT, capacity()/1000);
      gclog_or_tty->print_cr("  Desired capacity "SIZE_FORMAT,
        desired_capacity/1000);
      int prev_level = level() - 1;
      if (prev_level >= 0) {
        size_t prev_size = 0;
        GenCollectedHeap* gch = GenCollectedHeap::heap();
        Generation* prev_gen = gch->_gens[prev_level];
        prev_size = prev_gen->capacity();
        gclog_or_tty->print_cr("  Younger gen size "SIZE_FORMAT,
          prev_size/1000);
      }
      gclog_or_tty->print_cr("  unsafe_max_alloc_nogc "SIZE_FORMAT,
        unsafe_max_alloc_nogc()/1000);
      gclog_or_tty->print_cr("  contiguous available "SIZE_FORMAT,
        contiguous_available()/1000);
      gclog_or_tty->print_cr("  Expand by "SIZE_FORMAT" (bytes)",
        expand_bytes);
    }
    // safe if expansion fails
    expand(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr("  Expanded free fraction %f",
        ((double) free()) / capacity());
    }
  }
}

Mutex* ConcurrentMarkSweepGeneration::freelistLock() const {
  return cmsSpace()->freelistLock();
}

HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size,
                                                  bool   tlab) {
  CMSSynchronousYieldRequest yr;
  MutexLockerEx x(freelistLock(),
                  Mutex::_no_safepoint_check_flag);
  return have_lock_and_allocate(size, tlab);
}

HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
                                                                bool   tlab) {
  assert_lock_strong(freelistLock());
  size_t adjustedSize = CompactibleFreeListSpace::adjustObjectSize(size);
  HeapWord* res = cmsSpace()->allocate(adjustedSize);
  // Allocate the object live (grey) if the background collector has
  // started marking. This is necessary because the marker may
  // have passed this address and consequently this object will
  // not otherwise be greyed and would be incorrectly swept up.
  // Note that if this object contains references, the writing
  // of those references will dirty the card containing this object
  // allowing the object to be blackened (and its references scanned)
  // either during a preclean phase or at the final checkpoint.
  if (res != NULL) {
    collector()->direct_allocated(res, adjustedSize);
    _direct_allocated_words += adjustedSize;
    // allocation counters
    NOT_PRODUCT(
      _numObjectsAllocated++;
      _numWordsAllocated += (int)adjustedSize;
    )
  }
  return res;
}

// In the case of direct allocation by mutators in a generation that
// is being concurrently collected, the object must be allocated
// live (grey) if the background collector has started marking.
// This is necessary because the marker may
// have passed this address and consequently this object will
// not otherwise be greyed and would be incorrectly swept up.
// Note that if this object contains references, the writing
// of those references will dirty the card containing this object
// allowing the object to be blackened (and its references scanned)
// either during a preclean phase or at the final checkpoint.
void CMSCollector::direct_allocated(HeapWord* start, size_t size) {
  assert(_markBitMap.covers(start, size), "Out of bounds");
  if (_collectorState >= Marking) {
    MutexLockerEx y(_markBitMap.lock(),
                    Mutex::_no_safepoint_check_flag);
    // [see comments preceding SweepClosure::do_blk() below for details]
    // 1. need to mark the object as live so it isn't collected
    // 2. need to mark the 2nd bit to indicate the object may be uninitialized
    // 3. need to mark the end of the object so sweeper can skip over it
    //    if it's uninitialized when the sweeper reaches it.
    _markBitMap.mark(start);             // object is live
    _markBitMap.mark(start + 1);         // object is potentially uninitialized?
    _markBitMap.mark(start + size - 1);  // mark end of object
  }
  // check that oop looks uninitialized
  assert(oop(start)->klass_or_null() == NULL, "_klass should be NULL");
}

void CMSCollector::promoted(bool par, HeapWord* start,
                            bool is_obj_array, size_t obj_size) {
  assert(_markBitMap.covers(start), "Out of bounds");
  // See comment in direct_allocated() about when objects should
  // be allocated live.
  if (_collectorState >= Marking) {
    // we already hold the marking bit map lock, taken in
    // the prologue
    if (par) {
      _markBitMap.par_mark(start);
    } else {
      _markBitMap.mark(start);
    }
    // We don't need to mark the object as uninitialized (as
    // in direct_allocated above) because this is being done with the
    // world stopped and the object will be initialized by the
    // time the sweeper gets to look at it.
    assert(SafepointSynchronize::is_at_safepoint(),
           "expect promotion only at safepoints");

    if (_collectorState < Sweeping) {
      // Mark the appropriate cards in the modUnionTable, so that
      // this object gets scanned before the sweep. If this is
      // not done, CMS generation references in the object might
      // not get marked.
      // For the case of arrays, which are otherwise precisely
      // marked, we need to dirty the entire array, not just its head.
      if (is_obj_array) {
        // The [par_]mark_range() method expects mr.end() below to
        // be aligned to the granularity of a bit's representation
        // in the heap. In the case of the MUT below, that's a
        // card size.
        MemRegion mr(start,
                     (HeapWord*)round_to((intptr_t)(start + obj_size),
                        CardTableModRefBS::card_size /* bytes */));
        if (par) {
          _modUnionTable.par_mark_range(mr);
        } else {
          _modUnionTable.mark_range(mr);
        }
      } else {  // not an obj array; we can just mark the head
        if (par) {
          _modUnionTable.par_mark(start);
        } else {
          _modUnionTable.mark(start);
        }
      }
    }
  }
}
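
// Worked example of the card rounding above (hypothetical addresses):
// with 512-byte cards, an object array starting at 0x10000 and ending at
// 0x10350 has its MemRegion end rounded up to 0x10400, so the last,
// partially covered card is dirtied as well and the array's trailing
// elements are guaranteed to be rescanned.
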
static inline size_t percent_of_space(Space* space, HeapWord* addr)
{
  size_t delta = pointer_delta(addr, space->bottom());
  return (size_t)(delta * 100.0 / (space->capacity() / HeapWordSize));
}

void CMSCollector::icms_update_allocation_limits()
{
  Generation* gen0 = GenCollectedHeap::heap()->get_gen(0);
  EdenSpace* eden = gen0->as_DefNewGeneration()->eden();

  const unsigned int duty_cycle = stats().icms_update_duty_cycle();
  if (CMSTraceIncrementalPacing) {
    stats().print();
  }

  assert(duty_cycle <= 100, "invalid duty cycle");
  if (duty_cycle != 0) {
    // The duty_cycle is a percentage between 0 and 100; convert to words and
    // then compute the offset from the endpoints of the space.
    size_t free_words = eden->free() / HeapWordSize;
    double free_words_dbl = (double)free_words;
    size_t duty_cycle_words = (size_t)(free_words_dbl * duty_cycle / 100.0);
    size_t offset_words = (free_words - duty_cycle_words) / 2;

    _icms_start_limit = eden->top() + offset_words;
    _icms_stop_limit = eden->end() - offset_words;

    // The limits may be adjusted (shifted to the right) by
    // CMSIncrementalOffset, to allow the application more mutator time after a
    // young gen gc (when all mutators were stopped) and before CMS starts and
    // takes away one or more cpus.
    if (CMSIncrementalOffset != 0) {
      double adjustment_dbl = free_words_dbl * CMSIncrementalOffset / 100.0;
      size_t adjustment = (size_t)adjustment_dbl;
      HeapWord* tmp_stop = _icms_stop_limit + adjustment;
      if (tmp_stop > _icms_stop_limit && tmp_stop < eden->end()) {
        _icms_start_limit += adjustment;
        _icms_stop_limit = tmp_stop;
      }
    }
  }
  if (duty_cycle == 0 || (_icms_start_limit == _icms_stop_limit)) {
    _icms_start_limit = _icms_stop_limit = eden->end();
  }

  // Install the new start limit.
  eden->set_soft_end(_icms_start_limit);

  if (CMSTraceIncrementalMode) {
    gclog_or_tty->print(" icms alloc limits:  "
                        PTR_FORMAT "," PTR_FORMAT
                        " (" SIZE_FORMAT "%%," SIZE_FORMAT "%%) ",
                        _icms_start_limit, _icms_stop_limit,
                        percent_of_space(eden, _icms_start_limit),
                        percent_of_space(eden, _icms_stop_limit));
    if (Verbose) {
      gclog_or_tty->print("eden:  ");
      eden->print_on(gclog_or_tty);
    }
  }
}
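
// Worked example of the limit placement above (hypothetical numbers):
// with 1000 free words in eden and a duty cycle of 20%,
// duty_cycle_words = 200 and offset_words = (1000 - 200) / 2 = 400, so
// _icms_start_limit = top() + 400 and _icms_stop_limit = end() - 400.
// The icms thread thus runs while allocation proceeds through the middle
// 200-word window, centered in the free region unless shifted right by
// CMSIncrementalOffset.
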
// Any changes here should try to maintain the invariant
// that if this method is called with _icms_start_limit
// and _icms_stop_limit both NULL, then it should return NULL
// and not notify the icms thread.
HeapWord*
CMSCollector::allocation_limit_reached(Space* space, HeapWord* top,
                                       size_t word_size)
{
  // A start_limit equal to end() means the duty cycle is 0, so treat that as a
  // nop.
  if (CMSIncrementalMode && _icms_start_limit != space->end()) {
    if (top <= _icms_start_limit) {
      if (CMSTraceIncrementalMode) {
        space->print_on(gclog_or_tty);
        gclog_or_tty->stamp();
        gclog_or_tty->print_cr(" start limit top=" PTR_FORMAT
                               ", new limit=" PTR_FORMAT
                               " (" SIZE_FORMAT "%%)",
                               top, _icms_stop_limit,
                               percent_of_space(space, _icms_stop_limit));
      }
      ConcurrentMarkSweepThread::start_icms();
      assert(top < _icms_stop_limit, "Tautology");
      if (word_size < pointer_delta(_icms_stop_limit, top)) {
        return _icms_stop_limit;
      }

      // The allocation will cross both the _start and _stop limits, so do the
      // stop notification also and return end().
      if (CMSTraceIncrementalMode) {
        space->print_on(gclog_or_tty);
        gclog_or_tty->stamp();
        gclog_or_tty->print_cr(" +stop limit top=" PTR_FORMAT
                               ", new limit=" PTR_FORMAT
                               " (" SIZE_FORMAT "%%)",
                               top, space->end(),
                               percent_of_space(space, space->end()));
      }
      ConcurrentMarkSweepThread::stop_icms();
      return space->end();
    }

    if (top <= _icms_stop_limit) {
      if (CMSTraceIncrementalMode) {
        space->print_on(gclog_or_tty);
        gclog_or_tty->stamp();
        gclog_or_tty->print_cr(" stop limit top=" PTR_FORMAT
                               ", new limit=" PTR_FORMAT
                               " (" SIZE_FORMAT "%%)",
                               top, space->end(),
                               percent_of_space(space, space->end()));
      }
      ConcurrentMarkSweepThread::stop_icms();
      return space->end();
    }

    if (CMSTraceIncrementalMode) {
      space->print_on(gclog_or_tty);
      gclog_or_tty->stamp();
      gclog_or_tty->print_cr(" end limit top=" PTR_FORMAT
                             ", new limit=" PTR_FORMAT,
                             top, NULL);
    }
  }

  return NULL;
}

oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
  // allocate, copy and if necessary update promoinfo --
  // delegate to underlying space.
  assert_lock_strong(freelistLock());

#ifndef PRODUCT
  if (Universe::heap()->promotion_should_fail()) {
    return NULL;
  }
#endif  // #ifndef PRODUCT

  oop res = _cmsSpace->promote(obj, obj_size);
  if (res == NULL) {
    // expand and retry
    size_t s = _cmsSpace->expansionSpaceRequired(obj_size);  // HeapWords
    expand(s*HeapWordSize, MinHeapDeltaBytes,
      CMSExpansionCause::_satisfy_promotion);
    // Since there's currently no next generation, we don't try to promote
    // into a more senior generation.
    assert(next_gen() == NULL, "assumption, based upon which no attempt "
                               "is made to pass on a possibly failing "
                               "promotion to next generation");
    res = _cmsSpace->promote(obj, obj_size);
  }
  if (res != NULL) {
    // See comment in allocate() about when objects should
    // be allocated live.
    assert(obj->is_oop(), "Will dereference klass pointer below");
    collector()->promoted(false,           // Not parallel
                          (HeapWord*)res, obj->is_objArray(), obj_size);
    // promotion counters
    NOT_PRODUCT(
      _numObjectsPromoted++;
      _numWordsPromoted +=
        (int)(CompactibleFreeListSpace::adjustObjectSize(obj->size()));
    )
  }
  return res;
}

HeapWord*
ConcurrentMarkSweepGeneration::allocation_limit_reached(Space* space,
                                                        HeapWord* top,
                                                        size_t word_sz)
{
  return collector()->allocation_limit_reached(space, top, word_sz);
}

// Things to support parallel young-gen collection.
oop
ConcurrentMarkSweepGeneration::par_promote(int thread_num,
                                           oop old, markOop m,
                                           size_t word_sz) {
#ifndef PRODUCT
  if (Universe::heap()->promotion_should_fail()) {
    return NULL;
  }
#endif  // #ifndef PRODUCT

  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
  PromotionInfo* promoInfo = &ps->promo;
  // if we are tracking promotions, then first ensure space for
  // promotion (including spooling space for saving header if necessary).
  // then allocate and copy, then track promoted info if needed.
  // When tracking (see PromotionInfo::track()), the mark word may
  // be displaced and in this case restoration of the mark word
  // occurs in the (oop_since_save_marks_)iterate phase.
  if (promoInfo->tracking() && !promoInfo->ensure_spooling_space()) {
    // Out of space for allocating spooling buffers;
    // try expanding and allocating spooling buffers.
    if (!expand_and_ensure_spooling_space(promoInfo)) {
      return NULL;
    }
  }
  assert(promoInfo->has_spooling_space(), "Control point invariant");
  HeapWord* obj_ptr = ps->lab.alloc(word_sz);
  if (obj_ptr == NULL) {
    obj_ptr = expand_and_par_lab_allocate(ps, word_sz);
    if (obj_ptr == NULL) {
      return NULL;
    }
  }
  oop obj = oop(obj_ptr);
  assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
  // Otherwise, copy the object. Here we must be careful to insert the
  // klass pointer last, since this marks the block as an allocated object.
  // Except with compressed oops it's the mark word.
  HeapWord* old_ptr = (HeapWord*)old;
  if (word_sz > (size_t)oopDesc::header_size()) {
    Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(),
                                 obj_ptr + oopDesc::header_size(),
                                 word_sz - oopDesc::header_size());
  }

  if (UseCompressedOops) {
    // Copy gap missed by (aligned) header size calculation above
    obj->set_klass_gap(old->klass_gap());
  }

  // Restore the mark word copied above.
  obj->set_mark(m);

  // Now we can track the promoted object, if necessary. We take care
  // to delay the transition from uninitialized to full object
  // (i.e., insertion of klass pointer) until after, so that it
  // atomically becomes a promoted object.
  if (promoInfo->tracking()) {
    promoInfo->track((PromotedObject*)obj, old->klass());
  }

  // Finally, install the klass pointer (this should be volatile).
  obj->set_klass(old->klass());

  assert(old->is_oop(), "Will dereference klass ptr below");
  collector()->promoted(true,          // parallel
                        obj_ptr, old->is_objArray(), word_sz);

  NOT_PRODUCT(
    Atomic::inc(&_numObjectsPromoted);
    Atomic::add((jint)CompactibleFreeListSpace::adjustObjectSize(obj->size()),
                &_numWordsPromoted);
  )

  return obj;
}
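
// A minimal standalone sketch (hypothetical; not HotSpot code) of the
// publication ordering used above: the body is copied first, and the
// "header" that marks the block as a real object is published last with
// release semantics, so a concurrent reader that observes the header also
// observes the completed copy.
//
//   #include <atomic>
//   #include <cstring>
//
//   struct Block {
//     std::atomic<const void*> klass{nullptr};  // null means "not an object yet"
//     char body[56];
//   };
//
//   void publish(Block* dst, const Block* src, const void* k) {
//     std::memcpy(dst->body, src->body, sizeof(dst->body));  // copy body first
//     dst->klass.store(k, std::memory_order_release);        // install header last
//   }
//
//   bool is_object(const Block* b) {
//     // acquire pairs with the release store in publish()
//     return b->klass.load(std::memory_order_acquire) != nullptr;
//   }
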
1390 void
1391 ConcurrentMarkSweepGeneration::
1392 par_promote_alloc_undo(int thread_num,
1393 HeapWord* obj, size_t word_sz) {
1394 // CMS does not support promotion undo.
1395 ShouldNotReachHere();
1396 }
1398 void
1399 ConcurrentMarkSweepGeneration::
1400 par_promote_alloc_done(int thread_num) {
1401 CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1402 ps->lab.retire(thread_num);
1403 }
1405 void
1406 ConcurrentMarkSweepGeneration::
1407 par_oop_since_save_marks_iterate_done(int thread_num) {
1408 CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1409 ParScanWithoutBarrierClosure* dummy_cl = NULL;
1410 ps->promo.promoted_oops_iterate_nv(dummy_cl);
1411 }
1413 // XXXPERM
1414 bool ConcurrentMarkSweepGeneration::should_collect(bool full,
1415 size_t size,
1416 bool tlab)
1417 {
1418 // We allow a STW collection only if a full
1419 // collection was requested.
1420 return full || should_allocate(size, tlab); // FIX ME !!!
1421 // This and promotion failure handling are connected at the
1422 // hip and should be fixed by untying them.
1423 }
1425 bool CMSCollector::shouldConcurrentCollect() {
1426 if (_full_gc_requested) {
1427 assert(ExplicitGCInvokesConcurrent, "Unexpected state");
1428 if (Verbose && PrintGCDetails) {
1429 gclog_or_tty->print_cr("CMSCollector: collect because of explicit "
1430 " gc request");
1431 }
1432 return true;
1433 }
1435 // For debugging purposes, change the type of collection.
1436 // If the rotation is not on the concurrent collection
1437 // type, don't start a concurrent collection.
1438 NOT_PRODUCT(
1439 if (RotateCMSCollectionTypes &&
1440 (_cmsGen->debug_collection_type() !=
1441 ConcurrentMarkSweepGeneration::Concurrent_collection_type)) {
1442 assert(_cmsGen->debug_collection_type() !=
1443 ConcurrentMarkSweepGeneration::Unknown_collection_type,
1444 "Bad cms collection type");
1445 return false;
1446 }
1447 )
1449 FreelistLocker x(this);
1450 // ------------------------------------------------------------------
1451 // Print out lots of information which affects the initiation of
1452 // a collection.
1453 if (PrintCMSInitiationStatistics && stats().valid()) {
1454 gclog_or_tty->print("CMSCollector shouldConcurrentCollect: ");
1455 gclog_or_tty->stamp();
1456 gclog_or_tty->print_cr("");
1457 stats().print_on(gclog_or_tty);
1458 gclog_or_tty->print_cr("time_until_cms_gen_full %3.7f",
1459 stats().time_until_cms_gen_full());
1460 gclog_or_tty->print_cr("free="SIZE_FORMAT, _cmsGen->free());
1461 gclog_or_tty->print_cr("contiguous_available="SIZE_FORMAT,
1462 _cmsGen->contiguous_available());
1463 gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate());
1464 gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate());
1465 gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy());
1466 gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
1467 gclog_or_tty->print_cr("initiatingPermOccupancy=%3.7f", _permGen->initiating_occupancy());
1468 }
1469 // ------------------------------------------------------------------
1471 // If the estimated time to complete a cms collection (cms_duration())
1472 // is less than the estimated time remaining until the cms generation
1473 // is full, start a collection.
1474 if (!UseCMSInitiatingOccupancyOnly) {
1475 if (stats().valid()) {
1476 if (stats().time_until_cms_start() == 0.0) {
1477 return true;
1478 }
1479 } else {
1480 // We want to conservatively collect somewhat early in order
1481 // to try and "bootstrap" our CMS/promotion statistics;
1482 // this branch will not fire after the first successful CMS
1483 // collection because the stats should then be valid.
1484 if (_cmsGen->occupancy() >= _bootstrap_occupancy) {
1485 if (Verbose && PrintGCDetails) {
1486 gclog_or_tty->print_cr(
1487 " CMSCollector: collect for bootstrapping statistics:"
1488 " occupancy = %f, boot occupancy = %f", _cmsGen->occupancy(),
1489 _bootstrap_occupancy);
1490 }
1491 return true;
1492 }
1493 }
1494 }
1496   // Otherwise, we start a collection cycle if either the perm gen or
1497   // the old gen wants a collection cycle started. Each may use
1498   // an appropriate criterion for making this decision.
1499 // XXX We need to make sure that the gen expansion
1500 // criterion dovetails well with this. XXX NEED TO FIX THIS
1501 if (_cmsGen->should_concurrent_collect()) {
1502 if (Verbose && PrintGCDetails) {
1503 gclog_or_tty->print_cr("CMS old gen initiated");
1504 }
1505 return true;
1506 }
1508 // We start a collection if we believe an incremental collection may fail;
1509 // this is not likely to be productive in practice because it's probably too
1510 // late anyway.
1511 GenCollectedHeap* gch = GenCollectedHeap::heap();
1512 assert(gch->collector_policy()->is_two_generation_policy(),
1513 "You may want to check the correctness of the following");
1514 if (gch->incremental_collection_will_fail()) {
1515 if (PrintGCDetails && Verbose) {
1516 gclog_or_tty->print("CMSCollector: collect because incremental collection will fail ");
1517 }
1518 return true;
1519 }
1521 if (CMSClassUnloadingEnabled && _permGen->should_concurrent_collect()) {
1522 bool res = update_should_unload_classes();
1523 if (res) {
1524 if (Verbose && PrintGCDetails) {
1525 gclog_or_tty->print_cr("CMS perm gen initiated");
1526 }
1527 return true;
1528 }
1529 }
1530 return false;
1531 }
1533 // Clear _expansion_cause fields of constituent generations
1534 void CMSCollector::clear_expansion_cause() {
1535 _cmsGen->clear_expansion_cause();
1536 _permGen->clear_expansion_cause();
1537 }
1539 // We should be conservative in starting a collection cycle. Starting
1540 // too eagerly runs the risk of collecting far too often in the
1541 // extreme; collecting too rarely falls back on full collections,
1542 // which works, even if not optimal in terms of concurrent work.
1543 // As a workaround for collecting too eagerly, use the flag
1544 // UseCMSInitiatingOccupancyOnly. This also has the advantage of
1545 // giving the user an easily understood way of controlling the
1546 // collections.
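// For example (illustrative values, not a recommendation), occupancy-only
// initiation might be selected on the command line with:
//   -XX:+UseConcMarkSweepGC -XX:+UseCMSInitiatingOccupancyOnly \
//   -XX:CMSInitiatingOccupancyFraction=70
// so that a concurrent cycle is started only once old generation
// occupancy exceeds 70%.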
1547 // We want to start a new collection cycle if any of the following
1548 // conditions hold:
1549 // . our current occupancy exceeds the configured initiating occupancy
1550 // for this generation, or
1551 // . we recently needed to expand this space and have not, since that
1552 // expansion, done a collection of this generation, or
1553 // . the underlying space believes that it may be a good idea to initiate
1554 // a concurrent collection (this may be based on criteria such as the
1555 // following: the space uses linear allocation and linear allocation is
1556 // going to fail, or there is believed to be excessive fragmentation in
1557 //   the generation, etc.), or ...
1558 // [.(currently done by CMSCollector::shouldConcurrentCollect() only for
1559 // the case of the old generation, not the perm generation; see CR 6543076):
1560 // we may be approaching a point at which allocation requests may fail because
1561 // we will be out of sufficient free space given allocation rate estimates.]
1562 bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const {
1564 assert_lock_strong(freelistLock());
1565 if (occupancy() > initiating_occupancy()) {
1566 if (PrintGCDetails && Verbose) {
1567 gclog_or_tty->print(" %s: collect because of occupancy %f / %f ",
1568 short_name(), occupancy(), initiating_occupancy());
1569 }
1570 return true;
1571 }
1572 if (UseCMSInitiatingOccupancyOnly) {
1573 return false;
1574 }
1575 if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
1576 if (PrintGCDetails && Verbose) {
1577 gclog_or_tty->print(" %s: collect because expanded for allocation ",
1578 short_name());
1579 }
1580 return true;
1581 }
1582 if (_cmsSpace->should_concurrent_collect()) {
1583 if (PrintGCDetails && Verbose) {
1584 gclog_or_tty->print(" %s: collect because cmsSpace says so ",
1585 short_name());
1586 }
1587 return true;
1588 }
1589 return false;
1590 }
1592 void ConcurrentMarkSweepGeneration::collect(bool full,
1593 bool clear_all_soft_refs,
1594 size_t size,
1595 bool tlab)
1596 {
1597 collector()->collect(full, clear_all_soft_refs, size, tlab);
1598 }
1600 void CMSCollector::collect(bool full,
1601 bool clear_all_soft_refs,
1602 size_t size,
1603 bool tlab)
1604 {
1605 if (!UseCMSCollectionPassing && _collectorState > Idling) {
1606     // For debugging purposes, skip the collection if the state
1607     // is not currently idle
1608 if (TraceCMSState) {
1609 gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " skipped full:%d CMS state %d",
1610 Thread::current(), full, _collectorState);
1611 }
1612 return;
1613 }
1615 // The following "if" branch is present for defensive reasons.
1616 // In the current uses of this interface, it can be replaced with:
1617   //    assert(!GC_locker::is_active(), "Can't be called otherwise");
1618 // But I am not placing that assert here to allow future
1619 // generality in invoking this interface.
1620 if (GC_locker::is_active()) {
1621 // A consistency test for GC_locker
1622 assert(GC_locker::needs_gc(), "Should have been set already");
1623 // Skip this foreground collection, instead
1624 // expanding the heap if necessary.
1625 // Need the free list locks for the call to free() in compute_new_size()
1626 compute_new_size();
1627 return;
1628 }
1629 acquire_control_and_collect(full, clear_all_soft_refs);
1630 _full_gcs_since_conc_gc++;
1632 }
1634 void CMSCollector::request_full_gc(unsigned int full_gc_count) {
1635 GenCollectedHeap* gch = GenCollectedHeap::heap();
1636 unsigned int gc_count = gch->total_full_collections();
1637 if (gc_count == full_gc_count) {
1638 MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
1639 _full_gc_requested = true;
1640 CGC_lock->notify(); // nudge CMS thread
1641 }
1642 }
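// Note: a caller is expected to sample gch->total_full_collections()
// before deciding to request a collection and to pass that count in;
// the count is re-checked here under the CGC_lock, so a request that an
// intervening collection has already satisfied is simply dropped rather
// than triggering a redundant cycle.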
1645 // The foreground and background collectors need to coordinate in order
1646 // to make sure that they do not mutually interfere with CMS collections.
1647 // When a background collection is active,
1648 // the foreground collector may need to take over (preempt) and
1649 // synchronously complete an ongoing collection. Depending on the
1650 // frequency of the background collections and the heap usage
1651 // of the application, this preemption can be seldom or frequent.
1652 // There are only certain
1653 // points in the background collection at which the "collection-baton"
1654 // can be passed to the foreground collector.
1655 //
1656 // The foreground collector will wait for the baton before
1657 // starting any part of the collection. The foreground collector
1658 // will only wait at one location.
1659 //
1660 // The background collector will yield the baton before starting a new
1661 // phase of the collection (e.g., before initial marking, marking from roots,
1662 // precleaning, final re-mark, sweep, etc.). This is normally done at the head
1663 // of the loop which switches the phases. The background collector does some
1664 // of the phases (initial mark, final re-mark) with the world stopped.
1665 // Because of locking involved in stopping the world,
1666 // the foreground collector should not block waiting for the background
1667 // collector when it is doing a stop-the-world phase. The background
1668 // collector will yield the baton at an additional point just before
1669 // it enters a stop-the-world phase. Once the world is stopped, the
1670 // background collector checks the phase of the collection. If the
1671 // phase has not changed, it proceeds with the collection. If the
1672 // phase has changed, it skips that phase of the collection. See
1673 // the comments on the use of the Heap_lock in collect_in_background().
1674 //
1675 // Variables used in baton passing.
1676 // _foregroundGCIsActive - Set to true by the foreground collector when
1677 //     it wants the baton. The foreground collector clears it when it
1678 //     has finished the collection.
1679 // _foregroundGCShouldWait - Set to true by the background collector
1680 // when it is running. The foreground collector waits while
1681 // _foregroundGCShouldWait is true.
1682 // CGC_lock - monitor used to protect access to the above variables
1683 // and to notify the foreground and background collectors.
1684 // _collectorState - current state of the CMS collection.
1685 //
1686 // The foreground collector
1687 // acquires the CGC_lock
1688 // sets _foregroundGCIsActive
1689 // waits on the CGC_lock for _foregroundGCShouldWait to be false
1690 // various locks acquired in preparation for the collection
1691 // are released so as not to block the background collector
1692 // that is in the midst of a collection
1693 // proceeds with the collection
1694 // clears _foregroundGCIsActive
1695 // returns
1696 //
1697 // The background collector in a loop iterating on the phases of the
1698 // collection
1699 // acquires the CGC_lock
1700 // sets _foregroundGCShouldWait
1701 // if _foregroundGCIsActive is set
1702 //      clears _foregroundGCShouldWait, notifies the CGC_lock
1703 //      waits on the CGC_lock for _foregroundGCIsActive to become false
1704 // and exits the loop.
1705 // otherwise
1706 // proceed with that phase of the collection
1707 // if the phase is a stop-the-world phase,
1708 // yield the baton once more just before enqueueing
1709 // the stop-world CMS operation (executed by the VM thread).
1710 // returns after all phases of the collection are done
1711 //
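// The sketch below (editorial; compiled out via "#if 0") models the
// essence of this handshake with standard C++ primitives. The names
// Baton, fg_collect(), bg_phase_boundary() and bg_done() are
// hypothetical; the real protocol additionally involves the CMS token,
// safepoints, and the stop-world re-check described above.
#if 0
#include <condition_variable>
#include <mutex>

struct Baton {
  std::mutex lock;                 // stands in for the CGC_lock
  std::condition_variable cv;
  bool fg_is_active = false;       // _foregroundGCIsActive
  bool fg_should_wait = false;     // _foregroundGCShouldWait

  // Foreground collector: announce intent, wait for the background
  // collector to yield, do the collection, then release the baton.
  void fg_collect() {
    std::unique_lock<std::mutex> x(lock);
    fg_is_active = true;
    cv.notify_all();                                 // wake a waiting BG thread
    cv.wait(x, [this] { return !fg_should_wait; });
    // ... do the foreground collection (world stopped) ...
    fg_is_active = false;
    cv.notify_all();                                 // let the BG thread resume
  }

  // Background collector: called at each phase boundary; yields the
  // baton if the foreground collector wants it. Returns true if it
  // yielded, in which case the current background cycle is abandoned.
  bool bg_phase_boundary() {
    std::unique_lock<std::mutex> x(lock);
    fg_should_wait = true;
    if (fg_is_active) {
      fg_should_wait = false;
      cv.notify_all();                               // unblock the FG collector
      cv.wait(x, [this] { return !fg_is_active; });
      return true;
    }
    return false;                                    // proceed with next phase
  }

  // Background collector: called once all phases are done.
  void bg_done() {
    std::unique_lock<std::mutex> x(lock);
    fg_should_wait = false;
    cv.notify_all();
  }
};
#endif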
1713 void CMSCollector::acquire_control_and_collect(bool full,
1714 bool clear_all_soft_refs) {
1715 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
1716 assert(!Thread::current()->is_ConcurrentGC_thread(),
1717 "shouldn't try to acquire control from self!");
1719 // Start the protocol for acquiring control of the
1720 // collection from the background collector (aka CMS thread).
1721 assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1722 "VM thread should have CMS token");
1723 // Remember the possibly interrupted state of an ongoing
1724 // concurrent collection
1725 CollectorState first_state = _collectorState;
1727 // Signal to a possibly ongoing concurrent collection that
1728 // we want to do a foreground collection.
1729 _foregroundGCIsActive = true;
1731 // Disable incremental mode during a foreground collection.
1732 ICMSDisabler icms_disabler;
1734   // Release locks and wait for a notify from the background collector;
1735   // releasing the locks is only necessary for phases which
1736   // yield, to improve the granularity of the collection.
1737 assert_lock_strong(bitMapLock());
1738 // We need to lock the Free list lock for the space that we are
1739 // currently collecting.
1740 assert(haveFreelistLocks(), "Must be holding free list locks");
1741 bitMapLock()->unlock();
1742 releaseFreelistLocks();
1743 {
1744 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1745 if (_foregroundGCShouldWait) {
1746       // We are going to be waiting for action from the CMS thread;
1747 // it had better not be gone (for instance at shutdown)!
1748 assert(ConcurrentMarkSweepThread::cmst() != NULL,
1749 "CMS thread must be running");
1750 // Wait here until the background collector gives us the go-ahead
1751 ConcurrentMarkSweepThread::clear_CMS_flag(
1752 ConcurrentMarkSweepThread::CMS_vm_has_token); // release token
1753 // Get a possibly blocked CMS thread going:
1754 // Note that we set _foregroundGCIsActive true above,
1755 // without protection of the CGC_lock.
1756 CGC_lock->notify();
1757 assert(!ConcurrentMarkSweepThread::vm_thread_wants_cms_token(),
1758 "Possible deadlock");
1759 while (_foregroundGCShouldWait) {
1760 // wait for notification
1761 CGC_lock->wait(Mutex::_no_safepoint_check_flag);
1762       // Possibility of delay/starvation here, since the CMS token does
1763       // not know to give priority to the VM thread? Actually, I think
1764       // there wouldn't be any delay/starvation, but the proof of
1765       // that "fact" (?) appears non-trivial. XXX 20011219YSR
1766 }
1767 ConcurrentMarkSweepThread::set_CMS_flag(
1768 ConcurrentMarkSweepThread::CMS_vm_has_token);
1769 }
1770 }
1771 // The CMS_token is already held. Get back the other locks.
1772 assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1773 "VM thread should have CMS token");
1774 getFreelistLocks();
1775 bitMapLock()->lock_without_safepoint_check();
1776 if (TraceCMSState) {
1777 gclog_or_tty->print_cr("CMS foreground collector has asked for control "
1778 INTPTR_FORMAT " with first state %d", Thread::current(), first_state);
1779 gclog_or_tty->print_cr(" gets control with state %d", _collectorState);
1780 }
1782 // Check if we need to do a compaction, or if not, whether
1783 // we need to start the mark-sweep from scratch.
1784 bool should_compact = false;
1785 bool should_start_over = false;
1786 decide_foreground_collection_type(clear_all_soft_refs,
1787 &should_compact, &should_start_over);
1789 NOT_PRODUCT(
1790 if (RotateCMSCollectionTypes) {
1791 if (_cmsGen->debug_collection_type() ==
1792 ConcurrentMarkSweepGeneration::MSC_foreground_collection_type) {
1793 should_compact = true;
1794 } else if (_cmsGen->debug_collection_type() ==
1795 ConcurrentMarkSweepGeneration::MS_foreground_collection_type) {
1796 should_compact = false;
1797 }
1798 }
1799 )
1801 if (PrintGCDetails && first_state > Idling) {
1802 GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause();
1803 if (GCCause::is_user_requested_gc(cause) ||
1804 GCCause::is_serviceability_requested_gc(cause)) {
1805 gclog_or_tty->print(" (concurrent mode interrupted)");
1806 } else {
1807 gclog_or_tty->print(" (concurrent mode failure)");
1808 }
1809 }
1811 if (should_compact) {
1812 // If the collection is being acquired from the background
1813 // collector, there may be references on the discovered
1814 // references lists that have NULL referents (being those
1815 // that were concurrently cleared by a mutator) or
1816 // that are no longer active (having been enqueued concurrently
1817 // by the mutator).
1818 // Scrub the list of those references because Mark-Sweep-Compact
1819 // code assumes referents are not NULL and that all discovered
1820 // Reference objects are active.
1821 ref_processor()->clean_up_discovered_references();
1823 do_compaction_work(clear_all_soft_refs);
1825 // Has the GC time limit been exceeded?
1826 DefNewGeneration* young_gen = _young_gen->as_DefNewGeneration();
1827 size_t max_eden_size = young_gen->max_capacity() -
1828 young_gen->to()->capacity() -
1829 young_gen->from()->capacity();
1830 GenCollectedHeap* gch = GenCollectedHeap::heap();
1831 GCCause::Cause gc_cause = gch->gc_cause();
1832 size_policy()->check_gc_overhead_limit(_young_gen->used(),
1833 young_gen->eden()->used(),
1834 _cmsGen->max_capacity(),
1835 max_eden_size,
1836 full,
1837 gc_cause,
1838 gch->collector_policy());
1839 } else {
1840 do_mark_sweep_work(clear_all_soft_refs, first_state,
1841 should_start_over);
1842 }
1843 // Reset the expansion cause, now that we just completed
1844 // a collection cycle.
1845 clear_expansion_cause();
1846 _foregroundGCIsActive = false;
1847 return;
1848 }
1850 // Resize the perm generation and the tenured generation
1851 // after obtaining the free list locks for the
1852 // two generations.
1853 void CMSCollector::compute_new_size() {
1854 assert_locked_or_safepoint(Heap_lock);
1855 FreelistLocker z(this);
1856 _permGen->compute_new_size();
1857 _cmsGen->compute_new_size();
1858 }
1860 // A work method used by the foreground collector to determine
1861 // what type of collection (compacting or not, continuing or fresh)
1862 // it should do.
1863 // NOTE: the intent is to make UseCMSCompactAtFullCollection
1864 // and CMSCompactWhenClearAllSoftRefs the default in the future
1865 // and do away with the flags after a suitable period.
1866 void CMSCollector::decide_foreground_collection_type(
1867 bool clear_all_soft_refs, bool* should_compact,
1868 bool* should_start_over) {
1869   // Normally, we'll compact only if the UseCMSCompactAtFullCollection
1870   // flag is set, and either a System.gc() has been requested, or the
1871   // number of full gc's since the last concurrent cycle has reached
1872   // the threshold set by CMSFullGCsBeforeCompaction, or an
1873   // incremental collection is expected to fail.
1874 GenCollectedHeap* gch = GenCollectedHeap::heap();
1875 assert(gch->collector_policy()->is_two_generation_policy(),
1876 "You may want to check the correctness of the following");
1877 // Inform cms gen if this was due to partial collection failing.
1878 // The CMS gen may use this fact to determine its expansion policy.
1879 if (gch->incremental_collection_will_fail()) {
1880 assert(!_cmsGen->incremental_collection_failed(),
1881 "Should have been noticed, reacted to and cleared");
1882 _cmsGen->set_incremental_collection_failed();
1883 }
1884 *should_compact =
1885 UseCMSCompactAtFullCollection &&
1886 ((_full_gcs_since_conc_gc >= CMSFullGCsBeforeCompaction) ||
1887 GCCause::is_user_requested_gc(gch->gc_cause()) ||
1888 gch->incremental_collection_will_fail());
1889 *should_start_over = false;
1890 if (clear_all_soft_refs && !*should_compact) {
1891 // We are about to do a last ditch collection attempt
1892 // so it would normally make sense to do a compaction
1893 // to reclaim as much space as possible.
1894 if (CMSCompactWhenClearAllSoftRefs) {
1895 // Default: The rationale is that in this case either
1896 // we are past the final marking phase, in which case
1897 // we'd have to start over, or so little has been done
1898 // that there's little point in saving that work. Compaction
1899 // appears to be the sensible choice in either case.
1900 *should_compact = true;
1901 } else {
1902 // We have been asked to clear all soft refs, but not to
1903 // compact. Make sure that we aren't past the final checkpoint
1904 // phase, for that is where we process soft refs. If we are already
1905 // past that phase, we'll need to redo the refs discovery phase and
1906 // if necessary clear soft refs that weren't previously
1907 // cleared. We do so by remembering the phase in which
1908 // we came in, and if we are past the refs processing
1909 // phase, we'll choose to just redo the mark-sweep
1910 // collection from scratch.
1911 if (_collectorState > FinalMarking) {
1912 // We are past the refs processing phase;
1913 // start over and do a fresh synchronous CMS cycle
1914 _collectorState = Resetting; // skip to reset to start new cycle
1915 reset(false /* == !asynch */);
1916 *should_start_over = true;
1917 } // else we can continue a possibly ongoing current cycle
1918 }
1919 }
1920 }
1922 // A work method used by the foreground collector to do
1923 // a mark-sweep-compact.
1924 void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
1925 GenCollectedHeap* gch = GenCollectedHeap::heap();
1926 TraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, gclog_or_tty);
1927 if (PrintGC && Verbose && !(GCCause::is_user_requested_gc(gch->gc_cause()))) {
1928 gclog_or_tty->print_cr("Compact ConcurrentMarkSweepGeneration after %d "
1929 "collections passed to foreground collector", _full_gcs_since_conc_gc);
1930 }
1932 // Sample collection interval time and reset for collection pause.
1933 if (UseAdaptiveSizePolicy) {
1934 size_policy()->msc_collection_begin();
1935 }
1937 // Temporarily widen the span of the weak reference processing to
1938 // the entire heap.
1939 MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
1940 ReferenceProcessorSpanMutator x(ref_processor(), new_span);
1942   // Temporarily clear the "is_alive_non_header" field of the
1943 // reference processor.
1944 ReferenceProcessorIsAliveMutator y(ref_processor(), NULL);
1946 // Temporarily make reference _processing_ single threaded (non-MT).
1947 ReferenceProcessorMTProcMutator z(ref_processor(), false);
1949 // Temporarily make refs discovery atomic
1950 ReferenceProcessorAtomicMutator w(ref_processor(), true);
1952 ref_processor()->set_enqueuing_is_done(false);
1953 ref_processor()->enable_discovery();
1954 ref_processor()->setup_policy(clear_all_soft_refs);
1955   // If an asynchronous collection finishes, the _modUnionTable is
1956   // all clear. If we are taking over the collection from an
1957   // asynchronous collector, clear the _modUnionTable.
1958 assert(_collectorState != Idling || _modUnionTable.isAllClear(),
1959 "_modUnionTable should be clear if the baton was not passed");
1960 _modUnionTable.clear_all();
1962 // We must adjust the allocation statistics being maintained
1963 // in the free list space. We do so by reading and clearing
1964 // the sweep timer and updating the block flux rate estimates below.
1965 assert(!_intra_sweep_timer.is_active(), "_intra_sweep_timer should be inactive");
1966 if (_inter_sweep_timer.is_active()) {
1967 _inter_sweep_timer.stop();
1968 // Note that we do not use this sample to update the _inter_sweep_estimate.
1969 _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
1970 _inter_sweep_estimate.padded_average(),
1971 _intra_sweep_estimate.padded_average());
1972 }
1974 GenMarkSweep::invoke_at_safepoint(_cmsGen->level(),
1975 ref_processor(), clear_all_soft_refs);
1976 #ifdef ASSERT
1977 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
1978 size_t free_size = cms_space->free();
1979 assert(free_size ==
1980 pointer_delta(cms_space->end(), cms_space->compaction_top())
1981 * HeapWordSize,
1982 "All the free space should be compacted into one chunk at top");
1983 assert(cms_space->dictionary()->totalChunkSize(
1984 debug_only(cms_space->freelistLock())) == 0 ||
1985 cms_space->totalSizeInIndexedFreeLists() == 0,
1986 "All the free space should be in a single chunk");
1987 size_t num = cms_space->totalCount();
1988 assert((free_size == 0 && num == 0) ||
1989 (free_size > 0 && (num == 1 || num == 2)),
1990 "There should be at most 2 free chunks after compaction");
1991 #endif // ASSERT
1992 _collectorState = Resetting;
1993 assert(_restart_addr == NULL,
1994 "Should have been NULL'd before baton was passed");
1995 reset(false /* == !asynch */);
1996 _cmsGen->reset_after_compaction();
1997 _concurrent_cycles_since_last_unload = 0;
1999 if (verifying() && !should_unload_classes()) {
2000 perm_gen_verify_bit_map()->clear_all();
2001 }
2003 // Clear any data recorded in the PLAB chunk arrays.
2004 if (_survivor_plab_array != NULL) {
2005 reset_survivor_plab_arrays();
2006 }
2008 // Adjust the per-size allocation stats for the next epoch.
2009 _cmsGen->cmsSpace()->endSweepFLCensus(sweep_count() /* fake */);
2010 // Restart the "inter sweep timer" for the next epoch.
2011 _inter_sweep_timer.reset();
2012 _inter_sweep_timer.start();
2014 // Sample collection pause time and reset for collection interval.
2015 if (UseAdaptiveSizePolicy) {
2016 size_policy()->msc_collection_end(gch->gc_cause());
2017 }
2019 // For a mark-sweep-compact, compute_new_size() will be called
2020 // in the heap's do_collection() method.
2021 }
2023 // A work method used by the foreground collector to do
2024 // a mark-sweep, after taking over from a possibly ongoing
2025 // concurrent mark-sweep collection.
2026 void CMSCollector::do_mark_sweep_work(bool clear_all_soft_refs,
2027 CollectorState first_state, bool should_start_over) {
2028 if (PrintGC && Verbose) {
2029 gclog_or_tty->print_cr("Pass concurrent collection to foreground "
2030 "collector with count %d",
2031 _full_gcs_since_conc_gc);
2032 }
2033 switch (_collectorState) {
2034 case Idling:
2035 if (first_state == Idling || should_start_over) {
2036       // The background GC was not active, or should be
2037       // restarted from scratch; start the cycle.
2038 _collectorState = InitialMarking;
2039 }
2040 // If first_state was not Idling, then a background GC
2041 // was in progress and has now finished. No need to do it
2042 // again. Leave the state as Idling.
2043 break;
2044 case Precleaning:
2045 // In the foreground case don't do the precleaning since
2046 // it is not done concurrently and there is extra work
2047 // required.
2048 _collectorState = FinalMarking;
2049 }
2050 if (PrintGCDetails &&
2051 (_collectorState > Idling ||
2052 !GCCause::is_user_requested_gc(GenCollectedHeap::heap()->gc_cause()))) {
2053 gclog_or_tty->print(" (concurrent mode failure)");
2054 }
2055 collect_in_foreground(clear_all_soft_refs);
2057 // For a mark-sweep, compute_new_size() will be called
2058 // in the heap's do_collection() method.
2059 }
2062 void CMSCollector::getFreelistLocks() const {
2063 // Get locks for all free lists in all generations that this
2064 // collector is responsible for
2065 _cmsGen->freelistLock()->lock_without_safepoint_check();
2066 _permGen->freelistLock()->lock_without_safepoint_check();
2067 }
2069 void CMSCollector::releaseFreelistLocks() const {
2070 // Release locks for all free lists in all generations that this
2071 // collector is responsible for
2072 _cmsGen->freelistLock()->unlock();
2073 _permGen->freelistLock()->unlock();
2074 }
2076 bool CMSCollector::haveFreelistLocks() const {
2077 // Check locks for all free lists in all generations that this
2078 // collector is responsible for
2079 assert_lock_strong(_cmsGen->freelistLock());
2080 assert_lock_strong(_permGen->freelistLock());
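  // The asserts above compile away in product builds, so this method is
  // meaningful (and callable) only in debug builds: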
2081 PRODUCT_ONLY(ShouldNotReachHere());
2082 return true;
2083 }
2085 // A utility class that is used by the CMS collector to
2086 // temporarily "release" the foreground collector from its
2087 // usual obligation to wait for the background collector to
2088 // complete an ongoing phase before proceeding.
2089 class ReleaseForegroundGC: public StackObj {
2090 private:
2091 CMSCollector* _c;
2092 public:
2093 ReleaseForegroundGC(CMSCollector* c) : _c(c) {
2094 assert(_c->_foregroundGCShouldWait, "Else should not need to call");
2095 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2096 // allow a potentially blocked foreground collector to proceed
2097 _c->_foregroundGCShouldWait = false;
2098 if (_c->_foregroundGCIsActive) {
2099 CGC_lock->notify();
2100 }
2101 assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2102 "Possible deadlock");
2103 }
2105 ~ReleaseForegroundGC() {
2106 assert(!_c->_foregroundGCShouldWait, "Usage protocol violation?");
2107 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2108 _c->_foregroundGCShouldWait = true;
2109 }
2110 };
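// Typical use (see the stop-world phases in collect_in_background()
// below): the background collector brackets a VM operation with a
// stack-allocated ReleaseForegroundGC, e.g.
//   {
//     ReleaseForegroundGC x(this);
//     VMThread::execute(&some_cms_op);   // hypothetical operation name
//   }
// so that a foreground collector blocked on _foregroundGCShouldWait can
// run while the operation is pending; waiting is reinstated when the
// scope exits.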
2112 // There are separate collect_in_background and collect_in_foreground
2113 // methods because of the different locking requirements of the
2114 // background collector and the foreground collector. An attempt was
2115 // originally made to share one "collect" method between the two,
2116 // but the amount of if-then-else logic required made it cleaner to
2117 // have separate methods.
2118 void CMSCollector::collect_in_background(bool clear_all_soft_refs) {
2119 assert(Thread::current()->is_ConcurrentGC_thread(),
2120 "A CMS asynchronous collection is only allowed on a CMS thread.");
2122 GenCollectedHeap* gch = GenCollectedHeap::heap();
2123 {
2124 bool safepoint_check = Mutex::_no_safepoint_check_flag;
2125 MutexLockerEx hl(Heap_lock, safepoint_check);
2126 FreelistLocker fll(this);
2127 MutexLockerEx x(CGC_lock, safepoint_check);
2128 if (_foregroundGCIsActive || !UseAsyncConcMarkSweepGC) {
2129 // The foreground collector is active or we're
2130 // not using asynchronous collections. Skip this
2131 // background collection.
2132 assert(!_foregroundGCShouldWait, "Should be clear");
2133 return;
2134 } else {
2135 assert(_collectorState == Idling, "Should be idling before start.");
2136 _collectorState = InitialMarking;
2137 // Reset the expansion cause, now that we are about to begin
2138 // a new cycle.
2139 clear_expansion_cause();
2140 }
2141 // Decide if we want to enable class unloading as part of the
2142 // ensuing concurrent GC cycle.
2143 update_should_unload_classes();
2144 _full_gc_requested = false; // acks all outstanding full gc requests
2145 // Signal that we are about to start a collection
2146 gch->increment_total_full_collections(); // ... starting a collection cycle
2147 _collection_count_start = gch->total_full_collections();
2148 }
2150 // Used for PrintGC
2151 size_t prev_used;
2152 if (PrintGC && Verbose) {
2153 prev_used = _cmsGen->used(); // XXXPERM
2154 }
2156   // The change of the collection state is normally done at this level;
2157   // the exceptions are phases that are executed while the world is
2158   // stopped, for which the state change is likewise made while the
2159   // world is stopped. For baton-passing purposes this allows the
2160   // background collector to finish the phase and change state atomically.
2161   // The foreground collector cannot wait on a phase that is done
2162   // while the world is stopped, because the foreground collector already
2163   // has the world stopped and would deadlock.
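  // (Concretely: the stop-world phases are executed as VM operations by
  // the VM thread -- the very thread running a foreground collection --
  // so if the foreground collector waited for such a phase to complete,
  // the phase could never be scheduled.)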
2164 while (_collectorState != Idling) {
2165 if (TraceCMSState) {
2166 gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
2167 Thread::current(), _collectorState);
2168 }
2169 // The foreground collector
2170 // holds the Heap_lock throughout its collection.
2171 // holds the CMS token (but not the lock)
2172 // except while it is waiting for the background collector to yield.
2173 //
2174 // The foreground collector should be blocked (not for long)
2175 // if the background collector is about to start a phase
2176 // executed with world stopped. If the background
2177 // collector has already started such a phase, the
2178 // foreground collector is blocked waiting for the
2179 // Heap_lock. The stop-world phases (InitialMarking and FinalMarking)
2180 // are executed in the VM thread.
2181 //
2182 // The locking order is
2183 // PendingListLock (PLL) -- if applicable (FinalMarking)
2184 // Heap_lock (both this & PLL locked in VM_CMS_Operation::prologue())
2185 // CMS token (claimed in
2186 // stop_world_and_do() -->
2187 // safepoint_synchronize() -->
2188 // CMSThread::synchronize())
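  // A thread that needs more than one of these must therefore claim
  // them in the order listed (e.g. the Heap_lock before the CMS token);
  // claiming them in the opposite order risks deadlock.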
2190 {
2191 // Check if the FG collector wants us to yield.
2192 CMSTokenSync x(true); // is cms thread
2193 if (waitForForegroundGC()) {
2194 // We yielded to a foreground GC, nothing more to be
2195 // done this round.
2196 assert(_foregroundGCShouldWait == false, "We set it to false in "
2197 "waitForForegroundGC()");
2198 if (TraceCMSState) {
2199 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2200 " exiting collection CMS state %d",
2201 Thread::current(), _collectorState);
2202 }
2203 return;
2204 } else {
2205 // The background collector can run but check to see if the
2206 // foreground collector has done a collection while the
2207 // background collector was waiting to get the CGC_lock
2208 // above. If yes, break so that _foregroundGCShouldWait
2209 // is cleared before returning.
2210 if (_collectorState == Idling) {
2211 break;
2212 }
2213 }
2214 }
2216 assert(_foregroundGCShouldWait, "Foreground collector, if active, "
2217 "should be waiting");
2219 switch (_collectorState) {
2220 case InitialMarking:
2221 {
2222 ReleaseForegroundGC x(this);
2223 stats().record_cms_begin();
2225 VM_CMS_Initial_Mark initial_mark_op(this);
2226 VMThread::execute(&initial_mark_op);
2227 }
2228 // The collector state may be any legal state at this point
2229 // since the background collector may have yielded to the
2230 // foreground collector.
2231 break;
2232 case Marking:
2233 // initial marking in checkpointRootsInitialWork has been completed
2234 if (markFromRoots(true)) { // we were successful
2235 assert(_collectorState == Precleaning, "Collector state should "
2236 "have changed");
2237 } else {
2238 assert(_foregroundGCIsActive, "Internal state inconsistency");
2239 }
2240 break;
2241 case Precleaning:
2242 if (UseAdaptiveSizePolicy) {
2243 size_policy()->concurrent_precleaning_begin();
2244 }
2245 // marking from roots in markFromRoots has been completed
2246 preclean();
2247 if (UseAdaptiveSizePolicy) {
2248 size_policy()->concurrent_precleaning_end();
2249 }
2250 assert(_collectorState == AbortablePreclean ||
2251 _collectorState == FinalMarking,
2252 "Collector state should have changed");
2253 break;
2254 case AbortablePreclean:
2255 if (UseAdaptiveSizePolicy) {
2256 size_policy()->concurrent_phases_resume();
2257 }
2258 abortable_preclean();
2259 if (UseAdaptiveSizePolicy) {
2260 size_policy()->concurrent_precleaning_end();
2261 }
2262 assert(_collectorState == FinalMarking, "Collector state should "
2263 "have changed");
2264 break;
2265 case FinalMarking:
2266 {
2267 ReleaseForegroundGC x(this);
2269 VM_CMS_Final_Remark final_remark_op(this);
2270 VMThread::execute(&final_remark_op);
2271 }
2272 assert(_foregroundGCShouldWait, "block post-condition");
2273 break;
2274 case Sweeping:
2275 if (UseAdaptiveSizePolicy) {
2276 size_policy()->concurrent_sweeping_begin();
2277 }
2278 // final marking in checkpointRootsFinal has been completed
2279 sweep(true);
2280 assert(_collectorState == Resizing, "Collector state change "
2281 "to Resizing must be done under the free_list_lock");
2282 _full_gcs_since_conc_gc = 0;
2284 // Stop the timers for adaptive size policy for the concurrent phases
2285 if (UseAdaptiveSizePolicy) {
2286 size_policy()->concurrent_sweeping_end();
2287 size_policy()->concurrent_phases_end(gch->gc_cause(),
2288 gch->prev_gen(_cmsGen)->capacity(),
2289 _cmsGen->free());
2290 }
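        // Note: intentional fall-through into the Resizing case below;
        // sweep() leaves _collectorState == Resizing.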
2292 case Resizing: {
2293 // Sweeping has been completed...
2294 // At this point the background collection has completed.
2295 // Don't move the call to compute_new_size() down
2296 // into code that might be executed if the background
2297 // collection was preempted.
2298 {
2299 ReleaseForegroundGC x(this); // unblock FG collection
2300 MutexLockerEx y(Heap_lock, Mutex::_no_safepoint_check_flag);
2301 CMSTokenSync z(true); // not strictly needed.
2302 if (_collectorState == Resizing) {
2303 compute_new_size();
2304 _collectorState = Resetting;
2305 } else {
2306 assert(_collectorState == Idling, "The state should only change"
2307 " because the foreground collector has finished the collection");
2308 }
2309 }
2310 break;
2311 }
2312 case Resetting:
2313 // CMS heap resizing has been completed
2314 reset(true);
2315 assert(_collectorState == Idling, "Collector state should "
2316 "have changed");
2317 stats().record_cms_end();
2318 // Don't move the concurrent_phases_end() and compute_new_size()
2319 // calls to here because a preempted background collection
2320       // has its state set to "Resetting".
2321 break;
2322 case Idling:
2323 default:
2324 ShouldNotReachHere();
2325 break;
2326 }
2327 if (TraceCMSState) {
2328 gclog_or_tty->print_cr(" Thread " INTPTR_FORMAT " done - next CMS state %d",
2329 Thread::current(), _collectorState);
2330 }
2331 assert(_foregroundGCShouldWait, "block post-condition");
2332 }
2334 // Should this be in gc_epilogue?
2335 collector_policy()->counters()->update_counters();
2337 {
2338 // Clear _foregroundGCShouldWait and, in the event that the
2339 // foreground collector is waiting, notify it, before
2340 // returning.
2341 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2342 _foregroundGCShouldWait = false;
2343 if (_foregroundGCIsActive) {
2344 CGC_lock->notify();
2345 }
2346 assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2347 "Possible deadlock");
2348 }
2349 if (TraceCMSState) {
2350 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2351 " exiting collection CMS state %d",
2352 Thread::current(), _collectorState);
2353 }
2354 if (PrintGC && Verbose) {
2355 _cmsGen->print_heap_change(prev_used);
2356 }
2357 }
2359 void CMSCollector::collect_in_foreground(bool clear_all_soft_refs) {
2360 assert(_foregroundGCIsActive && !_foregroundGCShouldWait,
2361 "Foreground collector should be waiting, not executing");
2362 assert(Thread::current()->is_VM_thread(), "A foreground collection"
2363 "may only be done by the VM Thread with the world stopped");
2364 assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
2365 "VM thread should have CMS token");
2367 NOT_PRODUCT(TraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose,
2368 true, gclog_or_tty);)
2369 if (UseAdaptiveSizePolicy) {
2370 size_policy()->ms_collection_begin();
2371 }
2372 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
2374 HandleMark hm; // Discard invalid handles created during verification
2376 if (VerifyBeforeGC &&
2377 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2378 Universe::verify(true);
2379 }
2381 // Snapshot the soft reference policy to be used in this collection cycle.
2382 ref_processor()->setup_policy(clear_all_soft_refs);
2384 bool init_mark_was_synchronous = false; // until proven otherwise
2385 while (_collectorState != Idling) {
2386 if (TraceCMSState) {
2387 gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
2388 Thread::current(), _collectorState);
2389 }
2390 switch (_collectorState) {
2391 case InitialMarking:
2392 init_mark_was_synchronous = true; // fact to be exploited in re-mark
2393 checkpointRootsInitial(false);
2394 assert(_collectorState == Marking, "Collector state should have changed"
2395 " within checkpointRootsInitial()");
2396 break;
2397 case Marking:
2398 // initial marking in checkpointRootsInitialWork has been completed
2399 if (VerifyDuringGC &&
2400 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2401 gclog_or_tty->print("Verify before initial mark: ");
2402 Universe::verify(true);
2403 }
2404 {
2405 bool res = markFromRoots(false);
2406 assert(res && _collectorState == FinalMarking, "Collector state should "
2407 "have changed");
2408 break;
2409 }
2410 case FinalMarking:
2411 if (VerifyDuringGC &&
2412 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2413 gclog_or_tty->print("Verify before re-mark: ");
2414 Universe::verify(true);
2415 }
2416 checkpointRootsFinal(false, clear_all_soft_refs,
2417 init_mark_was_synchronous);
2418 assert(_collectorState == Sweeping, "Collector state should not "
2419 "have changed within checkpointRootsFinal()");
2420 break;
2421 case Sweeping:
2422 // final marking in checkpointRootsFinal has been completed
2423 if (VerifyDuringGC &&
2424 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2425 gclog_or_tty->print("Verify before sweep: ");
2426 Universe::verify(true);
2427 }
2428 sweep(false);
2429 assert(_collectorState == Resizing, "Incorrect state");
2430 break;
2431 case Resizing: {
2432 // Sweeping has been completed; the actual resize in this case
2433 // is done separately; nothing to be done in this state.
2434 _collectorState = Resetting;
2435 break;
2436 }
2437 case Resetting:
2438 // The heap has been resized.
2439 if (VerifyDuringGC &&
2440 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2441 gclog_or_tty->print("Verify before reset: ");
2442 Universe::verify(true);
2443 }
2444 reset(false);
2445 assert(_collectorState == Idling, "Collector state should "
2446 "have changed");
2447 break;
2448 case Precleaning:
2449 case AbortablePreclean:
2450 // Elide the preclean phase
2451 _collectorState = FinalMarking;
2452 break;
2453 default:
2454 ShouldNotReachHere();
2455 }
2456 if (TraceCMSState) {
2457 gclog_or_tty->print_cr(" Thread " INTPTR_FORMAT " done - next CMS state %d",
2458 Thread::current(), _collectorState);
2459 }
2460 }
2462 if (UseAdaptiveSizePolicy) {
2463 GenCollectedHeap* gch = GenCollectedHeap::heap();
2464 size_policy()->ms_collection_end(gch->gc_cause());
2465 }
2467 if (VerifyAfterGC &&
2468 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2469 Universe::verify(true);
2470 }
2471 if (TraceCMSState) {
2472 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2473 " exiting collection CMS state %d",
2474 Thread::current(), _collectorState);
2475 }
2476 }
2478 bool CMSCollector::waitForForegroundGC() {
2479 bool res = false;
2480 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2481 "CMS thread should have CMS token");
2482 // Block the foreground collector until the
2483   // background collector decides whether to
2484 // yield.
2485 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2486 _foregroundGCShouldWait = true;
2487 if (_foregroundGCIsActive) {
2488 // The background collector yields to the
2489 // foreground collector and returns a value
2490 // indicating that it has yielded. The foreground
2491 // collector can proceed.
2492 res = true;
2493 _foregroundGCShouldWait = false;
2494 ConcurrentMarkSweepThread::clear_CMS_flag(
2495 ConcurrentMarkSweepThread::CMS_cms_has_token);
2496 ConcurrentMarkSweepThread::set_CMS_flag(
2497 ConcurrentMarkSweepThread::CMS_cms_wants_token);
2498 // Get a possibly blocked foreground thread going
2499 CGC_lock->notify();
2500 if (TraceCMSState) {
2501 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " waiting at CMS state %d",
2502 Thread::current(), _collectorState);
2503 }
2504 while (_foregroundGCIsActive) {
2505 CGC_lock->wait(Mutex::_no_safepoint_check_flag);
2506 }
2507 ConcurrentMarkSweepThread::set_CMS_flag(
2508 ConcurrentMarkSweepThread::CMS_cms_has_token);
2509 ConcurrentMarkSweepThread::clear_CMS_flag(
2510 ConcurrentMarkSweepThread::CMS_cms_wants_token);
2511 }
2512 if (TraceCMSState) {
2513 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " continuing at CMS state %d",
2514 Thread::current(), _collectorState);
2515 }
2516 return res;
2517 }
2519 // Because of the need to lock the free lists and other structures in
2520 // the collector, common to all the generations that the collector is
2521 // collecting, we need the gc_prologues of individual CMS generations
2522 // to delegate to their collector. It may have been simpler had the
2523 // current infrastructure allowed one to call a prologue on a
2524 // collector. In the absence of that we have the generation's
2525 // prologue delegate to the collector, which delegates back
2526 // some "local" work to a worker method in the individual generations
2527 // that it's responsible for collecting, while itself doing any
2528 // work common to all generations it's responsible for. A similar
2529 // comment applies to the gc_epilogue()'s.
2530 // The role of the variable _between_prologue_and_epilogue is to
2531 // enforce the invocation protocol.
2532 void CMSCollector::gc_prologue(bool full) {
2533 // Call gc_prologue_work() for each CMSGen and PermGen that
2534 // we are responsible for.
2536 // The following locking discipline assumes that we are only called
2537 // when the world is stopped.
2538 assert(SafepointSynchronize::is_at_safepoint(), "world is stopped assumption");
2540 // The CMSCollector prologue must call the gc_prologues for the
2541 // "generations" (including PermGen if any) that it's responsible
2542 // for.
2544 assert( Thread::current()->is_VM_thread()
2545 || ( CMSScavengeBeforeRemark
2546 && Thread::current()->is_ConcurrentGC_thread()),
2547 "Incorrect thread type for prologue execution");
2549 if (_between_prologue_and_epilogue) {
2550 // We have already been invoked; this is a gc_prologue delegation
2551 // from yet another CMS generation that we are responsible for, just
2552 // ignore it since all relevant work has already been done.
2553 return;
2554 }
2556 // set a bit saying prologue has been called; cleared in epilogue
2557 _between_prologue_and_epilogue = true;
2558 // Claim locks for common data structures, then call gc_prologue_work()
2559 // for each CMSGen and PermGen that we are responsible for.
2561 getFreelistLocks(); // gets free list locks on constituent spaces
2562 bitMapLock()->lock_without_safepoint_check();
2564 // Should call gc_prologue_work() for all cms gens we are responsible for
2565 bool registerClosure = _collectorState >= Marking
2566 && _collectorState < Sweeping;
2567 ModUnionClosure* muc = ParallelGCThreads > 0 ? &_modUnionClosurePar
2568 : &_modUnionClosure;
2569 _cmsGen->gc_prologue_work(full, registerClosure, muc);
2570 _permGen->gc_prologue_work(full, registerClosure, muc);
2572 if (!full) {
2573 stats().record_gc0_begin();
2574 }
2575 }
2577 void ConcurrentMarkSweepGeneration::gc_prologue(bool full) {
2578 // Delegate to CMScollector which knows how to coordinate between
2579 // this and any other CMS generations that it is responsible for
2580 // collecting.
2581 collector()->gc_prologue(full);
2582 }
2584 // This is a "private" interface for use by this generation's CMSCollector.
2585 // Not to be called directly by any other entity (for instance,
2586 // GenCollectedHeap, which calls the "public" gc_prologue method above).
2587 void ConcurrentMarkSweepGeneration::gc_prologue_work(bool full,
2588 bool registerClosure, ModUnionClosure* modUnionClosure) {
2589 assert(!incremental_collection_failed(), "Shouldn't be set yet");
2590 assert(cmsSpace()->preconsumptionDirtyCardClosure() == NULL,
2591 "Should be NULL");
2592 if (registerClosure) {
2593 cmsSpace()->setPreconsumptionDirtyCardClosure(modUnionClosure);
2594 }
2595 cmsSpace()->gc_prologue();
2596 // Clear stat counters
2597 NOT_PRODUCT(
2598 assert(_numObjectsPromoted == 0, "check");
2599 assert(_numWordsPromoted == 0, "check");
2600 if (Verbose && PrintGC) {
2601 gclog_or_tty->print("Allocated "SIZE_FORMAT" objects, "
2602 SIZE_FORMAT" bytes concurrently",
2603 _numObjectsAllocated, _numWordsAllocated*sizeof(HeapWord));
2604 }
2605 _numObjectsAllocated = 0;
2606 _numWordsAllocated = 0;
2607 )
2608 }
2610 void CMSCollector::gc_epilogue(bool full) {
2611 // The following locking discipline assumes that we are only called
2612 // when the world is stopped.
2613 assert(SafepointSynchronize::is_at_safepoint(),
2614 "world is stopped assumption");
2616 // Currently the CMS epilogue (see CompactibleFreeListSpace) merely checks
2617   // if linear allocation blocks need to be appropriately marked to allow
2618   // the blocks to be parsable. We also check here whether we need to nudge the
2619 // CMS collector thread to start a new cycle (if it's not already active).
2620 assert( Thread::current()->is_VM_thread()
2621 || ( CMSScavengeBeforeRemark
2622 && Thread::current()->is_ConcurrentGC_thread()),
2623 "Incorrect thread type for epilogue execution");
2625 if (!_between_prologue_and_epilogue) {
2626 // We have already been invoked; this is a gc_epilogue delegation
2627 // from yet another CMS generation that we are responsible for, just
2628 // ignore it since all relevant work has already been done.
2629 return;
2630 }
2631 assert(haveFreelistLocks(), "must have freelist locks");
2632 assert_lock_strong(bitMapLock());
2634 _cmsGen->gc_epilogue_work(full);
2635 _permGen->gc_epilogue_work(full);
2637 if (_collectorState == AbortablePreclean || _collectorState == Precleaning) {
2638 // in case sampling was not already enabled, enable it
2639 _start_sampling = true;
2640 }
2641 // reset _eden_chunk_array so sampling starts afresh
2642 _eden_chunk_index = 0;
2644 size_t cms_used = _cmsGen->cmsSpace()->used();
2645 size_t perm_used = _permGen->cmsSpace()->used();
2647 // update performance counters - this uses a special version of
2648 // update_counters() that allows the utilization to be passed as a
2649 // parameter, avoiding multiple calls to used().
2650 //
2651 _cmsGen->update_counters(cms_used);
2652 _permGen->update_counters(perm_used);
2654 if (CMSIncrementalMode) {
2655 icms_update_allocation_limits();
2656 }
2658 bitMapLock()->unlock();
2659 releaseFreelistLocks();
2661 _between_prologue_and_epilogue = false; // ready for next cycle
2662 }
2664 void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) {
2665 collector()->gc_epilogue(full);
2667 // Also reset promotion tracking in par gc thread states.
2668 if (ParallelGCThreads > 0) {
2669 for (uint i = 0; i < ParallelGCThreads; i++) {
2670 _par_gc_thread_states[i]->promo.stopTrackingPromotions(i);
2671 }
2672 }
2673 }
2675 void ConcurrentMarkSweepGeneration::gc_epilogue_work(bool full) {
2676 assert(!incremental_collection_failed(), "Should have been cleared");
2677 cmsSpace()->setPreconsumptionDirtyCardClosure(NULL);
2678 cmsSpace()->gc_epilogue();
2679 // Print stat counters
2680 NOT_PRODUCT(
2681 assert(_numObjectsAllocated == 0, "check");
2682 assert(_numWordsAllocated == 0, "check");
2683 if (Verbose && PrintGC) {
2684 gclog_or_tty->print("Promoted "SIZE_FORMAT" objects, "
2685 SIZE_FORMAT" bytes",
2686 _numObjectsPromoted, _numWordsPromoted*sizeof(HeapWord));
2687 }
2688 _numObjectsPromoted = 0;
2689 _numWordsPromoted = 0;
2690 )
2692 if (PrintGC && Verbose) {
2693     // The call down the chain in contiguous_available() needs the
2694     // freelistLock, so print this out before releasing the freelistLock.
2695 gclog_or_tty->print(" Contiguous available "SIZE_FORMAT" bytes ",
2696 contiguous_available());
2697 }
2698 }
2700 #ifndef PRODUCT
2701 bool CMSCollector::have_cms_token() {
2702 Thread* thr = Thread::current();
2703 if (thr->is_VM_thread()) {
2704 return ConcurrentMarkSweepThread::vm_thread_has_cms_token();
2705 } else if (thr->is_ConcurrentGC_thread()) {
2706 return ConcurrentMarkSweepThread::cms_thread_has_cms_token();
2707 } else if (thr->is_GC_task_thread()) {
2708 return ConcurrentMarkSweepThread::vm_thread_has_cms_token() &&
2709 ParGCRareEvent_lock->owned_by_self();
2710 }
2711 return false;
2712 }
2713 #endif
2715 // Check reachability of the given heap address in CMS generation,
2716 // treating all other generations as roots.
2717 bool CMSCollector::is_cms_reachable(HeapWord* addr) {
2718 // We could "guarantee" below, rather than assert, but i'll
2719 // leave these as "asserts" so that an adventurous debugger
2720 // could try this in the product build provided some subset of
2721 // the conditions were met, provided they were intersted in the
2722 // results and knew that the computation below wouldn't interfere
2723 // with other concurrent computations mutating the structures
2724 // being read or written.
2725 assert(SafepointSynchronize::is_at_safepoint(),
2726 "Else mutations in object graph will make answer suspect");
2727 assert(have_cms_token(), "Should hold cms token");
2728 assert(haveFreelistLocks(), "must hold free list locks");
2729 assert_lock_strong(bitMapLock());
2731 // Clear the marking bit map array before starting, but, just
2732 // for kicks, first report if the given address is already marked
2733 gclog_or_tty->print_cr("Start: Address 0x%x is%s marked", addr,
2734 _markBitMap.isMarked(addr) ? "" : " not");
2736 if (verify_after_remark()) {
2737 MutexLockerEx x(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2738 bool result = verification_mark_bm()->isMarked(addr);
2739 gclog_or_tty->print_cr("TransitiveMark: Address 0x%x %s marked", addr,
2740 result ? "IS" : "is NOT");
2741 return result;
2742 } else {
2743 gclog_or_tty->print_cr("Could not compute result");
2744 return false;
2745 }
2746 }
2748 ////////////////////////////////////////////////////////
2749 // CMS Verification Support
2750 ////////////////////////////////////////////////////////
2751 // Following the remark phase, the following invariant
2752 // should hold -- each object marked in the verification_mark_bm()
2753 // (i.e., found live by a fresh marking) should also be marked in markBitMap().
2755 class VerifyMarkedClosure: public BitMapClosure {
2756 CMSBitMap* _marks;
2757 bool _failed;
2759 public:
2760 VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {}
2762 bool do_bit(size_t offset) {
2763 HeapWord* addr = _marks->offsetToHeapWord(offset);
2764 if (!_marks->isMarked(addr)) {
2765 oop(addr)->print_on(gclog_or_tty);
2766 gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr);
2767 _failed = true;
2768 }
2769 return true;
2770 }
2772 bool failed() { return _failed; }
2773 };
2775 bool CMSCollector::verify_after_remark() {
2776 gclog_or_tty->print(" [Verifying CMS Marking... ");
2777 MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2778 static bool init = false;
2780 assert(SafepointSynchronize::is_at_safepoint(),
2781 "Else mutations in object graph will make answer suspect");
2782 assert(have_cms_token(),
2783 "Else there may be mutual interference in use of "
2784 " verification data structures");
2785 assert(_collectorState > Marking && _collectorState <= Sweeping,
2786 "Else marking info checked here may be obsolete");
2787 assert(haveFreelistLocks(), "must hold free list locks");
2788 assert_lock_strong(bitMapLock());
2791 // Allocate marking bit map if not already allocated
2792 if (!init) { // first time
2793 if (!verification_mark_bm()->allocate(_span)) {
2794 return false;
2795 }
2796 init = true;
2797 }
2799 assert(verification_mark_stack()->isEmpty(), "Should be empty");
2801 // Turn off refs discovery -- so we will be tracing through refs.
2802 // This is as intended, because by this time
2803 // GC must already have cleared any refs that need to be cleared,
2804 // and traced those that need to be marked; moreover,
2805 // the marking done here is not going to interfere in any
2806 // way with the marking information used by GC.
2807 NoRefDiscovery no_discovery(ref_processor());
2809 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
2811 // Clear any marks from a previous round
2812 verification_mark_bm()->clear_all();
2813 assert(verification_mark_stack()->isEmpty(), "markStack should be empty");
2814 verify_work_stacks_empty();
2816 GenCollectedHeap* gch = GenCollectedHeap::heap();
2817 gch->ensure_parsability(false); // fill TLABs, but no need to retire them
2818 // Update the saved marks which may affect the root scans.
2819 gch->save_marks();
2821 if (CMSRemarkVerifyVariant == 1) {
2822 // In this first variant of verification, we complete
2823     // all marking, then check if the new marks-vector is
2824 // a subset of the CMS marks-vector.
2825 verify_after_remark_work_1();
2826 } else if (CMSRemarkVerifyVariant == 2) {
2827 // In this second variant of verification, we flag an error
2828 // (i.e. an object reachable in the new marks-vector not reachable
2829 // in the CMS marks-vector) immediately, also indicating the
2830     // identity of an object (A) that references the unmarked object (B) --
2831 // presumably, a mutation to A failed to be picked up by preclean/remark?
2832 verify_after_remark_work_2();
2833 } else {
2834 warning("Unrecognized value %d for CMSRemarkVerifyVariant",
2835 CMSRemarkVerifyVariant);
2836 }
2837 gclog_or_tty->print(" done] ");
2838 return true;
2839 }
2841 void CMSCollector::verify_after_remark_work_1() {
2842 ResourceMark rm;
2843 HandleMark hm;
2844 GenCollectedHeap* gch = GenCollectedHeap::heap();
2846 // Mark from roots one level into CMS
2847 MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
2848 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2850 gch->gen_process_strong_roots(_cmsGen->level(),
2851 true, // younger gens are roots
2852 true, // activate StrongRootsScope
2853 true, // collecting perm gen
2854 SharedHeap::ScanningOption(roots_scanning_options()),
2855 ¬Older,
2856 true, // walk code active on stacks
2857 NULL);
2859 // Now mark from the roots
2860 assert(_revisitStack.isEmpty(), "Should be empty");
2861 MarkFromRootsClosure markFromRootsClosure(this, _span,
2862 verification_mark_bm(), verification_mark_stack(), &_revisitStack,
2863 false /* don't yield */, true /* verifying */);
2864 assert(_restart_addr == NULL, "Expected pre-condition");
2865 verification_mark_bm()->iterate(&markFromRootsClosure);
2866 while (_restart_addr != NULL) {
2867 // Deal with stack overflow: by restarting at the indicated
2868 // address.
2869 HeapWord* ra = _restart_addr;
2870 markFromRootsClosure.reset(ra);
2871 _restart_addr = NULL;
2872 verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2873 }
2874 assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2875 verify_work_stacks_empty();
2876 // Should reset the revisit stack above, since no class tree
2877 // surgery is forthcoming.
2878 _revisitStack.reset(); // throwing away all contents
2880 // Marking completed -- now verify that each bit marked in
2881 // verification_mark_bm() is also marked in markBitMap(); flag all
2882 // errors by printing corresponding objects.
2883 VerifyMarkedClosure vcl(markBitMap());
2884 verification_mark_bm()->iterate(&vcl);
2885 if (vcl.failed()) {
2886 gclog_or_tty->print("Verification failed");
2887 Universe::heap()->print_on(gclog_or_tty);
2888 fatal("CMS: failed marking verification after remark");
2889 }
2890 }
2892 void CMSCollector::verify_after_remark_work_2() {
2893 ResourceMark rm;
2894 HandleMark hm;
2895 GenCollectedHeap* gch = GenCollectedHeap::heap();
2897 // Mark from roots one level into CMS
2898 MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
2899 markBitMap());
2900 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2901 gch->gen_process_strong_roots(_cmsGen->level(),
2902 true, // younger gens are roots
2903 true, // activate StrongRootsScope
2904 true, // collecting perm gen
2905 SharedHeap::ScanningOption(roots_scanning_options()),
2906 &notOlder,
2907 true, // walk code active on stacks
2908 NULL);
2910 // Now mark from the roots
2911 assert(_revisitStack.isEmpty(), "Should be empty");
2912 MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
2913 verification_mark_bm(), markBitMap(), verification_mark_stack());
2914 assert(_restart_addr == NULL, "Expected pre-condition");
2915 verification_mark_bm()->iterate(&markFromRootsClosure);
2916 while (_restart_addr != NULL) {
2917 // Deal with stack overflow: by restarting at the indicated
2918 // address.
2919 HeapWord* ra = _restart_addr;
2920 markFromRootsClosure.reset(ra);
2921 _restart_addr = NULL;
2922 verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2923 }
2924 assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2925 verify_work_stacks_empty();
2926 // Should reset the revisit stack above, since no class tree
2927 // surgery is forthcoming.
2928 _revisitStack.reset(); // throwing away all contents
2930 // Marking completed -- now verify that each bit marked in
2931 // verification_mark_bm() is also marked in markBitMap(); flag all
2932 // errors by printing corresponding objects.
2933 VerifyMarkedClosure vcl(markBitMap());
2934 verification_mark_bm()->iterate(&vcl);
2935 assert(!vcl.failed(), "Else verification above should not have succeeded");
2936 }
2938 void ConcurrentMarkSweepGeneration::save_marks() {
2939 // delegate to CMS space
2940 cmsSpace()->save_marks();
2941 for (uint i = 0; i < ParallelGCThreads; i++) {
2942 _par_gc_thread_states[i]->promo.startTrackingPromotions();
2943 }
2944 }
2946 bool ConcurrentMarkSweepGeneration::no_allocs_since_save_marks() {
2947 return cmsSpace()->no_allocs_since_save_marks();
2948 }
2950 #define CMS_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
2951 \
2952 void ConcurrentMarkSweepGeneration:: \
2953 oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
2954 cl->set_generation(this); \
2955 cmsSpace()->oop_since_save_marks_iterate##nv_suffix(cl); \
2956 cl->reset_generation(); \
2957 save_marks(); \
2958 }
2960 ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DEFN)
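// For illustration only (not part of the build): for a representative
// (closure, suffix) pair supplied by ALL_SINCE_SAVE_MARKS_CLOSURES --
// here assumed, for the example's sake, to include (OopsInGenClosure, _v)
// -- the macro above expands to:
#if 0
void ConcurrentMarkSweepGeneration::
oop_since_save_marks_iterate_v(OopsInGenClosure* cl) {
  cl->set_generation(this);
  cmsSpace()->oop_since_save_marks_iterate_v(cl);
  cl->reset_generation();
  save_marks();
}
#endif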
2962 void
2963 ConcurrentMarkSweepGeneration::object_iterate_since_last_GC(ObjectClosure* blk)
2964 {
2965 // Not currently implemented; need to do the following. -- ysr.
2966 // dld -- I think that is used for some sort of allocation profiler. So it
2967 // really means the objects allocated by the mutator since the last
2968 // GC. We could potentially implement this cheaply by recording only
2969 // the direct allocations in a side data structure.
2970 //
2971 // I think we probably ought not to be required to support these
2972 // iterations at any arbitrary point; I think there ought to be some
2973 // call to enable/disable allocation profiling in a generation/space,
2974 // and the iterator ought to return the objects allocated in the
2975 // gen/space since the enable call, or the last iterator call (which
2976 // will probably be at a GC.) That way, for gens like CM&S that would
2977 // require some extra data structure to support this, we only pay the
2978 // cost when it's in use...
2979 cmsSpace()->object_iterate_since_last_GC(blk);
2980 }
2982 void
2983 ConcurrentMarkSweepGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
2984 cl->set_generation(this);
2985 younger_refs_in_space_iterate(_cmsSpace, cl);
2986 cl->reset_generation();
2987 }
2989 void
2990 ConcurrentMarkSweepGeneration::oop_iterate(MemRegion mr, OopClosure* cl) {
2991 if (freelistLock()->owned_by_self()) {
2992 Generation::oop_iterate(mr, cl);
2993 } else {
2994 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2995 Generation::oop_iterate(mr, cl);
2996 }
2997 }
2999 void
3000 ConcurrentMarkSweepGeneration::oop_iterate(OopClosure* cl) {
3001 if (freelistLock()->owned_by_self()) {
3002 Generation::oop_iterate(cl);
3003 } else {
3004 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3005 Generation::oop_iterate(cl);
3006 }
3007 }
3009 void
3010 ConcurrentMarkSweepGeneration::object_iterate(ObjectClosure* cl) {
3011 if (freelistLock()->owned_by_self()) {
3012 Generation::object_iterate(cl);
3013 } else {
3014 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3015 Generation::object_iterate(cl);
3016 }
3017 }
3019 void
3020 ConcurrentMarkSweepGeneration::safe_object_iterate(ObjectClosure* cl) {
3021 if (freelistLock()->owned_by_self()) {
3022 Generation::safe_object_iterate(cl);
3023 } else {
3024 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3025 Generation::safe_object_iterate(cl);
3026 }
3027 }
3029 void
3030 ConcurrentMarkSweepGeneration::pre_adjust_pointers() {
3031 }
3033 void
3034 ConcurrentMarkSweepGeneration::post_compact() {
3035 }
3037 void
3038 ConcurrentMarkSweepGeneration::prepare_for_verify() {
3039 // Fix the linear allocation blocks to look like free blocks.
3041 // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
3042 // are not called when the heap is verified during universe initialization and
3043 // at vm shutdown.
3044 if (freelistLock()->owned_by_self()) {
3045 cmsSpace()->prepare_for_verify();
3046 } else {
3047 MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
3048 cmsSpace()->prepare_for_verify();
3049 }
3050 }
3052 void
3053 ConcurrentMarkSweepGeneration::verify(bool allow_dirty /* ignored */) {
3054 // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
3055 // are not called when the heap is verified during universe initialization and
3056 // at vm shutdown.
3057 if (freelistLock()->owned_by_self()) {
3058 cmsSpace()->verify(false /* ignored */);
3059 } else {
3060 MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
3061 cmsSpace()->verify(false /* ignored */);
3062 }
3063 }
3065 void CMSCollector::verify(bool allow_dirty /* ignored */) {
3066 _cmsGen->verify(allow_dirty);
3067 _permGen->verify(allow_dirty);
3068 }
3070 #ifndef PRODUCT
3071 bool CMSCollector::overflow_list_is_empty() const {
3072 assert(_num_par_pushes >= 0, "Inconsistency");
3073 if (_overflow_list == NULL) {
3074 assert(_num_par_pushes == 0, "Inconsistency");
3075 }
3076 return _overflow_list == NULL;
3077 }
3079 // The methods verify_work_stacks_empty() and verify_overflow_empty()
3080 // merely consolidate assertion checks that appear to occur together frequently.
3081 void CMSCollector::verify_work_stacks_empty() const {
3082 assert(_markStack.isEmpty(), "Marking stack should be empty");
3083 assert(overflow_list_is_empty(), "Overflow list should be empty");
3084 }
3086 void CMSCollector::verify_overflow_empty() const {
3087 assert(overflow_list_is_empty(), "Overflow list should be empty");
3088 assert(no_preserved_marks(), "No preserved marks");
3089 }
3090 #endif // PRODUCT
3092 // Decide if we want to enable class unloading as part of the
3093 // ensuing concurrent GC cycle. We will collect the perm gen and
3094 // unload classes if it's the case that:
3095 // (1) an explicit gc request has been made and the flag
3096 // ExplicitGCInvokesConcurrentAndUnloadsClasses is set, OR
3097 // (2) (a) class unloading is enabled at the command line, and
3098 // (b) (i) perm gen threshold has been crossed, or
3099 // (ii) old gen is getting really full, or
3100 // (iii) the previous N CMS collections did not collect the
3101 // perm gen
3102 // NOTE: Provided there is no change in the state of the heap between
3103 // calls to this method, it should have idempotent results. Moreover,
3104 // its results should be monotonically increasing (i.e. going from 0 to 1,
3105 // but not 1 to 0) between successive calls between which the heap was
3106 // not collected. For the implementation below, it must thus rely on
3107 // the property that concurrent_cycles_since_last_unload()
3108 // will not decrease unless a collection cycle happened and that
3109 // _permGen->should_concurrent_collect() and _cmsGen->is_too_full() are
3110 // themselves also monotonic in that sense. See check_monotonicity()
3111 // below.
3112 bool CMSCollector::update_should_unload_classes() {
3113 _should_unload_classes = false;
3114 // Condition 1 above
3115 if (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) {
3116 _should_unload_classes = true;
3117 } else if (CMSClassUnloadingEnabled) { // Condition 2.a above
3118 // Disjuncts 2.b.(i,ii,iii) above
3119 _should_unload_classes = (concurrent_cycles_since_last_unload() >=
3120 CMSClassUnloadingMaxInterval)
3121 || _permGen->should_concurrent_collect()
3122 || _cmsGen->is_too_full();
3123 }
3124 return _should_unload_classes;
3125 }
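// For example (illustrative): with CMSClassUnloadingEnabled set, a perm
// gen that reports should_concurrent_collect() is by itself sufficient
// for this method to select class unloading for the cycle, even if
// fewer than CMSClassUnloadingMaxInterval cycles have elapsed since the
// last unload and the old gen is not yet too full.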
3127 bool ConcurrentMarkSweepGeneration::is_too_full() const {
3128 bool res = should_concurrent_collect();
3129 res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0);
3130 return res;
3131 }
3133 void CMSCollector::setup_cms_unloading_and_verification_state() {
3134 const bool should_verify = VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
3135 || VerifyBeforeExit;
3136 const int rso = SharedHeap::SO_Symbols | SharedHeap::SO_Strings
3137 | SharedHeap::SO_CodeCache;
3139 if (should_unload_classes()) { // Should unload classes this cycle
3140 remove_root_scanning_option(rso); // Shrink the root set appropriately
3141 set_verifying(should_verify); // Set verification state for this cycle
3142 return; // Nothing else needs to be done at this time
3143 }
3145 // Not unloading classes this cycle
3146 assert(!should_unload_classes(), "Inconsistency!");
3147 if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
3148 // We were not verifying, or we _were_ unloading classes in the last cycle,
3149 // AND some verification options are enabled this cycle; in this case,
3150 // we must make sure that the deadness map is allocated if not already so,
3151 // and cleared (if already allocated previously --
3152 // CMSBitMap::sizeInBits() is used to determine if it's allocated).
3153 if (perm_gen_verify_bit_map()->sizeInBits() == 0) {
3154 if (!perm_gen_verify_bit_map()->allocate(_permGen->reserved())) {
3155 warning("Failed to allocate permanent generation verification CMS Bit Map;\n"
3156 "permanent generation verification disabled");
3157 return; // Note that we leave verification disabled, so we'll retry this
3158 // allocation next cycle. We _could_ remember this failure
3159 // and skip further attempts and permanently disable verification
3160 // attempts if that is considered more desirable.
3161 }
3162 assert(perm_gen_verify_bit_map()->covers(_permGen->reserved()),
3163 "_perm_gen_ver_bit_map inconsistency?");
3164 } else {
3165 perm_gen_verify_bit_map()->clear_all();
3166 }
3167 // Include symbols, strings and code cache elements to prevent their resurrection.
3168 add_root_scanning_option(rso);
3169 set_verifying(true);
3170 } else if (verifying() && !should_verify) {
3171 // We were verifying, but some verification flags got disabled.
3172 set_verifying(false);
3173 // Exclude symbols, strings and code cache elements from root scanning to
3174 // reduce IM and RM pauses.
3175 remove_root_scanning_option(rso);
3176 }
3177 }
3180 #ifndef PRODUCT
3181 HeapWord* CMSCollector::block_start(const void* p) const {
3182 const HeapWord* addr = (HeapWord*)p;
3183 if (_span.contains(p)) {
3184 if (_cmsGen->cmsSpace()->is_in_reserved(addr)) {
3185 return _cmsGen->cmsSpace()->block_start(p);
3186 } else {
3187 assert(_permGen->cmsSpace()->is_in_reserved(addr),
3188 "Inconsistent _span?");
3189 return _permGen->cmsSpace()->block_start(p);
3190 }
3191 }
3192 return NULL;
3193 }
3194 #endif
3196 HeapWord*
3197 ConcurrentMarkSweepGeneration::expand_and_allocate(size_t word_size,
3198 bool tlab,
3199 bool parallel) {
3200 assert(!tlab, "Can't deal with TLAB allocation");
3201 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3202 expand(word_size*HeapWordSize, MinHeapDeltaBytes,
3203 CMSExpansionCause::_satisfy_allocation);
3204 if (GCExpandToAllocateDelayMillis > 0) {
3205 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3206 }
3207 return have_lock_and_allocate(word_size, tlab);
3208 }
3210 // YSR: All of this generation expansion/shrinking stuff is an exact copy of
3211 // OneContigSpaceCardGeneration, which makes me wonder if we should move this
3212 // to CardGeneration and share it...
3213 bool ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes) {
3214 return CardGeneration::expand(bytes, expand_bytes);
3215 }
3217 void ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes,
3218 CMSExpansionCause::Cause cause)
3219 {
3221 bool success = expand(bytes, expand_bytes);
3223 // remember why we expanded; this information is used
3224 // by shouldConcurrentCollect() when making decisions on whether to start
3225 // a new CMS cycle.
3226 if (success) {
3227 set_expansion_cause(cause);
3228 if (PrintGCDetails && Verbose) {
3229 gclog_or_tty->print_cr("Expanded CMS gen for %s",
3230 CMSExpansionCause::to_string(cause));
3231 }
3232 }
3233 }
3235 HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) {
3236 HeapWord* res = NULL;
3237 MutexLocker x(ParGCRareEvent_lock);
3238 while (true) {
3239 // Expansion by some other thread might make alloc OK now:
3240 res = ps->lab.alloc(word_sz);
3241 if (res != NULL) return res;
3242 // If there's not enough expansion space available, give up.
3243 if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) {
3244 return NULL;
3245 }
3246 // Otherwise, we try expansion.
3247 expand(word_sz*HeapWordSize, MinHeapDeltaBytes,
3248 CMSExpansionCause::_allocate_par_lab);
3249 // Now go around the loop and try alloc again;
3250 // A competing par_promote might beat us to the expansion space,
3251 // so we may go around the loop again if promotion fails again.
3252 if (GCExpandToAllocateDelayMillis > 0) {
3253 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3254 }
3255 }
3256 }
3259 bool ConcurrentMarkSweepGeneration::expand_and_ensure_spooling_space(
3260 PromotionInfo* promo) {
3261 MutexLocker x(ParGCRareEvent_lock);
3262 size_t refill_size_bytes = promo->refillSize() * HeapWordSize;
3263 while (true) {
3264 // Expansion by some other thread might make alloc OK now:
3265 if (promo->ensure_spooling_space()) {
3266 assert(promo->has_spooling_space(),
3267 "Post-condition of successful ensure_spooling_space()");
3268 return true;
3269 }
3270 // If there's not enough expansion space available, give up.
3271 if (_virtual_space.uncommitted_size() < refill_size_bytes) {
3272 return false;
3273 }
3274 // Otherwise, we try expansion.
3275 expand(refill_size_bytes, MinHeapDeltaBytes,
3276 CMSExpansionCause::_allocate_par_spooling_space);
3277 // Now go around the loop and try alloc again;
3278 // A competing allocation might beat us to the expansion space,
3279 // so we may go around the loop again if allocation fails again.
3280 if (GCExpandToAllocateDelayMillis > 0) {
3281 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3282 }
3283 }
3284 }
3288 void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
3289 assert_locked_or_safepoint(Heap_lock);
3290 size_t size = ReservedSpace::page_align_size_down(bytes);
3291 if (size > 0) {
3292 shrink_by(size);
3293 }
3294 }
3296 bool ConcurrentMarkSweepGeneration::grow_by(size_t bytes) {
3297 assert_locked_or_safepoint(Heap_lock);
3298 bool result = _virtual_space.expand_by(bytes);
3299 if (result) {
3300 HeapWord* old_end = _cmsSpace->end();
3301 size_t new_word_size =
3302 heap_word_size(_virtual_space.committed_size());
3303 MemRegion mr(_cmsSpace->bottom(), new_word_size);
3304 _bts->resize(new_word_size); // resize the block offset shared array
3305 Universe::heap()->barrier_set()->resize_covered_region(mr);
3306 // Hmmmm... why doesn't CFLS::set_end verify locking?
3307 // This is quite ugly; FIX ME XXX
3308 _cmsSpace->assert_locked(freelistLock());
3309 _cmsSpace->set_end((HeapWord*)_virtual_space.high());
3311 // update the space and generation capacity counters
3312 if (UsePerfData) {
3313 _space_counters->update_capacity();
3314 _gen_counters->update_all();
3315 }
3317 if (Verbose && PrintGC) {
3318 size_t new_mem_size = _virtual_space.committed_size();
3319 size_t old_mem_size = new_mem_size - bytes;
3320 gclog_or_tty->print_cr("Expanding %s from %ldK by %ldK to %ldK",
3321 name(), old_mem_size/K, bytes/K, new_mem_size/K);
3322 }
3323 }
3324 return result;
3325 }
3327 bool ConcurrentMarkSweepGeneration::grow_to_reserved() {
3328 assert_locked_or_safepoint(Heap_lock);
3329 bool success = true;
3330 const size_t remaining_bytes = _virtual_space.uncommitted_size();
3331 if (remaining_bytes > 0) {
3332 success = grow_by(remaining_bytes);
3333 DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
3334 }
3335 return success;
3336 }
3338 void ConcurrentMarkSweepGeneration::shrink_by(size_t bytes) {
3339 assert_locked_or_safepoint(Heap_lock);
3340 assert_lock_strong(freelistLock());
3341 // XXX Fix when compaction is implemented.
3342 warning("Shrinking of CMS not yet implemented");
3343 return;
3344 }
3347 // Simple ctor/dtor wrapper for accounting & timer chores around concurrent
3348 // phases.
3349 class CMSPhaseAccounting: public StackObj {
3350 public:
3351 CMSPhaseAccounting(CMSCollector *collector,
3352 const char *phase,
3353 bool print_cr = true);
3354 ~CMSPhaseAccounting();
3356 private:
3357 CMSCollector *_collector;
3358 const char *_phase;
3359 elapsedTimer _wallclock;
3360 bool _print_cr;
3362 public:
3363 // Not MT-safe; so do not pass around these StackObj's
3364 // where they may be accessed by other threads.
3365 jlong wallclock_millis() {
3366 assert(_wallclock.is_active(), "Wall clock should not stop");
3367 _wallclock.stop(); // to record time
3368 jlong ret = _wallclock.milliseconds();
3369 _wallclock.start(); // restart
3370 return ret;
3371 }
3372 };
3374 CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
3375 const char *phase,
3376 bool print_cr) :
3377 _collector(collector), _phase(phase), _print_cr(print_cr) {
3379 if (PrintCMSStatistics != 0) {
3380 _collector->resetYields();
3381 }
3382 if (PrintGCDetails && PrintGCTimeStamps) {
3383 gclog_or_tty->date_stamp(PrintGCDateStamps);
3384 gclog_or_tty->stamp();
3385 gclog_or_tty->print_cr(": [%s-concurrent-%s-start]",
3386 _collector->cmsGen()->short_name(), _phase);
3387 }
3388 _collector->resetTimer();
3389 _wallclock.start();
3390 _collector->startTimer();
3391 }
3393 CMSPhaseAccounting::~CMSPhaseAccounting() {
3394 assert(_wallclock.is_active(), "Wall clock should not have stopped");
3395 _collector->stopTimer();
3396 _wallclock.stop();
3397 if (PrintGCDetails) {
3398 gclog_or_tty->date_stamp(PrintGCDateStamps);
3399 if (PrintGCTimeStamps) {
3400 gclog_or_tty->stamp();
3401 gclog_or_tty->print(": ");
3402 }
3403 gclog_or_tty->print("[%s-concurrent-%s: %3.3f/%3.3f secs]",
3404 _collector->cmsGen()->short_name(),
3405 _phase, _collector->timerValue(), _wallclock.seconds());
3406 if (_print_cr) {
3407 gclog_or_tty->print_cr("");
3408 }
3409 if (PrintCMSStatistics != 0) {
3410 gclog_or_tty->print_cr(" (CMS-concurrent-%s yielded %d times)", _phase,
3411 _collector->yields());
3412 }
3413 }
3414 }
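// A typical use (mirroring the concurrent phases below): a stack-allocated
// instance brackets a phase, so that construction logs/timestamps the
// phase start and destruction logs the CPU and wall-clock times.
#if 0
  {
    CMSTokenSyncWithLocks ts(true, bitMapLock());
    CMSPhaseAccounting pa(this, "mark", !PrintGCDetails); // phase start
    res = markFromRootsWork(asynch);
  } // pa's destructor prints "[...-concurrent-mark: .../... secs]"
#endif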
3416 // CMS work
3418 // Checkpoint the roots into this generation from outside
3419 // this generation. [Note this initial checkpoint need only
3420 // be approximate -- we'll do a catch up phase subsequently.]
3421 void CMSCollector::checkpointRootsInitial(bool asynch) {
3422 assert(_collectorState == InitialMarking, "Wrong collector state");
3423 check_correct_thread_executing();
3424 ReferenceProcessor* rp = ref_processor();
3425 SpecializationStats::clear();
3426 assert(_restart_addr == NULL, "Control point invariant");
3427 if (asynch) {
3428 // acquire locks for subsequent manipulations
3429 MutexLockerEx x(bitMapLock(),
3430 Mutex::_no_safepoint_check_flag);
3431 checkpointRootsInitialWork(asynch);
3432 rp->verify_no_references_recorded();
3433 rp->enable_discovery(); // enable ("weak") refs discovery
3434 _collectorState = Marking;
3435 } else {
3436 // (Weak) Refs discovery: this is controlled from genCollectedHeap::do_collection
3437 // which recognizes if we are a CMS generation, and doesn't try to turn on
3438 // discovery; verify that they aren't meddling.
3439 assert(!rp->discovery_is_atomic(),
3440 "incorrect setting of discovery predicate");
3441 assert(!rp->discovery_enabled(), "genCollectedHeap shouldn't control "
3442 "ref discovery for this generation kind");
3443 // already have locks
3444 checkpointRootsInitialWork(asynch);
3445 rp->enable_discovery(); // now enable ("weak") refs discovery
3446 _collectorState = Marking;
3447 }
3448 SpecializationStats::print();
3449 }
3451 void CMSCollector::checkpointRootsInitialWork(bool asynch) {
3452 assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
3453 assert(_collectorState == InitialMarking, "just checking");
3455 // If there has not been a GC[n-1] since last GC[n] cycle completed,
3456 // precede our marking with a collection of all
3457 // younger generations to keep floating garbage to a minimum.
3458 // XXX: we won't do this for now -- it's an optimization to be done later.
3460 // already have locks
3461 assert_lock_strong(bitMapLock());
3462 assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");
3464 // Setup the verification and class unloading state for this
3465 // CMS collection cycle.
3466 setup_cms_unloading_and_verification_state();
3468 NOT_PRODUCT(TraceTime t("\ncheckpointRootsInitialWork",
3469 PrintGCDetails && Verbose, true, gclog_or_tty);)
3470 if (UseAdaptiveSizePolicy) {
3471 size_policy()->checkpoint_roots_initial_begin();
3472 }
3474 // Reset all the PLAB chunk arrays if necessary.
3475 if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
3476 reset_survivor_plab_arrays();
3477 }
3479 ResourceMark rm;
3480 HandleMark hm;
3482 FalseClosure falseClosure;
3483 // In the case of a synchronous collection, we will elide the
3484 // remark step, so it's important to catch all the nmethod oops
3485 // in this step.
3486 // The final 'true' flag to gen_process_strong_roots will ensure this.
3487 // If 'asynch' is true, we can relax the nmethod tracing.
3488 MarkRefsIntoClosure notOlder(_span, &_markBitMap);
3489 GenCollectedHeap* gch = GenCollectedHeap::heap();
3491 verify_work_stacks_empty();
3492 verify_overflow_empty();
3494 gch->ensure_parsability(false); // fill TLABs, but no need to retire them
3495 // Update the saved marks which may affect the root scans.
3496 gch->save_marks();
3498 // weak reference processing has not started yet.
3499 ref_processor()->set_enqueuing_is_done(false);
3501 {
3502 // This is not needed. DEBUG_ONLY(RememberKlassesChecker imx(true);)
3503 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
3504 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3505 gch->gen_process_strong_roots(_cmsGen->level(),
3506 true, // younger gens are roots
3507 true, // activate StrongRootsScope
3508 true, // collecting perm gen
3509 SharedHeap::ScanningOption(roots_scanning_options()),
3510 &notOlder,
3511 true, // walk all of code cache if (so & SO_CodeCache)
3512 NULL);
3513 }
3515 // Clear mod-union table; it will be dirtied in the prologue of
3516 // CMS generation per each younger generation collection.
3518 assert(_modUnionTable.isAllClear(),
3519 "Was cleared in most recent final checkpoint phase"
3520 " or no bits are set in the gc_prologue before the start of the next "
3521 "subsequent marking phase.");
3523 // Temporarily disabled, since pre/post-consumption closures don't
3524 // care about precleaned cards
3525 #if 0
3526 {
3527 MemRegion mr = MemRegion((HeapWord*)_virtual_space.low(),
3528 (HeapWord*)_virtual_space.high());
3529 _ct->ct_bs()->preclean_dirty_cards(mr);
3530 }
3531 #endif
3533 // Save the end of the used_region of the constituent generations
3534 // to be used to limit the extent of sweep in each generation.
3535 save_sweep_limits();
3536 if (UseAdaptiveSizePolicy) {
3537 size_policy()->checkpoint_roots_initial_end(gch->gc_cause());
3538 }
3539 verify_overflow_empty();
3540 }
3542 bool CMSCollector::markFromRoots(bool asynch) {
3543 // we might be tempted to assert that:
3544 // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
3545 // "inconsistent argument?");
3546 // However that wouldn't be right, because it's possible that
3547 // a safepoint is indeed in progress as a younger generation
3548 // stop-the-world GC happens even as we mark in this generation.
3549 assert(_collectorState == Marking, "inconsistent state?");
3550 check_correct_thread_executing();
3551 verify_overflow_empty();
3553 bool res;
3554 if (asynch) {
3556 // Start the timers for adaptive size policy for the concurrent phases
3557 // Do it here so that the foreground MS can use the concurrent
3558 // timer since a foreground MS might have the sweep done concurrently
3559 // or STW.
3560 if (UseAdaptiveSizePolicy) {
3561 size_policy()->concurrent_marking_begin();
3562 }
3564 // Weak ref discovery note: We may be discovering weak
3565 // refs in this generation concurrently (but interleaved) with
3566 // weak ref discovery by a younger generation collector.
3568 CMSTokenSyncWithLocks ts(true, bitMapLock());
3569 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
3570 CMSPhaseAccounting pa(this, "mark", !PrintGCDetails);
3571 res = markFromRootsWork(asynch);
3572 if (res) {
3573 _collectorState = Precleaning;
3574 } else { // We failed and a foreground collection wants to take over
3575 assert(_foregroundGCIsActive, "internal state inconsistency");
3576 assert(_restart_addr == NULL, "foreground will restart from scratch");
3577 if (PrintGCDetails) {
3578 gclog_or_tty->print_cr("bailing out to foreground collection");
3579 }
3580 }
3581 if (UseAdaptiveSizePolicy) {
3582 size_policy()->concurrent_marking_end();
3583 }
3584 } else {
3585 assert(SafepointSynchronize::is_at_safepoint(),
3586 "inconsistent with asynch == false");
3587 if (UseAdaptiveSizePolicy) {
3588 size_policy()->ms_collection_marking_begin();
3589 }
3590 // already have locks
3591 res = markFromRootsWork(asynch);
3592 _collectorState = FinalMarking;
3593 if (UseAdaptiveSizePolicy) {
3594 GenCollectedHeap* gch = GenCollectedHeap::heap();
3595 size_policy()->ms_collection_marking_end(gch->gc_cause());
3596 }
3597 }
3598 verify_overflow_empty();
3599 return res;
3600 }
3602 bool CMSCollector::markFromRootsWork(bool asynch) {
3603 // iterate over marked bits in bit map, doing a full scan and mark
3604 // from these roots using the following algorithm:
3605 // . if oop is to the right of the current scan pointer,
3606 // mark corresponding bit (we'll process it later)
3607 // . else (oop is to left of current scan pointer)
3608 // push oop on marking stack
3609 // . drain the marking stack
3611 // Note that when we do a marking step we need to hold the
3612 // bit map lock -- recall that direct allocation (by mutators)
3613 // and promotion (by younger generation collectors) is also
3614 // marking the bit map. [the so-called allocate live policy.]
3615 // Because the implementation of bit map marking is not
3616 // robust wrt simultaneous marking of bits in the same word,
3617 // we need to make sure that there is no such interference
3618 // between concurrent such updates.
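// To see the hazard (illustrative only, not compiled): with plain
// read-modify-write stores, two threads marking different bits of the
// same bitmap word can lose a mark:
#if 0
  intptr_t tmp_a = map[w];   // thread A reads the word
  intptr_t tmp_b = map[w];   // thread B reads the same (old) word
  map[w] = tmp_a | bit_a;    // A publishes its bit
  map[w] = tmp_b | bit_b;    // B's store silently erases bit_a
#endif
// Hence marking is done either under the bit map lock or with an
// atomic compare-and-swap (cf. par_mark()).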
3620 // already have locks
3621 assert_lock_strong(bitMapLock());
3623 // Clear the revisit stack, just in case there are any
3624 // obsolete contents from a short-circuited previous CMS cycle.
3625 _revisitStack.reset();
3626 verify_work_stacks_empty();
3627 verify_overflow_empty();
3628 assert(_revisitStack.isEmpty(), "tabula rasa");
3629 DEBUG_ONLY(RememberKlassesChecker cmx(should_unload_classes());)
3630 bool result = false;
3631 if (CMSConcurrentMTEnabled && ConcGCThreads > 0) {
3632 result = do_marking_mt(asynch);
3633 } else {
3634 result = do_marking_st(asynch);
3635 }
3636 return result;
3637 }
3639 // Forward decl
3640 class CMSConcMarkingTask;
3642 class CMSConcMarkingTerminator: public ParallelTaskTerminator {
3643 CMSCollector* _collector;
3644 CMSConcMarkingTask* _task;
3645 bool _yield;
3646 protected:
3647 virtual void yield();
3648 public:
3649 // "n_threads" is the number of threads to be terminated.
3650 // "queue_set" is a set of work queues of other threads.
3651 // "collector" is the CMS collector associated with this task terminator.
3652 // "yield" indicates whether we need the gang as a whole to yield.
3653 CMSConcMarkingTerminator(int n_threads, TaskQueueSetSuper* queue_set,
3654 CMSCollector* collector, bool yield) :
3655 ParallelTaskTerminator(n_threads, queue_set),
3656 _collector(collector),
3657 _yield(yield) { }
3659 void set_task(CMSConcMarkingTask* task) {
3660 _task = task;
3661 }
3662 };
3664 // MT Concurrent Marking Task
3665 class CMSConcMarkingTask: public YieldingFlexibleGangTask {
3666 CMSCollector* _collector;
3667 YieldingFlexibleWorkGang* _workers; // the whole gang
3668 int _n_workers; // requested/desired # workers
3669 bool _asynch;
3670 bool _result;
3671 CompactibleFreeListSpace* _cms_space;
3672 CompactibleFreeListSpace* _perm_space;
3673 HeapWord* _global_finger;
3674 HeapWord* _restart_addr;
3676 // Exposed here for yielding support
3677 Mutex* const _bit_map_lock;
3679 // The per thread work queues, available here for stealing
3680 OopTaskQueueSet* _task_queues;
3681 CMSConcMarkingTerminator _term;
3683 public:
3684 CMSConcMarkingTask(CMSCollector* collector,
3685 CompactibleFreeListSpace* cms_space,
3686 CompactibleFreeListSpace* perm_space,
3687 bool asynch, int n_workers,
3688 YieldingFlexibleWorkGang* workers,
3689 OopTaskQueueSet* task_queues):
3690 YieldingFlexibleGangTask("Concurrent marking done multi-threaded"),
3691 _collector(collector),
3692 _cms_space(cms_space),
3693 _perm_space(perm_space),
3694 _asynch(asynch), _n_workers(n_workers), _result(true),
3695 _workers(workers), _task_queues(task_queues),
3696 _term(n_workers, task_queues, _collector, asynch),
3697 _bit_map_lock(collector->bitMapLock())
3698 {
3699 assert(n_workers <= workers->total_workers(),
3700 "Else termination won't work correctly today"); // XXX FIX ME!
3701 _requested_size = n_workers;
3702 _term.set_task(this);
3703 assert(_cms_space->bottom() < _perm_space->bottom(),
3704 "Finger incorrectly initialized below");
3705 _restart_addr = _global_finger = _cms_space->bottom();
3706 }
3709 OopTaskQueueSet* task_queues() { return _task_queues; }
3711 OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
3713 HeapWord** global_finger_addr() { return &_global_finger; }
3715 CMSConcMarkingTerminator* terminator() { return &_term; }
3717 void work(int i);
3719 virtual void coordinator_yield(); // stuff done by coordinator
3720 bool result() { return _result; }
3722 void reset(HeapWord* ra) {
3723 assert(_global_finger >= _cms_space->end(), "Postcondition of ::work(i)");
3724 assert(_global_finger >= _perm_space->end(), "Postcondition of ::work(i)");
3725 assert(ra < _perm_space->end(), "ra too large");
3726 _restart_addr = _global_finger = ra;
3727 _term.reset_for_reuse();
3728 }
3730 static bool get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3731 OopTaskQueue* work_q);
3733 private:
3734 void do_scan_and_mark(int i, CompactibleFreeListSpace* sp);
3735 void do_work_steal(int i);
3736 void bump_global_finger(HeapWord* f);
3737 };
3739 void CMSConcMarkingTerminator::yield() {
3740 if (ConcurrentMarkSweepThread::should_yield() &&
3741 !_collector->foregroundGCIsActive() &&
3742 _yield) {
3743 _task->yield();
3744 } else {
3745 ParallelTaskTerminator::yield();
3746 }
3747 }
3749 ////////////////////////////////////////////////////////////////
3750 // Concurrent Marking Algorithm Sketch
3751 ////////////////////////////////////////////////////////////////
3752 // Until all tasks exhausted (both spaces):
3753 // -- claim next available chunk
3754 // -- bump global finger via CAS
3755 // -- find first object that starts in this chunk
3756 // and start scanning bitmap from that position
3757 // -- scan marked objects for oops
3758 // -- CAS-mark target, and if successful:
3759 // . if target oop is above global finger (volatile read)
3760 // nothing to do
3761 // . if target oop is in chunk and above local finger
3762 // then nothing to do
3763 // . else push on work-queue
3764 // -- Deal with possible overflow issues:
3765 // . local work-queue overflow causes stuff to be pushed on
3766 // global (common) overflow queue
3767 // . always first empty local work queue
3768 // . then get a batch of oops from global work queue if any
3769 // . then do work stealing
3770 // -- When all tasks claimed (both spaces)
3771 // and local work queue empty,
3772 // then in a loop do:
3773 // . check global overflow stack; steal a batch of oops and trace
3774 // . try to steal from other threads if GOS is empty
3775 // . if neither is available, offer termination
3776 // -- Terminate and return result
3777 //
3778 void CMSConcMarkingTask::work(int i) {
3779 elapsedTimer _timer;
3780 ResourceMark rm;
3781 HandleMark hm;
3783 DEBUG_ONLY(_collector->verify_overflow_empty();)
3785 // Before we begin work, our work queue should be empty
3786 assert(work_queue(i)->size() == 0, "Expected to be empty");
3787 // Scan the bitmap covering _cms_space, tracing through grey objects.
3788 _timer.start();
3789 do_scan_and_mark(i, _cms_space);
3790 _timer.stop();
3791 if (PrintCMSStatistics != 0) {
3792 gclog_or_tty->print_cr("Finished cms space scanning in %dth thread: %3.3f sec",
3793 i, _timer.seconds()); // XXX: need xxx/xxx type of notation, two timers
3794 }
3796 // ... do the same for the _perm_space
3797 _timer.reset();
3798 _timer.start();
3799 do_scan_and_mark(i, _perm_space);
3800 _timer.stop();
3801 if (PrintCMSStatistics != 0) {
3802 gclog_or_tty->print_cr("Finished perm space scanning in %dth thread: %3.3f sec",
3803 i, _timer.seconds()); // XXX: need xxx/xxx type of notation, two timers
3804 }
3806 // ... do work stealing
3807 _timer.reset();
3808 _timer.start();
3809 do_work_steal(i);
3810 _timer.stop();
3811 if (PrintCMSStatistics != 0) {
3812 gclog_or_tty->print_cr("Finished work stealing in %dth thread: %3.3f sec",
3813 i, _timer.seconds()); // XXX: need xxx/xxx type of notation, two timers
3814 }
3815 assert(_collector->_markStack.isEmpty(), "Should have been emptied");
3816 assert(work_queue(i)->size() == 0, "Should have been emptied");
3817 // Note that under the current task protocol, the
3818 // following assertion is true even of the spaces
3819 // expanded since the completion of the concurrent
3820 // marking. XXX This will likely change under a strict
3821 // ABORT semantics.
3822 assert(_global_finger > _cms_space->end() &&
3823 _global_finger >= _perm_space->end(),
3824 "All tasks have been completed");
3825 DEBUG_ONLY(_collector->verify_overflow_empty();)
3826 }
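// Advance the shared global finger to at least f. Multiple workers may
// attempt this concurrently; the CAS loop below only ever moves the
// finger forward, and a worker retries only while its f is still ahead
// of the currently published value.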
3828 void CMSConcMarkingTask::bump_global_finger(HeapWord* f) {
3829 HeapWord* read = _global_finger;
3830 HeapWord* cur = read;
3831 while (f > read) {
3832 cur = read;
3833 read = (HeapWord*) Atomic::cmpxchg_ptr(f, &_global_finger, cur);
3834 if (cur == read) {
3835 // our cas succeeded
3836 assert(_global_finger >= f, "protocol consistency");
3837 break;
3838 }
3839 }
3840 }
3842 // This is really inefficient, and should be redone by
3843 // using (not yet available) block-read and -write interfaces to the
3844 // stack and the work_queue. XXX FIX ME !!!
3845 bool CMSConcMarkingTask::get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3846 OopTaskQueue* work_q) {
3847 // Fast lock-free check
3848 if (ovflw_stk->length() == 0) {
3849 return false;
3850 }
3851 assert(work_q->size() == 0, "Shouldn't steal");
3852 MutexLockerEx ml(ovflw_stk->par_lock(),
3853 Mutex::_no_safepoint_check_flag);
3854 // Grab up to 1/4 the size of the work queue
3855 size_t num = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
3856 (size_t)ParGCDesiredObjsFromOverflowList);
3857 num = MIN2(num, ovflw_stk->length());
3858 for (int i = (int) num; i > 0; i--) {
3859 oop cur = ovflw_stk->pop();
3860 assert(cur != NULL, "Counted wrong?");
3861 work_q->push(cur);
3862 }
3863 return num > 0;
3864 }
3866 void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
3867 SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
3868 int n_tasks = pst->n_tasks();
3869 // We allow that there may be no tasks to do here because
3870 // we are restarting after a stack overflow.
3871 assert(pst->valid() || n_tasks == 0, "Uninitialized use?");
3872 int nth_task = 0;
3874 HeapWord* aligned_start = sp->bottom();
3875 if (sp->used_region().contains(_restart_addr)) {
3876 // Align down to a card boundary for the start of 0th task
3877 // for this space.
3878 aligned_start =
3879 (HeapWord*)align_size_down((uintptr_t)_restart_addr,
3880 CardTableModRefBS::card_size);
3881 }
3883 size_t chunk_size = sp->marking_task_size();
3884 while (!pst->is_task_claimed(/* reference */ nth_task)) {
3885 // Having claimed the nth task in this space,
3886 // compute the chunk that it corresponds to:
3887 MemRegion span = MemRegion(aligned_start + nth_task*chunk_size,
3888 aligned_start + (nth_task+1)*chunk_size);
3889 // Try and bump the global finger via a CAS;
3890 // note that we need to do the global finger bump
3891 // _before_ taking the intersection below, because
3892 // the task corresponding to that region will be
3893 // deemed done even if the used_region() expands
3894 // because of allocation -- as it almost certainly will
3895 // during start-up while the threads yield in the
3896 // closure below.
3897 HeapWord* finger = span.end();
3898 bump_global_finger(finger); // atomically
3899 // There are null tasks here corresponding to chunks
3900 // beyond the "top" address of the space.
3901 span = span.intersection(sp->used_region());
3902 if (!span.is_empty()) { // Non-null task
3903 HeapWord* prev_obj;
3904 assert(!span.contains(_restart_addr) || nth_task == 0,
3905 "Inconsistency");
3906 if (nth_task == 0) {
3907 // For the 0th task, we'll not need to compute a block_start.
3908 if (span.contains(_restart_addr)) {
3909 // In the case of a restart because of stack overflow,
3910 // we might additionally skip a chunk prefix.
3911 prev_obj = _restart_addr;
3912 } else {
3913 prev_obj = span.start();
3914 }
3915 } else {
3916 // We want to skip the first object because
3917 // the protocol is to scan any object in its entirety
3918 // that _starts_ in this span; a fortiori, any
3919 // object starting in an earlier span is scanned
3920 // as part of an earlier claimed task.
3921 // Below we use the "careful" version of block_start
3922 // so we do not try to navigate uninitialized objects.
3923 prev_obj = sp->block_start_careful(span.start());
3924 // Below we use a variant of block_size that uses the
3925 // Printezis bits to avoid waiting for allocated
3926 // objects to become initialized/parsable.
3927 while (prev_obj < span.start()) {
3928 size_t sz = sp->block_size_no_stall(prev_obj, _collector);
3929 if (sz > 0) {
3930 prev_obj += sz;
3931 } else {
3932 // In this case we may end up doing a bit of redundant
3933 // scanning, but that appears unavoidable, short of
3934 // locking the free list locks; see bug 6324141.
3935 break;
3936 }
3937 }
3938 }
3939 if (prev_obj < span.end()) {
3940 MemRegion my_span = MemRegion(prev_obj, span.end());
3941 // Do the marking work within a non-empty span --
3942 // the last argument to the constructor indicates whether the
3943 // iteration should be incremental with periodic yields.
3944 Par_MarkFromRootsClosure cl(this, _collector, my_span,
3945 &_collector->_markBitMap,
3946 work_queue(i),
3947 &_collector->_markStack,
3948 &_collector->_revisitStack,
3949 _asynch);
3950 _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end());
3951 } // else nothing to do for this task
3952 } // else nothing to do for this task
3953 }
3954 // We'd be tempted to assert here that since there are no
3955 // more tasks left to claim in this space, the global_finger
3956 // must exceed space->top() and a fortiori space->end(). However,
3957 // that would not quite be correct because the bumping of
3958 // global_finger occurs strictly after the claiming of a task,
3959 // so by the time we reach here the global finger may not yet
3960 // have been bumped up by the thread that claimed the last
3961 // task.
3962 pst->all_tasks_completed();
3963 }
3965 class Par_ConcMarkingClosure: public Par_KlassRememberingOopClosure {
3966 private:
3967 MemRegion _span;
3968 CMSBitMap* _bit_map;
3969 CMSMarkStack* _overflow_stack;
3970 OopTaskQueue* _work_queue;
3971 protected:
3972 DO_OOP_WORK_DEFN
3973 public:
3974 Par_ConcMarkingClosure(CMSCollector* collector, OopTaskQueue* work_queue,
3975 CMSBitMap* bit_map, CMSMarkStack* overflow_stack,
3976 CMSMarkStack* revisit_stack):
3977 Par_KlassRememberingOopClosure(collector, NULL, revisit_stack),
3978 _span(_collector->_span),
3979 _work_queue(work_queue),
3980 _bit_map(bit_map),
3981 _overflow_stack(overflow_stack)
3982 { }
3983 virtual void do_oop(oop* p);
3984 virtual void do_oop(narrowOop* p);
3985 void trim_queue(size_t max);
3986 void handle_stack_overflow(HeapWord* lost);
3987 };
3989 // Grey object scanning during work stealing phase --
3990 // the salient assumption here is that any references
3991 // that are in these stolen objects being scanned must
3992 // already have been initialized (else they would not have
3993 // been published), so we do not need to check for
3994 // uninitialized objects before pushing here.
3995 void Par_ConcMarkingClosure::do_oop(oop obj) {
3996 assert(obj->is_oop_or_null(true), "expected an oop or NULL");
3997 HeapWord* addr = (HeapWord*)obj;
3998 // Check if oop points into the CMS generation
3999 // and is not marked
4000 if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
4001 // a white object ...
4002 // If we manage to "claim" the object, by being the
4003 // first thread to mark it, then we push it on our
4004 // marking stack
4005 if (_bit_map->par_mark(addr)) { // ... now grey
4006 // push on work queue (grey set)
4007 bool simulate_overflow = false;
4008 NOT_PRODUCT(
4009 if (CMSMarkStackOverflowALot &&
4010 _collector->simulate_overflow()) {
4011 // simulate a stack overflow
4012 simulate_overflow = true;
4013 }
4014 )
4015 if (simulate_overflow ||
4016 !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
4017 // stack overflow
4018 if (PrintCMSStatistics != 0) {
4019 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
4020 SIZE_FORMAT, _overflow_stack->capacity());
4021 }
4022 // We cannot assert that the overflow stack is full because
4023 // it may have been emptied since.
4024 assert(simulate_overflow ||
4025 _work_queue->size() == _work_queue->max_elems(),
4026 "Else push should have succeeded");
4027 handle_stack_overflow(addr);
4028 }
4029 } // Else, some other thread got there first
4030 }
4031 }
4033 void Par_ConcMarkingClosure::do_oop(oop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
4034 void Par_ConcMarkingClosure::do_oop(narrowOop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
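// Drain the local work queue down to (at most) max entries, tracing each
// popped grey object through the do_oop() methods above.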
4036 void Par_ConcMarkingClosure::trim_queue(size_t max) {
4037 while (_work_queue->size() > max) {
4038 oop new_oop;
4039 if (_work_queue->pop_local(new_oop)) {
4040 assert(new_oop->is_oop(), "Should be an oop");
4041 assert(_bit_map->isMarked((HeapWord*)new_oop), "Grey object");
4042 assert(_span.contains((HeapWord*)new_oop), "Not in span");
4043 assert(new_oop->is_parsable(), "Should be parsable");
4044 new_oop->oop_iterate(this); // do_oop() above
4045 }
4046 }
4047 }
4049 // Upon stack overflow, we discard (part of) the stack,
4050 // remembering the least address amongst those discarded
4051 // in CMSCollector's _restart_addr.
4052 void Par_ConcMarkingClosure::handle_stack_overflow(HeapWord* lost) {
4053 // We need to do this under a mutex to prevent other
4054 // workers from interfering with the work done below.
4055 MutexLockerEx ml(_overflow_stack->par_lock(),
4056 Mutex::_no_safepoint_check_flag);
4057 // Remember the least grey address discarded
4058 HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
4059 _collector->lower_restart_addr(ra);
4060 _overflow_stack->reset(); // discard stack contents
4061 _overflow_stack->expand(); // expand the stack if possible
4062 }
4065 void CMSConcMarkingTask::do_work_steal(int i) {
4066 OopTaskQueue* work_q = work_queue(i);
4067 oop obj_to_scan;
4068 CMSBitMap* bm = &(_collector->_markBitMap);
4069 CMSMarkStack* ovflw = &(_collector->_markStack);
4070 CMSMarkStack* revisit = &(_collector->_revisitStack);
4071 int* seed = _collector->hash_seed(i);
4072 Par_ConcMarkingClosure cl(_collector, work_q, bm, ovflw, revisit);
4073 while (true) {
4074 cl.trim_queue(0);
4075 assert(work_q->size() == 0, "Should have been emptied above");
4076 if (get_work_from_overflow_stack(ovflw, work_q)) {
4077 // Can't assert below because the work obtained from the
4078 // overflow stack may already have been stolen from us.
4079 // assert(work_q->size() > 0, "Work from overflow stack");
4080 continue;
4081 } else if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
4082 assert(obj_to_scan->is_oop(), "Should be an oop");
4083 assert(bm->isMarked((HeapWord*)obj_to_scan), "Grey object");
4084 obj_to_scan->oop_iterate(&cl);
4085 } else if (terminator()->offer_termination()) {
4086 assert(work_q->size() == 0, "Impossible!");
4087 break;
4088 }
4089 }
4090 }
4092 // This is run by the CMS (coordinator) thread.
4093 void CMSConcMarkingTask::coordinator_yield() {
4094 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
4095 "CMS thread should hold CMS token");
4096 DEBUG_ONLY(RememberKlassesChecker mux(false);)
4097 // First give up the locks, then yield, then re-lock
4098 // We should probably use a constructor/destructor idiom to
4099 // do this unlock/lock or modify the MutexUnlocker class to
4100 // serve our purpose. XXX
4101 assert_lock_strong(_bit_map_lock);
4102 _bit_map_lock->unlock();
4103 ConcurrentMarkSweepThread::desynchronize(true);
4104 ConcurrentMarkSweepThread::acknowledge_yield_request();
4105 _collector->stopTimer();
4106 if (PrintCMSStatistics != 0) {
4107 _collector->incrementYields();
4108 }
4109 _collector->icms_wait();
4111 // It is possible for whichever thread initiated the yield request
4112 // not to get a chance to wake up and take the bitmap lock between
4113 // this thread releasing it and reacquiring it. So, while the
4114 // should_yield() flag is on, let's sleep for a bit to give the
4115 // other thread a chance to wake up. The limit imposed on the number
4116 // of iterations is defensive, to avoid any unforeseen circumstances
4117 // putting us into an infinite loop. Since it's always been this
4118 // (coordinator_yield()) method that was observed to cause the
4119 // problem, we are using a parameter (CMSCoordinatorYieldSleepCount)
4120 // which is by default non-zero. For the other seven methods that
4121 // also perform the yield operation, we are using a different
4122 // parameter (CMSYieldSleepCount) which is by default zero. This way we
4123 // can enable the sleeping for those methods too, if necessary.
4124 // See 6442774.
4125 //
4126 // We really need to reconsider the synchronization between the GC
4127 // thread and the yield-requesting threads in the future and we
4128 // should really use wait/notify, which is the recommended
4129 // way of doing this type of interaction. Additionally, we should
4130 // consolidate the eight nearly identical methods that do the yield
4131 // operation into one for better maintainability and
4132 // readability. See 6445193.
4133 //
4134 // Tony 2006.06.29
4135 for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
4136 ConcurrentMarkSweepThread::should_yield() &&
4137 !CMSCollector::foregroundGCIsActive(); ++i) {
4138 os::sleep(Thread::current(), 1, false);
4139 ConcurrentMarkSweepThread::acknowledge_yield_request();
4140 }
4142 ConcurrentMarkSweepThread::synchronize(true);
4143 _bit_map_lock->lock_without_safepoint_check();
4144 _collector->startTimer();
4145 }
4147 bool CMSCollector::do_marking_mt(bool asynch) {
4148 assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition");
4149 // In the future this would be determined ergonomically, based
4150 // on #cpu's, # active mutator threads (and load), and mutation rate.
4151 int num_workers = ConcGCThreads;
4153 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
4154 CompactibleFreeListSpace* perm_space = _permGen->cmsSpace();
4156 CMSConcMarkingTask tsk(this, cms_space, perm_space,
4157 asynch, num_workers /* number requested XXX */,
4158 conc_workers(), task_queues());
4160 // Since the actual number of workers we get may be different
4161 // from the number we requested above, do we need to do anything different
4162 // below? In particular, maybe we need to subclass the SequentialSubTasksDone
4163 // class?? XXX
4164 cms_space ->initialize_sequential_subtasks_for_marking(num_workers);
4165 perm_space->initialize_sequential_subtasks_for_marking(num_workers);
4167 // Refs discovery is already non-atomic.
4168 assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic");
4169 // Mutate the Refs discovery so it is MT during the
4170 // multi-threaded marking phase.
4171 ReferenceProcessorMTMutator mt(ref_processor(), num_workers > 1);
4172 DEBUG_ONLY(RememberKlassesChecker cmx(should_unload_classes());)
4173 conc_workers()->start_task(&tsk);
4174 while (tsk.yielded()) {
4175 tsk.coordinator_yield();
4176 conc_workers()->continue_task(&tsk);
4177 }
4178 // If the task was aborted, _restart_addr will be non-NULL
4179 assert(tsk.completed() || _restart_addr != NULL, "Inconsistency");
4180 while (_restart_addr != NULL) {
4181 // XXX For now we do not make use of ABORTED state and have not
4182 // yet implemented the right abort semantics (even in the original
4183 // single-threaded CMS case). That needs some more investigation
4184 // and is deferred for now; see CR# TBF. 07252005YSR. XXX
4185 assert(!CMSAbortSemantics || tsk.aborted(), "Inconsistency");
4186 // If _restart_addr is non-NULL, a marking stack overflow
4187 // occurred; we need to do a fresh marking iteration from the
4188 // indicated restart address.
4189 if (_foregroundGCIsActive && asynch) {
4190 // We may be running into repeated stack overflows, having
4191 // reached the limit of the stack size, while making very
4192 // slow forward progress. It may be best to bail out and
4193 // let the foreground collector do its job.
4194 // Clear _restart_addr, so that foreground GC
4195 // works from scratch. This avoids the headache of
4196 // a "rescan" which would otherwise be needed because
4197 // of the dirty mod union table & card table.
4198 _restart_addr = NULL;
4199 return false;
4200 }
4201 // Adjust the task to restart from _restart_addr
4202 tsk.reset(_restart_addr);
4203 cms_space ->initialize_sequential_subtasks_for_marking(num_workers,
4204 _restart_addr);
4205 perm_space->initialize_sequential_subtasks_for_marking(num_workers,
4206 _restart_addr);
4207 _restart_addr = NULL;
4208 // Get the workers going again
4209 conc_workers()->start_task(&tsk);
4210 while (tsk.yielded()) {
4211 tsk.coordinator_yield();
4212 conc_workers()->continue_task(&tsk);
4213 }
4214 }
4215 assert(tsk.completed(), "Inconsistency");
4216 assert(tsk.result() == true, "Inconsistency");
4217 return true;
4218 }
4220 bool CMSCollector::do_marking_st(bool asynch) {
4221 ResourceMark rm;
4222 HandleMark hm;
4224 MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap,
4225 &_markStack, &_revisitStack, CMSYield && asynch);
4226 // the last argument to iterate indicates whether the iteration
4227 // should be incremental with periodic yields.
4228 _markBitMap.iterate(&markFromRootsClosure);
4229 // If _restart_addr is non-NULL, a marking stack overflow
4230 // occurred; we need to do a fresh iteration from the
4231 // indicated restart address.
4232 while (_restart_addr != NULL) {
4233 if (_foregroundGCIsActive && asynch) {
4234 // We may be running into repeated stack overflows, having
4235 // reached the limit of the stack size, while making very
4236 // slow forward progress. It may be best to bail out and
4237 // let the foreground collector do its job.
4238 // Clear _restart_addr, so that foreground GC
4239 // works from scratch. This avoids the headache of
4240 // a "rescan" which would otherwise be needed because
4241 // of the dirty mod union table & card table.
4242 _restart_addr = NULL;
4243 return false; // indicating failure to complete marking
4244 }
4245 // Deal with stack overflow:
4246 // we restart marking from _restart_addr
4247 HeapWord* ra = _restart_addr;
4248 markFromRootsClosure.reset(ra);
4249 _restart_addr = NULL;
4250 _markBitMap.iterate(&markFromRootsClosure, ra, _span.end());
4251 }
4252 return true;
4253 }
4255 void CMSCollector::preclean() {
4256 check_correct_thread_executing();
4257 assert(Thread::current()->is_ConcurrentGC_thread(), "Wrong thread");
4258 verify_work_stacks_empty();
4259 verify_overflow_empty();
4260 _abort_preclean = false;
4261 if (CMSPrecleaningEnabled) {
4262 _eden_chunk_index = 0;
4263 size_t used = get_eden_used();
4264 size_t capacity = get_eden_capacity();
4265 // Don't start sampling unless we will get sufficiently
4266 // many samples.
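4267 // For instance (illustrative values): with
4268 // CMSScheduleRemarkSamplingRatio == 5 and
4269 // CMSScheduleRemarkEdenPenetration == 50, the test below reduces to
4270 // used < capacity * 50 / 500, i.e. sampling starts only while eden
4271 // occupancy is under 10% of capacity.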
4267 if (used < (capacity/(CMSScheduleRemarkSamplingRatio * 100)
4268 * CMSScheduleRemarkEdenPenetration)) {
4269 _start_sampling = true;
4270 } else {
4271 _start_sampling = false;
4272 }
4273 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
4274 CMSPhaseAccounting pa(this, "preclean", !PrintGCDetails);
4275 preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
4276 }
4277 CMSTokenSync x(true); // is cms thread
4278 if (CMSPrecleaningEnabled) {
4279 sample_eden();
4280 _collectorState = AbortablePreclean;
4281 } else {
4282 _collectorState = FinalMarking;
4283 }
4284 verify_work_stacks_empty();
4285 verify_overflow_empty();
4286 }
4288 // Try and schedule the remark such that young gen
4289 // occupancy is CMSScheduleRemarkEdenPenetration %.
4290 void CMSCollector::abortable_preclean() {
4291 check_correct_thread_executing();
4292 assert(CMSPrecleaningEnabled, "Inconsistent control state");
4293 assert(_collectorState == AbortablePreclean, "Inconsistent control state");
4295 // If Eden's current occupancy is below this threshold,
4296 // immediately schedule the remark; else preclean
4297 // past the next scavenge in an effort to
4298 // schedule the pause as described above. By choosing
4299 // CMSScheduleRemarkEdenSizeThreshold >= max eden size
4300 // we will never do an actual abortable preclean cycle.
4301 if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
4302 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
4303 CMSPhaseAccounting pa(this, "abortable-preclean", !PrintGCDetails);
4304 // We need more smarts in the abortable preclean
4305 // loop below to deal with cases where allocation
4306 // in young gen is very very slow, and our precleaning
4307 // is running a losing race against a horde of
4308 // mutators intent on flooding us with CMS updates
4309 // (dirty cards).
4310 // One, admittedly dumb, strategy is to give up
4311 // after a certain number of abortable precleaning loops
4312 // or after a certain maximum time. We want to make
4313 // this smarter in the next iteration.
4314 // XXX FIX ME!!! YSR
4315 size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0;
4316 while (!(should_abort_preclean() ||
4317 ConcurrentMarkSweepThread::should_terminate())) {
4318 workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2);
4319 cumworkdone += workdone;
4320 loops++;
4321 // Voluntarily terminate abortable preclean phase if we have
4322 // been at it for too long.
4323 if ((CMSMaxAbortablePrecleanLoops != 0) &&
4324 loops >= CMSMaxAbortablePrecleanLoops) {
4325 if (PrintGCDetails) {
4326 gclog_or_tty->print(" CMS: abort preclean due to loops ");
4327 }
4328 break;
4329 }
4330 if (pa.wallclock_millis() > CMSMaxAbortablePrecleanTime) {
4331 if (PrintGCDetails) {
4332 gclog_or_tty->print(" CMS: abort preclean due to time ");
4333 }
4334 break;
4335 }
4336 // If we are doing little work each iteration, we should
4337 // take a short break.
4338 if (workdone < CMSAbortablePrecleanMinWorkPerIteration) {
4339 // Sleep for some time, waiting for work to accumulate
4340 stopTimer();
4341 cmsThread()->wait_on_cms_lock(CMSAbortablePrecleanWaitMillis);
4342 startTimer();
4343 waited++;
4344 }
4345 }
4346 if (PrintCMSStatistics > 0) {
4347 gclog_or_tty->print(" [%d iterations, %d waits, %d cards] ",
4348 loops, waited, cumworkdone);
4349 }
4350 }
4351 CMSTokenSync x(true); // is cms thread
4352 if (_collectorState != Idling) {
4353 assert(_collectorState == AbortablePreclean,
4354 "Spontaneous state transition?");
4355 _collectorState = FinalMarking;
4356 } // Else, a foreground collection completed this CMS cycle.
4357 return;
4358 }
4360 // Respond to an Eden sampling opportunity
4361 void CMSCollector::sample_eden() {
4362 // Make sure a young gc cannot sneak in between our
4363 // reading and recording of a sample.
4364 assert(Thread::current()->is_ConcurrentGC_thread(),
4365 "Only the cms thread may collect Eden samples");
4366 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
4367 "Should collect samples while holding CMS token");
4368 if (!_start_sampling) {
4369 return;
4370 }
4371 if (_eden_chunk_array) {
4372 if (_eden_chunk_index < _eden_chunk_capacity) {
4373 _eden_chunk_array[_eden_chunk_index] = *_top_addr; // take sample
4374 assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
4375 "Unexpected state of Eden");
4376 // We'd like to check that what we just sampled is an oop-start address;
4377 // however, we cannot do that here since the object may not yet have been
4378 // initialized. So we'll instead do the check when we _use_ this sample
4379 // later.
4380 if (_eden_chunk_index == 0 ||
4381 (pointer_delta(_eden_chunk_array[_eden_chunk_index],
4382 _eden_chunk_array[_eden_chunk_index-1])
4383 >= CMSSamplingGrain)) {
4384 _eden_chunk_index++; // commit sample
4385 }
4386 }
4387 }
4388 if ((_collectorState == AbortablePreclean) && !_abort_preclean) {
4389 size_t used = get_eden_used();
4390 size_t capacity = get_eden_capacity();
4391 assert(used <= capacity, "Unexpected state of Eden");
4392 if (used > (capacity/100 * CMSScheduleRemarkEdenPenetration)) {
4393 _abort_preclean = true;
4394 }
4395 }
4396 }
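// The abort predicate in sample_eden() above is the scheduling target
// itself. A hedged numeric sketch (again assuming the conventional
// default CMSScheduleRemarkEdenPenetration=50): with a 64M eden,
//   capacity/100 * 50 = 32M
// so once eden occupancy passes 32M the abortable preclean is asked
// to wind up, placing the remark pause near the desired eden
// penetration.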
4399 size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
4400 assert(_collectorState == Precleaning ||
4401 _collectorState == AbortablePreclean, "incorrect state");
4402 ResourceMark rm;
4403 HandleMark hm;
4404 // Do one pass of scrubbing the discovered reference lists
4405 // to remove any reference objects with strongly-reachable
4406 // referents.
4407 if (clean_refs) {
4408 ReferenceProcessor* rp = ref_processor();
4409 CMSPrecleanRefsYieldClosure yield_cl(this);
4410 assert(rp->span().equals(_span), "Spans should be equal");
4411 CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
4412 &_markStack, &_revisitStack,
4413 true /* preclean */);
4414 CMSDrainMarkingStackClosure complete_trace(this,
4415 _span, &_markBitMap, &_markStack,
4416 &keep_alive, true /* preclean */);
4418 // We don't want this step to interfere with a young
4419 // collection because we don't want to take CPU
4420 // or memory bandwidth away from the young GC threads
4421 // (which may be as many as there are CPUs).
4422 // Note that we don't need to protect ourselves from
4423 // interference with mutators because they can't
4424 // manipulate the discovered reference lists nor affect
4425 // the computed reachability of the referents, the
4426 // only properties manipulated by the precleaning
4427 // of these reference lists.
4428 stopTimer();
4429 CMSTokenSyncWithLocks x(true /* is cms thread */,
4430 bitMapLock());
4431 startTimer();
4432 sample_eden();
4434 // The following will yield to allow foreground
4435 // collection to proceed promptly. XXX YSR:
4436 // The code in this method may need further
4437 // tweaking for better performance and some restructuring
4438 // for cleaner interfaces.
4439 rp->preclean_discovered_references(
4440 rp->is_alive_non_header(), &keep_alive, &complete_trace,
4441 &yield_cl, should_unload_classes());
4442 }
4444 if (clean_survivor) { // preclean the active survivor space(s)
4445 assert(_young_gen->kind() == Generation::DefNew ||
4446 _young_gen->kind() == Generation::ParNew ||
4447 _young_gen->kind() == Generation::ASParNew,
4448 "incorrect type for cast");
4449 DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
4450 PushAndMarkClosure pam_cl(this, _span, ref_processor(),
4451 &_markBitMap, &_modUnionTable,
4452 &_markStack, &_revisitStack,
4453 true /* precleaning phase */);
4454 stopTimer();
4455 CMSTokenSyncWithLocks ts(true /* is cms thread */,
4456 bitMapLock());
4457 startTimer();
4458 unsigned int before_count =
4459 GenCollectedHeap::heap()->total_collections();
4460 SurvivorSpacePrecleanClosure
4461 sss_cl(this, _span, &_markBitMap, &_markStack,
4462 &pam_cl, before_count, CMSYield);
4463 DEBUG_ONLY(RememberKlassesChecker mx(should_unload_classes());)
4464 dng->from()->object_iterate_careful(&sss_cl);
4465 dng->to()->object_iterate_careful(&sss_cl);
4466 }
4467 MarkRefsIntoAndScanClosure
4468 mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
4469 &_markStack, &_revisitStack, this, CMSYield,
4470 true /* precleaning phase */);
4471 // CAUTION: The following closure has persistent state that may need to
4472 // be reset upon a decrease in the sequence of addresses it
4473 // processes.
4474 ScanMarkedObjectsAgainCarefullyClosure
4475 smoac_cl(this, _span,
4476 &_markBitMap, &_markStack, &_revisitStack, &mrias_cl, CMSYield);
4478 // Preclean dirty cards in ModUnionTable and CardTable using
4479 // appropriate convergence criterion;
4480 // repeat CMSPrecleanIter times unless we find that
4481 // we are losing.
4482 assert(CMSPrecleanIter < 10, "CMSPrecleanIter is too large");
4483 assert(CMSPrecleanNumerator < CMSPrecleanDenominator,
4484 "Bad convergence multiplier");
4485 assert(CMSPrecleanThreshold >= 100,
4486 "Unreasonably low CMSPrecleanThreshold");
4488 size_t numIter, cumNumCards, lastNumCards, curNumCards;
4489 for (numIter = 0, cumNumCards = lastNumCards = curNumCards = 0;
4490 numIter < CMSPrecleanIter;
4491 numIter++, lastNumCards = curNumCards, cumNumCards += curNumCards) {
4492 curNumCards = preclean_mod_union_table(_cmsGen, &smoac_cl);
4493 if (CMSPermGenPrecleaningEnabled) {
4494 curNumCards += preclean_mod_union_table(_permGen, &smoac_cl);
4495 }
4496 if (Verbose && PrintGCDetails) {
4497 gclog_or_tty->print(" (modUnionTable: %d cards)", curNumCards);
4498 }
4499 // Either there are very few dirty cards, so re-mark
4500 // pause will be small anyway, or our pre-cleaning isn't
4501 // that much faster than the rate at which cards are being
4502 // dirtied, so we might as well stop and re-mark since
4503 // precleaning won't improve our re-mark time by much.
4504 if (curNumCards <= CMSPrecleanThreshold ||
4505 (numIter > 0 &&
4506 (curNumCards * CMSPrecleanDenominator >
4507 lastNumCards * CMSPrecleanNumerator))) {
4508 numIter++;
4509 cumNumCards += curNumCards;
4510 break;
4511 }
4512 }
4513 curNumCards = preclean_card_table(_cmsGen, &smoac_cl);
4514 if (CMSPermGenPrecleaningEnabled) {
4515 curNumCards += preclean_card_table(_permGen, &smoac_cl);
4516 }
4517 cumNumCards += curNumCards;
4518 if (PrintGCDetails && PrintCMSStatistics != 0) {
4519 gclog_or_tty->print_cr(" (cardTable: %d cards, re-scanned %d cards, %d iterations)",
4520 curNumCards, cumNumCards, numIter);
4521 }
4522 return cumNumCards; // as a measure of useful work done
4523 }
4525 // PRECLEANING NOTES:
4526 // Precleaning involves:
4527 // . reading the bits of the modUnionTable and clearing the set bits.
4528 // . For the cards corresponding to the set bits, we scan the
4529 // objects on those cards. This means we need the free_list_lock
4530 // so that we can safely iterate over the CMS space when scanning
4531 // for oops.
4532 // . When we scan the objects, we'll be both reading and setting
4533 // marks in the marking bit map, so we'll need the marking bit map.
4534 // . For protecting _collector_state transitions, we take the CGC_lock.
4535 // Note that any races in the reading of card table entries by the
4536 // CMS thread on the one hand and the clearing of those entries by the
4537 // VM thread or the setting of those entries by the mutator threads on the
4538 // other are quite benign. However, for efficiency it makes sense to keep
4539 // the VM thread from racing with the CMS thread while the latter is
4540 // transferring dirty card info to the modUnionTable. We therefore also use the
4541 // CGC_lock to protect the reading of the card table and the mod union
4542 // table by the CMS thread.
4543 // . We run concurrently with mutator updates, so scanning
4544 // needs to be done carefully -- we should not try to scan
4545 // potentially uninitialized objects.
4546 //
4547 // Locking strategy: While holding the CGC_lock, we scan over and
4548 // reset a maximal dirty range of the mod union / card tables, then lock
4549 // the free_list_lock and bitmap lock to do a full marking, then
4550 // release these locks; and repeat the cycle. This allows for a
4551 // certain amount of fairness in the sharing of these locks between
4552 // the CMS collector on the one hand, and the VM thread and the
4553 // mutators on the other.
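// In code, the strategy above is the idiom repeated throughout the two
// preclean methods below (a condensed sketch, not a verbatim excerpt;
// the two helper names are placeholders):
//   while (there may be more dirty cards) {
//     stopTimer();
//     { CMSTokenSync ts(true);                  // CGC_lock only
//       startTimer(); sample_eden();
//       dirtyRegion = get_and_clear_dirty_range(); }
//     stopTimer();
//     { CMSTokenSyncWithLocks ts(true, freelistLock(), bitMapLock());
//       startTimer();
//       scan_objects_in(dirtyRegion); }         // may stop early
//   }
// Dropping and re-acquiring the locks on every trip around the loop is
// what gives the VM thread and the mutators their window.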
4555 // NOTE: preclean_mod_union_table() and preclean_card_table()
4556 // further below are largely identical; if you need to modify
4557 // one of these methods, please check the other method too.
4559 size_t CMSCollector::preclean_mod_union_table(
4560 ConcurrentMarkSweepGeneration* gen,
4561 ScanMarkedObjectsAgainCarefullyClosure* cl) {
4562 verify_work_stacks_empty();
4563 verify_overflow_empty();
4565 // Turn off checking for this method but turn it back on
4566 // selectively. There are yield points in this method
4567 // but it is difficult to turn the checking off just around
4568 // the yield points. It is simpler to selectively turn
4569 // it on.
4570 DEBUG_ONLY(RememberKlassesChecker mux(false);)
4572 // strategy: starting with the first card, accumulate contiguous
4573 // ranges of dirty cards; clear these cards, then scan the region
4574 // covered by these cards.
4576 // Since all of the MUT is committed ahead, we can just use
4577 // that, in case the generations expand while we are precleaning.
4578 // It might also be fine to just use the committed part of the
4579 // generation, but we might potentially miss cards when the
4580 // generation is rapidly expanding while we are in the midst
4581 // of precleaning.
4582 HeapWord* startAddr = gen->reserved().start();
4583 HeapWord* endAddr = gen->reserved().end();
4585 cl->setFreelistLock(gen->freelistLock()); // needed for yielding
4587 size_t numDirtyCards, cumNumDirtyCards;
4588 HeapWord *nextAddr, *lastAddr;
4589 for (cumNumDirtyCards = numDirtyCards = 0,
4590 nextAddr = lastAddr = startAddr;
4591 nextAddr < endAddr;
4592 nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
4594 ResourceMark rm;
4595 HandleMark hm;
4597 MemRegion dirtyRegion;
4598 {
4599 stopTimer();
4600 // Potential yield point
4601 CMSTokenSync ts(true);
4602 startTimer();
4603 sample_eden();
4604 // Get dirty region starting at nextAddr (inclusive),
4605 // simultaneously clearing it.
4606 dirtyRegion =
4607 _modUnionTable.getAndClearMarkedRegion(nextAddr, endAddr);
4608 assert(dirtyRegion.start() >= nextAddr,
4609 "returned region inconsistent?");
4610 }
4611 // Remember where the next search should begin.
4612 // The returned region (if non-empty) is a right-open interval,
4613 // so lastAddr is obtained from the right end of that
4614 // interval.
4615 lastAddr = dirtyRegion.end();
4616 // Should do something more transparent and less hacky XXX
4617 numDirtyCards =
4618 _modUnionTable.heapWordDiffToOffsetDiff(dirtyRegion.word_size());
4620 // We'll scan the cards in the dirty region (with periodic
4621 // yields for foreground GC as needed).
4622 if (!dirtyRegion.is_empty()) {
4623 assert(numDirtyCards > 0, "consistency check");
4624 HeapWord* stop_point = NULL;
4625 stopTimer();
4626 // Potential yield point
4627 CMSTokenSyncWithLocks ts(true, gen->freelistLock(),
4628 bitMapLock());
4629 startTimer();
4630 {
4631 verify_work_stacks_empty();
4632 verify_overflow_empty();
4633 sample_eden();
4634 DEBUG_ONLY(RememberKlassesChecker mx(should_unload_classes());)
4635 stop_point =
4636 gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4637 }
4638 if (stop_point != NULL) {
4639 // The careful iteration stopped early either because it found an
4640 // uninitialized object, or because we were in the midst of an
4641 // "abortable preclean", which should now be aborted. Redirty
4642 // the bits corresponding to the partially-scanned or unscanned
4643 // cards. We'll either restart at the next block boundary or
4644 // abort the preclean.
4645 assert((CMSPermGenPrecleaningEnabled && (gen == _permGen)) ||
4646 (_collectorState == AbortablePreclean && should_abort_preclean()),
4647 "Unparsable objects should only be in perm gen.");
4648 _modUnionTable.mark_range(MemRegion(stop_point, dirtyRegion.end()));
4649 if (should_abort_preclean()) {
4650 break; // out of preclean loop
4651 } else {
4652 // Compute the next address at which preclean should pick up;
4653 // might need bitMapLock in order to read P-bits.
4654 lastAddr = next_card_start_after_block(stop_point);
4655 }
4656 }
4657 } else {
4658 assert(lastAddr == endAddr, "consistency check");
4659 assert(numDirtyCards == 0, "consistency check");
4660 break;
4661 }
4662 }
4663 verify_work_stacks_empty();
4664 verify_overflow_empty();
4665 return cumNumDirtyCards;
4666 }
4668 // NOTE: preclean_mod_union_table() above and preclean_card_table()
4669 // below are largely identical; if you need to modify
4670 // one of these methods, please check the other method too.
4672 size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* gen,
4673 ScanMarkedObjectsAgainCarefullyClosure* cl) {
4674 // strategy: it's similar to preclean_mod_union_table above, in that
4675 // we accumulate contiguous ranges of dirty cards, mark these cards
4676 // precleaned, then scan the region covered by these cards.
4677 HeapWord* endAddr = (HeapWord*)(gen->_virtual_space.high());
4678 HeapWord* startAddr = (HeapWord*)(gen->_virtual_space.low());
4680 cl->setFreelistLock(gen->freelistLock()); // needed for yielding
4682 size_t numDirtyCards, cumNumDirtyCards;
4683 HeapWord *lastAddr, *nextAddr;
4685 for (cumNumDirtyCards = numDirtyCards = 0,
4686 nextAddr = lastAddr = startAddr;
4687 nextAddr < endAddr;
4688 nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
4690 ResourceMark rm;
4691 HandleMark hm;
4693 MemRegion dirtyRegion;
4694 {
4695 // See comments in "Precleaning notes" above on why we
4696 // do this locking. XXX Could the locking overheads be
4697 // too high when dirty cards are sparse? [I don't think so.]
4698 stopTimer();
4699 CMSTokenSync x(true); // is cms thread
4700 startTimer();
4701 sample_eden();
4702 // Get and clear dirty region from card table
4703 dirtyRegion = _ct->ct_bs()->dirty_card_range_after_reset(
4704 MemRegion(nextAddr, endAddr),
4705 true,
4706 CardTableModRefBS::precleaned_card_val());
4708 assert(dirtyRegion.start() >= nextAddr,
4709 "returned region inconsistent?");
4710 }
4711 lastAddr = dirtyRegion.end();
4712 numDirtyCards =
4713 dirtyRegion.word_size()/CardTableModRefBS::card_size_in_words;
4715 if (!dirtyRegion.is_empty()) {
4716 stopTimer();
4717 CMSTokenSyncWithLocks ts(true, gen->freelistLock(), bitMapLock());
4718 startTimer();
4719 sample_eden();
4720 verify_work_stacks_empty();
4721 verify_overflow_empty();
4722 DEBUG_ONLY(RememberKlassesChecker mx(should_unload_classes());)
4723 HeapWord* stop_point =
4724 gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4725 if (stop_point != NULL) {
4726 // The careful iteration stopped early because it found an
4727 // uninitialized object. Redirty the bits corresponding to the
4728 // partially-scanned or unscanned cards, and start again at the
4729 // next block boundary.
4730 assert(CMSPermGenPrecleaningEnabled ||
4731 (_collectorState == AbortablePreclean && should_abort_preclean()),
4732 "Unparsable objects should only be in perm gen.");
4733 _ct->ct_bs()->invalidate(MemRegion(stop_point, dirtyRegion.end()));
4734 if (should_abort_preclean()) {
4735 break; // out of preclean loop
4736 } else {
4737 // Compute the next address at which preclean should pick up.
4738 lastAddr = next_card_start_after_block(stop_point);
4739 }
4740 }
4741 } else {
4742 break;
4743 }
4744 }
4745 verify_work_stacks_empty();
4746 verify_overflow_empty();
4747 return cumNumDirtyCards;
4748 }
4750 void CMSCollector::checkpointRootsFinal(bool asynch,
4751 bool clear_all_soft_refs, bool init_mark_was_synchronous) {
4752 assert(_collectorState == FinalMarking, "incorrect state transition?");
4753 check_correct_thread_executing();
4754 // world is stopped at this checkpoint
4755 assert(SafepointSynchronize::is_at_safepoint(),
4756 "world should be stopped");
4757 verify_work_stacks_empty();
4758 verify_overflow_empty();
4760 SpecializationStats::clear();
4761 if (PrintGCDetails) {
4762 gclog_or_tty->print("[YG occupancy: "SIZE_FORMAT" K ("SIZE_FORMAT" K)]",
4763 _young_gen->used() / K,
4764 _young_gen->capacity() / K);
4765 }
4766 if (asynch) {
4767 if (CMSScavengeBeforeRemark) {
4768 GenCollectedHeap* gch = GenCollectedHeap::heap();
4769 // Temporarily set the flag to false; GCH->do_collection
4770 // expects it to be false and will set it back to true.
4771 FlagSetting fl(gch->_is_gc_active, false);
4772 NOT_PRODUCT(TraceTime t("Scavenge-Before-Remark",
4773 PrintGCDetails && Verbose, true, gclog_or_tty);)
4774 int level = _cmsGen->level() - 1;
4775 if (level >= 0) {
4776 gch->do_collection(true, // full (i.e. force, see below)
4777 false, // !clear_all_soft_refs
4778 0, // size
4779 false, // is_tlab
4780 level // max_level
4781 );
4782 }
4783 }
4784 FreelistLocker x(this);
4785 MutexLockerEx y(bitMapLock(),
4786 Mutex::_no_safepoint_check_flag);
4787 assert(!init_mark_was_synchronous, "but that's impossible!");
4788 checkpointRootsFinalWork(asynch, clear_all_soft_refs, false);
4789 } else {
4790 // already have all the locks
4791 checkpointRootsFinalWork(asynch, clear_all_soft_refs,
4792 init_mark_was_synchronous);
4793 }
4794 verify_work_stacks_empty();
4795 verify_overflow_empty();
4796 SpecializationStats::print();
4797 }
4799 void CMSCollector::checkpointRootsFinalWork(bool asynch,
4800 bool clear_all_soft_refs, bool init_mark_was_synchronous) {
4802 NOT_PRODUCT(TraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, gclog_or_tty);)
4804 assert(haveFreelistLocks(), "must have free list locks");
4805 assert_lock_strong(bitMapLock());
4807 if (UseAdaptiveSizePolicy) {
4808 size_policy()->checkpoint_roots_final_begin();
4809 }
4811 ResourceMark rm;
4812 HandleMark hm;
4814 GenCollectedHeap* gch = GenCollectedHeap::heap();
4816 if (should_unload_classes()) {
4817 CodeCache::gc_prologue();
4818 }
4819 assert(haveFreelistLocks(), "must have free list locks");
4820 assert_lock_strong(bitMapLock());
4822 DEBUG_ONLY(RememberKlassesChecker fmx(should_unload_classes());)
4823 if (!init_mark_was_synchronous) {
4824 // We might assume that we need not fill TLAB's when
4825 // CMSScavengeBeforeRemark is set, because we may have just done
4826 // a scavenge which would have filled all TLAB's -- and besides
4827 // Eden would be empty. This however may not always be the case --
4828 // for instance although we asked for a scavenge, it may not have
4829 // happened because of a JNI critical section. We probably need
4830 // a policy for deciding whether we can in that case wait until
4831 // the critical section releases and then do the remark following
4832 // the scavenge, and skip it here. In the absence of that policy,
4833 // or of an indication of whether the scavenge did indeed occur,
4834 // we cannot rely on TLAB's having been filled and must do
4835 // so here just in case a scavenge did not happen.
4836 gch->ensure_parsability(false); // fill TLAB's, but no need to retire them
4837 // Update the saved marks which may affect the root scans.
4838 gch->save_marks();
4840 {
4841 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
4843 // Note on the role of the mod union table:
4844 // Since the marker in "markFromRoots" marks concurrently with
4845 // mutators, it is possible for some reachable objects not to have been
4846 // scanned. For instance, an only reference to an object A was
4847 // placed in object B after the marker scanned B. Unless B is rescanned,
4848 // A would be collected. Such updates to references in marked objects
4849 // are detected via the mod union table which is the set of all cards
4850 // dirtied since the first checkpoint in this GC cycle and prior to
4851 // the most recent young generation GC, minus those cleaned up by the
4852 // concurrent precleaning.
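// An illustrative timeline of the race the mod union table closes:
// the marker finishes scanning B; a mutator then stores the only
// reference to A into a field of B, dirtying B's card; that card is
// recorded in the mod union table, so the rescan below re-traverses B
// and marks A before it could be erroneously swept.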
4853 if (CMSParallelRemarkEnabled && ParallelGCThreads > 0) {
4854 TraceTime t("Rescan (parallel) ", PrintGCDetails, false, gclog_or_tty);
4855 do_remark_parallel();
4856 } else {
4857 TraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
4858 gclog_or_tty);
4859 do_remark_non_parallel();
4860 }
4861 }
4862 } else {
4863 assert(!asynch, "Can't have init_mark_was_synchronous in asynch mode");
4864 // The initial mark was stop-world, so there's no rescanning to
4865 // do; go straight on to the next step below.
4866 }
4867 verify_work_stacks_empty();
4868 verify_overflow_empty();
4870 {
4871 NOT_PRODUCT(TraceTime ts("refProcessingWork", PrintGCDetails, false, gclog_or_tty);)
4872 refProcessingWork(asynch, clear_all_soft_refs);
4873 }
4874 verify_work_stacks_empty();
4875 verify_overflow_empty();
4877 if (should_unload_classes()) {
4878 CodeCache::gc_epilogue();
4879 }
4881 // If we encountered any (marking stack / work queue) overflow
4882 // events during the current CMS cycle, take appropriate
4883 // remedial measures, where possible, so as to try and avoid
4884 // recurrence of that condition.
4885 assert(_markStack.isEmpty(), "No grey objects");
4886 size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
4887 _ser_kac_ovflw + _ser_kac_preclean_ovflw;
4888 if (ser_ovflw > 0) {
4889 if (PrintCMSStatistics != 0) {
4890 gclog_or_tty->print_cr("Marking stack overflow (benign) "
4891 "(pmc_pc="SIZE_FORMAT", pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT
4892 ", kac_preclean="SIZE_FORMAT")",
4893 _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw,
4894 _ser_kac_ovflw, _ser_kac_preclean_ovflw);
4895 }
4896 _markStack.expand();
4897 _ser_pmc_remark_ovflw = 0;
4898 _ser_pmc_preclean_ovflw = 0;
4899 _ser_kac_preclean_ovflw = 0;
4900 _ser_kac_ovflw = 0;
4901 }
4902 if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) {
4903 if (PrintCMSStatistics != 0) {
4904 gclog_or_tty->print_cr("Work queue overflow (benign) "
4905 "(pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT")",
4906 _par_pmc_remark_ovflw, _par_kac_ovflw);
4907 }
4908 _par_pmc_remark_ovflw = 0;
4909 _par_kac_ovflw = 0;
4910 }
4911 if (PrintCMSStatistics != 0) {
4912 if (_markStack._hit_limit > 0) {
4913 gclog_or_tty->print_cr(" (benign) Hit max stack size limit ("SIZE_FORMAT")",
4914 _markStack._hit_limit);
4915 }
4916 if (_markStack._failed_double > 0) {
4917 gclog_or_tty->print_cr(" (benign) Failed stack doubling ("SIZE_FORMAT"),"
4918 " current capacity "SIZE_FORMAT,
4919 _markStack._failed_double,
4920 _markStack.capacity());
4921 }
4922 }
4923 _markStack._hit_limit = 0;
4924 _markStack._failed_double = 0;
4926 // Check that all the klasses have been checked
4927 assert(_revisitStack.isEmpty(), "Not all klasses revisited");
4929 if ((VerifyAfterGC || VerifyDuringGC) &&
4930 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
4931 verify_after_remark();
4932 }
4934 // Change under the freelistLocks.
4935 _collectorState = Sweeping;
4936 // Call isAllClear() under bitMapLock
4937 assert(_modUnionTable.isAllClear(), "Should be clear by end of the"
4938 " final marking");
4939 if (UseAdaptiveSizePolicy) {
4940 size_policy()->checkpoint_roots_final_end(gch->gc_cause());
4941 }
4942 }
4944 // Parallel remark task
4945 class CMSParRemarkTask: public AbstractGangTask {
4946 CMSCollector* _collector;
4947 WorkGang* _workers;
4948 int _n_workers;
4949 CompactibleFreeListSpace* _cms_space;
4950 CompactibleFreeListSpace* _perm_space;
4952 // The per-thread work queues, available here for stealing.
4953 OopTaskQueueSet* _task_queues;
4954 ParallelTaskTerminator _term;
4956 public:
4957 CMSParRemarkTask(CMSCollector* collector,
4958 CompactibleFreeListSpace* cms_space,
4959 CompactibleFreeListSpace* perm_space,
4960 int n_workers, WorkGang* workers,
4961 OopTaskQueueSet* task_queues):
4962 AbstractGangTask("Rescan roots and grey objects in parallel"),
4963 _collector(collector),
4964 _cms_space(cms_space), _perm_space(perm_space),
4965 _n_workers(n_workers),
4966 _workers(workers),
4967 _task_queues(task_queues),
4968 _term(workers->total_workers(), task_queues) { }
4970 OopTaskQueueSet* task_queues() { return _task_queues; }
4972 OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
4974 ParallelTaskTerminator* terminator() { return &_term; }
4976 void work(int i);
4978 private:
4979 // Work method in support of parallel rescan ... of young gen spaces
4980 void do_young_space_rescan(int i, Par_MarkRefsIntoAndScanClosure* cl,
4981 ContiguousSpace* space,
4982 HeapWord** chunk_array, size_t chunk_top);
4984 // ... of dirty cards in old space
4985 void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i,
4986 Par_MarkRefsIntoAndScanClosure* cl);
4988 // ... work stealing for the above
4989 void do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl, int* seed);
4990 };
4992 void CMSParRemarkTask::work(int i) {
4993 elapsedTimer _timer;
4994 ResourceMark rm;
4995 HandleMark hm;
4997 // ---------- rescan from roots --------------
4998 _timer.start();
4999 GenCollectedHeap* gch = GenCollectedHeap::heap();
5000 Par_MarkRefsIntoAndScanClosure par_mrias_cl(_collector,
5001 _collector->_span, _collector->ref_processor(),
5002 &(_collector->_markBitMap),
5003 work_queue(i), &(_collector->_revisitStack));
5005 // Rescan young gen roots first since these are likely
5006 // coarsely partitioned and may, on that account, constitute
5007 // the critical path; thus, it's best to start off that
5008 // work first.
5009 // ---------- young gen roots --------------
5010 {
5011 DefNewGeneration* dng = _collector->_young_gen->as_DefNewGeneration();
5012 EdenSpace* eden_space = dng->eden();
5013 ContiguousSpace* from_space = dng->from();
5014 ContiguousSpace* to_space = dng->to();
5016 HeapWord** eca = _collector->_eden_chunk_array;
5017 size_t ect = _collector->_eden_chunk_index;
5018 HeapWord** sca = _collector->_survivor_chunk_array;
5019 size_t sct = _collector->_survivor_chunk_index;
5021 assert(ect <= _collector->_eden_chunk_capacity, "out of bounds");
5022 assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds");
5024 do_young_space_rescan(i, &par_mrias_cl, to_space, NULL, 0);
5025 do_young_space_rescan(i, &par_mrias_cl, from_space, sca, sct);
5026 do_young_space_rescan(i, &par_mrias_cl, eden_space, eca, ect);
5028 _timer.stop();
5029 if (PrintCMSStatistics != 0) {
5030 gclog_or_tty->print_cr(
5031 "Finished young gen rescan work in %dth thread: %3.3f sec",
5032 i, _timer.seconds());
5033 }
5034 }
5036 // ---------- remaining roots --------------
5037 _timer.reset();
5038 _timer.start();
5039 gch->gen_process_strong_roots(_collector->_cmsGen->level(),
5040 false, // yg was scanned above
5041 false, // this is parallel code
5042 true, // collecting perm gen
5043 SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
5044 &par_mrias_cl,
5045 true, // walk all of code cache if (so & SO_CodeCache)
5046 NULL);
5047 assert(_collector->should_unload_classes()
5048 || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_CodeCache),
5049 "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5050 _timer.stop();
5051 if (PrintCMSStatistics != 0) {
5052 gclog_or_tty->print_cr(
5053 "Finished remaining root rescan work in %dth thread: %3.3f sec",
5054 i, _timer.seconds());
5055 }
5057 // ---------- rescan dirty cards ------------
5058 _timer.reset();
5059 _timer.start();
5061 // Do the rescan tasks for each of the two spaces
5062 // (cms_space and perm_space) in turn.
5063 do_dirty_card_rescan_tasks(_cms_space, i, &par_mrias_cl);
5064 do_dirty_card_rescan_tasks(_perm_space, i, &par_mrias_cl);
5065 _timer.stop();
5066 if (PrintCMSStatistics != 0) {
5067 gclog_or_tty->print_cr(
5068 "Finished dirty card rescan work in %dth thread: %3.3f sec",
5069 i, _timer.seconds());
5070 }
5072 // ---------- steal work from other threads ...
5073 // ---------- ... and drain overflow list.
5074 _timer.reset();
5075 _timer.start();
5076 do_work_steal(i, &par_mrias_cl, _collector->hash_seed(i));
5077 _timer.stop();
5078 if (PrintCMSStatistics != 0) {
5079 gclog_or_tty->print_cr(
5080 "Finished work stealing in %dth thread: %3.3f sec",
5081 i, _timer.seconds());
5082 }
5083 }
5085 void
5086 CMSParRemarkTask::do_young_space_rescan(int i,
5087 Par_MarkRefsIntoAndScanClosure* cl, ContiguousSpace* space,
5088 HeapWord** chunk_array, size_t chunk_top) {
5089 // Until all tasks completed:
5090 // . claim an unclaimed task
5091 // . compute region boundaries corresponding to task claimed
5092 // using chunk_array
5093 // . par_oop_iterate(cl) over that region
5095 ResourceMark rm;
5096 HandleMark hm;
5098 SequentialSubTasksDone* pst = space->par_seq_tasks();
5099 assert(pst->valid(), "Uninitialized use?");
5101 int nth_task = 0;
5102 int n_tasks = pst->n_tasks();
5104 HeapWord *start, *end;
5105 while (!pst->is_task_claimed(/* reference */ nth_task)) {
5106 // We claimed task # nth_task; compute its boundaries.
5107 if (chunk_top == 0) { // no samples were taken
5108 assert(nth_task == 0 && n_tasks == 1, "Can have only 1 EdenSpace task");
5109 start = space->bottom();
5110 end = space->top();
5111 } else if (nth_task == 0) {
5112 start = space->bottom();
5113 end = chunk_array[nth_task];
5114 } else if (nth_task < (jint)chunk_top) {
5115 assert(nth_task >= 1, "Control point invariant");
5116 start = chunk_array[nth_task - 1];
5117 end = chunk_array[nth_task];
5118 } else {
5119 assert(nth_task == (jint)chunk_top, "Control point invariant");
5120 start = chunk_array[chunk_top - 1];
5121 end = space->top();
5122 }
5123 MemRegion mr(start, end);
5124 // Verify that mr is in space
5125 assert(mr.is_empty() || space->used_region().contains(mr),
5126 "Should be in space");
5127 // Verify that "start" is an object boundary
5128 assert(mr.is_empty() || oop(mr.start())->is_oop(),
5129 "Should be an oop");
5130 space->par_oop_iterate(mr, cl);
5131 }
5132 pst->all_tasks_completed();
5133 }
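// A worked illustration of the boundary computation above: with
// chunk_top == 3 and samples chunk_array == {c0, c1, c2}, the four
// claimable tasks cover
//   task 0: [bottom, c0)
//   task 1: [c0, c1)
//   task 2: [c1, c2)
//   task 3: [c2, top)
// Every sampled address thus becomes a task boundary and, because the
// samples were taken at allocation points, an object-start boundary
// as well (which the is_oop assert above verifies).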
5135 void
5136 CMSParRemarkTask::do_dirty_card_rescan_tasks(
5137 CompactibleFreeListSpace* sp, int i,
5138 Par_MarkRefsIntoAndScanClosure* cl) {
5139 // Until all tasks completed:
5140 // . claim an unclaimed task
5141 // . compute region boundaries corresponding to task claimed
5142 // . transfer dirty bits ct->mut for that region
5143 // . apply rescanclosure to dirty mut bits for that region
5145 ResourceMark rm;
5146 HandleMark hm;
5148 OopTaskQueue* work_q = work_queue(i);
5149 ModUnionClosure modUnionClosure(&(_collector->_modUnionTable));
5150 // CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION!
5151 // CAUTION: This closure has state that persists across calls to
5152 // the work method dirty_range_iterate_clear() in that it has
5153 // embedded in it a (subtype of) UpwardsObjectClosure. The
5154 // use of that state in the embedded UpwardsObjectClosure instance
5155 // assumes that the cards are always iterated (even if in parallel
5156 // by several threads) in monotonically increasing order per each
5157 // thread. This is true of the implementation below which picks
5158 // card ranges (chunks) in monotonically increasing order globally
5159 // and, a-fortiori, in monotonically increasing order per thread
5160 // (the latter order being a subsequence of the former).
5161 // If the work code below is ever reorganized into a more chaotic
5162 // work-partitioning form than the current "sequential tasks"
5163 // paradigm, the use of that persistent state will have to be
5164 // revisited and modified appropriately. See also related
5165 // bug 4756801; work on that bug should examine this code to make
5166 // sure that the changes there do not run counter to the
5167 // assumptions made here and necessary for correctness and
5168 // efficiency. Note also that this code might yield inefficient
5169 // behaviour in the case of very large objects that span one or
5170 // more work chunks. Such objects would potentially be scanned
5171 // several times redundantly. Work on 4756801 should try to
5172 // address that performance anomaly if at all possible. XXX
5173 MemRegion full_span = _collector->_span;
5174 CMSBitMap* bm = &(_collector->_markBitMap); // shared
5175 CMSMarkStack* rs = &(_collector->_revisitStack); // shared
5176 MarkFromDirtyCardsClosure
5177 greyRescanClosure(_collector, full_span, // entire span of interest
5178 sp, bm, work_q, rs, cl);
5180 SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
5181 assert(pst->valid(), "Uninitialized use?");
5182 int nth_task = 0;
5183 const int alignment = CardTableModRefBS::card_size * BitsPerWord;
5184 MemRegion span = sp->used_region();
5185 HeapWord* start_addr = span.start();
5186 HeapWord* end_addr = (HeapWord*)round_to((intptr_t)span.end(),
5187 alignment);
5188 const size_t chunk_size = sp->rescan_task_size(); // in HeapWord units
5189 assert((HeapWord*)round_to((intptr_t)start_addr, alignment) ==
5190 start_addr, "Check alignment");
5191 assert((size_t)round_to((intptr_t)chunk_size, alignment) ==
5192 chunk_size, "Check alignment");
5194 while (!pst->is_task_claimed(/* reference */ nth_task)) {
5195 // Having claimed the nth_task, compute corresponding mem-region,
5196 // which is a-fortiori aligned correctly (i.e. at a MUT boundary).
5197 // The alignment restriction ensures that we do not need any
5198 // synchronization with other gang-workers while setting or
5199 // clearing bits in this chunk of the MUT.
5200 MemRegion this_span = MemRegion(start_addr + nth_task*chunk_size,
5201 start_addr + (nth_task+1)*chunk_size);
5202 // The last chunk's end might be way beyond the end of the
5203 // used region. In that case pull back appropriately.
5204 if (this_span.end() > end_addr) {
5205 this_span.set_end(end_addr);
5206 assert(!this_span.is_empty(), "Program logic (calculation of n_tasks)");
5207 }
5208 // Iterate over the dirty cards covering this chunk, marking them
5209 // precleaned, and setting the corresponding bits in the mod union
5210 // table. Since we have been careful to partition at Card and MUT-word
5211 // boundaries no synchronization is needed between parallel threads.
5212 _collector->_ct->ct_bs()->dirty_card_iterate(this_span,
5213 &modUnionClosure);
5215 // Having transferred these marks into the modUnionTable,
5216 // rescan the marked objects on the dirty cards in the modUnionTable.
5217 // Even if this is at a synchronous collection, the initial marking
5218 // may have been done during an asynchronous collection so there
5219 // may be dirty bits in the mod-union table.
5220 _collector->_modUnionTable.dirty_range_iterate_clear(
5221 this_span, &greyRescanClosure);
5222 _collector->_modUnionTable.verifyNoOneBitsInRange(
5223 this_span.start(),
5224 this_span.end());
5225 }
5226 pst->all_tasks_completed(); // declare that I am done
5227 }
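// A brief sketch of why card_size * BitsPerWord is the right alignment
// in the method above: one word of the mod union table holds
// BitsPerWord card bits, so (assuming the usual 512-byte cards and
// 64-bit words) one MUT word covers 512 * 64 = 32K bytes of heap;
// chunk boundaries aligned to that granule can never split a MUT word
// between two workers, hence bits can be set and cleared lock-free.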
5229 // . see if we can share work_queues with ParNew? XXX
5230 void
5231 CMSParRemarkTask::do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl,
5232 int* seed) {
5233 OopTaskQueue* work_q = work_queue(i);
5234 NOT_PRODUCT(int num_steals = 0;)
5235 oop obj_to_scan;
5236 CMSBitMap* bm = &(_collector->_markBitMap);
5238 while (true) {
5239 // Completely finish any left over work from (an) earlier round(s)
5240 cl->trim_queue(0);
5241 size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
5242 (size_t)ParGCDesiredObjsFromOverflowList);
5243 // Now check if there's any work in the overflow list
5244 if (_collector->par_take_from_overflow_list(num_from_overflow_list,
5245 work_q)) {
5246 // found something in global overflow list;
5247 // not yet ready to go stealing work from others.
5248 // We'd like to assert(work_q->size() != 0, ...)
5249 // because we just took work from the overflow list,
5250 // but of course we can't since all of that could have
5251 // been already stolen from us.
5252 // "He giveth and He taketh away."
5253 continue;
5254 }
5255 // Verify that we have no work before we resort to stealing
5256 assert(work_q->size() == 0, "Have work, shouldn't steal");
5257 // Try to steal from other queues that have work
5258 if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
5259 NOT_PRODUCT(num_steals++;)
5260 assert(obj_to_scan->is_oop(), "Oops, not an oop!");
5261 assert(bm->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
5262 // Do scanning work
5263 obj_to_scan->oop_iterate(cl);
5264 // Loop around, finish this work, and try to steal some more
5265 } else if (terminator()->offer_termination()) {
5266 break; // nirvana from the infinite cycle
5267 }
5268 }
5269 NOT_PRODUCT(
5270 if (PrintCMSStatistics != 0) {
5271 gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
5272 }
5273 )
5274 assert(work_q->size() == 0 && _collector->overflow_list_is_empty(),
5275 "Else our work is not yet done");
5276 }
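// The loop above amounts to a three-level work-acquisition protocol
// (summarized as a sketch):
//   1. drain the local queue completely (trim_queue(0));
//   2. refill from the shared overflow list, taking at most
//      min(local-queue headroom / 4, ParGCDesiredObjsFromOverflowList)
//      objects per attempt;
//   3. only with a provably empty local queue, steal from a random
//      victim's queue; failing that, offer termination and exit once
//      all workers agree no work remains anywhere.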
5278 // Return a thread-local PLAB recording array, as appropriate.
5279 void* CMSCollector::get_data_recorder(int thr_num) {
5280 if (_survivor_plab_array != NULL &&
5281 (CMSPLABRecordAlways ||
5282 (_collectorState > Marking && _collectorState < FinalMarking))) {
5283 assert(thr_num < (int)ParallelGCThreads, "thr_num is out of bounds");
5284 ChunkArray* ca = &_survivor_plab_array[thr_num];
5285 ca->reset(); // clear it so that fresh data is recorded
5286 return (void*) ca;
5287 } else {
5288 return NULL;
5289 }
5290 }
5292 // Reset all the thread-local PLAB recording arrays
5293 void CMSCollector::reset_survivor_plab_arrays() {
5294 for (uint i = 0; i < ParallelGCThreads; i++) {
5295 _survivor_plab_array[i].reset();
5296 }
5297 }
5299 // Merge the per-thread plab arrays into the global survivor chunk
5300 // array which will provide the partitioning of the survivor space
5301 // for CMS rescan.
5302 void CMSCollector::merge_survivor_plab_arrays(ContiguousSpace* surv) {
5303 assert(_survivor_plab_array != NULL, "Error");
5304 assert(_survivor_chunk_array != NULL, "Error");
5305 assert(_collectorState == FinalMarking, "Error");
5306 for (uint j = 0; j < ParallelGCThreads; j++) {
5307 _cursor[j] = 0;
5308 }
5309 HeapWord* top = surv->top();
5310 size_t i;
5311 for (i = 0; i < _survivor_chunk_capacity; i++) { // all sca entries
5312 HeapWord* min_val = top; // Higher than any PLAB address
5313 uint min_tid = 0; // position of min_val this round
5314 for (uint j = 0; j < ParallelGCThreads; j++) {
5315 ChunkArray* cur_sca = &_survivor_plab_array[j];
5316 if (_cursor[j] == cur_sca->end()) {
5317 continue;
5318 }
5319 assert(_cursor[j] < cur_sca->end(), "ctl pt invariant");
5320 HeapWord* cur_val = cur_sca->nth(_cursor[j]);
5321 assert(surv->used_region().contains(cur_val), "Out of bounds value");
5322 if (cur_val < min_val) {
5323 min_tid = j;
5324 min_val = cur_val;
5325 } else {
5326 assert(cur_val < top, "All recorded addresses should be less");
5327 }
5328 }
5329 // At this point min_val and min_tid are respectively
5330 // the least address in _survivor_plab_array[j]->nth(_cursor[j])
5331 // and the thread (j) that witnesses that address.
5332 // We record this address in the _survivor_chunk_array[i]
5333 // and increment _cursor[min_tid] prior to the next round i.
5334 if (min_val == top) {
5335 break;
5336 }
5337 _survivor_chunk_array[i] = min_val;
5338 _cursor[min_tid]++;
5339 }
5340 // We are all done; record the size of the _survivor_chunk_array
5341 _survivor_chunk_index = i; // exclusive: [0, i)
5342 if (PrintCMSStatistics > 0) {
5343 gclog_or_tty->print(" (Survivor:" SIZE_FORMAT " chunks) ", i);
5344 }
5345 // Verify that we used up all the recorded entries
5346 #ifdef ASSERT
5347 size_t total = 0;
5348 for (uint j = 0; j < ParallelGCThreads; j++) {
5349 assert(_cursor[j] == _survivor_plab_array[j].end(), "Ctl pt invariant");
5350 total += _cursor[j];
5351 }
5352 assert(total == _survivor_chunk_index, "Ctl Pt Invariant");
5353 // Check that the merged array is in sorted order
5354 if (total > 0) {
5355 for (size_t i = 0; i < total - 1; i++) {
5356 if (PrintCMSStatistics > 0) {
5357 gclog_or_tty->print(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ",
5358 i, _survivor_chunk_array[i]);
5359 }
5360 assert(_survivor_chunk_array[i] < _survivor_chunk_array[i+1],
5361 "Not sorted");
5362 }
5363 }
5364 #endif // ASSERT
5365 }
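// A small worked example of the merge above (values illustrative):
// with ParallelGCThreads == 2 and per-thread PLAB samples
//   thread 0: {0x1000, 0x3000}
//   thread 1: {0x2000}
// successive rounds select the least unconsumed address, producing
//   _survivor_chunk_array == {0x1000, 0x2000, 0x3000}
// with _survivor_chunk_index == 3. This is a k-way merge of k sorted
// sequences done with a linear scan per round (O(k) per element)
// rather than a heap, which is adequate here since k is the small
// value ParallelGCThreads.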
5367 // Set up the space's par_seq_tasks structure for work claiming
5368 // for parallel rescan of young gen.
5369 // See ParRescanTask where this is currently used.
5370 void
5371 CMSCollector::
5372 initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
5373 assert(n_threads > 0, "Unexpected n_threads argument");
5374 DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
5376 // Eden space
5377 {
5378 SequentialSubTasksDone* pst = dng->eden()->par_seq_tasks();
5379 assert(!pst->valid(), "Clobbering existing data?");
5380 // Each valid entry in [0, _eden_chunk_index) represents a task.
5381 size_t n_tasks = _eden_chunk_index + 1;
5382 assert(n_tasks == 1 || _eden_chunk_array != NULL, "Error");
5383 pst->set_par_threads(n_threads);
5384 pst->set_n_tasks((int)n_tasks);
5385 }
5387 // Merge the survivor plab arrays into _survivor_chunk_array
5388 if (_survivor_plab_array != NULL) {
5389 merge_survivor_plab_arrays(dng->from());
5390 } else {
5391 assert(_survivor_chunk_index == 0, "Error");
5392 }
5394 // To space
5395 {
5396 SequentialSubTasksDone* pst = dng->to()->par_seq_tasks();
5397 assert(!pst->valid(), "Clobbering existing data?");
5398 pst->set_par_threads(n_threads);
5399 pst->set_n_tasks(1);
5400 assert(pst->valid(), "Error");
5401 }
5403 // From space
5404 {
5405 SequentialSubTasksDone* pst = dng->from()->par_seq_tasks();
5406 assert(!pst->valid(), "Clobbering existing data?");
5407 size_t n_tasks = _survivor_chunk_index + 1;
5408 assert(n_tasks == 1 || _survivor_chunk_array != NULL, "Error");
5409 pst->set_par_threads(n_threads);
5410 pst->set_n_tasks((int)n_tasks);
5411 assert(pst->valid(), "Error");
5412 }
5413 }
5415 // Parallel version of remark
5416 void CMSCollector::do_remark_parallel() {
5417 GenCollectedHeap* gch = GenCollectedHeap::heap();
5418 WorkGang* workers = gch->workers();
5419 assert(workers != NULL, "Need parallel worker threads.");
5420 int n_workers = workers->total_workers();
5421 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
5422 CompactibleFreeListSpace* perm_space = _permGen->cmsSpace();
5424 CMSParRemarkTask tsk(this,
5425 cms_space, perm_space,
5426 n_workers, workers, task_queues());
5428 // Set up for parallel process_strong_roots work.
5429 gch->set_par_threads(n_workers);
5430 // We won't be iterating over the cards in the card table updating
5431 // the younger_gen cards, so we shouldn't call the following else
5432 // the verification code as well as subsequent younger_refs_iterate
5433 // code would get confused. XXX
5434 // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
5436 // The young gen rescan work will not be done as part of
5437 // process_strong_roots (which currently doesn't know how to
5438 // parallelize such a scan), but rather will be broken up into
5439 // a set of parallel tasks (via the sampling that the [abortable]
5440 // preclean phase did of EdenSpace, plus the [two] tasks of
5441 // scanning the [two] survivor spaces). Further fine-grain
5442 // parallelization of the scanning of the survivor spaces
5443 // themselves, and of precleaning of the younger gen itself
5444 // is deferred to the future.
5445 initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
5447 // The dirty card rescan work is broken up into a "sequence"
5448 // of parallel tasks (per constituent space) that are dynamically
5449 // claimed by the parallel threads.
5450 cms_space->initialize_sequential_subtasks_for_rescan(n_workers);
5451 perm_space->initialize_sequential_subtasks_for_rescan(n_workers);
5453 // It turns out that even when we're using 1 thread, doing the work in a
5454 // separate thread causes wide variance in run times. We can't help this
5455 // in the multi-threaded case, but we special-case n=1 here to get
5456 // repeatable measurements of the 1-thread overhead of the parallel code.
5457 if (n_workers > 1) {
5458 // Make refs discovery MT-safe
5459 ReferenceProcessorMTMutator mt(ref_processor(), true);
5460 GenCollectedHeap::StrongRootsScope srs(gch);
5461 workers->run_task(&tsk);
5462 } else {
5463 GenCollectedHeap::StrongRootsScope srs(gch);
5464 tsk.work(0);
5465 }
5466 gch->set_par_threads(0); // 0 ==> non-parallel.
5467 // restore, single-threaded for now, any preserved marks
5468 // as a result of work_q overflow
5469 restore_preserved_marks_if_any();
5470 }
5472 // Non-parallel version of remark
5473 void CMSCollector::do_remark_non_parallel() {
5474 ResourceMark rm;
5475 HandleMark hm;
5476 GenCollectedHeap* gch = GenCollectedHeap::heap();
5477 MarkRefsIntoAndScanClosure
5478 mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
5479 &_markStack, &_revisitStack, this,
5480 false /* should_yield */, false /* not precleaning */);
5481 MarkFromDirtyCardsClosure
5482 markFromDirtyCardsClosure(this, _span,
5483 NULL, // space is set further below
5484 &_markBitMap, &_markStack, &_revisitStack,
5485 &mrias_cl);
5486 {
5487 TraceTime t("grey object rescan", PrintGCDetails, false, gclog_or_tty);
5488 // Iterate over the dirty cards, setting the corresponding bits in the
5489 // mod union table.
5490 {
5491 ModUnionClosure modUnionClosure(&_modUnionTable);
5492 _ct->ct_bs()->dirty_card_iterate(
5493 _cmsGen->used_region(),
5494 &modUnionClosure);
5495 _ct->ct_bs()->dirty_card_iterate(
5496 _permGen->used_region(),
5497 &modUnionClosure);
5498 }
5499 // Having transferred these marks into the modUnionTable, we just need
5500 // to rescan the marked objects on the dirty cards in the modUnionTable.
5501 // The initial marking may have been done during an asynchronous
5502 // collection so there may be dirty bits in the mod-union table.
5503 const int alignment =
5504 CardTableModRefBS::card_size * BitsPerWord;
5505 {
5506 // ... First handle dirty cards in CMS gen
5507 markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace());
5508 MemRegion ur = _cmsGen->used_region();
5509 HeapWord* lb = ur.start();
5510 HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
5511 MemRegion cms_span(lb, ub);
5512 _modUnionTable.dirty_range_iterate_clear(cms_span,
5513 &markFromDirtyCardsClosure);
5514 verify_work_stacks_empty();
5515 if (PrintCMSStatistics != 0) {
5516 gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in cms gen) ",
5517 markFromDirtyCardsClosure.num_dirty_cards());
5518 }
5519 }
5520 {
5521 // .. and then repeat for dirty cards in perm gen
5522 markFromDirtyCardsClosure.set_space(_permGen->cmsSpace());
5523 MemRegion ur = _permGen->used_region();
5524 HeapWord* lb = ur.start();
5525 HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
5526 MemRegion perm_span(lb, ub);
5527 _modUnionTable.dirty_range_iterate_clear(perm_span,
5528 &markFromDirtyCardsClosure);
5529 verify_work_stacks_empty();
5530 if (PrintCMSStatistics != 0) {
5531 gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in perm gen) ",
5532 markFromDirtyCardsClosure.num_dirty_cards());
5533 }
5534 }
5535 }
5536 if (VerifyDuringGC &&
5537 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5538 HandleMark hm; // Discard invalid handles created during verification
5539 Universe::verify(true);
5540 }
5541 {
5542 TraceTime t("root rescan", PrintGCDetails, false, gclog_or_tty);
5544 verify_work_stacks_empty();
5546 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
5547 GenCollectedHeap::StrongRootsScope srs(gch);
5548 gch->gen_process_strong_roots(_cmsGen->level(),
5549 true, // younger gens as roots
5550 false, // use the local StrongRootsScope
5551 true, // collecting perm gen
5552 SharedHeap::ScanningOption(roots_scanning_options()),
5553 &mrias_cl,
5554 true, // walk code active on stacks
5555 NULL);
5556 assert(should_unload_classes()
5557 || (roots_scanning_options() & SharedHeap::SO_CodeCache),
5558 "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5559 }
5560 verify_work_stacks_empty();
5561 // Restore evacuated mark words, if any, used for overflow list links
5562 if (!CMSOverflowEarlyRestoration) {
5563 restore_preserved_marks_if_any();
5564 }
5565 verify_overflow_empty();
5566 }
5568 ////////////////////////////////////////////////////////
5569 // Parallel Reference Processing Task Proxy Class
5570 ////////////////////////////////////////////////////////
5571 class CMSRefProcTaskProxy: public AbstractGangTask {
5572 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
5573 CMSCollector* _collector;
5574 CMSBitMap* _mark_bit_map;
5575 const MemRegion _span;
5576 OopTaskQueueSet* _task_queues;
5577 ParallelTaskTerminator _term;
5578 ProcessTask& _task;
5580 public:
5581 CMSRefProcTaskProxy(ProcessTask& task,
5582 CMSCollector* collector,
5583 const MemRegion& span,
5584 CMSBitMap* mark_bit_map,
5585 int total_workers,
5586 OopTaskQueueSet* task_queues):
5587 AbstractGangTask("Process referents by policy in parallel"),
5588 _task(task),
5589 _collector(collector), _span(span), _mark_bit_map(mark_bit_map),
5590 _task_queues(task_queues),
5591 _term(total_workers, task_queues)
5592 {
5593 assert(_collector->_span.equals(_span) && !_span.is_empty(),
5594 "Inconsistency in _span");
5595 }
5597 OopTaskQueueSet* task_queues() { return _task_queues; }
5599 OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
5601 ParallelTaskTerminator* terminator() { return &_term; }
5603 void do_work_steal(int i,
5604 CMSParDrainMarkingStackClosure* drain,
5605 CMSParKeepAliveClosure* keep_alive,
5606 int* seed);
5608 virtual void work(int i);
5609 };
5611 void CMSRefProcTaskProxy::work(int i) {
5612 assert(_collector->_span.equals(_span), "Inconsistency in _span");
5613 CMSParKeepAliveClosure par_keep_alive(_collector, _span,
5614 _mark_bit_map,
5615 &_collector->_revisitStack,
5616 work_queue(i));
5617 CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span,
5618 _mark_bit_map,
5619 &_collector->_revisitStack,
5620 work_queue(i));
5621 CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map);
5622 _task.work(i, is_alive_closure, par_keep_alive, par_drain_stack);
5623 if (_task.marks_oops_alive()) {
5624 do_work_steal(i, &par_drain_stack, &par_keep_alive,
5625 _collector->hash_seed(i));
5626 }
5627 assert(work_queue(i)->size() == 0, "work_queue should be empty");
5628 assert(_collector->_overflow_list == NULL, "non-empty _overflow_list");
5629 }
5631 class CMSRefEnqueueTaskProxy: public AbstractGangTask {
5632 typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
5633 EnqueueTask& _task;
5635 public:
5636 CMSRefEnqueueTaskProxy(EnqueueTask& task)
5637 : AbstractGangTask("Enqueue reference objects in parallel"),
5638 _task(task)
5639 { }
5641 virtual void work(int i)
5642 {
5643 _task.work(i);
5644 }
5645 };
5647 CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector,
5648 MemRegion span, CMSBitMap* bit_map, CMSMarkStack* revisit_stack,
5649 OopTaskQueue* work_queue):
5650 Par_KlassRememberingOopClosure(collector, NULL, revisit_stack),
5651 _span(span),
5652 _bit_map(bit_map),
5653 _work_queue(work_queue),
5654 _mark_and_push(collector, span, bit_map, revisit_stack, work_queue),
5655 _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
5656 (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads)))
5657 { }
5659 // . see if we can share work_queues with ParNew? XXX
5660 void CMSRefProcTaskProxy::do_work_steal(int i,
5661 CMSParDrainMarkingStackClosure* drain,
5662 CMSParKeepAliveClosure* keep_alive,
5663 int* seed) {
5664 OopTaskQueue* work_q = work_queue(i);
5665 NOT_PRODUCT(int num_steals = 0;)
5666 oop obj_to_scan;
5668 while (true) {
5669 // Completely finish any left over work from (an) earlier round(s)
5670 drain->trim_queue(0);
5671 size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
5672 (size_t)ParGCDesiredObjsFromOverflowList);
5673 // Now check if there's any work in the overflow list
5674 if (_collector->par_take_from_overflow_list(num_from_overflow_list,
5675 work_q)) {
5676 // Found something in global overflow list;
5677 // not yet ready to go stealing work from others.
5678 // We'd like to assert(work_q->size() != 0, ...)
5679 // because we just took work from the overflow list,
5680 // but of course we can't, since all of that might have
5681 // been already stolen from us.
5682 continue;
5683 }
5684 // Verify that we have no work before we resort to stealing
5685 assert(work_q->size() == 0, "Have work, shouldn't steal");
5686 // Try to steal from other queues that have work
5687 if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
5688 NOT_PRODUCT(num_steals++;)
5689 assert(obj_to_scan->is_oop(), "Oops, not an oop!");
5690 assert(_mark_bit_map->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
5691 // Do scanning work
5692 obj_to_scan->oop_iterate(keep_alive);
5693 // Loop around, finish this work, and try to steal some more
5694 } else if (terminator()->offer_termination()) {
5695 break; // nirvana from the infinite cycle
5696 }
5697 }
5698 NOT_PRODUCT(
5699 if (PrintCMSStatistics != 0) {
5700 gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
5701 }
5702 )
5703 }
5705 void CMSRefProcTaskExecutor::execute(ProcessTask& task)
5706 {
5707 GenCollectedHeap* gch = GenCollectedHeap::heap();
5708 WorkGang* workers = gch->workers();
5709 assert(workers != NULL, "Need parallel worker threads.");
5710 int n_workers = workers->total_workers();
5711 CMSRefProcTaskProxy rp_task(task, &_collector,
5712 _collector.ref_processor()->span(),
5713 _collector.markBitMap(),
5714 n_workers, _collector.task_queues());
5715 workers->run_task(&rp_task);
5716 }
5718 void CMSRefProcTaskExecutor::execute(EnqueueTask& task)
5719 {
5721 GenCollectedHeap* gch = GenCollectedHeap::heap();
5722 WorkGang* workers = gch->workers();
5723 assert(workers != NULL, "Need parallel worker threads.");
5724 CMSRefEnqueueTaskProxy enq_task(task);
5725 workers->run_task(&enq_task);
5726 }
5728 void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
5730 ResourceMark rm;
5731 HandleMark hm;
5733 ReferenceProcessor* rp = ref_processor();
5734 assert(rp->span().equals(_span), "Spans should be equal");
5735 assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
5736 // Process weak references.
5737 rp->setup_policy(clear_all_soft_refs);
5738 verify_work_stacks_empty();
5740 CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
5741 &_markStack, &_revisitStack,
5742 false /* !preclean */);
5743 CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
5744 _span, &_markBitMap, &_markStack,
5745 &cmsKeepAliveClosure, false /* !preclean */);
5746 {
5747 TraceTime t("weak refs processing", PrintGCDetails, false, gclog_or_tty);
5748 if (rp->processing_is_mt()) {
5749 CMSRefProcTaskExecutor task_executor(*this);
5750 rp->process_discovered_references(&_is_alive_closure,
5751 &cmsKeepAliveClosure,
5752 &cmsDrainMarkingStackClosure,
5753 &task_executor);
5754 } else {
5755 rp->process_discovered_references(&_is_alive_closure,
5756 &cmsKeepAliveClosure,
5757 &cmsDrainMarkingStackClosure,
5758 NULL);
5759 }
5760 verify_work_stacks_empty();
5761 }
5763 if (should_unload_classes()) {
5764 {
5765 TraceTime t("class unloading", PrintGCDetails, false, gclog_or_tty);
5767 // Follow SystemDictionary roots and unload classes
5768 bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
5770 // Follow CodeCache roots and unload any methods marked for unloading
5771 CodeCache::do_unloading(&_is_alive_closure,
5772 &cmsKeepAliveClosure,
5773 purged_class);
5775 cmsDrainMarkingStackClosure.do_void();
5776 verify_work_stacks_empty();
5778 // Update subklass/sibling/implementor links in KlassKlass descendants
5779 assert(!_revisitStack.isEmpty(), "revisit stack should not be empty");
5780 oop k;
5781 while ((k = _revisitStack.pop()) != NULL) {
5782 ((Klass*)(oopDesc*)k)->follow_weak_klass_links(
5783 &_is_alive_closure,
5784 &cmsKeepAliveClosure);
5785 }
5786 assert(!ClassUnloading ||
5787 (_markStack.isEmpty() && overflow_list_is_empty()),
5788 "Should not have found new reachable objects");
5789 assert(_revisitStack.isEmpty(), "revisit stack should have been drained");
5790 cmsDrainMarkingStackClosure.do_void();
5791 verify_work_stacks_empty();
5792 }
5794 {
5795 TraceTime t("scrub symbol & string tables", PrintGCDetails, false, gclog_or_tty);
5796 // Now clean up stale oops in SymbolTable and StringTable
5797 SymbolTable::unlink(&_is_alive_closure);
5798 StringTable::unlink(&_is_alive_closure);
5799 }
5800 }
5802 verify_work_stacks_empty();
5803 // Restore any preserved marks as a result of mark stack or
5804 // work queue overflow
5805 restore_preserved_marks_if_any(); // done single-threaded for now
5807 rp->set_enqueuing_is_done(true);
5808 if (rp->processing_is_mt()) {
5809 CMSRefProcTaskExecutor task_executor(*this);
5810 rp->enqueue_discovered_references(&task_executor);
5811 } else {
5812 rp->enqueue_discovered_references(NULL);
5813 }
5814 rp->verify_no_references_recorded();
5815 assert(!rp->discovery_enabled(), "should have been disabled");
5817 // JVMTI object tagging is based on JNI weak refs. If any of these
5818 // refs were cleared then JVMTI needs to update its maps and
5819 // maybe post ObjectFrees to agents.
5820 JvmtiExport::cms_ref_processing_epilogue();
5821 }
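// Illustrative sketch (not part of the build): the executor-or-NULL
// dispatch used by process_discovered_references() above, reduced to
// standard C++ with hypothetical names. A non-NULL executor selects
// the parallel path; NULL means the calling thread runs the work itself.
//
//   struct Task     { virtual void work(int worker_id) = 0; };
//   struct Executor { virtual void execute(Task* t) = 0; };
//
//   void process_refs(Task* t, Executor* ex) {
//     if (ex != NULL) {
//       ex->execute(t);   // fan the task out to the worker gang
//     } else {
//       t->work(0);       // serial: the caller does it all
//     }
//   }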
5823 #ifndef PRODUCT
5824 void CMSCollector::check_correct_thread_executing() {
5825 Thread* t = Thread::current();
5826 // Only the VM thread or the CMS thread should be here.
5827 assert(t->is_ConcurrentGC_thread() || t->is_VM_thread(),
5828 "Unexpected thread type");
5829 // If this is the vm thread, the foreground process
5830 // should not be waiting. Note that _foregroundGCIsActive is
5831 // true while the foreground collector is waiting.
5832 if (_foregroundGCShouldWait) {
5833 // We cannot be the VM thread
5834 assert(t->is_ConcurrentGC_thread(),
5835 "Should be CMS thread");
5836 } else {
5837 // We can be the CMS thread only if we are in a stop-world
5838 // phase of CMS collection.
5839 if (t->is_ConcurrentGC_thread()) {
5840 assert(_collectorState == InitialMarking ||
5841 _collectorState == FinalMarking,
5842 "Should be a stop-world phase");
5843 // The CMS thread should be holding the CMS_token.
5844 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5845 "Potential interference with concurrently "
5846 "executing VM thread");
5847 }
5848 }
5849 }
5850 #endif
5852 void CMSCollector::sweep(bool asynch) {
5853 assert(_collectorState == Sweeping, "just checking");
5854 check_correct_thread_executing();
5855 verify_work_stacks_empty();
5856 verify_overflow_empty();
5857 increment_sweep_count();
5858 _inter_sweep_timer.stop();
5859 _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
5860 size_policy()->avg_cms_free_at_sweep()->sample(_cmsGen->free());
5862 // PermGen verification support: If perm gen sweeping is disabled in
5863 // this cycle, we preserve the perm gen object "deadness" information
5864 // in the perm_gen_verify_bit_map. In order to do that we traverse
5865 // all blocks in perm gen and mark all dead objects.
5866 if (verifying() && !should_unload_classes()) {
5867 assert(perm_gen_verify_bit_map()->sizeInBits() != 0,
5868 "Should have already been allocated");
5869 MarkDeadObjectsClosure mdo(this, _permGen->cmsSpace(),
5870 markBitMap(), perm_gen_verify_bit_map());
5871 if (asynch) {
5872 CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
5873 bitMapLock());
5874 _permGen->cmsSpace()->blk_iterate(&mdo);
5875 } else {
5876 // In the case of synchronous sweep, we already have
5877 // the requisite locks/tokens.
5878 _permGen->cmsSpace()->blk_iterate(&mdo);
5879 }
5880 }
5882 assert(!_intra_sweep_timer.is_active(), "Should not be active");
5883 _intra_sweep_timer.reset();
5884 _intra_sweep_timer.start();
5885 if (asynch) {
5886 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
5887 CMSPhaseAccounting pa(this, "sweep", !PrintGCDetails);
5888 // First sweep the old gen then the perm gen
5889 {
5890 CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
5891 bitMapLock());
5892 sweepWork(_cmsGen, asynch);
5893 }
5895 // Now repeat for perm gen
5896 if (should_unload_classes()) {
5897 CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
5898 bitMapLock());
5899 sweepWork(_permGen, asynch);
5900 }
5902 // Update Universe::_heap_*_at_gc figures.
5903 // We need all the free list locks to make the abstract state
5904 // transition from Sweeping to Resetting. See detailed note
5905 // further below.
5906 {
5907 CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
5908 _permGen->freelistLock());
5909 // Update heap occupancy information which is used as
5910 // input to soft ref clearing policy at the next gc.
5911 Universe::update_heap_info_at_gc();
5912 _collectorState = Resizing;
5913 }
5914 } else {
5915 // already have needed locks
5916 sweepWork(_cmsGen, asynch);
5918 if (should_unload_classes()) {
5919 sweepWork(_permGen, asynch);
5920 }
5921 // Update heap occupancy information which is used as
5922 // input to soft ref clearing policy at the next gc.
5923 Universe::update_heap_info_at_gc();
5924 _collectorState = Resizing;
5925 }
5926 verify_work_stacks_empty();
5927 verify_overflow_empty();
5929 _intra_sweep_timer.stop();
5930 _intra_sweep_estimate.sample(_intra_sweep_timer.seconds());
5932 _inter_sweep_timer.reset();
5933 _inter_sweep_timer.start();
5935 update_time_of_last_gc(os::javaTimeMillis());
5937 // NOTE on abstract state transitions:
5938 // Mutators allocate-live and/or mark the mod-union table dirty
5939 // based on the state of the collection. The former is done in
5940 // the interval [Marking, Sweeping] and the latter in the interval
5941 // [Marking, Sweeping). Thus the transitions into the Marking state
5942 // and out of the Sweeping state must be synchronously visible
5943 // globally to the mutators.
5944 // The transition into the Marking state happens with the world
5945 // stopped so the mutators will globally see it. Sweeping is
5946 // done asynchronously by the background collector so the transition
5947 // from the Sweeping state to the Resizing state must be done
5948 // under the freelistLock (as is the check for whether to
5949 // allocate-live and whether to dirty the mod-union table).
5950 assert(_collectorState == Resizing, "Change of collector state to"
5951 " Resizing must be done under the freelistLocks (plural)");
5953 // Now that sweeping has been completed, if the GCH's
5954 // incremental_collection_will_fail flag is set, clear it,
5955 // thus inviting a younger gen collection to promote into
5956 // this generation. If such a promotion may still fail,
5957 // the flag will be set again when a young collection is
5958 // attempted.
5959 // I think the incremental_collection_will_fail flag's use
5960 // is specific to a 2-generation collection policy, so I'll
5961 // assert that that's the configuration we are operating within.
5962 // The use of the flag can and should be generalized appropriately
5963 // in the future to deal with a general n-generation system.
5965 GenCollectedHeap* gch = GenCollectedHeap::heap();
5966 assert(gch->collector_policy()->is_two_generation_policy(),
5967 "Resetting of incremental_collection_will_fail flag"
5968 " may be incorrect otherwise");
5969 gch->clear_incremental_collection_will_fail();
5970 gch->update_full_collections_completed(_collection_count_start);
5971 }
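// Illustrative sketch (not part of the build): why the Sweeping ->
// Resizing transition above must be published under the freelistLock.
// A mutator that makes its allocate-live / dirty-mod-union decision
// under the same lock can never observe a torn transition. Names are
// hypothetical stand-ins, in standard C++:
//
//   #include <mutex>
//
//   enum State { Sweeping, Resizing };
//   static State      g_state = Sweeping;
//   static std::mutex g_freelist_lock;     // stand-in for freelistLock
//
//   void finish_sweep() {
//     std::lock_guard<std::mutex> g(g_freelist_lock);
//     g_state = Resizing;   // visible atomically to lock-holding readers
//   }
//
//   bool should_allocate_live() {
//     std::lock_guard<std::mutex> g(g_freelist_lock);
//     return g_state == Sweeping;   // mutator decision, same lock
//   }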
5973 // FIX ME!!! Looks like this belongs in CFLSpace, with
5974 // CMSGen merely delegating to it.
5975 void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
5976 double nearLargestPercent = FLSLargestBlockCoalesceProximity;
5977 HeapWord* minAddr = _cmsSpace->bottom();
5978 HeapWord* largestAddr =
5979 (HeapWord*) _cmsSpace->dictionary()->findLargestDict();
5980 if (largestAddr == NULL) {
5981 // The dictionary appears to be empty. In this case
5982 // try to coalesce at the end of the heap.
5983 largestAddr = _cmsSpace->end();
5984 }
5985 size_t largestOffset = pointer_delta(largestAddr, minAddr);
5986 size_t nearLargestOffset =
5987 (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
5988 if (PrintFLSStatistics != 0) {
5989 gclog_or_tty->print_cr(
5990 "CMS: Large Block: " PTR_FORMAT ";"
5991 " Proximity: " PTR_FORMAT " -> " PTR_FORMAT,
5992 largestAddr,
5993 _cmsSpace->nearLargestChunk(), minAddr + nearLargestOffset);
5994 }
5995 _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
5996 }
5998 bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) {
5999 return addr >= _cmsSpace->nearLargestChunk();
6000 }
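// Worked example (illustrative numbers): with the dictionary's largest
// block 1048576 words above bottom, a proximity factor of 0.99 and a
// hypothetical MinChunkSize of 7 words, the computation above yields
//
//   size_t largestOffset      = 1048576;   // words
//   double nearLargestPercent = 0.99;
//   size_t nearLargestOffset  =
//       (size_t)((double)largestOffset * nearLargestPercent) - 7;
//   // == 1038090 - 7 == 1038083 words above bottom
//
// so the sweeper begins eager coalescing slightly before the largest
// free block, letting free runs that end near it merge into it.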
6002 FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
6003 return _cmsSpace->find_chunk_at_end();
6004 }
6006 void ConcurrentMarkSweepGeneration::update_gc_stats(int current_level,
6007 bool full) {
6008 // The next lower level has been collected. Gather any statistics
6009 // that are of interest at this point.
6010 if (!full && (current_level + 1) == level()) {
6011 // Gather statistics on the young generation collection.
6012 collector()->stats().record_gc0_end(used());
6013 }
6014 }
6016 CMSAdaptiveSizePolicy* ConcurrentMarkSweepGeneration::size_policy() {
6017 GenCollectedHeap* gch = GenCollectedHeap::heap();
6018 assert(gch->kind() == CollectedHeap::GenCollectedHeap,
6019 "Wrong type of heap");
6020 CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
6021 gch->gen_policy()->size_policy();
6022 assert(sp->is_gc_cms_adaptive_size_policy(),
6023 "Wrong type of size policy");
6024 return sp;
6025 }
6027 void ConcurrentMarkSweepGeneration::rotate_debug_collection_type() {
6028 if (PrintGCDetails && Verbose) {
6029 gclog_or_tty->print("Rotate from %d ", _debug_collection_type);
6030 }
6031 _debug_collection_type = (CollectionTypes) (_debug_collection_type + 1);
6032 _debug_collection_type =
6033 (CollectionTypes) (_debug_collection_type % Unknown_collection_type);
6034 if (PrintGCDetails && Verbose) {
6035 gclog_or_tty->print_cr("to %d ", _debug_collection_type);
6036 }
6037 }
6039 void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen,
6040 bool asynch) {
6041 // We iterate over the space(s) underlying this generation,
6042 // checking the mark bit map to see if the bits corresponding
6043 // to specific blocks are marked or not. Blocks that are
6044 // marked are live and are not swept up. All remaining blocks
6045 // are swept up, with coalescing on-the-fly as we sweep up
6046 // contiguous free and/or garbage blocks:
6047 // We need to ensure that the sweeper synchronizes with allocators
6048 // and stop-the-world collectors. In particular, the following
6049 // locks are used:
6050 // . CMS token: if this is held, a stop the world collection cannot occur
6051 // . freelistLock: if this is held no allocation can occur from this
6052 // generation by another thread
6053 // . bitMapLock: if this is held, no other thread can access or update
6054 //                 the marking bit map
6056 // Note that we need to hold the freelistLock if we use
6057 // block iterate below; else the iterator might go awry if
6058 // a mutator (or promotion) causes block contents to change
6059 // (for instance if the allocator divvies up a block).
6060 // If we hold the free list lock, for all practical purposes
6061 // young generation GC's can't occur (they'll usually need to
6062 // promote), so we might as well prevent all young generation
6063 // GC's while we do a sweeping step. For the same reason, we might
6064 // as well take the bit map lock for the entire duration
6066 // check that we hold the requisite locks
6067 assert(have_cms_token(), "Should hold cms token");
6068 assert( (asynch && ConcurrentMarkSweepThread::cms_thread_has_cms_token())
6069 || (!asynch && ConcurrentMarkSweepThread::vm_thread_has_cms_token()),
6070 "Should possess CMS token to sweep");
6071 assert_lock_strong(gen->freelistLock());
6072 assert_lock_strong(bitMapLock());
6074 assert(!_inter_sweep_timer.is_active(), "Was switched off in an outer context");
6075 assert(_intra_sweep_timer.is_active(), "Was switched on in an outer context");
6076 gen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
6077 _inter_sweep_estimate.padded_average(),
6078 _intra_sweep_estimate.padded_average());
6079 gen->setNearLargestChunk();
6081 {
6082 SweepClosure sweepClosure(this, gen, &_markBitMap,
6083 CMSYield && asynch);
6084 gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
6085 // We need to free-up/coalesce garbage/blocks from a
6086 // co-terminal free run. This is done in the SweepClosure
6087 // destructor; so, do not remove this scope, else the
6088 // end-of-sweep-census below will be off by a little bit.
6089 }
6090 gen->cmsSpace()->sweep_completed();
6091 gen->cmsSpace()->endSweepFLCensus(sweep_count());
6092 if (should_unload_classes()) { // unloaded classes this cycle,
6093 _concurrent_cycles_since_last_unload = 0; // ... reset count
6094 } else { // did not unload classes,
6095 _concurrent_cycles_since_last_unload++; // ... increment count
6096 }
6097 }
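// Illustrative sketch (not part of the build): the padded averages fed
// to beginSweepFLCensus() above are, in spirit, exponentially-decaying
// means plus a multiple of the decaying deviation, so estimates err on
// the high side. A minimal version, assuming a weight w in (0,1) and a
// padding factor p (both hypothetical here):
//
//   struct PaddedAvg {
//     double avg, dev, w, p;
//     void sample(double x) {
//       avg = (1.0 - w) * avg + w * x;
//       double d = (x > avg) ? (x - avg) : (avg - x);
//       dev = (1.0 - w) * dev + w * d;
//     }
//     double padded() const { return avg + p * dev; }
//   };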
6099 // Reset CMS data structures (for now just the marking bit map)
6100 // preparatory for the next cycle.
6101 void CMSCollector::reset(bool asynch) {
6102 GenCollectedHeap* gch = GenCollectedHeap::heap();
6103 CMSAdaptiveSizePolicy* sp = size_policy();
6104 AdaptiveSizePolicyOutput(sp, gch->total_collections());
6105 if (asynch) {
6106 CMSTokenSyncWithLocks ts(true, bitMapLock());
6108 // If the state is not "Resetting", the foreground thread
6109 // has done a collection and the resetting.
6110 if (_collectorState != Resetting) {
6111 assert(_collectorState == Idling, "The state should only change"
6112 " because the foreground collector has finished the collection");
6113 return;
6114 }
6116 // Clear the mark bitmap (no grey objects to start with)
6117 // for the next cycle.
6118 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
6119 CMSPhaseAccounting cmspa(this, "reset", !PrintGCDetails);
6121 HeapWord* curAddr = _markBitMap.startWord();
6122 while (curAddr < _markBitMap.endWord()) {
6123 size_t remaining = pointer_delta(_markBitMap.endWord(), curAddr);
6124 MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
6125 _markBitMap.clear_large_range(chunk);
6126 if (ConcurrentMarkSweepThread::should_yield() &&
6127 !foregroundGCIsActive() &&
6128 CMSYield) {
6129 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6130 "CMS thread should hold CMS token");
6131 assert_lock_strong(bitMapLock());
6132 bitMapLock()->unlock();
6133 ConcurrentMarkSweepThread::desynchronize(true);
6134 ConcurrentMarkSweepThread::acknowledge_yield_request();
6135 stopTimer();
6136 if (PrintCMSStatistics != 0) {
6137 incrementYields();
6138 }
6139 icms_wait();
6141 // See the comment in coordinator_yield()
6142 for (unsigned i = 0; i < CMSYieldSleepCount &&
6143 ConcurrentMarkSweepThread::should_yield() &&
6144 !CMSCollector::foregroundGCIsActive(); ++i) {
6145 os::sleep(Thread::current(), 1, false);
6146 ConcurrentMarkSweepThread::acknowledge_yield_request();
6147 }
6149 ConcurrentMarkSweepThread::synchronize(true);
6150 bitMapLock()->lock_without_safepoint_check();
6151 startTimer();
6152 }
6153 curAddr = chunk.end();
6154 }
6155 // A successful mostly concurrent collection has been done.
6156 // Because only the full (i.e., concurrent mode failure) collections
6157 // are being measured for gc overhead limits, clean the "near" flag
6158 // and count.
6159 sp->reset_gc_overhead_limit_count();
6160 _collectorState = Idling;
6161 } else {
6162 // already have the lock
6163 assert(_collectorState == Resetting, "just checking");
6164 assert_lock_strong(bitMapLock());
6165 _markBitMap.clear_all();
6166 _collectorState = Idling;
6167 }
6169 // Stop incremental mode after a cycle completes, so that any future cycles
6170 // are triggered by allocation.
6171 stop_icms();
6173 NOT_PRODUCT(
6174 if (RotateCMSCollectionTypes) {
6175 _cmsGen->rotate_debug_collection_type();
6176 }
6177 )
6178 }
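// Illustrative sketch (not part of the build): the chunked bit map
// clearing in reset() above, reduced to standard C++. Clearing one
// bounded quantum at a time, with a yield check between quanta, keeps
// the CMS thread responsive; yield_if_requested() is a hypothetical
// stand-in for the unlock/sleep/relock dance above.
//
//   #include <algorithm>
//   #include <cstddef>
//   #include <cstring>
//
//   void clear_in_quanta(unsigned char* map, size_t len, size_t quantum) {
//     for (size_t done = 0; done < len; ) {
//       size_t n = std::min(quantum, len - done);
//       std::memset(map + done, 0, n);   // clear one bounded chunk
//       done += n;
//       yield_if_requested();            // hypothetical: drop locks, nap
//     }
//   }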
6180 void CMSCollector::do_CMS_operation(CMS_op_type op) {
6181 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
6182 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
6183 TraceTime t("GC", PrintGC, !PrintGCDetails, gclog_or_tty);
6184 TraceCollectorStats tcs(counters());
6186 switch (op) {
6187 case CMS_op_checkpointRootsInitial: {
6188 checkpointRootsInitial(true); // asynch
6189 if (PrintGC) {
6190 _cmsGen->printOccupancy("initial-mark");
6191 }
6192 break;
6193 }
6194 case CMS_op_checkpointRootsFinal: {
6195 checkpointRootsFinal(true, // asynch
6196 false, // !clear_all_soft_refs
6197 false); // !init_mark_was_synchronous
6198 if (PrintGC) {
6199 _cmsGen->printOccupancy("remark");
6200 }
6201 break;
6202 }
6203 default:
6204 fatal("No such CMS_op");
6205 }
6206 }
6208 #ifndef PRODUCT
6209 size_t const CMSCollector::skip_header_HeapWords() {
6210 return FreeChunk::header_size();
6211 }
6213 // Try and collect here conditions that should hold when
6214 // CMS thread is exiting. The idea is that the foreground GC
6215 // thread should not be blocked if it wants to terminate
6216 // the CMS thread and yet continue to run the VM for a while
6217 // after that.
6218 void CMSCollector::verify_ok_to_terminate() const {
6219 assert(Thread::current()->is_ConcurrentGC_thread(),
6220 "should be called by CMS thread");
6221 assert(!_foregroundGCShouldWait, "should be false");
6222 // We could check here that all the various low-level locks
6223 // are not held by the CMS thread, but that is overkill; see
6224 // also CMSThread::verify_ok_to_terminate() where the CGC_lock
6225 // is checked.
6226 }
6227 #endif
6229 size_t CMSCollector::block_size_using_printezis_bits(HeapWord* addr) const {
6230 assert(_markBitMap.isMarked(addr) && _markBitMap.isMarked(addr + 1),
6231 "missing Printezis mark?");
6232 HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
6233 size_t size = pointer_delta(nextOneAddr + 1, addr);
6234 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6235 "alignment problem");
6236 assert(size >= 3, "Necessary for Printezis marks to work");
6237 return size;
6238 }
6240 // A variant of the above (block_size_using_printezis_bits()) except
6241 // that we return 0 if the P-bits are not yet set.
6242 size_t CMSCollector::block_size_if_printezis_bits(HeapWord* addr) const {
6243 if (_markBitMap.isMarked(addr)) {
6244 assert(_markBitMap.isMarked(addr + 1), "Missing Printezis bit?");
6245 HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
6246 size_t size = pointer_delta(nextOneAddr + 1, addr);
6247 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6248 "alignment problem");
6249 assert(size >= 3, "Necessary for Printezis marks to work");
6250 return size;
6251 } else {
6252 assert(!_markBitMap.isMarked(addr + 1), "Bit map inconsistency?");
6253 return 0;
6254 }
6255 }
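// Worked example (illustrative): for an uninitialized block at word
// offset A of size 8, the mark bit sits at A and the two Printezis
// marks at A+1 and A+7 (== A+size-1). The search above starts at A+2
// and finds the next set bit at A+7, so
//   size = pointer_delta((A+7) + 1, A) = 8
// recovers the block size. The size >= 3 assertion is what makes this
// work: the end bit at A+size-1 must lie at or beyond the search start
// A+2, i.e. size-1 >= 2.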
6257 HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const {
6258 size_t sz = 0;
6259 oop p = (oop)addr;
6260 if (p->klass_or_null() != NULL && p->is_parsable()) {
6261 sz = CompactibleFreeListSpace::adjustObjectSize(p->size());
6262 } else {
6263 sz = block_size_using_printezis_bits(addr);
6264 }
6265 assert(sz > 0, "size must be nonzero");
6266 HeapWord* next_block = addr + sz;
6267 HeapWord* next_card = (HeapWord*)round_to((uintptr_t)next_block,
6268 CardTableModRefBS::card_size);
6269 assert(round_down((uintptr_t)addr, CardTableModRefBS::card_size) <
6270 round_down((uintptr_t)next_card, CardTableModRefBS::card_size),
6271 "must be different cards");
6272 return next_card;
6273 }
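// Worked example (illustrative numbers): with 512-byte cards and
// 8-byte heap words (64 words per card), a block starting at word
// offset 70 with size 10 ends at word 80; rounding up to the next card
// boundary gives word 128. A sketch of the rounding, assuming the card
// size is a power of two:
//
//   size_t round_up(size_t x, size_t card_words) {  // card_words == 64
//     return (x + card_words - 1) & ~(card_words - 1);
//   }
//   // round_up(80, 64) == 128;  round_up(64, 64) == 64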
6276 // CMS Bit Map Wrapper /////////////////////////////////////////
6278 // Construct a CMS bit map infrastructure, but don't create the
6279 // bit vector itself. That is done by a separate call CMSBitMap::allocate()
6280 // further below.
6281 CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name):
6282 _bm(),
6283 _shifter(shifter),
6284 _lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true) : NULL)
6285 {
6286 _bmStartWord = 0;
6287 _bmWordSize = 0;
6288 }
6290 bool CMSBitMap::allocate(MemRegion mr) {
6291 _bmStartWord = mr.start();
6292 _bmWordSize = mr.word_size();
6293 ReservedSpace brs(ReservedSpace::allocation_align_size_up(
6294 (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
6295 if (!brs.is_reserved()) {
6296 warning("CMS bit map allocation failure");
6297 return false;
6298 }
6299 // For now we'll just commit all of the bit map up front.
6300 // Later on we'll try to be more parsimonious with swap.
6301 if (!_virtual_space.initialize(brs, brs.size())) {
6302 warning("CMS bit map backing store failure");
6303 return false;
6304 }
6305 assert(_virtual_space.committed_size() == brs.size(),
6306 "didn't reserve backing store for all of CMS bit map?");
6307 _bm.set_map((BitMap::bm_word_t*)_virtual_space.low());
6308 assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
6309 _bmWordSize, "inconsistency in bit map sizing");
6310 _bm.set_size(_bmWordSize >> _shifter);
6312 // bm.clear(); // can we rely on getting zero'd memory? verify below
6313 assert(isAllClear(),
6314 "Expected zero'd memory from ReservedSpace constructor");
6315 assert(_bm.size() == heapWordDiffToOffsetDiff(sizeInWords()),
6316 "consistency check");
6317 return true;
6318 }
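// Worked example (illustrative): with _shifter == 0 (one bit per heap
// word) and LogBitsPerByte == 3, a 1 GB generation on a 64-bit VM
// spans 2^27 eight-byte words, so the reservation above asks for
//   (2^27 >> (0 + 3)) + 1 = 2^24 + 1 bytes, i.e. just over 16 MB,
// which is then rounded up to the platform's allocation granularity.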
6320 void CMSBitMap::dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl) {
6321 HeapWord *next_addr, *end_addr, *last_addr;
6322 assert_locked();
6323 assert(covers(mr), "out-of-range error");
6324 // XXX assert that start and end are appropriately aligned
6325 for (next_addr = mr.start(), end_addr = mr.end();
6326 next_addr < end_addr; next_addr = last_addr) {
6327 MemRegion dirty_region = getAndClearMarkedRegion(next_addr, end_addr);
6328 last_addr = dirty_region.end();
6329 if (!dirty_region.is_empty()) {
6330 cl->do_MemRegion(dirty_region);
6331 } else {
6332 assert(last_addr == end_addr, "program logic");
6333 return;
6334 }
6335 }
6336 }
6338 #ifndef PRODUCT
6339 void CMSBitMap::assert_locked() const {
6340 CMSLockVerifier::assert_locked(lock());
6341 }
6343 bool CMSBitMap::covers(MemRegion mr) const {
6344 // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
6345 assert((size_t)_bm.size() == (_bmWordSize >> _shifter),
6346 "size inconsistency");
6347 return (mr.start() >= _bmStartWord) &&
6348 (mr.end() <= endWord());
6349 }
6351 bool CMSBitMap::covers(HeapWord* start, size_t size) const {
6352 return (start >= _bmStartWord && (start + size) <= endWord());
6353 }
6355 void CMSBitMap::verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) {
6356 // verify that there are no 1 bits in the interval [left, right)
6357 FalseBitMapClosure falseBitMapClosure;
6358 iterate(&falseBitMapClosure, left, right);
6359 }
6361 void CMSBitMap::region_invariant(MemRegion mr)
6362 {
6363 assert_locked();
6364 // mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
6365 assert(!mr.is_empty(), "unexpected empty region");
6366 assert(covers(mr), "mr should be covered by bit map");
6367 // convert address range into offset range
6368 size_t start_ofs = heapWordToOffset(mr.start());
6369 // Make sure that end() is appropriately aligned
6370 assert(mr.end() == (HeapWord*)round_to((intptr_t)mr.end(),
6371 (1 << (_shifter+LogHeapWordSize))),
6372 "Misaligned mr.end()");
6373 size_t end_ofs = heapWordToOffset(mr.end());
6374 assert(end_ofs > start_ofs, "Should mark at least one bit");
6375 }
6377 #endif
6379 bool CMSMarkStack::allocate(size_t size) {
6380 // allocate a stack of the requisite depth
6381 ReservedSpace rs(ReservedSpace::allocation_align_size_up(
6382 size * sizeof(oop)));
6383 if (!rs.is_reserved()) {
6384 warning("CMSMarkStack allocation failure");
6385 return false;
6386 }
6387 if (!_virtual_space.initialize(rs, rs.size())) {
6388 warning("CMSMarkStack backing store failure");
6389 return false;
6390 }
6391 assert(_virtual_space.committed_size() == rs.size(),
6392 "didn't reserve backing store for all of CMS stack?");
6393 _base = (oop*)(_virtual_space.low());
6394 _index = 0;
6395 _capacity = size;
6396 NOT_PRODUCT(_max_depth = 0);
6397 return true;
6398 }
6400 // XXX FIX ME !!! In the MT case we come in here holding a
6401 // leaf lock. For printing we need to take a further lock
6402 // which has lower rank. We need to recalibrate the two
6403 // lock-ranks involved in order to be able to print the
6404 // messages below. (Or defer the printing to the caller.
6405 // For now we take the expedient path of just disabling the
6406 // messages for the problematic case.)
6407 void CMSMarkStack::expand() {
6408 assert(_capacity <= MarkStackSizeMax, "stack bigger than permitted");
6409 if (_capacity == MarkStackSizeMax) {
6410 if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
6411 // We print a warning message only once per CMS cycle.
6412 gclog_or_tty->print_cr(" (benign) Hit CMSMarkStack max size limit");
6413 }
6414 return;
6415 }
6416 // Double capacity if possible
6417 size_t new_capacity = MIN2(_capacity*2, MarkStackSizeMax);
6418 // Do not give up existing stack until we have managed to
6419 // get the double capacity that we desired.
6420 ReservedSpace rs(ReservedSpace::allocation_align_size_up(
6421 new_capacity * sizeof(oop)));
6422 if (rs.is_reserved()) {
6423 // Release the backing store associated with old stack
6424 _virtual_space.release();
6425 // Reinitialize virtual space for new stack
6426 if (!_virtual_space.initialize(rs, rs.size())) {
6427 fatal("Not enough swap for expanded marking stack");
6428 }
6429 _base = (oop*)(_virtual_space.low());
6430 _index = 0;
6431 _capacity = new_capacity;
6432 } else if (_failed_double++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
6433 // Failed to double capacity, continue;
6434 // we print a detail message only once per CMS cycle.
6435 gclog_or_tty->print(" (benign) Failed to expand marking stack from "SIZE_FORMAT"K to "
6436 SIZE_FORMAT"K",
6437 _capacity / K, new_capacity / K);
6438 }
6439 }
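// Illustrative sketch (not part of the build): the capped-doubling
// policy of expand() above in standard C++. The key ordering is that
// the new space is acquired before the old one is released, so a
// failed expansion leaves the existing (empty) stack usable.
//
//   #include <algorithm>
//   #include <cstddef>
//   #include <cstdlib>
//
//   struct Stack { void** base; size_t capacity; size_t max; };
//
//   // Try to double capacity, capped at s->max; keep old space on failure.
//   bool try_expand(Stack* s) {
//     if (s->capacity == s->max) return false;     // at the hard cap
//     size_t new_cap = std::min(s->capacity * 2, s->max);
//     void** p = (void**)std::malloc(new_cap * sizeof(void*));
//     if (p == NULL) return false;                 // old stack intact
//     std::free(s->base);                          // contents not preserved
//     s->base = p;
//     s->capacity = new_cap;
//     return true;
//   }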
6442 // Closures
6443 // XXX: there seems to be a lot of code duplication here;
6444 // should refactor and consolidate common code.
6446 // This closure is used to mark refs into the CMS generation in
6447 // the CMS bit map. Called at the first checkpoint. This closure
6448 // assumes that we do not need to re-mark dirty cards; if the CMS
6449 // generation on which this is used is not an oldest (modulo perm gen)
6450 // generation then this will lose younger_gen cards!
6452 MarkRefsIntoClosure::MarkRefsIntoClosure(
6453 MemRegion span, CMSBitMap* bitMap):
6454 _span(span),
6455 _bitMap(bitMap)
6456 {
6457 assert(_ref_processor == NULL, "deliberately left NULL");
6458 assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
6459 }
6461 void MarkRefsIntoClosure::do_oop(oop obj) {
6462 // if p points into _span, then mark corresponding bit in _markBitMap
6463 assert(obj->is_oop(), "expected an oop");
6464 HeapWord* addr = (HeapWord*)obj;
6465 if (_span.contains(addr)) {
6466 // this should be made more efficient
6467 _bitMap->mark(addr);
6468 }
6469 }
6471 void MarkRefsIntoClosure::do_oop(oop* p) { MarkRefsIntoClosure::do_oop_work(p); }
6472 void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
6474 // A variant of the above, used for CMS marking verification.
6475 MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
6476 MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm):
6477 _span(span),
6478 _verification_bm(verification_bm),
6479 _cms_bm(cms_bm)
6480 {
6481 assert(_ref_processor == NULL, "deliberately left NULL");
6482 assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
6483 }
6485 void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
6486 // if p points into _span, then mark corresponding bit in _markBitMap
6487 assert(obj->is_oop(), "expected an oop");
6488 HeapWord* addr = (HeapWord*)obj;
6489 if (_span.contains(addr)) {
6490 _verification_bm->mark(addr);
6491 if (!_cms_bm->isMarked(addr)) {
6492 oop(addr)->print();
6493 gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", addr);
6494 fatal("... aborting");
6495 }
6496 }
6497 }
6499 void MarkRefsIntoVerifyClosure::do_oop(oop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
6500 void MarkRefsIntoVerifyClosure::do_oop(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
6502 //////////////////////////////////////////////////
6503 // MarkRefsIntoAndScanClosure
6504 //////////////////////////////////////////////////
6506 MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
6507 ReferenceProcessor* rp,
6508 CMSBitMap* bit_map,
6509 CMSBitMap* mod_union_table,
6510 CMSMarkStack* mark_stack,
6511 CMSMarkStack* revisit_stack,
6512 CMSCollector* collector,
6513 bool should_yield,
6514 bool concurrent_precleaning):
6515 _collector(collector),
6516 _span(span),
6517 _bit_map(bit_map),
6518 _mark_stack(mark_stack),
6519 _pushAndMarkClosure(collector, span, rp, bit_map, mod_union_table,
6520 mark_stack, revisit_stack, concurrent_precleaning),
6521 _yield(should_yield),
6522 _concurrent_precleaning(concurrent_precleaning),
6523 _freelistLock(NULL)
6524 {
6525 _ref_processor = rp;
6526 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
6527 }
6529 // This closure is used to mark refs into the CMS generation at the
6530 // second (final) checkpoint, and to scan and transitively follow
6531 // the unmarked oops. It is also used during the concurrent precleaning
6532 // phase while scanning objects on dirty cards in the CMS generation.
6533 // The marks are made in the marking bit map and the marking stack is
6534 // used for keeping the (newly) grey objects during the scan.
6535 // The parallel version (Par_...) appears further below.
6536 void MarkRefsIntoAndScanClosure::do_oop(oop obj) {
6537 if (obj != NULL) {
6538 assert(obj->is_oop(), "expected an oop");
6539 HeapWord* addr = (HeapWord*)obj;
6540 assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
6541 assert(_collector->overflow_list_is_empty(),
6542 "overflow list should be empty");
6543 if (_span.contains(addr) &&
6544 !_bit_map->isMarked(addr)) {
6545 // mark bit map (object is now grey)
6546 _bit_map->mark(addr);
6547 // push on marking stack (stack should be empty), and drain the
6548 // stack by applying this closure to the oops in the oops popped
6549 // from the stack (i.e. blacken the grey objects)
6550 bool res = _mark_stack->push(obj);
6551 assert(res, "Should have space to push on empty stack");
6552 do {
6553 oop new_oop = _mark_stack->pop();
6554 assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
6555 assert(new_oop->is_parsable(), "Found unparsable oop");
6556 assert(_bit_map->isMarked((HeapWord*)new_oop),
6557 "only grey objects on this stack");
6558 // iterate over the oops in this oop, marking and pushing
6559 // the ones in CMS heap (i.e. in _span).
6560 new_oop->oop_iterate(&_pushAndMarkClosure);
6561 // check if it's time to yield
6562 do_yield_check();
6563 } while (!_mark_stack->isEmpty() ||
6564 (!_concurrent_precleaning && take_from_overflow_list()));
6565 // if marking stack is empty, and we are not doing this
6566 // during precleaning, then check the overflow list
6567 }
6568 assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
6569 assert(_collector->overflow_list_is_empty(),
6570 "overflow list was drained above");
6571 // We could restore evacuated mark words, if any, used for
6572 // overflow list links here because the overflow list is
6573 // provably empty here. That would reduce the maximum
6574 // size requirements for preserved_{oop,mark}_stack.
6575 // But we'll just postpone it until we are all done
6576 // so we can just stream through.
6577 if (!_concurrent_precleaning && CMSOverflowEarlyRestoration) {
6578 _collector->restore_preserved_marks_if_any();
6579 assert(_collector->no_preserved_marks(), "No preserved marks");
6580 }
6581 assert(!CMSOverflowEarlyRestoration || _collector->no_preserved_marks(),
6582 "All preserved marks should have been restored above");
6583 }
6584 }
6586 void MarkRefsIntoAndScanClosure::do_oop(oop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
6587 void MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
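// Illustrative sketch (not part of the build): the mark-push-drain
// shape of do_oop above as a plain depth-first traversal in standard
// C++. Setting a node's flag corresponds to marking it grey in the bit
// map; popping it and scanning its references corresponds to
// blackening it.
//
//   #include <stack>
//   #include <vector>
//
//   struct Node { std::vector<Node*> refs; bool marked; };
//
//   void mark_and_drain(Node* root) {
//     if (root == NULL || root->marked) return;
//     root->marked = true;              // grey
//     std::stack<Node*> s;
//     s.push(root);
//     while (!s.empty()) {
//       Node* n = s.top(); s.pop();     // blacken n by scanning it
//       for (size_t i = 0; i < n->refs.size(); i++) {
//         Node* m = n->refs[i];
//         if (m != NULL && !m->marked) {
//           m->marked = true;           // newly grey
//           s.push(m);
//         }
//       }
//     }
//   }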
6589 void MarkRefsIntoAndScanClosure::do_yield_work() {
6590 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6591 "CMS thread should hold CMS token");
6592 assert_lock_strong(_freelistLock);
6593 assert_lock_strong(_bit_map->lock());
6594 // relinquish the free_list_lock and bitMapLock()
6595 DEBUG_ONLY(RememberKlassesChecker mux(false);)
6596 _bit_map->lock()->unlock();
6597 _freelistLock->unlock();
6598 ConcurrentMarkSweepThread::desynchronize(true);
6599 ConcurrentMarkSweepThread::acknowledge_yield_request();
6600 _collector->stopTimer();
6601 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
6602 if (PrintCMSStatistics != 0) {
6603 _collector->incrementYields();
6604 }
6605 _collector->icms_wait();
6607 // See the comment in coordinator_yield()
6608 for (unsigned i = 0;
6609 i < CMSYieldSleepCount &&
6610 ConcurrentMarkSweepThread::should_yield() &&
6611 !CMSCollector::foregroundGCIsActive();
6612 ++i) {
6613 os::sleep(Thread::current(), 1, false);
6614 ConcurrentMarkSweepThread::acknowledge_yield_request();
6615 }
6617 ConcurrentMarkSweepThread::synchronize(true);
6618 _freelistLock->lock_without_safepoint_check();
6619 _bit_map->lock()->lock_without_safepoint_check();
6620 _collector->startTimer();
6621 }
6623 ///////////////////////////////////////////////////////////
6624 // Par_MarkRefsIntoAndScanClosure: a parallel version of
6625 // MarkRefsIntoAndScanClosure
6626 ///////////////////////////////////////////////////////////
6627 Par_MarkRefsIntoAndScanClosure::Par_MarkRefsIntoAndScanClosure(
6628 CMSCollector* collector, MemRegion span, ReferenceProcessor* rp,
6629 CMSBitMap* bit_map, OopTaskQueue* work_queue, CMSMarkStack* revisit_stack):
6630 _span(span),
6631 _bit_map(bit_map),
6632 _work_queue(work_queue),
6633 _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
6634 (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads))),
6635 _par_pushAndMarkClosure(collector, span, rp, bit_map, work_queue,
6636 revisit_stack)
6637 {
6638 _ref_processor = rp;
6639 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
6640 }
6642 // This closure is used to mark refs into the CMS generation at the
6643 // second (final) checkpoint, and to scan and transitively follow
6644 // the unmarked oops. The marks are made in the marking bit map and
6645 // the work_queue is used for keeping the (newly) grey objects during
6646 // the scan phase whence they are also available for stealing by parallel
6647 // threads. Since the marking bit map is shared, updates are
6648 // synchronized (via CAS).
6649 void Par_MarkRefsIntoAndScanClosure::do_oop(oop obj) {
6650 if (obj != NULL) {
6651 // Ignore mark word because this could be an already marked oop
6652 // that may be chained at the end of the overflow list.
6653 assert(obj->is_oop(true), "expected an oop");
6654 HeapWord* addr = (HeapWord*)obj;
6655 if (_span.contains(addr) &&
6656 !_bit_map->isMarked(addr)) {
6657 // mark bit map (object will become grey):
6658 // It is possible for several threads to be
6659 // trying to "claim" this object concurrently;
6660 // the unique thread that succeeds in marking the
6661 // object first will do the subsequent push on
6662 // to the work queue (or overflow list).
6663 if (_bit_map->par_mark(addr)) {
6664 // push on work_queue (which may not be empty), and trim the
6665 // queue to an appropriate length by applying this closure to
6666 // the oops in the oops popped from the stack (i.e. blacken the
6667 // grey objects)
6668 bool res = _work_queue->push(obj);
6669 assert(res, "Low water mark should be less than capacity?");
6670 trim_queue(_low_water_mark);
6671 } // Else, another thread claimed the object
6672 }
6673 }
6674 }
6676 void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
6677 void Par_MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
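// Illustrative sketch (not part of the build): the "claim via CAS"
// idea behind par_mark() above, in standard C++11 atomics. HotSpot
// uses a compare-and-swap loop on the bit map word; an atomic fetch_or
// gives the same claim semantics: exactly one of the racing threads
// observes the bit's 0 -> 1 transition and becomes responsible for
// pushing the object.
//
//   #include <atomic>
//
//   // One word of a shared bit map; returns true iff we set the bit.
//   bool par_mark_bit(std::atomic<unsigned long>* word, unsigned bit) {
//     unsigned long mask = 1UL << bit;
//     unsigned long old  = word->fetch_or(mask);   // atomic RMW
//     return (old & mask) == 0;                    // true only for winner
//   }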
6679 // This closure is used to rescan the marked objects on the dirty cards
6680 // in the mod union table and the card table proper.
6681 size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
6682 oop p, MemRegion mr) {
6684 size_t size = 0;
6685 HeapWord* addr = (HeapWord*)p;
6686 DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6687 assert(_span.contains(addr), "we are scanning the CMS generation");
6688 // check if it's time to yield
6689 if (do_yield_check()) {
6690 // We yielded for some foreground stop-world work,
6691 // and we have been asked to abort this ongoing preclean cycle.
6692 return 0;
6693 }
6694 if (_bitMap->isMarked(addr)) {
6695 // it's marked; is it potentially uninitialized?
6696 if (p->klass_or_null() != NULL) {
6697 // If is_conc_safe is false, the object may be undergoing
6698 // change by the VM outside a safepoint. Don't try to
6699 // scan it, but rather leave it for the remark phase.
6700 if (CMSPermGenPrecleaningEnabled &&
6701 (!p->is_conc_safe() || !p->is_parsable())) {
6702 // Signal precleaning to redirty the card since
6703 // the klass pointer is already installed.
6704 assert(size == 0, "Initial value");
6705 } else {
6706 assert(p->is_parsable(), "must be parsable.");
6707 // an initialized object; ignore mark word in verification below
6708 // since we are running concurrent with mutators
6709 assert(p->is_oop(true), "should be an oop");
6710 if (p->is_objArray()) {
6711 // objArrays are precisely marked; restrict scanning
6712 // to dirty cards only.
6713 size = CompactibleFreeListSpace::adjustObjectSize(
6714 p->oop_iterate(_scanningClosure, mr));
6715 } else {
6716 // A non-array may have been imprecisely marked; we need
6717 // to scan object in its entirety.
6718 size = CompactibleFreeListSpace::adjustObjectSize(
6719 p->oop_iterate(_scanningClosure));
6720 }
6721 #ifdef DEBUG
6722 size_t direct_size =
6723 CompactibleFreeListSpace::adjustObjectSize(p->size());
6724 assert(size == direct_size, "Inconsistency in size");
6725 assert(size >= 3, "Necessary for Printezis marks to work");
6726 if (!_bitMap->isMarked(addr+1)) {
6727 _bitMap->verifyNoOneBitsInRange(addr+2, addr+size);
6728 } else {
6729 _bitMap->verifyNoOneBitsInRange(addr+2, addr+size-1);
6730 assert(_bitMap->isMarked(addr+size-1),
6731 "inconsistent Printezis mark");
6732 }
6733 #endif // DEBUG
6734 }
6735 } else {
6736 // an uninitialized object
6737 assert(_bitMap->isMarked(addr+1), "missing Printezis mark?");
6738 HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
6739 size = pointer_delta(nextOneAddr + 1, addr);
6740 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6741 "alignment problem");
6742 // Note that pre-cleaning needn't redirty the card. OopDesc::set_klass()
6743 // will dirty the card when the klass pointer is installed in the
6744 // object (signalling the completion of initialization).
6745 }
6746 } else {
6747 // Either a not yet marked object or an uninitialized object
6748 if (p->klass_or_null() == NULL || !p->is_parsable()) {
6749 // An uninitialized object, skip to the next card, since
6750 // we may not be able to read its P-bits yet.
6751 assert(size == 0, "Initial value");
6752 } else {
6753 // An object not (yet) reached by marking: we merely need to
6754 // compute its size so as to go look at the next block.
6755 assert(p->is_oop(true), "should be an oop");
6756 size = CompactibleFreeListSpace::adjustObjectSize(p->size());
6757 }
6758 }
6759 DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6760 return size;
6761 }
6763 void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
6764 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6765 "CMS thread should hold CMS token");
6766 assert_lock_strong(_freelistLock);
6767 assert_lock_strong(_bitMap->lock());
6768 DEBUG_ONLY(RememberKlassesChecker mux(false);)
6769 // relinquish the free_list_lock and bitMaplock()
6770 _bitMap->lock()->unlock();
6771 _freelistLock->unlock();
6772 ConcurrentMarkSweepThread::desynchronize(true);
6773 ConcurrentMarkSweepThread::acknowledge_yield_request();
6774 _collector->stopTimer();
6775 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
6776 if (PrintCMSStatistics != 0) {
6777 _collector->incrementYields();
6778 }
6779 _collector->icms_wait();
6781 // See the comment in coordinator_yield()
6782 for (unsigned i = 0; i < CMSYieldSleepCount &&
6783 ConcurrentMarkSweepThread::should_yield() &&
6784 !CMSCollector::foregroundGCIsActive(); ++i) {
6785 os::sleep(Thread::current(), 1, false);
6786 ConcurrentMarkSweepThread::acknowledge_yield_request();
6787 }
6789 ConcurrentMarkSweepThread::synchronize(true);
6790 _freelistLock->lock_without_safepoint_check();
6791 _bitMap->lock()->lock_without_safepoint_check();
6792 _collector->startTimer();
6793 }
6796 //////////////////////////////////////////////////////////////////
6797 // SurvivorSpacePrecleanClosure
6798 //////////////////////////////////////////////////////////////////
6799 // This (single-threaded) closure is used to preclean the oops in
6800 // the survivor spaces.
6801 size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {
6803 HeapWord* addr = (HeapWord*)p;
6804 DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6805 assert(!_span.contains(addr), "we are scanning the survivor spaces");
6806 assert(p->klass_or_null() != NULL, "object should be initialized");
6807 assert(p->is_parsable(), "must be parsable.");
6808 // an initialized object; ignore mark word in verification below
6809 // since we are running concurrent with mutators
6810 assert(p->is_oop(true), "should be an oop");
6811 // Note that we do not yield while we iterate over
6812 // the interior oops of p, pushing the relevant ones
6813 // on our marking stack.
6814 size_t size = p->oop_iterate(_scanning_closure);
6815 do_yield_check();
6816 // Observe that below, we do not abandon the preclean
6817 // phase as soon as we should; rather we empty the
6818 // marking stack before returning. This is to satisfy
6819 // some existing assertions. In general, it may be a
6820 // good idea to abort immediately and complete the marking
6821 // from the grey objects at a later time.
6822 while (!_mark_stack->isEmpty()) {
6823 oop new_oop = _mark_stack->pop();
6824 assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
6825 assert(new_oop->is_parsable(), "Found unparsable oop");
6826 assert(_bit_map->isMarked((HeapWord*)new_oop),
6827 "only grey objects on this stack");
6828 // iterate over the oops in this oop, marking and pushing
6829 // the ones in CMS heap (i.e. in _span).
6830 new_oop->oop_iterate(_scanning_closure);
6831 // check if it's time to yield
6832 do_yield_check();
6833 }
6834 unsigned int after_count =
6835 GenCollectedHeap::heap()->total_collections();
6836 bool abort = (_before_count != after_count) ||
6837 _collector->should_abort_preclean();
6838 return abort ? 0 : size;
6839 }
6841 void SurvivorSpacePrecleanClosure::do_yield_work() {
6842 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6843 "CMS thread should hold CMS token");
6844 assert_lock_strong(_bit_map->lock());
6845 DEBUG_ONLY(RememberKlassesChecker smx(false);)
6846 // Relinquish the bit map lock
6847 _bit_map->lock()->unlock();
6848 ConcurrentMarkSweepThread::desynchronize(true);
6849 ConcurrentMarkSweepThread::acknowledge_yield_request();
6850 _collector->stopTimer();
6851 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
6852 if (PrintCMSStatistics != 0) {
6853 _collector->incrementYields();
6854 }
6855 _collector->icms_wait();
6857 // See the comment in coordinator_yield()
6858 for (unsigned i = 0; i < CMSYieldSleepCount &&
6859 ConcurrentMarkSweepThread::should_yield() &&
6860 !CMSCollector::foregroundGCIsActive(); ++i) {
6861 os::sleep(Thread::current(), 1, false);
6862 ConcurrentMarkSweepThread::acknowledge_yield_request();
6863 }
6865 ConcurrentMarkSweepThread::synchronize(true);
6866 _bit_map->lock()->lock_without_safepoint_check();
6867 _collector->startTimer();
6868 }
6870 // This closure is used to rescan the marked objects on the dirty cards
6871 // in the mod union table and the card table proper. In the parallel
6872 // case, although the bitMap is shared, we do a single read so the
6873 // isMarked() query is "safe".
6874 bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
6875 // Ignore mark word because we are running concurrent with mutators
6876 assert(p->is_oop_or_null(true), "expected an oop or null");
6877 HeapWord* addr = (HeapWord*)p;
6878 assert(_span.contains(addr), "we are scanning the CMS generation");
6879 bool is_obj_array = false;
6880 #ifdef DEBUG
6881 if (!_parallel) {
6882 assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
6883 assert(_collector->overflow_list_is_empty(),
6884 "overflow list should be empty");
6886 }
6887 #endif // DEBUG
6888 if (_bit_map->isMarked(addr)) {
6889 // Obj arrays are precisely marked, non-arrays are not;
6890 // so we scan objArrays precisely and non-arrays in their
6891 // entirety.
6892 if (p->is_objArray()) {
6893 is_obj_array = true;
6894 if (_parallel) {
6895 p->oop_iterate(_par_scan_closure, mr);
6896 } else {
6897 p->oop_iterate(_scan_closure, mr);
6898 }
6899 } else {
6900 if (_parallel) {
6901 p->oop_iterate(_par_scan_closure);
6902 } else {
6903 p->oop_iterate(_scan_closure);
6904 }
6905 }
6906 }
6907 #ifdef DEBUG
6908 if (!_parallel) {
6909 assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
6910 assert(_collector->overflow_list_is_empty(),
6911 "overflow list should be empty");
6913 }
6914 #endif // DEBUG
6915 return is_obj_array;
6916 }
6918 MarkFromRootsClosure::MarkFromRootsClosure(CMSCollector* collector,
6919 MemRegion span,
6920 CMSBitMap* bitMap, CMSMarkStack* markStack,
6921 CMSMarkStack* revisitStack,
6922 bool should_yield, bool verifying):
6923 _collector(collector),
6924 _span(span),
6925 _bitMap(bitMap),
6926 _mut(&collector->_modUnionTable),
6927 _markStack(markStack),
6928 _revisitStack(revisitStack),
6929 _yield(should_yield),
6930 _skipBits(0)
6931 {
6932 assert(_markStack->isEmpty(), "stack should be empty");
6933 _finger = _bitMap->startWord();
6934 _threshold = _finger;
6935 assert(_collector->_restart_addr == NULL, "Sanity check");
6936 assert(_span.contains(_finger), "Out of bounds _finger?");
6937 DEBUG_ONLY(_verifying = verifying;)
6938 }
6940 void MarkFromRootsClosure::reset(HeapWord* addr) {
6941 assert(_markStack->isEmpty(), "would cause duplicates on stack");
6942 assert(_span.contains(addr), "Out of bounds _finger?");
6943 _finger = addr;
6944 _threshold = (HeapWord*)round_to(
6945 (intptr_t)_finger, CardTableModRefBS::card_size);
6946 }
6948 // Should revisit to see if this should be restructured for
6949 // greater efficiency.
6950 bool MarkFromRootsClosure::do_bit(size_t offset) {
6951 if (_skipBits > 0) {
6952 _skipBits--;
6953 return true;
6954 }
6955 // convert offset into a HeapWord*
6956 HeapWord* addr = _bitMap->startWord() + offset;
6957 assert(_bitMap->endWord() && addr < _bitMap->endWord(),
6958 "address out of range");
6959 assert(_bitMap->isMarked(addr), "tautology");
6960 if (_bitMap->isMarked(addr+1)) {
6961 // this is an allocated but not yet initialized object
6962 assert(_skipBits == 0, "tautology");
6963 _skipBits = 2; // skip next two marked bits ("Printezis-marks")
6964 oop p = oop(addr);
6965 if (p->klass_or_null() == NULL || !p->is_parsable()) {
6966 DEBUG_ONLY(if (!_verifying) {)
6967 // We re-dirty the cards on which this object lies and increase
6968 // the _threshold so that we'll come back to scan this object
6969 // during the preclean or remark phase. (CMSCleanOnEnter)
6970 if (CMSCleanOnEnter) {
6971 size_t sz = _collector->block_size_using_printezis_bits(addr);
6972 HeapWord* end_card_addr = (HeapWord*)round_to(
6973 (intptr_t)(addr+sz), CardTableModRefBS::card_size);
6974 MemRegion redirty_range = MemRegion(addr, end_card_addr);
6975 assert(!redirty_range.is_empty(), "Arithmetical tautology");
6976 // Bump _threshold to end_card_addr; note that
6977 // _threshold cannot possibly exceed end_card_addr, anyhow.
6978 // This prevents future clearing of the card as the scan proceeds
6979 // to the right.
6980 assert(_threshold <= end_card_addr,
6981 "Because we are just scanning into this object");
6982 if (_threshold < end_card_addr) {
6983 _threshold = end_card_addr;
6984 }
6985 if (p->klass_or_null() != NULL) {
6986 // Redirty the range of cards...
6987 _mut->mark_range(redirty_range);
6988 } // ...else the setting of klass will dirty the card anyway.
6989 }
6990 DEBUG_ONLY(})
6991 return true;
6992 }
6993 }
6994 scanOopsInOop(addr);
6995 return true;
6996 }
6998 // We take a break if we've been at this for a while,
6999 // so as to avoid monopolizing the locks involved.
7000 void MarkFromRootsClosure::do_yield_work() {
7001 // First give up the locks, then yield, then re-lock
7002 // We should probably use a constructor/destructor idiom to
7003 // do this unlock/lock or modify the MutexUnlocker class to
7004 // serve our purpose. XXX
7005 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7006 "CMS thread should hold CMS token");
7007 assert_lock_strong(_bitMap->lock());
7008 DEBUG_ONLY(RememberKlassesChecker mux(false);)
7009 _bitMap->lock()->unlock();
7010 ConcurrentMarkSweepThread::desynchronize(true);
7011 ConcurrentMarkSweepThread::acknowledge_yield_request();
7012 _collector->stopTimer();
7013 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
7014 if (PrintCMSStatistics != 0) {
7015 _collector->incrementYields();
7016 }
7017 _collector->icms_wait();
7019 // See the comment in coordinator_yield()
7020 for (unsigned i = 0; i < CMSYieldSleepCount &&
7021 ConcurrentMarkSweepThread::should_yield() &&
7022 !CMSCollector::foregroundGCIsActive(); ++i) {
7023 os::sleep(Thread::current(), 1, false);
7024 ConcurrentMarkSweepThread::acknowledge_yield_request();
7025 }
7027 ConcurrentMarkSweepThread::synchronize(true);
7028 _bitMap->lock()->lock_without_safepoint_check();
7029 _collector->startTimer();
7030 }
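// Illustrative sketch (not part of the build): the constructor/
// destructor idiom the XXX comment above suggests for the
// unlock-yield-relock dance, in standard C++. The destructor
// guarantees the lock is retaken on every exit path. ReverseLocker is
// a hypothetical name (cf. the MutexUnlocker class mentioned above).
//
//   #include <mutex>
//
//   class ReverseLocker {
//     std::mutex& _m;
//    public:
//     explicit ReverseLocker(std::mutex& m) : _m(m) { _m.unlock(); }
//     ~ReverseLocker() { _m.lock(); }   // re-taken even on early return
//   };
//
//   void yield_point(std::mutex& bit_map_lock) {   // caller holds the lock
//     ReverseLocker rl(bit_map_lock);   // drop the lock for the yield
//     // ... acknowledge the yield request, sleep briefly ...
//   }                                   // lock re-acquired here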
7032 void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
7033 assert(_bitMap->isMarked(ptr), "expected bit to be set");
7034 assert(_markStack->isEmpty(),
7035 "should drain stack to limit stack usage");
7036 // convert ptr to an oop preparatory to scanning
7037 oop obj = oop(ptr);
7038 // Ignore mark word in verification below, since we
7039 // may be running concurrent with mutators.
7040 assert(obj->is_oop(true), "should be an oop");
7041 assert(_finger <= ptr, "_finger runneth ahead");
7042 // advance the finger to right end of this object
7043 _finger = ptr + obj->size();
7044 assert(_finger > ptr, "we just incremented it above");
7045 // On large heaps, it may take us some time to get through
7046 // the marking phase (especially if running iCMS). During
7047 // this time it's possible that a lot of mutations have
7048 // accumulated in the card table and the mod union table --
7049 // these mutation records are redundant until we have
7050 // actually traced into the corresponding card.
7051 // Here, we check whether advancing the finger would make
7052 // us cross into a new card, and if so clear corresponding
7053 // cards in the MUT (preclean them in the card-table in the
7054 // future).
7056 DEBUG_ONLY(if (!_verifying) {)
7057 // The clean-on-enter optimization is disabled by default,
7058 // until we fix 6178663.
7059 if (CMSCleanOnEnter && (_finger > _threshold)) {
7060 // [_threshold, _finger) represents the interval
7061 // of cards to be cleared in MUT (or precleaned in card table).
7062 // The set of cards to be cleared is all those that overlap
7063 // with the interval [_threshold, _finger); note that
7064 // _threshold is always kept card-aligned but _finger isn't
7065 // always card-aligned.
7066 HeapWord* old_threshold = _threshold;
7067 assert(old_threshold == (HeapWord*)round_to(
7068 (intptr_t)old_threshold, CardTableModRefBS::card_size),
7069 "_threshold should always be card-aligned");
7070 _threshold = (HeapWord*)round_to(
7071 (intptr_t)_finger, CardTableModRefBS::card_size);
7072 MemRegion mr(old_threshold, _threshold);
7073 assert(!mr.is_empty(), "Control point invariant");
7074 assert(_span.contains(mr), "Should clear within span");
7075 // XXX When _finger crosses from old gen into perm gen
7076 // we may be doing unnecessary cleaning; do better in the
7077 // future by detecting that condition and clearing fewer
7078 // MUT/CT entries.
7079 _mut->clear_range(mr);
7080 }
7081 DEBUG_ONLY(})
7082 // Note: the finger doesn't advance while we drain
7083 // the stack below.
7084 PushOrMarkClosure pushOrMarkClosure(_collector,
7085 _span, _bitMap, _markStack,
7086 _revisitStack,
7087 _finger, this);
7088 bool res = _markStack->push(obj);
7089 assert(res, "Empty non-zero size stack should have space for single push");
7090 while (!_markStack->isEmpty()) {
7091 oop new_oop = _markStack->pop();
7092 // Skip verifying header mark word below because we are
7093 // running concurrent with mutators.
7094 assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
7095 // now scan this oop's oops
7096 new_oop->oop_iterate(&pushOrMarkClosure);
7097 do_yield_check();
7098 }
7099 assert(_markStack->isEmpty(), "tautology, emphasizing post-condition");
7100 }
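// Worked example (illustrative numbers): with 512-byte cards, suppose
// _threshold is card-aligned at address 0x1000 and scanning an object
// advances _finger to 0x1450. Rounding 0x1450 up to a card boundary
// gives 0x1600, so the cards covering [0x1000, 0x1600) are cleared in
// the MUT, _threshold becomes 0x1600, and the next clearing step
// starts from there.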
7102 Par_MarkFromRootsClosure::Par_MarkFromRootsClosure(CMSConcMarkingTask* task,
7103 CMSCollector* collector, MemRegion span,
7104 CMSBitMap* bit_map,
7105 OopTaskQueue* work_queue,
7106 CMSMarkStack* overflow_stack,
7107 CMSMarkStack* revisit_stack,
7108 bool should_yield):
7109 _collector(collector),
7110 _whole_span(collector->_span),
7111 _span(span),
7112 _bit_map(bit_map),
7113 _mut(&collector->_modUnionTable),
7114 _work_queue(work_queue),
7115 _overflow_stack(overflow_stack),
7116 _revisit_stack(revisit_stack),
7117 _yield(should_yield),
7118 _skip_bits(0),
7119 _task(task)
7120 {
7121 assert(_work_queue->size() == 0, "work_queue should be empty");
7122 _finger = span.start();
7123 _threshold = _finger; // XXX Defer clear-on-enter optimization for now
7124 assert(_span.contains(_finger), "Out of bounds _finger?");
7125 }
7127 // Should revisit to see if this should be restructured for
7128 // greater efficiency.
7129 bool Par_MarkFromRootsClosure::do_bit(size_t offset) {
7130 if (_skip_bits > 0) {
7131 _skip_bits--;
7132 return true;
7133 }
7134 // convert offset into a HeapWord*
7135 HeapWord* addr = _bit_map->startWord() + offset;
7136 assert(_bit_map->endWord() && addr < _bit_map->endWord(),
7137 "address out of range");
7138 assert(_bit_map->isMarked(addr), "tautology");
7139 if (_bit_map->isMarked(addr+1)) {
7140 // this is an allocated object that might not yet be initialized
7141 assert(_skip_bits == 0, "tautology");
7142 _skip_bits = 2; // skip next two marked bits ("Printezis-marks")
7143 oop p = oop(addr);
7144 if (p->klass_or_null() == NULL || !p->is_parsable()) {
7145 // in the case of Clean-on-Enter optimization, redirty card
7146 // and avoid clearing card by increasing the threshold.
7147 return true;
7148 }
7149 }
7150 scan_oops_in_oop(addr);
7151 return true;
7152 }
7154 void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
7155 assert(_bit_map->isMarked(ptr), "expected bit to be set");
7156 // Should we assert that our work queue is empty or
7157 // below some drain limit?
7158 assert(_work_queue->size() == 0,
7159 "should drain stack to limit stack usage");
7160 // convert ptr to an oop preparatory to scanning
7161 oop obj = oop(ptr);
7162 // Ignore mark word in verification below, since we
7163 // may be running concurrent with mutators.
7164 assert(obj->is_oop(true), "should be an oop");
7165 assert(_finger <= ptr, "_finger runneth ahead");
7166 // advance the finger to right end of this object
7167 _finger = ptr + obj->size();
7168 assert(_finger > ptr, "we just incremented it above");
7169 // On large heaps, it may take us some time to get through
7170 // the marking phase (especially if running iCMS). During
7171 // this time it's possible that a lot of mutations have
7172 // accumulated in the card table and the mod union table --
7173 // these mutation records are redundant until we have
7174 // actually traced into the corresponding card.
7175 // Here, we check whether advancing the finger would make
7176 // us cross into a new card, and if so clear corresponding
7177 // cards in the MUT (preclean them in the card-table in the
7178 // future).
7180 // The clean-on-enter optimization is disabled by default,
7181 // until we fix 6178663.
7182 if (CMSCleanOnEnter && (_finger > _threshold)) {
7183 // [_threshold, _finger) represents the interval
7184 // of cards to be cleared in MUT (or precleaned in card table).
7185 // The set of cards to be cleared is all those that overlap
7186 // with the interval [_threshold, _finger); note that
7187 // _threshold is always kept card-aligned but _finger isn't
7188 // always card-aligned.
7189 HeapWord* old_threshold = _threshold;
7190 assert(old_threshold == (HeapWord*)round_to(
7191 (intptr_t)old_threshold, CardTableModRefBS::card_size),
7192 "_threshold should always be card-aligned");
7193 _threshold = (HeapWord*)round_to(
7194 (intptr_t)_finger, CardTableModRefBS::card_size);
7195 MemRegion mr(old_threshold, _threshold);
7196 assert(!mr.is_empty(), "Control point invariant");
7197 assert(_span.contains(mr), "Should clear within span"); // _whole_span ??
7198 // XXX When _finger crosses from old gen into perm gen
7199 // we may be doing unnecessary cleaning; do better in the
7200 // future by detecting that condition and clearing fewer
7201 // MUT/CT entries.
7202 _mut->clear_range(mr);
7203 }
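// [Editor's worked example -- illustrative values only, assuming the
//  usual 512-byte cards, i.e. CardTableModRefBS::card_size == 512:]
// If _threshold was 0x1000 and the finger has advanced to 0x1234,
// then the new _threshold is round_to(0x1234, 512) == 0x1400, and
// the MUT is cleared over mr == [0x1000, 0x1400) -- exactly the
// cards the finger has now swept past.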
7205 // Note: the local finger doesn't advance while we drain
7206 // the stack below, but the global finger sure can and will.
7207 HeapWord** gfa = _task->global_finger_addr();
7208 Par_PushOrMarkClosure pushOrMarkClosure(_collector,
7209 _span, _bit_map,
7210 _work_queue,
7211 _overflow_stack,
7212 _revisit_stack,
7213 _finger,
7214 gfa, this);
7215 bool res = _work_queue->push(obj); // overflow could occur here
7216 assert(res, "Will hold once we use workqueues");
7217 while (true) {
7218 oop new_oop;
7219 if (!_work_queue->pop_local(new_oop)) {
7220 // We emptied our work_queue; check if there's stuff that can
7221 // be gotten from the overflow stack.
7222 if (CMSConcMarkingTask::get_work_from_overflow_stack(
7223 _overflow_stack, _work_queue)) {
7224 do_yield_check();
7225 continue;
7226 } else { // done
7227 break;
7228 }
7229 }
7230 // Skip verifying header mark word below because we are
7231 // running concurrently with mutators.
7232 assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
7233 // now scan this oop's oops
7234 new_oop->oop_iterate(&pushOrMarkClosure);
7235 do_yield_check();
7236 }
7237 assert(_work_queue->size() == 0, "tautology, emphasizing post-condition");
7238 }
7240 // Yield in response to a request from VM Thread or
7241 // from mutators.
7242 void Par_MarkFromRootsClosure::do_yield_work() {
7243 assert(_task != NULL, "sanity");
7244 _task->yield();
7245 }
7247 // A variant of the above used for verifying CMS marking work.
7248 MarkFromRootsVerifyClosure::MarkFromRootsVerifyClosure(CMSCollector* collector,
7249 MemRegion span,
7250 CMSBitMap* verification_bm, CMSBitMap* cms_bm,
7251 CMSMarkStack* mark_stack):
7252 _collector(collector),
7253 _span(span),
7254 _verification_bm(verification_bm),
7255 _cms_bm(cms_bm),
7256 _mark_stack(mark_stack),
7257 _pam_verify_closure(collector, span, verification_bm, cms_bm,
7258 mark_stack)
7259 {
7260 assert(_mark_stack->isEmpty(), "stack should be empty");
7261 _finger = _verification_bm->startWord();
7262 assert(_collector->_restart_addr == NULL, "Sanity check");
7263 assert(_span.contains(_finger), "Out of bounds _finger?");
7264 }
7266 void MarkFromRootsVerifyClosure::reset(HeapWord* addr) {
7267 assert(_mark_stack->isEmpty(), "would cause duplicates on stack");
7268 assert(_span.contains(addr), "Out of bounds _finger?");
7269 _finger = addr;
7270 }
7272 // Should revisit to see if this should be restructured for
7273 // greater efficiency.
7274 bool MarkFromRootsVerifyClosure::do_bit(size_t offset) {
7275 // convert offset into a HeapWord*
7276 HeapWord* addr = _verification_bm->startWord() + offset;
7277 assert(_verification_bm->endWord() && addr < _verification_bm->endWord(),
7278 "address out of range");
7279 assert(_verification_bm->isMarked(addr), "tautology");
7280 assert(_cms_bm->isMarked(addr), "tautology");
7282 assert(_mark_stack->isEmpty(),
7283 "should drain stack to limit stack usage");
7284 // convert addr to an oop preparatory to scanning
7285 oop obj = oop(addr);
7286 assert(obj->is_oop(), "should be an oop");
7287 assert(_finger <= addr, "_finger runneth ahead");
7288 // advance the finger to right end of this object
7289 _finger = addr + obj->size();
7290 assert(_finger > addr, "we just incremented it above");
7291 // Note: the finger doesn't advance while we drain
7292 // the stack below.
7293 bool res = _mark_stack->push(obj);
7294 assert(res, "Empty non-zero size stack should have space for single push");
7295 while (!_mark_stack->isEmpty()) {
7296 oop new_oop = _mark_stack->pop();
7297 assert(new_oop->is_oop(), "Oops! expected to pop an oop");
7298 // now scan this oop's oops
7299 new_oop->oop_iterate(&_pam_verify_closure);
7300 }
7301 assert(_mark_stack->isEmpty(), "tautology, emphasizing post-condition");
7302 return true;
7303 }
7305 PushAndMarkVerifyClosure::PushAndMarkVerifyClosure(
7306 CMSCollector* collector, MemRegion span,
7307 CMSBitMap* verification_bm, CMSBitMap* cms_bm,
7308 CMSMarkStack* mark_stack):
7309 OopClosure(collector->ref_processor()),
7310 _collector(collector),
7311 _span(span),
7312 _verification_bm(verification_bm),
7313 _cms_bm(cms_bm),
7314 _mark_stack(mark_stack)
7315 { }
7317 void PushAndMarkVerifyClosure::do_oop(oop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
7318 void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
7320 // Upon stack overflow, we discard (part of) the stack,
7321 // remembering the least address amongst those discarded
7322 // in CMSCollector's _restart_address.
7323 void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
7324 // Remember the least grey address discarded
7325 HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost);
7326 _collector->lower_restart_addr(ra);
7327 _mark_stack->reset(); // discard stack contents
7328 _mark_stack->expand(); // expand the stack if possible
7329 }
7331 void PushAndMarkVerifyClosure::do_oop(oop obj) {
7332 assert(obj->is_oop_or_null(), "expected an oop or NULL");
7333 HeapWord* addr = (HeapWord*)obj;
7334 if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
7335 // Oop lies in _span and isn't yet grey or black
7336 _verification_bm->mark(addr); // now grey
7337 if (!_cms_bm->isMarked(addr)) {
7338 oop(addr)->print();
7339 gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)",
7340 addr);
7341 fatal("... aborting");
7342 }
7344 if (!_mark_stack->push(obj)) { // stack overflow
7345 if (PrintCMSStatistics != 0) {
7346 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7347 SIZE_FORMAT, _mark_stack->capacity());
7348 }
7349 assert(_mark_stack->isFull(), "Else push should have succeeded");
7350 handle_stack_overflow(addr);
7351 }
7352 // anything including and to the right of _finger
7353 // will be scanned as we iterate over the remainder of the
7354 // bit map
7355 }
7356 }
7358 PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
7359 MemRegion span,
7360 CMSBitMap* bitMap, CMSMarkStack* markStack,
7361 CMSMarkStack* revisitStack,
7362 HeapWord* finger, MarkFromRootsClosure* parent) :
7363 KlassRememberingOopClosure(collector, collector->ref_processor(), revisitStack),
7364 _span(span),
7365 _bitMap(bitMap),
7366 _markStack(markStack),
7367 _finger(finger),
7368 _parent(parent)
7369 { }
7371 Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
7372 MemRegion span,
7373 CMSBitMap* bit_map,
7374 OopTaskQueue* work_queue,
7375 CMSMarkStack* overflow_stack,
7376 CMSMarkStack* revisit_stack,
7377 HeapWord* finger,
7378 HeapWord** global_finger_addr,
7379 Par_MarkFromRootsClosure* parent) :
7380 Par_KlassRememberingOopClosure(collector,
7381 collector->ref_processor(),
7382 revisit_stack),
7383 _whole_span(collector->_span),
7384 _span(span),
7385 _bit_map(bit_map),
7386 _work_queue(work_queue),
7387 _overflow_stack(overflow_stack),
7388 _finger(finger),
7389 _global_finger_addr(global_finger_addr),
7390 _parent(parent)
7391 { }
7393 // Assumes thread-safe access by callers, who are
7394 // responsible for mutual exclusion.
7395 void CMSCollector::lower_restart_addr(HeapWord* low) {
7396 assert(_span.contains(low), "Out of bounds addr");
7397 if (_restart_addr == NULL) {
7398 _restart_addr = low;
7399 } else {
7400 _restart_addr = MIN2(_restart_addr, low);
7401 }
7402 }
7404 // Upon stack overflow, we discard (part of) the stack,
7405 // remembering the least address amongst those discarded
7406 // in CMSCollector's _restart_address.
7407 void PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
7408 // Remember the least grey address discarded
7409 HeapWord* ra = (HeapWord*)_markStack->least_value(lost);
7410 _collector->lower_restart_addr(ra);
7411 _markStack->reset(); // discard stack contents
7412 _markStack->expand(); // expand the stack if possible
7413 }
7415 // Upon stack overflow, we discard (part of) the stack,
7416 // remembering the least address amongst those discarded
7417 // in CMSCollector's _restart_address.
7418 void Par_PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
7419 // We need to do this under a mutex to prevent other
7420 // workers from interfering with the work done below.
7421 MutexLockerEx ml(_overflow_stack->par_lock(),
7422 Mutex::_no_safepoint_check_flag);
7423 // Remember the least grey address discarded
7424 HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
7425 _collector->lower_restart_addr(ra);
7426 _overflow_stack->reset(); // discard stack contents
7427 _overflow_stack->expand(); // expand the stack if possible
7428 }
7430 void PushOrMarkClosure::do_oop(oop obj) {
7431 // Ignore mark word because we are running concurrently with mutators.
7432 assert(obj->is_oop_or_null(true), "expected an oop or NULL");
7433 HeapWord* addr = (HeapWord*)obj;
7434 if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
7435 // Oop lies in _span and isn't yet grey or black
7436 _bitMap->mark(addr); // now grey
7437 if (addr < _finger) {
7438 // the bit map iteration has already either passed, or
7439 // sampled, this bit in the bit map; we'll need to
7440 // use the marking stack to scan this oop's oops.
7441 bool simulate_overflow = false;
7442 NOT_PRODUCT(
7443 if (CMSMarkStackOverflowALot &&
7444 _collector->simulate_overflow()) {
7445 // simulate a stack overflow
7446 simulate_overflow = true;
7447 }
7448 )
7449 if (simulate_overflow || !_markStack->push(obj)) { // stack overflow
7450 if (PrintCMSStatistics != 0) {
7451 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7452 SIZE_FORMAT, _markStack->capacity());
7453 }
7454 assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded");
7455 handle_stack_overflow(addr);
7456 }
7457 }
7458 // anything including and to the right of _finger
7459 // will be scanned as we iterate over the remainder of the
7460 // bit map
7461 do_yield_check();
7462 }
7463 }
7465 void PushOrMarkClosure::do_oop(oop* p) { PushOrMarkClosure::do_oop_work(p); }
7466 void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
7468 void Par_PushOrMarkClosure::do_oop(oop obj) {
7469 // Ignore mark word because we are running concurrently with mutators.
7470 assert(obj->is_oop_or_null(true), "expected an oop or NULL");
7471 HeapWord* addr = (HeapWord*)obj;
7472 if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
7473 // Oop lies in _span and isn't yet grey or black
7474 // We read the global_finger (volatile read) strictly after marking oop
7475 bool res = _bit_map->par_mark(addr); // now grey
7476 volatile HeapWord** gfa = (volatile HeapWord**)_global_finger_addr;
7477 // Should we push this marked oop on our stack?
7478 // -- if someone else marked it, nothing to do
7479 // -- if target oop is above global finger nothing to do
7480 // -- if target oop is in chunk and above local finger
7481 // then nothing to do
7482 // -- else push on work queue
7483 if ( !res // someone else marked it, they will deal with it
7484 || (addr >= *gfa) // will be scanned in a later task
7485 || (_span.contains(addr) && addr >= _finger)) { // later in this chunk
7486 return;
7487 }
7488 // the bit map iteration has already either passed, or
7489 // sampled, this bit in the bit map; we'll need to
7490 // use the marking stack to scan this oop's oops.
7491 bool simulate_overflow = false;
7492 NOT_PRODUCT(
7493 if (CMSMarkStackOverflowALot &&
7494 _collector->simulate_overflow()) {
7495 // simulate a stack overflow
7496 simulate_overflow = true;
7497 }
7498 )
7499 if (simulate_overflow ||
7500 !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
7501 // stack overflow
7502 if (PrintCMSStatistics != 0) {
7503 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7504 SIZE_FORMAT, _overflow_stack->capacity());
7505 }
7506 // We cannot assert that the overflow stack is full because
7507 // it may have been emptied since.
7508 assert(simulate_overflow ||
7509 _work_queue->size() == _work_queue->max_elems(),
7510 "Else push should have succeeded");
7511 handle_stack_overflow(addr);
7512 }
7513 do_yield_check();
7514 }
7515 }
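// [Editor's illustration of the push filter above, using hypothetical
//  addresses:] Suppose this worker's chunk is [0x4000, 0x8000) with
// the local _finger at 0x5000 and the global finger at 0xA000. A
// newly greyed oop at 0x6000 is at or above the local finger, so this
// chunk's own bit-map iteration will reach it and no push is needed.
// An oop at 0x4800 lies below both fingers, so it falls through the
// filter and is pushed on the work queue to be traced explicitly.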
7517 void Par_PushOrMarkClosure::do_oop(oop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
7518 void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
7520 KlassRememberingOopClosure::KlassRememberingOopClosure(CMSCollector* collector,
7521 ReferenceProcessor* rp,
7522 CMSMarkStack* revisit_stack) :
7523 OopClosure(rp),
7524 _collector(collector),
7525 _revisit_stack(revisit_stack),
7526 _should_remember_klasses(collector->should_unload_classes()) {}
7528 PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
7529 MemRegion span,
7530 ReferenceProcessor* rp,
7531 CMSBitMap* bit_map,
7532 CMSBitMap* mod_union_table,
7533 CMSMarkStack* mark_stack,
7534 CMSMarkStack* revisit_stack,
7535 bool concurrent_precleaning):
7536 KlassRememberingOopClosure(collector, rp, revisit_stack),
7537 _span(span),
7538 _bit_map(bit_map),
7539 _mod_union_table(mod_union_table),
7540 _mark_stack(mark_stack),
7541 _concurrent_precleaning(concurrent_precleaning)
7542 {
7543 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7544 }
7546 // Grey object rescan during pre-cleaning and second checkpoint phases --
7547 // the non-parallel version (the parallel version appears further below.)
7548 void PushAndMarkClosure::do_oop(oop obj) {
7549 // Ignore mark word verification. If during concurrent precleaning,
7550 // the object monitor may be locked. If during the checkpoint
7551 // phases, the object may already have been reached by a different
7552 // path and may be at the end of the global overflow list (so
7553 // the mark word may be NULL).
7554 assert(obj->is_oop_or_null(true /* ignore mark word */),
7555 "expected an oop or NULL");
7556 HeapWord* addr = (HeapWord*)obj;
7557 // Check if oop points into the CMS generation
7558 // and is not marked
7559 if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
7560 // a white object ...
7561 _bit_map->mark(addr); // ... now grey
7562 // push on the marking stack (grey set)
7563 bool simulate_overflow = false;
7564 NOT_PRODUCT(
7565 if (CMSMarkStackOverflowALot &&
7566 _collector->simulate_overflow()) {
7567 // simulate a stack overflow
7568 simulate_overflow = true;
7569 }
7570 )
7571 if (simulate_overflow || !_mark_stack->push(obj)) {
7572 if (_concurrent_precleaning) {
7573 // During precleaning we can just dirty the appropriate card(s)
7574 // in the mod union table, thus ensuring that the object remains
7575 // in the grey set, and continue. In the case of object arrays
7576 // we need to dirty all of the cards that the object spans,
7577 // since the rescan of object arrays will be limited to the
7578 // dirty cards.
7579 // Note that no one can be interfering with us in this action
7580 // of dirtying the mod union table, so no locking or atomics
7581 // are required.
7582 if (obj->is_objArray()) {
7583 size_t sz = obj->size();
7584 HeapWord* end_card_addr = (HeapWord*)round_to(
7585 (intptr_t)(addr+sz), CardTableModRefBS::card_size);
7586 MemRegion redirty_range = MemRegion(addr, end_card_addr);
7587 assert(!redirty_range.is_empty(), "Arithmetical tautology");
7588 _mod_union_table->mark_range(redirty_range);
7589 } else {
7590 _mod_union_table->mark(addr);
7591 }
7592 _collector->_ser_pmc_preclean_ovflw++;
7593 } else {
7594 // During the remark phase, we need to remember this oop
7595 // in the overflow list.
7596 _collector->push_on_overflow_list(obj);
7597 _collector->_ser_pmc_remark_ovflw++;
7598 }
7599 }
7600 }
7601 }
7603 Par_PushAndMarkClosure::Par_PushAndMarkClosure(CMSCollector* collector,
7604 MemRegion span,
7605 ReferenceProcessor* rp,
7606 CMSBitMap* bit_map,
7607 OopTaskQueue* work_queue,
7608 CMSMarkStack* revisit_stack):
7609 Par_KlassRememberingOopClosure(collector, rp, revisit_stack),
7610 _span(span),
7611 _bit_map(bit_map),
7612 _work_queue(work_queue)
7613 {
7614 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7615 }
7617 void PushAndMarkClosure::do_oop(oop* p) { PushAndMarkClosure::do_oop_work(p); }
7618 void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
7620 // Grey object rescan during second checkpoint phase --
7621 // the parallel version.
7622 void Par_PushAndMarkClosure::do_oop(oop obj) {
7623 // In the assert below, we ignore the mark word because
7624 // this oop may point to an already visited object that is
7625 // on the overflow stack (in which case the mark word has
7626 // been hijacked for chaining into the overflow stack --
7627 // if this is the last object in the overflow stack then
7628 // its mark word will be NULL). Because this object may
7629 // have been subsequently popped off the global overflow
7630 // stack, and the mark word possibly restored to the prototypical
7631 // value, by the time we get to examine this failing assert in
7632 // the debugger, is_oop_or_null(false) may subsequently start
7633 // to hold.
7634 assert(obj->is_oop_or_null(true),
7635 "expected an oop or NULL");
7636 HeapWord* addr = (HeapWord*)obj;
7637 // Check if oop points into the CMS generation
7638 // and is not marked
7639 if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
7640 // a white object ...
7641 // If we manage to "claim" the object, by being the
7642 // first thread to mark it, then we push it on our
7643 // marking stack
7644 if (_bit_map->par_mark(addr)) { // ... now grey
7645 // push on work queue (grey set)
7646 bool simulate_overflow = false;
7647 NOT_PRODUCT(
7648 if (CMSMarkStackOverflowALot &&
7649 _collector->par_simulate_overflow()) {
7650 // simulate a stack overflow
7651 simulate_overflow = true;
7652 }
7653 )
7654 if (simulate_overflow || !_work_queue->push(obj)) {
7655 _collector->par_push_on_overflow_list(obj);
7656 _collector->_par_pmc_remark_ovflw++; // imprecise OK: no need to CAS
7657 }
7658 } // Else, some other thread got there first
7659 }
7660 }
7662 void Par_PushAndMarkClosure::do_oop(oop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
7663 void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
7665 void PushAndMarkClosure::remember_mdo(DataLayout* v) {
7666 // TBD
7667 }
7669 void Par_PushAndMarkClosure::remember_mdo(DataLayout* v) {
7670 // TBD
7671 }
7673 void CMSPrecleanRefsYieldClosure::do_yield_work() {
7674 DEBUG_ONLY(RememberKlassesChecker mux(false);)
7675 Mutex* bml = _collector->bitMapLock();
7676 assert_lock_strong(bml);
7677 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7678 "CMS thread should hold CMS token");
7680 bml->unlock();
7681 ConcurrentMarkSweepThread::desynchronize(true);
7683 ConcurrentMarkSweepThread::acknowledge_yield_request();
7685 _collector->stopTimer();
7686 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
7687 if (PrintCMSStatistics != 0) {
7688 _collector->incrementYields();
7689 }
7690 _collector->icms_wait();
7692 // See the comment in coordinator_yield()
7693 for (unsigned i = 0; i < CMSYieldSleepCount &&
7694 ConcurrentMarkSweepThread::should_yield() &&
7695 !CMSCollector::foregroundGCIsActive(); ++i) {
7696 os::sleep(Thread::current(), 1, false);
7697 ConcurrentMarkSweepThread::acknowledge_yield_request();
7698 }
7700 ConcurrentMarkSweepThread::synchronize(true);
7701 bml->lock();
7703 _collector->startTimer();
7704 }
7706 bool CMSPrecleanRefsYieldClosure::should_return() {
7707 if (ConcurrentMarkSweepThread::should_yield()) {
7708 do_yield_work();
7709 }
7710 return _collector->foregroundGCIsActive();
7711 }
7713 void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) {
7714 assert(((size_t)mr.start())%CardTableModRefBS::card_size_in_words == 0,
7715 "mr should be aligned to start at a card boundary");
7716 // We'd like to assert:
7717 // assert(mr.word_size()%CardTableModRefBS::card_size_in_words == 0,
7718 // "mr should be a range of cards");
7719 // However, that would be too strong in one case -- the last
7720 // partition ends at _unallocated_block which, in general, can be
7721 // an arbitrary boundary, not necessarily card aligned.
7722 if (PrintCMSStatistics != 0) {
7723 _num_dirty_cards +=
7724 mr.word_size()/CardTableModRefBS::card_size_in_words;
7725 }
7726 _space->object_iterate_mem(mr, &_scan_cl);
7727 }
7729 SweepClosure::SweepClosure(CMSCollector* collector,
7730 ConcurrentMarkSweepGeneration* g,
7731 CMSBitMap* bitMap, bool should_yield) :
7732 _collector(collector),
7733 _g(g),
7734 _sp(g->cmsSpace()),
7735 _limit(_sp->sweep_limit()),
7736 _freelistLock(_sp->freelistLock()),
7737 _bitMap(bitMap),
7738 _yield(should_yield),
7739 _inFreeRange(false), // No free range at beginning of sweep
7740 _freeRangeInFreeLists(false), // No free range at beginning of sweep
7741 _lastFreeRangeCoalesced(false),
7742 _freeFinger(g->used_region().start())
7743 {
7744 NOT_PRODUCT(
7745 _numObjectsFreed = 0;
7746 _numWordsFreed = 0;
7747 _numObjectsLive = 0;
7748 _numWordsLive = 0;
7749 _numObjectsAlreadyFree = 0;
7750 _numWordsAlreadyFree = 0;
7751 _last_fc = NULL;
7753 _sp->initializeIndexedFreeListArrayReturnedBytes();
7754 _sp->dictionary()->initializeDictReturnedBytes();
7755 )
7756 assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7757 "sweep _limit out of bounds");
7758 if (CMSTraceSweeper) {
7759 gclog_or_tty->print("\n====================\nStarting new sweep\n");
7760 }
7761 }
7763 // We need this destructor to reclaim any space at the end
7764 // of the space, which do_blk below may not have added back to
7765 // the free lists. [basically dealing with the "fringe effect"]
7766 SweepClosure::~SweepClosure() {
7767 assert_lock_strong(_freelistLock);
7768 // this should be treated as the end of a free run if any
7769 // The current free range should be returned to the free lists
7770 // as one coalesced chunk.
7771 if (inFreeRange()) {
7772 flushCurFreeChunk(freeFinger(),
7773 pointer_delta(_limit, freeFinger()));
7774 assert(freeFinger() < _limit, "the finger pointeth off base");
7775 if (CMSTraceSweeper) {
7776 gclog_or_tty->print("destructor:");
7777 gclog_or_tty->print("Sweep:put_free_blk 0x%x ("SIZE_FORMAT") "
7778 "[coalesced:"SIZE_FORMAT"]\n",
7779 freeFinger(), pointer_delta(_limit, freeFinger()),
7780 lastFreeRangeCoalesced());
7781 }
7782 }
7783 NOT_PRODUCT(
7784 if (Verbose && PrintGC) {
7785 gclog_or_tty->print("Collected "SIZE_FORMAT" objects, "
7786 SIZE_FORMAT " bytes",
7787 _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
7788 gclog_or_tty->print_cr("\nLive "SIZE_FORMAT" objects, "
7789 SIZE_FORMAT" bytes "
7790 "Already free "SIZE_FORMAT" objects, "SIZE_FORMAT" bytes",
7791 _numObjectsLive, _numWordsLive*sizeof(HeapWord),
7792 _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
7793 size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree) *
7794 sizeof(HeapWord);
7795 gclog_or_tty->print_cr("Total sweep: "SIZE_FORMAT" bytes", totalBytes);
7797 if (PrintCMSStatistics && CMSVerifyReturnedBytes) {
7798 size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
7799 size_t dictReturnedBytes = _sp->dictionary()->sumDictReturnedBytes();
7800 size_t returnedBytes = indexListReturnedBytes + dictReturnedBytes;
7801 gclog_or_tty->print("Returned "SIZE_FORMAT" bytes", returnedBytes);
7802 gclog_or_tty->print(" Indexed List Returned "SIZE_FORMAT" bytes",
7803 indexListReturnedBytes);
7804 gclog_or_tty->print_cr(" Dictionary Returned "SIZE_FORMAT" bytes",
7805 dictReturnedBytes);
7806 }
7807 }
7808 )
7809 // Now, in debug mode, just null out the sweep_limit
7810 NOT_PRODUCT(_sp->clear_sweep_limit();)
7811 if (CMSTraceSweeper) {
7812 gclog_or_tty->print("end of sweep\n================\n");
7813 }
7814 }
7816 void SweepClosure::initialize_free_range(HeapWord* freeFinger,
7817 bool freeRangeInFreeLists) {
7818 if (CMSTraceSweeper) {
7819 gclog_or_tty->print("---- Start free range 0x%x with free block [%d] (%d)\n",
7820 freeFinger, _sp->block_size(freeFinger),
7821 freeRangeInFreeLists);
7822 }
7823 assert(!inFreeRange(), "Trampling existing free range");
7824 set_inFreeRange(true);
7825 set_lastFreeRangeCoalesced(false);
7827 set_freeFinger(freeFinger);
7828 set_freeRangeInFreeLists(freeRangeInFreeLists);
7829 if (CMSTestInFreeList) {
7830 if (freeRangeInFreeLists) {
7831 FreeChunk* fc = (FreeChunk*) freeFinger;
7832 assert(fc->isFree(), "A chunk on the free list should be free.");
7833 assert(fc->size() > 0, "Free range should have a size");
7834 assert(_sp->verifyChunkInFreeLists(fc), "Chunk is not in free lists");
7835 }
7836 }
7837 }
7839 // Note that the sweeper runs concurrently with mutators. Thus,
7840 // it is possible for direct allocation in this generation to happen
7841 // in the middle of the sweep. Note that the sweeper also coalesces
7842 // contiguous free blocks. Thus, unless the sweeper and the allocator
7843 // synchronize appropriately, freshly allocated blocks may get swept up.
7844 // This is accomplished by the sweeper locking the free lists while
7845 // it is sweeping. Thus blocks that are determined to be free are
7846 // indeed free. There is, however, one additional complication:
7847 // blocks that have been allocated since the final checkpoint and
7848 // mark will not have been marked and so would be treated as
7849 // unreachable and swept up. To prevent this, the allocator marks
7850 // the bit map when allocating during the sweep phase. This leads,
7851 // however, to a further complication -- objects may have been allocated
7852 // but not yet initialized -- in the sense that the header isn't yet
7853 // installed. The sweeper cannot then determine the size of the block
7854 // in order to skip over it. To deal with this case, we use a technique
7855 // (due to Printezis) to encode such uninitialized block sizes in the
7856 // bit map. Since the bit map uses a bit per every HeapWord, but the
7857 // CMS generation has a minimum object size of 3 HeapWords, it follows
7858 // that "normal marks" won't be adjacent in the bit map (there will
7859 // always be at least two 0 bits between successive 1 bits). We make use
7860 // of these "unused" bits to represent uninitialized blocks -- the bit
7861 // corresponding to the start of the uninitialized object and the next
7862 // bit are both set. Finally, a 1 bit marks the end of the object that
7863 // started with the two consecutive 1 bits to indicate its potentially
7864 // uninitialized state.
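// [Editor's sketch of reading the encoding above; it mirrors the
//  CMSBitMap calls used in SweepClosure::doLiveChunk below and adds
//  no new functionality:]
//
//   if (bm->isMarked(addr) && bm->isMarked(addr + 1)) {
//     // Printezis-marked: the next 1 bit past addr+1 flags the
//     // last word of the block.
//     HeapWord* last = bm->getNextMarkedWordAddress(addr + 2);
//     size_t size = pointer_delta(last + 1, addr);  // in HeapWords
//   }
//
// For example, a 5-word uninitialized block starting at bit k is
// encoded with bits {k, k+1, k+4} set, and a reader recovers
// size = (k+4) + 1 - k = 5 without touching the (possibly absent)
// object header.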
7866 size_t SweepClosure::do_blk_careful(HeapWord* addr) {
7867 FreeChunk* fc = (FreeChunk*)addr;
7868 size_t res;
7870 // check if we are done sweeping
7871 if (addr == _limit) { // we have swept up to the limit, do nothing more
7872 assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7873 "sweep _limit out of bounds");
7874 // help the closure application finish
7875 return pointer_delta(_sp->end(), _limit);
7876 }
7877 assert(addr <= _limit, "sweep invariant");
7879 // check if we should yield
7880 do_yield_check(addr);
7881 if (fc->isFree()) {
7882 // Chunk that is already free
7883 res = fc->size();
7884 doAlreadyFreeChunk(fc);
7885 debug_only(_sp->verifyFreeLists());
7886 assert(res == fc->size(), "Don't expect the size to change");
7887 NOT_PRODUCT(
7888 _numObjectsAlreadyFree++;
7889 _numWordsAlreadyFree += res;
7890 )
7891 NOT_PRODUCT(_last_fc = fc;)
7892 } else if (!_bitMap->isMarked(addr)) {
7893 // Chunk is fresh garbage
7894 res = doGarbageChunk(fc);
7895 debug_only(_sp->verifyFreeLists());
7896 NOT_PRODUCT(
7897 _numObjectsFreed++;
7898 _numWordsFreed += res;
7899 )
7900 } else {
7901 // Chunk that is alive.
7902 res = doLiveChunk(fc);
7903 debug_only(_sp->verifyFreeLists());
7904 NOT_PRODUCT(
7905 _numObjectsLive++;
7906 _numWordsLive += res;
7907 )
7908 }
7909 return res;
7910 }
7912 // For the smart allocation scheme, record the following:
7913 // split deaths - a free chunk is removed from its free list because
7914 // it is being split into two or more chunks.
7915 // split birth - a free chunk is being added to its free list because
7916 // a larger free chunk has been split and resulted in this free chunk.
7917 // coal death - a free chunk is being removed from its free list because
7918 // it is being coalesced into a large free chunk.
7919 // coal birth - a free chunk is being added to its free list because
7920 // it was created when two or more free chunks were coalesced into
7921 // this free chunk.
7922 //
7923 // These statistics are used to determine the desired number of free
7924 // chunks of a given size. The desired number is chosen to be relative
7925 // to the end of a CMS sweep. The desired number at the end of a sweep
7926 // is the
7927 // count-at-end-of-previous-sweep (an amount that was enough)
7928 // - count-at-beginning-of-current-sweep (the excess)
7929 // + split-births (gains in this size during interval)
7930 // - split-deaths (demands on this size during interval)
7931 // where the interval is from the end of one sweep to the end of the
7932 // next.
7933 //
7934 // When sweeping the sweeper maintains an accumulated chunk which is
7935 // the chunk that is made up of chunks that have been coalesced. That
7936 // will be termed the left-hand chunk. A new chunk of garbage that
7937 // is being considered for coalescing will be referred to as the
7938 // right-hand chunk.
7939 //
7940 // When making a decision on whether to coalesce a right-hand chunk with
7941 // the current left-hand chunk, the current count vs. the desired count
7942 // of the left-hand chunk is considered. Also if the right-hand chunk
7943 // is near the large chunk at the end of the heap (see
7944 // ConcurrentMarkSweepGeneration::isNearLargestChunk()), then the
7945 // left-hand chunk is coalesced.
7946 //
7947 // When making a decision about whether to split a chunk, the desired count
7948 // vs. the current count of the candidate to be split is also considered.
7949 // If the candidate is underpopulated (currently fewer chunks than desired)
7950 // a chunk of an overpopulated (currently more chunks than desired) size may
7951 // be chosen. The "hint" associated with a free list, if non-null, points
7952 // to a free list which may be overpopulated.
7953 //
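// [Editor's worked example of the desired-count formula above, with
//  illustrative numbers:] if a given chunk size had 100 chunks at the
// end of the previous sweep, 40 remain at the start of this sweep,
// and the interval saw 25 split-births and 10 split-deaths, then the
// desired count at the end of the current sweep is
//   100 - 40 + 25 - 10 = 75.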
7955 void SweepClosure::doAlreadyFreeChunk(FreeChunk* fc) {
7956 size_t size = fc->size();
7957 // Chunks that cannot be coalesced are not in the
7958 // free lists.
7959 if (CMSTestInFreeList && !fc->cantCoalesce()) {
7960 assert(_sp->verifyChunkInFreeLists(fc),
7961 "free chunk should be in free lists");
7962 }
7963 // a chunk that is already free should not have been
7964 // marked in the bit map
7965 HeapWord* addr = (HeapWord*) fc;
7966 assert(!_bitMap->isMarked(addr), "free chunk should be unmarked");
7967 // Verify that the bit map has no bits marked between
7968 // addr and purported end of this block.
7969 _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
7971 // Some chunks cannot be coalesced under any circumstances.
7972 // See the definition of cantCoalesce().
7973 if (!fc->cantCoalesce()) {
7974 // This chunk can potentially be coalesced.
7975 if (_sp->adaptive_freelists()) {
7976 // All the work is done in
7977 doPostIsFreeOrGarbageChunk(fc, size);
7978 } else { // Not adaptive free lists
7979 // this is a free chunk that can potentially be coalesced by the sweeper;
7980 if (!inFreeRange()) {
7981 // if the next chunk is a free block that can't be coalesced
7982 // it doesn't make sense to remove this chunk from the free lists
7983 FreeChunk* nextChunk = (FreeChunk*)(addr + size);
7984 assert((HeapWord*)nextChunk <= _limit, "sweep invariant");
7985 if ((HeapWord*)nextChunk < _limit && // there's a next chunk...
7986 nextChunk->isFree() && // which is free...
7987 nextChunk->cantCoalesce()) { // ... but can't be coalesced
7988 // nothing to do
7989 } else {
7990 // Potentially the start of a new free range:
7991 // Don't eagerly remove it from the free lists.
7992 // No need to remove it if it will just be put
7993 // back again. (Also from a pragmatic point of view
7994 // if it is a free block in a region that is beyond
7995 // any allocated blocks, an assertion will fail)
7996 // Remember the start of a free run.
7997 initialize_free_range(addr, true);
7998 // end - can coalesce with next chunk
7999 }
8000 } else {
8001 // the midst of a free range, we are coalescing
8002 debug_only(record_free_block_coalesced(fc);)
8003 if (CMSTraceSweeper) {
8004 gclog_or_tty->print(" -- pick up free block 0x%x (%d)\n", fc, size);
8005 }
8006 // remove it from the free lists
8007 _sp->removeFreeChunkFromFreeLists(fc);
8008 set_lastFreeRangeCoalesced(true);
8009 // If the chunk is being coalesced and the current free range is
8010 // in the free lists, remove the current free range so that it
8011 // will be returned to the free lists in its entirety - all
8012 // the coalesced pieces included.
8013 if (freeRangeInFreeLists()) {
8014 FreeChunk* ffc = (FreeChunk*) freeFinger();
8015 assert(ffc->size() == pointer_delta(addr, freeFinger()),
8016 "Size of free range is inconsistent with chunk size.");
8017 if (CMSTestInFreeList) {
8018 assert(_sp->verifyChunkInFreeLists(ffc),
8019 "free range is not in free lists");
8020 }
8021 _sp->removeFreeChunkFromFreeLists(ffc);
8022 set_freeRangeInFreeLists(false);
8023 }
8024 }
8025 }
8026 } else {
8027 // Code path common to both original and adaptive free lists.
8029 // can't coalesce with previous block; this should be treated
8030 // as the end of a free run if any
8031 if (inFreeRange()) {
8032 // we kicked some butt; time to pick up the garbage
8033 assert(freeFinger() < addr, "the finger pointeth off base");
8034 flushCurFreeChunk(freeFinger(), pointer_delta(addr, freeFinger()));
8035 }
8036 // else, nothing to do, just continue
8037 }
8038 }
8040 size_t SweepClosure::doGarbageChunk(FreeChunk* fc) {
8041 // This is a chunk of garbage. It is not in any free list.
8042 // Add it to a free list or let it possibly be coalesced into
8043 // a larger chunk.
8044 HeapWord* addr = (HeapWord*) fc;
8045 size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
8047 if (_sp->adaptive_freelists()) {
8048 // Verify that the bit map has no bits marked between
8049 // addr and purported end of just dead object.
8050 _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
8052 doPostIsFreeOrGarbageChunk(fc, size);
8053 } else {
8054 if (!inFreeRange()) {
8055 // start of a new free range
8056 assert(size > 0, "A free range should have a size");
8057 initialize_free_range(addr, false);
8059 } else {
8060 // this will be swept up when we hit the end of the
8061 // free range
8062 if (CMSTraceSweeper) {
8063 gclog_or_tty->print(" -- pick up garbage 0x%x (%d) \n", fc, size);
8064 }
8065 // If the chunk is being coalesced and the current free range is
8066 // in the free lists, remove the current free range so that it
8067 // will be returned to the free lists in its entirety - all
8068 // the coalesced pieces included.
8069 if (freeRangeInFreeLists()) {
8070 FreeChunk* ffc = (FreeChunk*)freeFinger();
8071 assert(ffc->size() == pointer_delta(addr, freeFinger()),
8072 "Size of free range is inconsistent with chunk size.");
8073 if (CMSTestInFreeList) {
8074 assert(_sp->verifyChunkInFreeLists(ffc),
8075 "free range is not in free lists");
8076 }
8077 _sp->removeFreeChunkFromFreeLists(ffc);
8078 set_freeRangeInFreeLists(false);
8079 }
8080 set_lastFreeRangeCoalesced(true);
8081 }
8082 // this will be swept up when we hit the end of the free range
8084 // Verify that the bit map has no bits marked between
8085 // addr and purported end of just dead object.
8086 _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
8087 }
8088 return size;
8089 }
8091 size_t SweepClosure::doLiveChunk(FreeChunk* fc) {
8092 HeapWord* addr = (HeapWord*) fc;
8093 // The sweeper has just found a live object. Return any accumulated
8094 // left hand chunk to the free lists.
8095 if (inFreeRange()) {
8096 if (_sp->adaptive_freelists()) {
8097 flushCurFreeChunk(freeFinger(),
8098 pointer_delta(addr, freeFinger()));
8099 } else { // not adaptive freelists
8100 set_inFreeRange(false);
8101 // Add the free range back to the free list if it is not already
8102 // there.
8103 if (!freeRangeInFreeLists()) {
8104 assert(freeFinger() < addr, "the finger pointeth off base");
8105 if (CMSTraceSweeper) {
8106 gclog_or_tty->print("Sweep:put_free_blk 0x%x (%d) "
8107 "[coalesced:%d]\n",
8108 freeFinger(), pointer_delta(addr, freeFinger()),
8109 lastFreeRangeCoalesced());
8110 }
8111 _sp->addChunkAndRepairOffsetTable(freeFinger(),
8112 pointer_delta(addr, freeFinger()), lastFreeRangeCoalesced());
8113 }
8114 }
8115 }
8117 // Common code path for original and adaptive free lists.
8119 // this object is live: we'd normally expect this to be
8120 // an oop, and would like to assert the following:
8121 // assert(oop(addr)->is_oop(), "live block should be an oop");
8122 // However, as we commented above, this may be an object whose
8123 // header hasn't yet been initialized.
8124 size_t size;
8125 assert(_bitMap->isMarked(addr), "Tautology for this control point");
8126 if (_bitMap->isMarked(addr + 1)) {
8127 // Determine the size from the bit map, rather than trying to
8128 // compute it from the object header.
8129 HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
8130 size = pointer_delta(nextOneAddr + 1, addr);
8131 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
8132 "alignment problem");
8134 #ifdef DEBUG
8135 if (oop(addr)->klass_or_null() != NULL &&
8136 ( !_collector->should_unload_classes()
8137 || (oop(addr)->is_parsable() &&
8138 oop(addr)->is_conc_safe()))) {
8139 // Ignore mark word because we are running concurrent with mutators
8140 assert(oop(addr)->is_oop(true), "live block should be an oop");
8141 // is_conc_safe is checked before performing this assertion
8142 // because an object that is not is_conc_safe may yet have
8143 // the return from size() correct.
8144 assert(size ==
8145 CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
8146 "P-mark and computed size do not agree");
8147 }
8148 #endif
8150 } else {
8151 // This should be an initialized object that's alive.
8152 assert(oop(addr)->klass_or_null() != NULL &&
8153 (!_collector->should_unload_classes()
8154 || oop(addr)->is_parsable()),
8155 "Should be an initialized object");
8156 // Note that there are objects used during class redefinition
8157 // (e.g., merge_cp in VM_RedefineClasses::merge_cp_and_rewrite()
8158 // which are discarded with their is_conc_safe state still
8159 // false. These objects may be floating garbage and so may be
8160 // seen here. If they are floating garbage, their size
8161 // should be attainable from their klass. Note that
8162 // is_conc_safe() is true for oop(addr).
8163 // Ignore mark word because we are running concurrent with mutators
8164 assert(oop(addr)->is_oop(true), "live block should be an oop");
8165 // Verify that the bit map has no bits marked between
8166 // addr and purported end of this block.
8167 size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
8168 assert(size >= 3, "Necessary for Printezis marks to work");
8169 assert(!_bitMap->isMarked(addr+1), "Tautology for this control point");
8170 DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);)
8171 }
8172 return size;
8173 }
8175 void SweepClosure::doPostIsFreeOrGarbageChunk(FreeChunk* fc,
8176 size_t chunkSize) {
8177 // doPostIsFreeOrGarbageChunk() should only be called in the smart allocation
8178 // scheme.
8179 bool fcInFreeLists = fc->isFree();
8180 assert(_sp->adaptive_freelists(), "Should only be used in this case.");
8181 assert((HeapWord*)fc <= _limit, "sweep invariant");
8182 if (CMSTestInFreeList && fcInFreeLists) {
8183 assert(_sp->verifyChunkInFreeLists(fc),
8184 "free chunk is not in free lists");
8185 }
8188 if (CMSTraceSweeper) {
8189 gclog_or_tty->print_cr(" -- pick up another chunk at 0x%x (%d)", fc, chunkSize);
8190 }
8192 HeapWord* addr = (HeapWord*) fc;
8194 bool coalesce;
8195 size_t left = pointer_delta(addr, freeFinger());
8196 size_t right = chunkSize;
8197 switch (FLSCoalescePolicy) {
8198 // numeric value forms a coalescing aggressiveness metric
8199 case 0: { // never coalesce
8200 coalesce = false;
8201 break;
8202 }
8203 case 1: { // coalesce if left & right chunks on overpopulated lists
8204 coalesce = _sp->coalOverPopulated(left) &&
8205 _sp->coalOverPopulated(right);
8206 break;
8207 }
8208 case 2: { // coalesce if left chunk on overpopulated list (default)
8209 coalesce = _sp->coalOverPopulated(left);
8210 break;
8211 }
8212 case 3: { // coalesce if left OR right chunk on overpopulated list
8213 coalesce = _sp->coalOverPopulated(left) ||
8214 _sp->coalOverPopulated(right);
8215 break;
8216 }
8217 case 4: { // always coalesce
8218 coalesce = true;
8219 break;
8220 }
8221 default:
8222 ShouldNotReachHere();
8223 }
8225 // Should the current free range be coalesced?
8226 // If the chunk is in a free range and either we decided to coalesce above
8227 // or the chunk is near the large block at the end of the heap
8228 // (isNearLargestChunk() returns true), then coalesce this chunk.
8229 bool doCoalesce = inFreeRange() &&
8230 (coalesce || _g->isNearLargestChunk((HeapWord*)fc));
8231 if (doCoalesce) {
8232 // Coalesce the current free range on the left with the new
8233 // chunk on the right. If either is on a free list,
8234 // it must be removed from the list and stashed in the closure.
8235 if (freeRangeInFreeLists()) {
8236 FreeChunk* ffc = (FreeChunk*)freeFinger();
8237 assert(ffc->size() == pointer_delta(addr, freeFinger()),
8238 "Size of free range is inconsistent with chunk size.");
8239 if (CMSTestInFreeList) {
8240 assert(_sp->verifyChunkInFreeLists(ffc),
8241 "Chunk is not in free lists");
8242 }
8243 _sp->coalDeath(ffc->size());
8244 _sp->removeFreeChunkFromFreeLists(ffc);
8245 set_freeRangeInFreeLists(false);
8246 }
8247 if (fcInFreeLists) {
8248 _sp->coalDeath(chunkSize);
8249 assert(fc->size() == chunkSize,
8250 "The chunk has the wrong size or is not in the free lists");
8251 _sp->removeFreeChunkFromFreeLists(fc);
8252 }
8253 set_lastFreeRangeCoalesced(true);
8254 } else { // not in a free range and/or should not coalesce
8255 // Return the current free range and start a new one.
8256 if (inFreeRange()) {
8257 // In a free range but cannot coalesce with the right hand chunk.
8258 // Put the current free range into the free lists.
8259 flushCurFreeChunk(freeFinger(),
8260 pointer_delta(addr, freeFinger()));
8261 }
8262 // Set up for new free range. Pass along whether the right hand
8263 // chunk is in the free lists.
8264 initialize_free_range((HeapWord*)fc, fcInFreeLists);
8265 }
8266 }
8267 void SweepClosure::flushCurFreeChunk(HeapWord* chunk, size_t size) {
8268 assert(inFreeRange(), "Should only be called if currently in a free range.");
8269 assert(size > 0,
8270 "A zero sized chunk cannot be added to the free lists.");
8271 if (!freeRangeInFreeLists()) {
8272 if (CMSTestInFreeList) {
8273 FreeChunk* fc = (FreeChunk*) chunk;
8274 fc->setSize(size);
8275 assert(!_sp->verifyChunkInFreeLists(fc),
8276 "chunk should not be in free lists yet");
8277 }
8278 if (CMSTraceSweeper) {
8279 gclog_or_tty->print_cr(" -- add free block 0x%x (%d) to free lists",
8280 chunk, size);
8281 }
8282 // A new free range is going to be starting. The current
8283 // free range has not been added to the free lists yet or
8284 // was removed so add it back.
8285 // If the current free range was coalesced, then the death
8286 // of the free range was recorded. Record a birth now.
8287 if (lastFreeRangeCoalesced()) {
8288 _sp->coalBirth(size);
8289 }
8290 _sp->addChunkAndRepairOffsetTable(chunk, size,
8291 lastFreeRangeCoalesced());
8292 }
8293 set_inFreeRange(false);
8294 set_freeRangeInFreeLists(false);
8295 }
8297 // We take a break if we've been at this for a while,
8298 // so as to avoid monopolizing the locks involved.
8299 void SweepClosure::do_yield_work(HeapWord* addr) {
8300 // Return current free chunk being used for coalescing (if any)
8301 // to the appropriate freelist. After yielding, the next
8302 // free block encountered will start a coalescing range of
8303 // free blocks. If the next free block is adjacent to the
8304 // chunk just flushed, they will need to wait for the next
8305 // sweep to be coalesced.
8306 if (inFreeRange()) {
8307 flushCurFreeChunk(freeFinger(), pointer_delta(addr, freeFinger()));
8308 }
8310 // First give up the locks, then yield, then re-lock.
8311 // We should probably use a constructor/destructor idiom to
8312 // do this unlock/lock or modify the MutexUnlocker class to
8313 // serve our purpose. XXX
8314 assert_lock_strong(_bitMap->lock());
8315 assert_lock_strong(_freelistLock);
8316 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
8317 "CMS thread should hold CMS token");
8318 _bitMap->lock()->unlock();
8319 _freelistLock->unlock();
8320 ConcurrentMarkSweepThread::desynchronize(true);
8321 ConcurrentMarkSweepThread::acknowledge_yield_request();
8322 _collector->stopTimer();
8323 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
8324 if (PrintCMSStatistics != 0) {
8325 _collector->incrementYields();
8326 }
8327 _collector->icms_wait();
8329 // See the comment in coordinator_yield()
8330 for (unsigned i = 0; i < CMSYieldSleepCount &&
8331 ConcurrentMarkSweepThread::should_yield() &&
8332 !CMSCollector::foregroundGCIsActive(); ++i) {
8333 os::sleep(Thread::current(), 1, false);
8334 ConcurrentMarkSweepThread::acknowledge_yield_request();
8335 }
8337 ConcurrentMarkSweepThread::synchronize(true);
8338 _freelistLock->lock();
8339 _bitMap->lock()->lock_without_safepoint_check();
8340 _collector->startTimer();
8341 }
8343 #ifndef PRODUCT
8344 // This is actually very useful in a product build if it can
8345 // be called from the debugger. Compile it into the product
8346 // as needed.
8347 bool debug_verifyChunkInFreeLists(FreeChunk* fc) {
8348 return debug_cms_space->verifyChunkInFreeLists(fc);
8349 }
8351 void SweepClosure::record_free_block_coalesced(FreeChunk* fc) const {
8352 if (CMSTraceSweeper) {
8353 gclog_or_tty->print("Sweep:coal_free_blk 0x%x (%d)\n", fc, fc->size());
8354 }
8355 }
8356 #endif
8358 // CMSIsAliveClosure
8359 bool CMSIsAliveClosure::do_object_b(oop obj) {
8360 HeapWord* addr = (HeapWord*)obj;
8361 return addr != NULL &&
8362 (!_span.contains(addr) || _bit_map->isMarked(addr));
8363 }
8365 CMSKeepAliveClosure::CMSKeepAliveClosure( CMSCollector* collector,
8366 MemRegion span,
8367 CMSBitMap* bit_map, CMSMarkStack* mark_stack,
8368 CMSMarkStack* revisit_stack, bool cpc):
8369 KlassRememberingOopClosure(collector, NULL, revisit_stack),
8370 _span(span),
8371 _bit_map(bit_map),
8372 _mark_stack(mark_stack),
8373 _concurrent_precleaning(cpc) {
8374 assert(!_span.is_empty(), "Empty span could spell trouble");
8375 }
8378 // CMSKeepAliveClosure: the serial version
8379 void CMSKeepAliveClosure::do_oop(oop obj) {
8380 HeapWord* addr = (HeapWord*)obj;
8381 if (_span.contains(addr) &&
8382 !_bit_map->isMarked(addr)) {
8383 _bit_map->mark(addr);
8384 bool simulate_overflow = false;
8385 NOT_PRODUCT(
8386 if (CMSMarkStackOverflowALot &&
8387 _collector->simulate_overflow()) {
8388 // simulate a stack overflow
8389 simulate_overflow = true;
8390 }
8391 )
8392 if (simulate_overflow || !_mark_stack->push(obj)) {
8393 if (_concurrent_precleaning) {
8394 // We dirty the overflowed object and let the remark
8395 // phase deal with it.
8396 assert(_collector->overflow_list_is_empty(), "Error");
8397 // In the case of object arrays, we need to dirty all of
8398 // the cards that the object spans. No locking or atomics
8399 // are needed since no one else can be mutating the mod union
8400 // table.
8401 if (obj->is_objArray()) {
8402 size_t sz = obj->size();
8403 HeapWord* end_card_addr =
8404 (HeapWord*)round_to((intptr_t)(addr+sz), CardTableModRefBS::card_size);
8405 MemRegion redirty_range = MemRegion(addr, end_card_addr);
8406 assert(!redirty_range.is_empty(), "Arithmetical tautology");
8407 _collector->_modUnionTable.mark_range(redirty_range);
8408 } else {
8409 _collector->_modUnionTable.mark(addr);
8410 }
8411 _collector->_ser_kac_preclean_ovflw++;
8412 } else {
8413 _collector->push_on_overflow_list(obj);
8414 _collector->_ser_kac_ovflw++;
8415 }
8416 }
8417 }
8418 }
8420 void CMSKeepAliveClosure::do_oop(oop* p) { CMSKeepAliveClosure::do_oop_work(p); }
8421 void CMSKeepAliveClosure::do_oop(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
8423 // CMSParKeepAliveClosure: a parallel version of the above.
8424 // The work queues are private to each closure (thread),
8425 // but (may be) available for stealing by other threads.
8426 void CMSParKeepAliveClosure::do_oop(oop obj) {
8427 HeapWord* addr = (HeapWord*)obj;
8428 if (_span.contains(addr) &&
8429 !_bit_map->isMarked(addr)) {
8430 // In general, during recursive tracing, several threads
8431 // may be concurrently getting here; the first one to
8432 // "tag" it, claims it.
8433 if (_bit_map->par_mark(addr)) {
8434 bool res = _work_queue->push(obj);
8435 assert(res, "Low water mark should be much less than capacity");
8436 // Do a recursive trim in the hope that this will keep
8437 // stack usage lower, but leave some oops for potential stealers
8438 trim_queue(_low_water_mark);
8439 } // Else, another thread got there first
8440 }
8441 }
8443 void CMSParKeepAliveClosure::do_oop(oop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
8444 void CMSParKeepAliveClosure::do_oop(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
8446 void CMSParKeepAliveClosure::trim_queue(uint max) {
8447 while (_work_queue->size() > max) {
8448 oop new_oop;
8449 if (_work_queue->pop_local(new_oop)) {
8450 assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
8451 assert(_bit_map->isMarked((HeapWord*)new_oop),
8452 "no white objects on this stack!");
8453 assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
8454 // iterate over the oops in this oop, marking and pushing
8455 // the ones in CMS heap (i.e. in _span).
8456 new_oop->oop_iterate(&_mark_and_push);
8457 }
8458 }
8459 }
8461 CMSInnerParMarkAndPushClosure::CMSInnerParMarkAndPushClosure(
8462 CMSCollector* collector,
8463 MemRegion span, CMSBitMap* bit_map,
8464 CMSMarkStack* revisit_stack,
8465 OopTaskQueue* work_queue):
8466 Par_KlassRememberingOopClosure(collector, NULL, revisit_stack),
8467 _span(span),
8468 _bit_map(bit_map),
8469 _work_queue(work_queue) { }
8471 void CMSInnerParMarkAndPushClosure::do_oop(oop obj) {
8472 HeapWord* addr = (HeapWord*)obj;
8473 if (_span.contains(addr) &&
8474 !_bit_map->isMarked(addr)) {
8475 if (_bit_map->par_mark(addr)) {
8476 bool simulate_overflow = false;
8477 NOT_PRODUCT(
8478 if (CMSMarkStackOverflowALot &&
8479 _collector->par_simulate_overflow()) {
8480 // simulate a stack overflow
8481 simulate_overflow = true;
8482 }
8483 )
8484 if (simulate_overflow || !_work_queue->push(obj)) {
8485 _collector->par_push_on_overflow_list(obj);
8486 _collector->_par_kac_ovflw++;
8487 }
8488 } // Else another thread got there already
8489 }
8490 }
8492 void CMSInnerParMarkAndPushClosure::do_oop(oop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
8493 void CMSInnerParMarkAndPushClosure::do_oop(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
8495 //////////////////////////////////////////////////////////////////
8496 // CMSExpansionCause /////////////////////////////
8497 //////////////////////////////////////////////////////////////////
8498 const char* CMSExpansionCause::to_string(CMSExpansionCause::Cause cause) {
8499 switch (cause) {
8500 case _no_expansion:
8501 return "No expansion";
8502 case _satisfy_free_ratio:
8503 return "Free ratio";
8504 case _satisfy_promotion:
8505 return "Satisfy promotion";
8506 case _satisfy_allocation:
8507 return "allocation";
8508 case _allocate_par_lab:
8509 return "Par LAB";
8510 case _allocate_par_spooling_space:
8511 return "Par Spooling Space";
8512 case _adaptive_size_policy:
8513 return "Ergonomics";
8514 default:
8515 return "unknown";
8516 }
8517 }
8519 void CMSDrainMarkingStackClosure::do_void() {
8520 // the max number to take from overflow list at a time
8521 const size_t num = _mark_stack->capacity()/4;
8522 assert(!_concurrent_precleaning || _collector->overflow_list_is_empty(),
8523 "Overflow list should be NULL during concurrent phases");
8524 while (!_mark_stack->isEmpty() ||
8525 // if stack is empty, check the overflow list
8526 _collector->take_from_overflow_list(num, _mark_stack)) {
8527 oop obj = _mark_stack->pop();
8528 HeapWord* addr = (HeapWord*)obj;
8529 assert(_span.contains(addr), "Should be within span");
8530 assert(_bit_map->isMarked(addr), "Should be marked");
8531 assert(obj->is_oop(), "Should be an oop");
8532 obj->oop_iterate(_keep_alive);
8533 }
8534 }
8536 void CMSParDrainMarkingStackClosure::do_void() {
8537 // drain queue
8538 trim_queue(0);
8539 }
8541 // Trim our work_queue so its length is below max at return
8542 void CMSParDrainMarkingStackClosure::trim_queue(uint max) {
8543 while (_work_queue->size() > max) {
8544 oop new_oop;
8545 if (_work_queue->pop_local(new_oop)) {
8546 assert(new_oop->is_oop(), "Expected an oop");
8547 assert(_bit_map->isMarked((HeapWord*)new_oop),
8548 "no white objects on this stack!");
8549 assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
8550 // iterate over the oops in this oop, marking and pushing
8551 // the ones in CMS heap (i.e. in _span).
8552 new_oop->oop_iterate(&_mark_and_push);
8553 }
8554 }
8555 }
8557 ////////////////////////////////////////////////////////////////////
8558 // Support for Marking Stack Overflow list handling and related code
8559 ////////////////////////////////////////////////////////////////////
8560 // Much of the following code is similar in shape and spirit to the
8561 // code used in ParNewGC. We should try and share that code
8562 // as much as possible in the future.
8564 #ifndef PRODUCT
8565 // Debugging support for CMSStackOverflowALot
8567 // It's OK to call this multi-threaded; the worst thing
8568 // that can happen is that we'll get a bunch of closely
8569 // spaced simulated overflows, but that's OK, in fact
8570 // probably good as it would exercise the overflow code
8571 // under contention.
8572 bool CMSCollector::simulate_overflow() {
8573 if (_overflow_counter-- <= 0) { // just being defensive
8574 _overflow_counter = CMSMarkStackOverflowInterval;
8575 return true;
8576 } else {
8577 return false;
8578 }
8579 }
8581 bool CMSCollector::par_simulate_overflow() {
8582 return simulate_overflow();
8583 }
8584 #endif
8586 // Single-threaded
8587 bool CMSCollector::take_from_overflow_list(size_t num, CMSMarkStack* stack) {
8588 assert(stack->isEmpty(), "Expected precondition");
8589 assert(stack->capacity() > num, "Shouldn't bite more than we can chew");
8590 size_t i = num;
8591 oop cur = _overflow_list;
8592 const markOop proto = markOopDesc::prototype();
8593 NOT_PRODUCT(ssize_t n = 0;)
8594 for (oop next; i > 0 && cur != NULL; cur = next, i--) {
8595 next = oop(cur->mark());
8596 cur->set_mark(proto); // until proven otherwise
8597 assert(cur->is_oop(), "Should be an oop");
8598 bool res = stack->push(cur);
8599 assert(res, "Bit off more than we can chew?");
8600 NOT_PRODUCT(n++;)
8601 }
8602 _overflow_list = cur;
8603 #ifndef PRODUCT
8604 assert(_num_par_pushes >= n, "Too many pops?");
8605   _num_par_pushes -= n;
8606 #endif
8607 return !stack->isEmpty();
8608 }
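// For illustration, the overflow list is threaded through mark words,
// so a three-element list looks like:
//
//   _overflow_list --> A           B           C
//                      mark == B   mark == C   mark == NULL
//
// take_from_overflow_list(2, stack) would pop A and B onto the stack,
// restore their prototype mark words, and leave _overflow_list == C.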
8610 #define BUSY (oop(0x1aff1aff))
8611 // (MT-safe) Get a prefix of at most "num" from the list.
8612 // The overflow list is chained through the mark word of
8613 // each object in the list. We fetch the entire list,
8614 // break off a prefix of the right size and return the
8615 // remainder. If other threads try to take objects from
8616 // the overflow list at that time, they will wait for
8617 // some time to see if data becomes available. If (and
8618 // only if) another thread places one or more object(s)
8619 // on the global list before we have returned the suffix
8620 // to the global list, we will walk down our local list
8621 // to find its end and append the global list to
8622 // our suffix before returning it. This suffix walk can
8623 // prove to be expensive (quadratic in the amount of traffic)
8624 // when there are many objects in the overflow list and
8625 // there is much producer-consumer contention on the list.
8626 // *NOTE*: The overflow list manipulation code here and
8627 // in ParNewGeneration:: are very similar in shape,
8628 // except that in the ParNew case we use the old (from/eden)
8629 // copy of the object to thread the list via its klass word.
8630 // Because of the common code, if you make any changes in
8631 // the code below, please check the ParNew version to see if
8632 // similar changes might be needed.
8633 // CR 6797058 has been filed to consolidate the common code.
8634 bool CMSCollector::par_take_from_overflow_list(size_t num,
8635 OopTaskQueue* work_q) {
8636 assert(work_q->size() == 0, "First empty local work queue");
8637 assert(num < work_q->max_elems(), "Can't bite more than we can chew");
8638 if (_overflow_list == NULL) {
8639 return false;
8640 }
8641 // Grab the entire list; we'll put back a suffix
8642 oop prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
8643 Thread* tid = Thread::current();
8644 size_t CMSOverflowSpinCount = (size_t)ParallelGCThreads;
8645 size_t sleep_time_millis = MAX2((size_t)1, num/100);
8646 // If the list is busy, we spin for a short while,
8647 // sleeping between attempts to get the list.
8648 for (size_t spin = 0; prefix == BUSY && spin < CMSOverflowSpinCount; spin++) {
8649 os::sleep(tid, sleep_time_millis, false);
8650 if (_overflow_list == NULL) {
8651 // Nothing left to take
8652 return false;
8653 } else if (_overflow_list != BUSY) {
8654 // Try and grab the prefix
8655 prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
8656 }
8657 }
8658 // If the list was found to be empty, or we spun long
8659 // enough, we give up and return empty-handed. If we leave
8660 // the list in the BUSY state below, it must be the case that
8661 // some other thread holds the overflow list and will set it
8662 // to a non-BUSY state in the future.
8663 if (prefix == NULL || prefix == BUSY) {
8664 // Nothing to take or waited long enough
8665 if (prefix == NULL) {
8666 // Write back the NULL in case we overwrote it with BUSY above
8667 // and it is still the same value.
8668 (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
8669 }
8670 return false;
8671 }
8672 assert(prefix != NULL && prefix != BUSY, "Error");
8673 size_t i = num;
8674 oop cur = prefix;
8675 // Walk down the first "num" objects, unless we reach the end.
8676 for (; i > 1 && cur->mark() != NULL; cur = oop(cur->mark()), i--);
8677 if (cur->mark() == NULL) {
8678 // We have "num" or fewer elements in the list, so there
8679 // is nothing to return to the global list.
8680 // Write back the NULL in lieu of the BUSY we wrote
8681 // above, if it is still the same value.
8682 if (_overflow_list == BUSY) {
8683 (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
8684 }
8685 } else {
8686     // Chop off the suffix and return it to the global list.
8687 assert(cur->mark() != BUSY, "Error");
8688 oop suffix_head = cur->mark(); // suffix will be put back on global list
8689 cur->set_mark(NULL); // break off suffix
8690     // It's possible that the list is still in the empty (BUSY) state
8691 // we left it in a short while ago; in that case we may be
8692 // able to place back the suffix without incurring the cost
8693 // of a walk down the list.
8694 oop observed_overflow_list = _overflow_list;
8695 oop cur_overflow_list = observed_overflow_list;
8696 bool attached = false;
8697 while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
8698 observed_overflow_list =
8699 (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
8700 if (cur_overflow_list == observed_overflow_list) {
8701 attached = true;
8702 break;
8703 } else cur_overflow_list = observed_overflow_list;
8704 }
8705 if (!attached) {
8706 // Too bad, someone else sneaked in (at least) an element; we'll need
8707 // to do a splice. Find tail of suffix so we can prepend suffix to global
8708 // list.
8709 for (cur = suffix_head; cur->mark() != NULL; cur = (oop)(cur->mark()));
8710 oop suffix_tail = cur;
8711 assert(suffix_tail != NULL && suffix_tail->mark() == NULL,
8712 "Tautology");
8713 observed_overflow_list = _overflow_list;
8714 do {
8715 cur_overflow_list = observed_overflow_list;
8716 if (cur_overflow_list != BUSY) {
8717 // Do the splice ...
8718 suffix_tail->set_mark(markOop(cur_overflow_list));
8719 } else { // cur_overflow_list == BUSY
8720 suffix_tail->set_mark(NULL);
8721 }
8722 // ... and try to place spliced list back on overflow_list ...
8723 observed_overflow_list =
8724 (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
8725 } while (cur_overflow_list != observed_overflow_list);
8726 // ... until we have succeeded in doing so.
8727 }
8728 }
8730 // Push the prefix elements on work_q
8731 assert(prefix != NULL, "control point invariant");
8732 const markOop proto = markOopDesc::prototype();
8733 oop next;
8734 NOT_PRODUCT(ssize_t n = 0;)
8735 for (cur = prefix; cur != NULL; cur = next) {
8736 next = oop(cur->mark());
8737 cur->set_mark(proto); // until proven otherwise
8738 assert(cur->is_oop(), "Should be an oop");
8739 bool res = work_q->push(cur);
8740 assert(res, "Bit off more than we can chew?");
8741 NOT_PRODUCT(n++;)
8742 }
8743 #ifndef PRODUCT
8744 assert(_num_par_pushes >= n, "Too many pops?");
8745 Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
8746 #endif
8747 return true;
8748 }
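// For illustration, a worked trace of the protocol above: suppose
// _overflow_list is A -> B -> C -> D and num == 2.
//   1. The xchg publishes BUSY; we own A -> B -> C -> D locally.
//   2. The prefix walk stops at B; suffix_head == C.
//   3. If _overflow_list is still BUSY (or NULL), a single CAS installs
//      C as the new head and no list walk is needed.
//   4. Otherwise some thread pushed in the interim, so we walk C -> D
//      to find the tail, point D's mark word at the observed global
//      head (or NULL if it is BUSY), and CAS C in; retry on failure.
// A and B end up on work_q with their prototype mark words restored.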
8750 // Single-threaded
8751 void CMSCollector::push_on_overflow_list(oop p) {
8752 NOT_PRODUCT(_num_par_pushes++;)
8753 assert(p->is_oop(), "Not an oop");
8754 preserve_mark_if_necessary(p);
8755 p->set_mark((markOop)_overflow_list);
8756 _overflow_list = p;
8757 }
8759 // Multi-threaded; use CAS to prepend to overflow list
8760 void CMSCollector::par_push_on_overflow_list(oop p) {
8761 NOT_PRODUCT(Atomic::inc_ptr(&_num_par_pushes);)
8762 assert(p->is_oop(), "Not an oop");
8763 par_preserve_mark_if_necessary(p);
8764 oop observed_overflow_list = _overflow_list;
8765 oop cur_overflow_list;
8766 do {
8767 cur_overflow_list = observed_overflow_list;
8768 if (cur_overflow_list != BUSY) {
8769 p->set_mark(markOop(cur_overflow_list));
8770 } else {
8771 p->set_mark(NULL);
8772 }
8773 observed_overflow_list =
8774 (oop) Atomic::cmpxchg_ptr(p, &_overflow_list, cur_overflow_list);
8775 } while (cur_overflow_list != observed_overflow_list);
8776 }
8777 #undef BUSY
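// Stripped of the BUSY handling, par_push_on_overflow_list() above is
// the classic lock-free prepend. A minimal generic sketch (illustration
// only; the Node type is hypothetical):
//
//   struct Node { Node* next; };
//   void prepend(Node* volatile* head, Node* p) {
//     Node* observed = *head;
//     Node* cur;
//     do {
//       cur = observed;
//       p->next = cur;  // link to the snapshot of the head
//       observed = (Node*) Atomic::cmpxchg_ptr(p, head, cur);
//     } while (cur != observed);  // retry if another thread interfered
//   }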
8779 // Single-threaded
8780 // General Note on GrowableArray: pushes may silently fail
8781 // because we are (temporarily) out of C-heap for expanding
8782 // the stack. The problem is quite ubiquitous and affects
8783 // a lot of code in the JVM. The prudent thing for GrowableArray
8784 // to do (for now) is to exit with an error. However, that may
8785 // be too draconian in some cases because the caller may be
8786 // able to recover without much harm. For such cases, we
8787 // should probably introduce a "soft_push" method which returns
8788 // an indication of success or failure with the assumption that
8789 // the caller may be able to recover from a failure; code in
8790 // the VM can then be changed, incrementally, to deal with such
8791 // failures where possible, thus incrementally hardening the VM
8792 // in such low resource situations.
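// For illustration, a minimal sketch of the soft_push() idea described
// above (GrowableArray has no such method today; try_grow() and the
// exact recovery behavior are assumptions):
//
//   template <typename E>
//   bool GrowableArray<E>::soft_push(const E& elem) {
//     if (_len == _max && !try_grow()) {
//       return false;      // expansion failed; let the caller recover
//     }
//     push(elem);
//     return true;
//   }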
8793 void CMSCollector::preserve_mark_work(oop p, markOop m) {
8794 if (_preserved_oop_stack == NULL) {
8795 assert(_preserved_mark_stack == NULL,
8796 "bijection with preserved_oop_stack");
8797 // Allocate the stacks
8798 _preserved_oop_stack = new (ResourceObj::C_HEAP)
8799 GrowableArray<oop>(PreserveMarkStackSize, true);
8800 _preserved_mark_stack = new (ResourceObj::C_HEAP)
8801 GrowableArray<markOop>(PreserveMarkStackSize, true);
8802 if (_preserved_oop_stack == NULL || _preserved_mark_stack == NULL) {
8803       vm_exit_out_of_memory(2 * PreserveMarkStackSize * sizeof(oop) /* punt */,
8804 "Preserved Mark/Oop Stack for CMS (C-heap)");
8805 }
8806 }
8807 _preserved_oop_stack->push(p);
8808 _preserved_mark_stack->push(m);
8809 assert(m == p->mark(), "Mark word changed");
8810 assert(_preserved_oop_stack->length() == _preserved_mark_stack->length(),
8811 "bijection");
8812 }
8814 // Single-threaded
8815 void CMSCollector::preserve_mark_if_necessary(oop p) {
8816 markOop m = p->mark();
8817 if (m->must_be_preserved(p)) {
8818 preserve_mark_work(p, m);
8819 }
8820 }
8822 void CMSCollector::par_preserve_mark_if_necessary(oop p) {
8823 markOop m = p->mark();
8824 if (m->must_be_preserved(p)) {
8825 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
8826 // Even though we read the mark word without holding
8827 // the lock, we are assured that it will not change
8828 // because we "own" this oop, so no other thread can
8829 // be trying to push it on the overflow list; see
8830 // the assertion in preserve_mark_work() that checks
8831 // that m == p->mark().
8832 preserve_mark_work(p, m);
8833 }
8834 }
8836 // We should be able to do this multi-threaded,
8837 // with a chunk of the stack being a task (this is
8838 // correct because each oop only ever appears
8839 // once in the overflow list). However, it's
8840 // not very easy to completely overlap this with
8841 // other operations, so it will generally not be done
8842 // until all other work has been completed. Because we
8843 // expect the preserved oop stack (set) to be small,
8844 // it's probably fine to do this single-threaded.
8845 // We can explore cleverer concurrent/overlapped/parallel
8846 // processing of preserved marks if we feel the
8847 // need for this in the future. Stack overflow should be
8848 // so rare in practice, and its overall performance impact
8849 // when it does happen so great, that restoring the marks
8850 // single-threaded will likely just be in the noise anyway.
8851 void CMSCollector::restore_preserved_marks_if_any() {
8852 if (_preserved_oop_stack == NULL) {
8853 assert(_preserved_mark_stack == NULL,
8854 "bijection with preserved_oop_stack");
8855 return;
8856 }
8858 assert(SafepointSynchronize::is_at_safepoint(),
8859 "world should be stopped");
8860 assert(Thread::current()->is_ConcurrentGC_thread() ||
8861 Thread::current()->is_VM_thread(),
8862 "should be single-threaded");
8864 int length = _preserved_oop_stack->length();
8865 assert(_preserved_mark_stack->length() == length, "bijection");
8866 for (int i = 0; i < length; i++) {
8867 oop p = _preserved_oop_stack->at(i);
8868 assert(p->is_oop(), "Should be an oop");
8869 assert(_span.contains(p), "oop should be in _span");
8870 assert(p->mark() == markOopDesc::prototype(),
8871 "Set when taken from overflow list");
8872 markOop m = _preserved_mark_stack->at(i);
8873 p->set_mark(m);
8874 }
8875 _preserved_mark_stack->clear();
8876 _preserved_oop_stack->clear();
8877 assert(_preserved_mark_stack->is_empty() &&
8878 _preserved_oop_stack->is_empty(),
8879 "stacks were cleared above");
8880 }
8882 #ifndef PRODUCT
8883 bool CMSCollector::no_preserved_marks() const {
8884 return ( ( _preserved_mark_stack == NULL
8885 && _preserved_oop_stack == NULL)
8886 || ( _preserved_mark_stack->is_empty()
8887 && _preserved_oop_stack->is_empty()));
8888 }
8889 #endif
8891 CMSAdaptiveSizePolicy* ASConcurrentMarkSweepGeneration::cms_size_policy() const
8892 {
8893 GenCollectedHeap* gch = (GenCollectedHeap*) GenCollectedHeap::heap();
8894 CMSAdaptiveSizePolicy* size_policy =
8895 (CMSAdaptiveSizePolicy*) gch->gen_policy()->size_policy();
8896 assert(size_policy->is_gc_cms_adaptive_size_policy(),
8897 "Wrong type for size policy");
8898 return size_policy;
8899 }
8901 void ASConcurrentMarkSweepGeneration::resize(size_t cur_promo_size,
8902 size_t desired_promo_size) {
8903 if (cur_promo_size < desired_promo_size) {
8904 size_t expand_bytes = desired_promo_size - cur_promo_size;
8905 if (PrintAdaptiveSizePolicy && Verbose) {
8906 gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
8907 "Expanding tenured generation by " SIZE_FORMAT " (bytes)",
8908 expand_bytes);
8909 }
8910 expand(expand_bytes,
8911 MinHeapDeltaBytes,
8912 CMSExpansionCause::_adaptive_size_policy);
8913 } else if (desired_promo_size < cur_promo_size) {
8914 size_t shrink_bytes = cur_promo_size - desired_promo_size;
8915 if (PrintAdaptiveSizePolicy && Verbose) {
8916 gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
8917 "Shrinking tenured generation by " SIZE_FORMAT " (bytes)",
8918 shrink_bytes);
8919 }
8920 shrink(shrink_bytes);
8921 }
8922 }
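// A worked example of the resize arithmetic above (illustration only):
// with cur_promo_size == 100M and desired_promo_size == 120M we expand
// by expand_bytes == 20M (subject to MinHeapDeltaBytes); with the sizes
// reversed we shrink by shrink_bytes == 20M; equal sizes are a no-op.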
8924 CMSGCAdaptivePolicyCounters* ASConcurrentMarkSweepGeneration::gc_adaptive_policy_counters() {
8925 GenCollectedHeap* gch = GenCollectedHeap::heap();
8926 CMSGCAdaptivePolicyCounters* counters =
8927 (CMSGCAdaptivePolicyCounters*) gch->collector_policy()->counters();
8928 assert(counters->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
8929 "Wrong kind of counters");
8930 return counters;
8931 }
8934 void ASConcurrentMarkSweepGeneration::update_counters() {
8935 if (UsePerfData) {
8936 _space_counters->update_all();
8937 _gen_counters->update_all();
8938 CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
8939 GenCollectedHeap* gch = GenCollectedHeap::heap();
8940 CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
8941 assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
8942 "Wrong gc statistics type");
8943 counters->update_counters(gc_stats_l);
8944 }
8945 }
8947 void ASConcurrentMarkSweepGeneration::update_counters(size_t used) {
8948 if (UsePerfData) {
8949 _space_counters->update_used(used);
8950 _space_counters->update_capacity();
8951 _gen_counters->update_all();
8953 CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
8954 GenCollectedHeap* gch = GenCollectedHeap::heap();
8955 CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
8956 assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
8957 "Wrong gc statistics type");
8958 counters->update_counters(gc_stats_l);
8959 }
8960 }
8962 // The desired expansion delta is computed so that the
8963 // desired free percentage (or greater) of the generation is used.
8964 void ASConcurrentMarkSweepGeneration::compute_new_size() {
8965 assert_locked_or_safepoint(Heap_lock);
8967 GenCollectedHeap* gch = (GenCollectedHeap*) GenCollectedHeap::heap();
8969 // If incremental collection failed, we just want to expand
8970 // to the limit.
8971 if (incremental_collection_failed()) {
8972 clear_incremental_collection_failed();
8973 grow_to_reserved();
8974 return;
8975 }
8977 assert(UseAdaptiveSizePolicy, "Should be using adaptive sizing");
8979 assert(gch->kind() == CollectedHeap::GenCollectedHeap,
8980 "Wrong type of heap");
8981 int prev_level = level() - 1;
8982   assert(prev_level >= 0, "The cms generation should not be the lowest generation");
8983 Generation* prev_gen = gch->get_gen(prev_level);
8984 assert(prev_gen->kind() == Generation::ASParNew,
8985 "Wrong type of young generation");
8986 ParNewGeneration* younger_gen = (ParNewGeneration*) prev_gen;
8987 size_t cur_eden = younger_gen->eden()->capacity();
8988 CMSAdaptiveSizePolicy* size_policy = cms_size_policy();
8989 size_t cur_promo = free();
8990 size_policy->compute_tenured_generation_free_space(cur_promo,
8991 max_available(),
8992 cur_eden);
8993 resize(cur_promo, size_policy->promo_size());
8995 // Record the new size of the space in the cms generation
8996 // that is available for promotions. This is temporary.
8997 // It should be the desired promo size.
8998 size_policy->avg_cms_promo()->sample(free());
8999 size_policy->avg_old_live()->sample(used());
9001 if (UsePerfData) {
9002 CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
9003 counters->update_cms_capacity_counter(capacity());
9004 }
9005 }
9007 void ASConcurrentMarkSweepGeneration::shrink_by(size_t desired_bytes) {
9008 assert_locked_or_safepoint(Heap_lock);
9009 assert_lock_strong(freelistLock());
9010 HeapWord* old_end = _cmsSpace->end();
9011 HeapWord* unallocated_start = _cmsSpace->unallocated_block();
9012 assert(old_end >= unallocated_start, "Miscalculation of unallocated_start");
9013 FreeChunk* chunk_at_end = find_chunk_at_end();
9014 if (chunk_at_end == NULL) {
9015 // No room to shrink
9016 if (PrintGCDetails && Verbose) {
9017 gclog_or_tty->print_cr("No room to shrink: old_end "
9018 PTR_FORMAT " unallocated_start " PTR_FORMAT
9019 " chunk_at_end " PTR_FORMAT,
9020 old_end, unallocated_start, chunk_at_end);
9021 }
9022 return;
9023 } else {
9025 // Find the chunk at the end of the space and determine
9026 // how much it can be shrunk.
9027 size_t shrinkable_size_in_bytes = chunk_at_end->size();
9028 size_t aligned_shrinkable_size_in_bytes =
9029 align_size_down(shrinkable_size_in_bytes, os::vm_page_size());
9030 assert(unallocated_start <= chunk_at_end->end(),
9031 "Inconsistent chunk at end of space");
9032 size_t bytes = MIN2(desired_bytes, aligned_shrinkable_size_in_bytes);
9033 size_t word_size_before = heap_word_size(_virtual_space.committed_size());
9035 // Shrink the underlying space
9036 _virtual_space.shrink_by(bytes);
9037 if (PrintGCDetails && Verbose) {
9038       gclog_or_tty->print_cr("ASConcurrentMarkSweepGeneration::shrink_by:"
9039 " desired_bytes " SIZE_FORMAT
9040 " shrinkable_size_in_bytes " SIZE_FORMAT
9041 " aligned_shrinkable_size_in_bytes " SIZE_FORMAT
9042 " bytes " SIZE_FORMAT,
9043 desired_bytes, shrinkable_size_in_bytes,
9044 aligned_shrinkable_size_in_bytes, bytes);
9045       gclog_or_tty->print_cr("          old_end  " PTR_FORMAT
9046         "  unallocated_start  " PTR_FORMAT,
9047         old_end, unallocated_start);
9048 }
9050 // If the space did shrink (shrinking is not guaranteed),
9051 // shrink the chunk at the end by the appropriate amount.
9052 if (((HeapWord*)_virtual_space.high()) < old_end) {
9053 size_t new_word_size =
9054 heap_word_size(_virtual_space.committed_size());
9056       // We have to remove the chunk from the dictionary because it is
9057       // changing size and might belong elsewhere in the dictionary.
9059       // Get the chunk at the end, shrink it, and put it
9060       // back.
9061 _cmsSpace->removeChunkFromDictionary(chunk_at_end);
9062 size_t word_size_change = word_size_before - new_word_size;
9063 size_t chunk_at_end_old_size = chunk_at_end->size();
9064 assert(chunk_at_end_old_size >= word_size_change,
9065 "Shrink is too large");
9066 chunk_at_end->setSize(chunk_at_end_old_size -
9067 word_size_change);
9068 _cmsSpace->freed((HeapWord*) chunk_at_end->end(),
9069 word_size_change);
9071 _cmsSpace->returnChunkToDictionary(chunk_at_end);
9073 MemRegion mr(_cmsSpace->bottom(), new_word_size);
9074 _bts->resize(new_word_size); // resize the block offset shared array
9075 Universe::heap()->barrier_set()->resize_covered_region(mr);
9076 _cmsSpace->assert_locked();
9077 _cmsSpace->set_end((HeapWord*)_virtual_space.high());
9079 NOT_PRODUCT(_cmsSpace->dictionary()->verify());
9081 // update the space and generation capacity counters
9082 if (UsePerfData) {
9083 _space_counters->update_capacity();
9084 _gen_counters->update_all();
9085 }
9087 if (Verbose && PrintGCDetails) {
9088 size_t new_mem_size = _virtual_space.committed_size();
9089 size_t old_mem_size = new_mem_size + bytes;
9090         gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
9091           name(), old_mem_size/K, bytes/K, new_mem_size/K);
9092 }
9093 }
9095 assert(_cmsSpace->unallocated_block() <= _cmsSpace->end(),
9096 "Inconsistency at end of space");
9097 assert(chunk_at_end->end() == _cmsSpace->end(),
9098 "Shrinking is inconsistent");
9099 return;
9100 }
9101 }
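// A worked example of the shrink arithmetic above (illustration only,
// assuming a 4K page size and a 64-bit VM): a chunk of 1,000,000 words
// is 8,000,000 bytes, which aligns down to 7,999,488 bytes; with
// desired_bytes == 4,000,000 we shrink the virtual space by
// MIN2(4,000,000, 7,999,488) == 4,000,000 bytes and reduce the chunk
// by the corresponding word_size_change.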
9103 // Transfer some number of overflowed objects to the usual marking
9104 // stack. Return true if any objects were transferred.
9105 bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
9106 size_t num = MIN2((size_t)(_mark_stack->capacity() - _mark_stack->length())/4,
9107 (size_t)ParGCDesiredObjsFromOverflowList);
9109 bool res = _collector->take_from_overflow_list(num, _mark_stack);
9110 assert(_collector->overflow_list_is_empty() || res,
9111 "If list is not empty, we should have taken something");
9112 assert(!res || !_mark_stack->isEmpty(),
9113 "If we took something, it should now be on our stack");
9114 return res;
9115 }
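// A worked example of the num computation above (illustration only):
// with capacity() == 16384 and length() == 4096 the first term is
// (16384 - 4096) / 4 == 3072, so num is the smaller of 3072 and
// ParGCDesiredObjsFromOverflowList; the cap keeps a single refill from
// consuming more than a quarter of the remaining stack space.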
9117 size_t MarkDeadObjectsClosure::do_blk(HeapWord* addr) {
9118 size_t res = _sp->block_size_no_stall(addr, _collector);
9119 assert(res != 0, "Should always be able to compute a size");
9120 if (_sp->block_is_obj(addr)) {
9121 if (_live_bit_map->isMarked(addr)) {
9122 // It can't have been dead in a previous cycle
9123 guarantee(!_dead_bit_map->isMarked(addr), "No resurrection!");
9124 } else {
9125 _dead_bit_map->mark(addr); // mark the dead object
9126 }
9127 }
9128 return res;
9129 }