Wed, 23 Sep 2009 23:57:44 -0700
6885169: merge of 4957990 and 6863023 causes conflict on do_nmethods
Summary: After mechanically merging changes, some by-hand adjustments are needed.
Reviewed-by: ysr
/*
 * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_concurrentMarkSweepGeneration.cpp.incl"

// statics
CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
bool CMSCollector::_full_gc_requested = false;

//////////////////////////////////////////////////////////////////
// In support of CMS/VM thread synchronization
//////////////////////////////////////////////////////////////////
// We split use of the CGC_lock into 2 "levels".
// The low-level locking is of the usual CGC_lock monitor. We introduce
// a higher level "token" (hereafter "CMS token") built on top of the
// low level monitor (hereafter "CGC lock").
// The token-passing protocol gives priority to the VM thread. The
// CMS-lock doesn't provide any fairness guarantees, but clients
// should ensure that it is only held for very short, bounded
// durations.
//
// When either of the CMS thread or the VM thread is involved in
// collection operations during which it does not want the other
// thread to interfere, it obtains the CMS token.
//
// If either thread tries to get the token while the other has
// it, that thread waits. However, if the VM thread and CMS thread
// both want the token, then the VM thread gets priority while the
// CMS thread waits. This ensures, for instance, that the "concurrent"
// phases of the CMS thread's work do not block out the VM thread
// for long periods of time as the CMS thread continues to hog
// the token. (See bug 4616232).
//
// The baton-passing functions are, however, controlled by the
// flags _foregroundGCShouldWait and _foregroundGCIsActive,
// and here the low-level CMS lock, not the high level token,
// ensures mutual exclusion.
//
// Two important conditions that we have to satisfy:
// 1. if a thread does a low-level wait on the CMS lock, then it
//    relinquishes the CMS token if it were holding that token
//    when it acquired the low-level CMS lock.
// 2. any low-level notifications on the low-level lock
//    should only be sent when a thread has relinquished the token.
//
// In the absence of either property, we'd have potential deadlock.
//
// We protect each of the CMS (concurrent and sequential) phases
// with the CMS _token_, not the CMS _lock_.
//
// The only code protected by CMS lock is the token acquisition code
// itself, see ConcurrentMarkSweepThread::[de]synchronize(), and the
// baton-passing code.
//
// Unfortunately, I couldn't come up with a good abstraction to factor and
// hide the naked CGC_lock manipulation in the baton-passing code
// further below. That's something we should try to do. Also, the proof
// of correctness of this 2-level locking scheme is far from obvious,
// and potentially quite slippery. We have an uneasy suspicion, for instance,
// that there may be a theoretical possibility of delay/starvation in the
// low-level lock/wait/notify scheme used for the baton-passing because of
// potential interference with the priority scheme embodied in the
// CMS-token-passing protocol. See related comments at a CGC_lock->wait()
// invocation further below and marked with "XXX 20011219YSR".
// Indeed, as we note elsewhere, this may become yet more slippery
// in the presence of multiple CMS and/or multiple VM threads. XXX

class CMSTokenSync: public StackObj {
 private:
  bool _is_cms_thread;
 public:
  CMSTokenSync(bool is_cms_thread):
    _is_cms_thread(is_cms_thread) {
    assert(is_cms_thread == Thread::current()->is_ConcurrentGC_thread(),
           "Incorrect argument to constructor");
    ConcurrentMarkSweepThread::synchronize(_is_cms_thread);
  }

  ~CMSTokenSync() {
    assert(_is_cms_thread ?
             ConcurrentMarkSweepThread::cms_thread_has_cms_token() :
             ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
           "Incorrect state");
    ConcurrentMarkSweepThread::desynchronize(_is_cms_thread);
  }
};
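
// Illustrative usage sketch (not from the original sources): a VM-thread
// operation that must not be interleaved with CMS-thread work would scope
// the token like this:
//
//   {
//     CMSTokenSync ts(false /* is_cms_thread */);
//     // ... work that must exclude the CMS thread ...
//   }  // token relinquished by the destructor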

// Convenience class that does a CMSTokenSync, and then acquires
// up to three locks.
class CMSTokenSyncWithLocks: public CMSTokenSync {
 private:
  // Note: locks are acquired in textual declaration order
  // and released in the opposite order
  MutexLockerEx _locker1, _locker2, _locker3;
 public:
  CMSTokenSyncWithLocks(bool is_cms_thread, Mutex* mutex1,
                        Mutex* mutex2 = NULL, Mutex* mutex3 = NULL):
    CMSTokenSync(is_cms_thread),
    _locker1(mutex1, Mutex::_no_safepoint_check_flag),
    _locker2(mutex2, Mutex::_no_safepoint_check_flag),
    _locker3(mutex3, Mutex::_no_safepoint_check_flag)
  { }
};

// Wrapper class to temporarily disable icms during a foreground cms collection.
class ICMSDisabler: public StackObj {
 public:
  // The ctor disables icms and wakes up the thread so it notices the change;
  // the dtor re-enables icms.  Note that the CMSCollector methods will check
  // CMSIncrementalMode.
  ICMSDisabler()  { CMSCollector::disable_icms(); CMSCollector::start_icms(); }
  ~ICMSDisabler() { CMSCollector::enable_icms(); }
};

//////////////////////////////////////////////////////////////////
// Concurrent Mark-Sweep Generation /////////////////////////////
//////////////////////////////////////////////////////////////////

NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;)

// This struct contains per-thread things necessary to support parallel
// young-gen collection.
class CMSParGCThreadState: public CHeapObj {
 public:
  CFLS_LAB lab;
  PromotionInfo promo;

  // Constructor.
  CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
    promo.setSpace(cfls);
  }
};

ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
     ReservedSpace rs, size_t initial_byte_size, int level,
     CardTableRS* ct, bool use_adaptive_freelists,
     FreeBlockDictionary::DictionaryChoice dictionaryChoice) :
  CardGeneration(rs, initial_byte_size, level, ct),
  _dilatation_factor(((double)MinChunkSize)/((double)(oopDesc::header_size()))),
  _debug_collection_type(Concurrent_collection_type)
{
  HeapWord* bottom = (HeapWord*) _virtual_space.low();
  HeapWord* end    = (HeapWord*) _virtual_space.high();

  _direct_allocated_words = 0;
  NOT_PRODUCT(
    _numObjectsPromoted = 0;
    _numWordsPromoted = 0;
    _numObjectsAllocated = 0;
    _numWordsAllocated = 0;
  )

  _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end),
                                           use_adaptive_freelists,
                                           dictionaryChoice);
  NOT_PRODUCT(debug_cms_space = _cmsSpace;)
  if (_cmsSpace == NULL) {
    vm_exit_during_initialization(
      "CompactibleFreeListSpace allocation failure");
  }
  _cmsSpace->_gen = this;

  _gc_stats = new CMSGCStats();

  // Verify the assumption that FreeChunk::_prev and OopDesc::_klass
  // offsets match. The ability to tell free chunks from objects
  // depends on this property.
  debug_only(
    FreeChunk* junk = NULL;
    assert(UseCompressedOops ||
           junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
           "Offset of FreeChunk::_prev within FreeChunk must match"
           " that of OopDesc::_klass within OopDesc");
  )
  if (ParallelGCThreads > 0) {
    typedef CMSParGCThreadState* CMSParGCThreadStatePtr;
    _par_gc_thread_states =
      NEW_C_HEAP_ARRAY(CMSParGCThreadStatePtr, ParallelGCThreads);
    if (_par_gc_thread_states == NULL) {
      vm_exit_during_initialization("Could not allocate par gc structs");
    }
    for (uint i = 0; i < ParallelGCThreads; i++) {
      _par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace());
      if (_par_gc_thread_states[i] == NULL) {
        vm_exit_during_initialization("Could not allocate par gc structs");
      }
    }
  } else {
    _par_gc_thread_states = NULL;
  }
  _incremental_collection_failed = false;
  // The "dilatation_factor" is the expansion that can occur on
  // account of the fact that the minimum object size in the CMS
  // generation may be larger than that in, say, a contiguous young
  // generation.
  // Ideally, in the calculation below, we'd compute the dilatation
  // factor as: MinChunkSize/(promoting_gen's min object size)
  // Since we do not have such a general query interface for the
  // promoting generation, we'll instead just use the minimum
  // object size (which today is a header's worth of space);
  // note that all arithmetic is in units of HeapWords.
  assert(MinChunkSize >= oopDesc::header_size(), "just checking");
  assert(_dilatation_factor >= 1.0, "from previous assert");
}
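
// Worked example (illustrative sizes, which vary by platform and by
// UseCompressedOops): if MinChunkSize is 4 heap words and
// oopDesc::header_size() is 2 heap words, then
// _dilatation_factor = 4.0 / 2.0 = 2.0, i.e. in the worst case a young-gen
// object may need up to twice as many words once promoted here.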

// The field "_initiating_occupancy" represents the occupancy percentage
// at which we trigger a new collection cycle.  Unless explicitly specified
// via CMSInitiating[Perm]OccupancyFraction (argument "io" below), it
// is calculated by:
//
//   Let "f" be MinHeapFreeRatio in
//
//    _initiating_occupancy = 100-f +
//                           f * (CMSTrigger[Perm]Ratio/100)
//   where CMSTrigger[Perm]Ratio is the argument "tr" below.
//
// That is, if we assume the heap is at its desired maximum occupancy at the
// end of a collection, we let CMSTrigger[Perm]Ratio of the (purported) free
// space be allocated before initiating a new collection cycle.
//
void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, intx tr) {
  assert(io <= 100 && tr >= 0 && tr <= 100, "Check the arguments");
  if (io >= 0) {
    _initiating_occupancy = (double)io / 100.0;
  } else {
    _initiating_occupancy = ((100 - MinHeapFreeRatio) +
                             (double)(tr * MinHeapFreeRatio) / 100.0)
                            / 100.0;
  }
}
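
// Worked example (assuming the then-default values MinHeapFreeRatio = 40
// and CMSTriggerRatio = 80): with io < 0,
//   _initiating_occupancy = ((100 - 40) + (80 * 40) / 100) / 100
//                         = (60 + 32) / 100
//                         = 0.92
// i.e. a new cycle is initiated once the generation is 92% occupied.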

void ConcurrentMarkSweepGeneration::ref_processor_init() {
  assert(collector() != NULL, "no collector");
  collector()->ref_processor_init();
}

void CMSCollector::ref_processor_init() {
  if (_ref_processor == NULL) {
    // Allocate and initialize a reference processor
    _ref_processor = ReferenceProcessor::create_ref_processor(
        _span,                               // span
        _cmsGen->refs_discovery_is_atomic(), // atomic_discovery
        _cmsGen->refs_discovery_is_mt(),     // mt_discovery
        &_is_alive_closure,
        ParallelGCThreads,
        ParallelRefProcEnabled);
    // Initialize the _ref_processor field of CMSGen
    _cmsGen->set_ref_processor(_ref_processor);

    // Allocate a dummy ref processor for perm gen.
    ReferenceProcessor* rp2 = new ReferenceProcessor();
    if (rp2 == NULL) {
      vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
    }
    _permGen->set_ref_processor(rp2);
  }
}

CMSAdaptiveSizePolicy* CMSCollector::size_policy() {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
         "Wrong type of heap");
  CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
    gch->gen_policy()->size_policy();
  assert(sp->is_gc_cms_adaptive_size_policy(),
         "Wrong type of size policy");
  return sp;
}

CMSGCAdaptivePolicyCounters* CMSCollector::gc_adaptive_policy_counters() {
  CMSGCAdaptivePolicyCounters* results =
    (CMSGCAdaptivePolicyCounters*) collector_policy()->counters();
  assert(
    results->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
    "Wrong gc policy counter kind");
  return results;
}

void ConcurrentMarkSweepGeneration::initialize_performance_counters() {

  const char* gen_name = "old";

  // Generation Counters - generation 1, 1 subspace
  _gen_counters = new GenerationCounters(gen_name, 1, 1, &_virtual_space);

  _space_counters = new GSpaceCounters(gen_name, 0,
                                       _virtual_space.reserved_size(),
                                       this, _gen_counters);
}

CMSStats::CMSStats(ConcurrentMarkSweepGeneration* cms_gen, unsigned int alpha):
  _cms_gen(cms_gen)
{
  assert(alpha <= 100, "bad value");
  _saved_alpha = alpha;

  // Initialize the alphas to the bootstrap value of 100.
  _gc0_alpha = _cms_alpha = 100;

  _cms_begin_time.update();
  _cms_end_time.update();

  _gc0_duration = 0.0;
  _gc0_period = 0.0;
  _gc0_promoted = 0;

  _cms_duration = 0.0;
  _cms_period = 0.0;
  _cms_allocated = 0;

  _cms_used_at_gc0_begin = 0;
  _cms_used_at_gc0_end = 0;
  _allow_duty_cycle_reduction = false;
  _valid_bits = 0;
  _icms_duty_cycle = CMSIncrementalDutyCycle;
}

// If promotion failure handling is on, use the padded average
// promotion size for each young generation collection as the
// expected promotion.
double CMSStats::time_until_cms_gen_full() const {
  size_t cms_free = _cms_gen->cmsSpace()->free();
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  size_t expected_promotion = gch->get_gen(0)->capacity();
  if (HandlePromotionFailure) {
    expected_promotion = MIN2(
        (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average(),
        expected_promotion);
  }
  if (cms_free > expected_promotion) {
    // Start a cms collection if there isn't enough space to promote
    // for the next minor collection.  Use the padded average as
    // a safety factor.
    cms_free -= expected_promotion;

    // Adjust by the safety factor.
    double cms_free_dbl = (double)cms_free;
    cms_free_dbl = cms_free_dbl * (100.0 - CMSIncrementalSafetyFactor) / 100.0;

    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr("CMSStats::time_until_cms_gen_full: cms_free "
        SIZE_FORMAT " expected_promotion " SIZE_FORMAT,
        cms_free, expected_promotion);
      gclog_or_tty->print_cr("  cms_free_dbl %f cms_consumption_rate %f",
        cms_free_dbl, cms_consumption_rate() + 1.0);
    }
    // Add 1 in case the consumption rate goes to zero.
    return cms_free_dbl / (cms_consumption_rate() + 1.0);
  }
  return 0.0;
}
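
// Worked example (illustrative numbers): with cms_free = 100M words,
// expected_promotion = 20M words and CMSIncrementalSafetyFactor = 10,
// cms_free becomes 80M words, cms_free_dbl = 80M * 0.9 = 72M, and at a
// consumption rate of 8M words/sec the estimate returned is roughly
// 72M / (8M + 1) ~= 9 seconds until the generation is full.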

// Compare the duration of the cms collection to the
// time remaining before the cms generation is empty.
// Note that the time from the start of the cms collection
// to the start of the cms sweep (less than the total
// duration of the cms collection) can be used.  This
// has been tried and some applications experienced
// promotion failures early in execution.  This was
// possibly because the averages were not accurate
// enough at the beginning.
double CMSStats::time_until_cms_start() const {
  // We add "gc0_period" to the "work" calculation
  // below because this query is done (mostly) at the
  // end of a scavenge, so we need to conservatively
  // account for that much possible delay
  // in the query so as to avoid concurrent mode failures
  // due to starting the collection just a wee bit too
  // late.
  double work = cms_duration() + gc0_period();
  double deadline = time_until_cms_gen_full();
  if (work > deadline) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(
        " CMSCollector: collect because of anticipated promotion "
        "before full %3.7f + %3.7f > %3.7f ", cms_duration(),
        gc0_period(), time_until_cms_gen_full());
    }
    return 0.0;
  }
  // The time we can still defer the start is the deadline less the
  // estimated work, not the other way around.
  return deadline - work;
}
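
// Worked example (illustrative numbers): with cms_duration() = 5.0s,
// gc0_period() = 2.0s and time_until_cms_gen_full() = 10.0s, we have
// work = 7.0s <= deadline = 10.0s, so the start of the next cycle can
// be deferred by about 10.0 - 7.0 = 3.0 seconds.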

// Return a duty cycle based on old_duty_cycle and new_duty_cycle, limiting the
// amount of change to prevent wild oscillation.
unsigned int CMSStats::icms_damped_duty_cycle(unsigned int old_duty_cycle,
                                              unsigned int new_duty_cycle) {
  assert(old_duty_cycle <= 100, "bad input value");
  assert(new_duty_cycle <= 100, "bad input value");

  // Note:  use subtraction with caution since it may underflow (values are
  // unsigned).  Addition is safe since we're in the range 0-100.
  unsigned int damped_duty_cycle = new_duty_cycle;
  if (new_duty_cycle < old_duty_cycle) {
    const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 5U);
    if (new_duty_cycle + largest_delta < old_duty_cycle) {
      damped_duty_cycle = old_duty_cycle - largest_delta;
    }
  } else if (new_duty_cycle > old_duty_cycle) {
    const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 15U);
    if (new_duty_cycle > old_duty_cycle + largest_delta) {
      damped_duty_cycle = MIN2(old_duty_cycle + largest_delta, 100U);
    }
  }
  assert(damped_duty_cycle <= 100, "invalid duty cycle computed");

  if (CMSTraceIncrementalPacing) {
    gclog_or_tty->print(" [icms_damped_duty_cycle(%d,%d) = %d] ",
                           old_duty_cycle, new_duty_cycle, damped_duty_cycle);
  }
  return damped_duty_cycle;
}
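
// Worked example: icms_damped_duty_cycle(60, 20) computes
// largest_delta = MAX2(60 / 4, 5U) = 15; since 20 + 15 < 60, the result
// is damped to 60 - 15 = 45 instead of dropping straight to 20.
// Increases are damped the same way, but with a larger minimum step of 15.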

unsigned int CMSStats::icms_update_duty_cycle_impl() {
  assert(CMSIncrementalPacing && valid(),
         "should be handled in icms_update_duty_cycle()");

  double cms_time_so_far = cms_timer().seconds();
  double scaled_duration = cms_duration_per_mb() * _cms_used_at_gc0_end / M;
  double scaled_duration_remaining = fabsd(scaled_duration - cms_time_so_far);

  // Avoid division by 0.
  double time_until_full = MAX2(time_until_cms_gen_full(), 0.01);
  double duty_cycle_dbl = 100.0 * scaled_duration_remaining / time_until_full;

  unsigned int new_duty_cycle = MIN2((unsigned int)duty_cycle_dbl, 100U);
  if (new_duty_cycle > _icms_duty_cycle) {
    // Avoid very small duty cycles (1 or 2); 0 is allowed.
    if (new_duty_cycle > 2) {
      _icms_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle,
                                                new_duty_cycle);
    }
  } else if (_allow_duty_cycle_reduction) {
    // The duty cycle is reduced only once per cms cycle (see record_cms_end()).
    new_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle, new_duty_cycle);
    // Respect the minimum duty cycle.
    unsigned int min_duty_cycle = (unsigned int)CMSIncrementalDutyCycleMin;
    _icms_duty_cycle = MAX2(new_duty_cycle, min_duty_cycle);
  }

  if (PrintGCDetails || CMSTraceIncrementalPacing) {
    gclog_or_tty->print(" icms_dc=%d ", _icms_duty_cycle);
  }

  _allow_duty_cycle_reduction = false;
  return _icms_duty_cycle;
}
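
// Worked example (illustrative numbers): with 2.0s of scaled concurrent
// work remaining and time_until_full = 8.0s,
// duty_cycle_dbl = 100.0 * 2.0 / 8.0 = 25.0, so icms requests a 25%
// duty cycle, subject to the damping and minimum-duty-cycle rules above.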

#ifndef PRODUCT
void CMSStats::print_on(outputStream *st) const {
  st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
  st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
               gc0_duration(), gc0_period(), gc0_promoted());
  st->print(",cms_dur=%g,cms_dur_per_mb=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
            cms_duration(), cms_duration_per_mb(),
            cms_period(), cms_allocated());
  st->print(",cms_since_beg=%g,cms_since_end=%g",
            cms_time_since_begin(), cms_time_since_end());
  st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
            _cms_used_at_gc0_begin, _cms_used_at_gc0_end);
  if (CMSIncrementalMode) {
    st->print(",dc=%d", icms_duty_cycle());
  }

  if (valid()) {
    st->print(",promo_rate=%g,cms_alloc_rate=%g",
              promotion_rate(), cms_allocation_rate());
    st->print(",cms_consumption_rate=%g,time_until_full=%g",
              cms_consumption_rate(), time_until_cms_gen_full());
  }
  st->print(" ");
}
#endif // #ifndef PRODUCT

CMSCollector::CollectorState CMSCollector::_collectorState =
                             CMSCollector::Idling;
bool CMSCollector::_foregroundGCIsActive = false;
bool CMSCollector::_foregroundGCShouldWait = false;

CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
                           ConcurrentMarkSweepGeneration* permGen,
                           CardTableRS*                   ct,
                           ConcurrentMarkSweepPolicy*     cp):
  _cmsGen(cmsGen),
  _permGen(permGen),
  _ct(ct),
  _ref_processor(NULL),    // will be set later
  _conc_workers(NULL),     // may be set later
  _abort_preclean(false),
  _start_sampling(false),
  _between_prologue_and_epilogue(false),
  _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
  _perm_gen_verify_bit_map(0, -1 /* no mutex */, "No_lock"),
  _modUnionTable((CardTableModRefBS::card_shift - LogHeapWordSize),
                 -1 /* lock-free */, "No_lock" /* dummy */),
  _modUnionClosure(&_modUnionTable),
  _modUnionClosurePar(&_modUnionTable),
  // Adjust my span to cover old (cms) gen and perm gen
  _span(cmsGen->reserved()._union(permGen->reserved())),
  // Construct the is_alive_closure with _span & markBitMap
  _is_alive_closure(_span, &_markBitMap),
  _restart_addr(NULL),
  _overflow_list(NULL),
  _preserved_oop_stack(NULL),
  _preserved_mark_stack(NULL),
  _stats(cmsGen),
  _eden_chunk_array(NULL),     // may be set in ctor body
  _eden_chunk_capacity(0),     // -- ditto --
  _eden_chunk_index(0),        // -- ditto --
  _survivor_plab_array(NULL),  // -- ditto --
  _survivor_chunk_array(NULL), // -- ditto --
  _survivor_chunk_capacity(0), // -- ditto --
  _survivor_chunk_index(0),    // -- ditto --
  _ser_pmc_preclean_ovflw(0),
  _ser_kac_preclean_ovflw(0),
  _ser_pmc_remark_ovflw(0),
  _par_pmc_remark_ovflw(0),
  _ser_kac_ovflw(0),
  _par_kac_ovflw(0),
#ifndef PRODUCT
  _num_par_pushes(0),
#endif
  _collection_count_start(0),
  _verifying(false),
  _icms_start_limit(NULL),
  _icms_stop_limit(NULL),
  _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
  _completed_initialization(false),
  _collector_policy(cp),
  _should_unload_classes(false),
  _concurrent_cycles_since_last_unload(0),
  _roots_scanning_options(0),
  _sweep_estimate(CMS_SweepWeight, CMS_SweepPadding)
{
  if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
    ExplicitGCInvokesConcurrent = true;
  }
  // Now expand the span and allocate the collection support structures
  // (MUT, marking bit map etc.) to cover both generations subject to
  // collection.

  // First check that _permGen is adjacent to _cmsGen and above it.
  assert(   _cmsGen->reserved().word_size()  > 0
         && _permGen->reserved().word_size() > 0,
         "generations should not be of zero size");
  assert(_cmsGen->reserved().intersection(_permGen->reserved()).is_empty(),
         "_cmsGen and _permGen should not overlap");
  assert(_cmsGen->reserved().end() == _permGen->reserved().start(),
         "_cmsGen->end() different from _permGen->start()");

  // For use by dirty card to oop closures.
  _cmsGen->cmsSpace()->set_collector(this);
  _permGen->cmsSpace()->set_collector(this);

  // Allocate MUT and marking bit map
  {
    MutexLockerEx x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag);
    if (!_markBitMap.allocate(_span)) {
      warning("Failed to allocate CMS Bit Map");
      return;
    }
    assert(_markBitMap.covers(_span), "_markBitMap inconsistency?");
  }
  {
    _modUnionTable.allocate(_span);
    assert(_modUnionTable.covers(_span), "_modUnionTable inconsistency?");
  }

  if (!_markStack.allocate(CMSMarkStackSize)) {
    warning("Failed to allocate CMS Marking Stack");
    return;
  }
  if (!_revisitStack.allocate(CMSRevisitStackSize)) {
    warning("Failed to allocate CMS Revisit Stack");
    return;
  }

  // Support for multi-threaded concurrent phases
  if (ParallelGCThreads > 0 && CMSConcurrentMTEnabled) {
    if (FLAG_IS_DEFAULT(ParallelCMSThreads)) {
      // just for now
      FLAG_SET_DEFAULT(ParallelCMSThreads, (ParallelGCThreads + 3)/4);
    }
    if (ParallelCMSThreads > 1) {
      _conc_workers = new YieldingFlexibleWorkGang("Parallel CMS Threads",
                                 ParallelCMSThreads, true);
      if (_conc_workers == NULL) {
        warning("GC/CMS: _conc_workers allocation failure: "
                "forcing -CMSConcurrentMTEnabled");
        CMSConcurrentMTEnabled = false;
      }
    } else {
      CMSConcurrentMTEnabled = false;
    }
  }
  if (!CMSConcurrentMTEnabled) {
    ParallelCMSThreads = 0;
  } else {
    // Turn off CMSCleanOnEnter optimization temporarily for
    // the MT case where it's not fixed yet; see 6178663.
    CMSCleanOnEnter = false;
  }
  assert((_conc_workers != NULL) == (ParallelCMSThreads > 1),
         "Inconsistency");

  // Parallel task queues; these are shared for the
  // concurrent and stop-world phases of CMS, but
  // are not shared with parallel scavenge (ParNew).
  {
    uint i;
    uint num_queues = (uint) MAX2(ParallelGCThreads, ParallelCMSThreads);

    if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled
         || ParallelRefProcEnabled)
        && num_queues > 0) {
      _task_queues = new OopTaskQueueSet(num_queues);
      if (_task_queues == NULL) {
        warning("task_queues allocation failure.");
        return;
      }
      _hash_seed = NEW_C_HEAP_ARRAY(int, num_queues);
      if (_hash_seed == NULL) {
        warning("_hash_seed array allocation failure");
        return;
      }

      // XXX use a global constant instead of 64!
      typedef struct OopTaskQueuePadded {
        OopTaskQueue work_queue;
        char pad[64 - sizeof(OopTaskQueue)];  // prevent false sharing
      } OopTaskQueuePadded;

      for (i = 0; i < num_queues; i++) {
        OopTaskQueuePadded *q_padded = new OopTaskQueuePadded();
        if (q_padded == NULL) {
          warning("work_queue allocation failure.");
          return;
        }
        _task_queues->register_queue(i, &q_padded->work_queue);
      }
      for (i = 0; i < num_queues; i++) {
        _task_queues->queue(i)->initialize();
        _hash_seed[i] = 17;  // copied from ParNew
      }
    }
  }

  _cmsGen ->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);
  _permGen->init_initiating_occupancy(CMSInitiatingPermOccupancyFraction, CMSTriggerPermRatio);

  // Clip CMSBootstrapOccupancy between 0 and 100.
  _bootstrap_occupancy = ((double)MIN2((uintx)100, MAX2((uintx)0, CMSBootstrapOccupancy)))
                         /(double)100;

  _full_gcs_since_conc_gc = 0;

  // Now tell CMS generations the identity of their collector
  ConcurrentMarkSweepGeneration::set_collector(this);

  // Create & start a CMS thread for this CMS collector
  _cmsThread = ConcurrentMarkSweepThread::start(this);
  assert(cmsThread() != NULL, "CMS Thread should have been created");
  assert(cmsThread()->collector() == this,
         "CMS Thread should refer to this gen");
  assert(CGC_lock != NULL, "Where's the CGC_lock?");

  // Support for parallelizing young gen rescan
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  _young_gen = gch->prev_gen(_cmsGen);
  if (gch->supports_inline_contig_alloc()) {
    _top_addr = gch->top_addr();
    _end_addr = gch->end_addr();
    assert(_young_gen != NULL, "no _young_gen");
    _eden_chunk_index = 0;
    _eden_chunk_capacity = (_young_gen->max_capacity()+CMSSamplingGrain)/CMSSamplingGrain;
    _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity);
    if (_eden_chunk_array == NULL) {
      _eden_chunk_capacity = 0;
      warning("GC/CMS: _eden_chunk_array allocation failure");
    }
  }
  assert(_eden_chunk_array != NULL || _eden_chunk_capacity == 0, "Error");

  // Support for parallelizing survivor space rescan
  if (CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) {
    size_t max_plab_samples = MaxNewSize/((SurvivorRatio+2)*MinTLABSize);
    _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads);
    _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, 2*max_plab_samples);
    _cursor               = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads);
    if (_survivor_plab_array == NULL || _survivor_chunk_array == NULL
        || _cursor == NULL) {
      warning("Failed to allocate survivor plab/chunk array");
      if (_survivor_plab_array  != NULL) {
        FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array);
        _survivor_plab_array = NULL;
      }
      if (_survivor_chunk_array != NULL) {
        FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array);
        _survivor_chunk_array = NULL;
      }
      if (_cursor != NULL) {
        FREE_C_HEAP_ARRAY(size_t, _cursor);
        _cursor = NULL;
      }
    } else {
      _survivor_chunk_capacity = 2*max_plab_samples;
      for (uint i = 0; i < ParallelGCThreads; i++) {
        HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples);
        if (vec == NULL) {
          warning("Failed to allocate survivor plab array");
          for (int j = i; j > 0; j--) {
            FREE_C_HEAP_ARRAY(HeapWord*, _survivor_plab_array[j-1].array());
          }
          FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array);
          FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array);
          _survivor_plab_array = NULL;
          _survivor_chunk_array = NULL;
          _survivor_chunk_capacity = 0;
          break;
        } else {
          ChunkArray* cur =
            ::new (&_survivor_plab_array[i]) ChunkArray(vec,
                                                        max_plab_samples);
          assert(cur->end() == 0, "Should be 0");
          assert(cur->array() == vec, "Should be vec");
          assert(cur->capacity() == max_plab_samples, "Error");
        }
      }
    }
  }
  assert(   (   _survivor_plab_array  != NULL
             && _survivor_chunk_array != NULL)
         || (   _survivor_chunk_capacity == 0
             && _survivor_chunk_index == 0),
         "Error");

  // Choose what strong roots should be scanned depending on verification options
  // and perm gen collection mode.
  if (!CMSClassUnloadingEnabled) {
    // If class unloading is disabled we want to include all classes into the root set.
    add_root_scanning_option(SharedHeap::SO_AllClasses);
  } else {
    add_root_scanning_option(SharedHeap::SO_SystemClasses);
  }

  NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
  _gc_counters = new CollectorCounters("CMS", 1);
  _completed_initialization = true;
  _sweep_timer.start();  // start of time
}

const char* ConcurrentMarkSweepGeneration::name() const {
  return "concurrent mark-sweep generation";
}
void ConcurrentMarkSweepGeneration::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}

// This is an optimized version of update_counters(); it takes the
// used value as a parameter rather than computing it.
//
void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
  if (UsePerfData) {
    _space_counters->update_used(used);
    _space_counters->update_capacity();
    _gen_counters->update_all();
  }
}

void ConcurrentMarkSweepGeneration::print() const {
  Generation::print();
  cmsSpace()->print();
}

#ifndef PRODUCT
void ConcurrentMarkSweepGeneration::print_statistics() {
  cmsSpace()->printFLCensus(0);
}
#endif

void ConcurrentMarkSweepGeneration::printOccupancy(const char *s) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (PrintGCDetails) {
    if (Verbose) {
      gclog_or_tty->print(" [%d %s-%s: "SIZE_FORMAT"("SIZE_FORMAT")]",
        level(), short_name(), s, used(), capacity());
    } else {
      gclog_or_tty->print(" [%d %s-%s: "SIZE_FORMAT"K("SIZE_FORMAT"K)]",
        level(), short_name(), s, used() / K, capacity() / K);
    }
  }
  if (Verbose) {
    gclog_or_tty->print(" "SIZE_FORMAT"("SIZE_FORMAT")",
              gch->used(), gch->capacity());
  } else {
    gclog_or_tty->print(" "SIZE_FORMAT"K("SIZE_FORMAT"K)",
              gch->used() / K, gch->capacity() / K);
  }
}

size_t
ConcurrentMarkSweepGeneration::contiguous_available() const {
  // dld proposes an improvement in precision here. If the committed
  // part of the space ends in a free block we should add that to
  // uncommitted size in the calculation below. Will make this
  // change later, staying with the approximation below for the
  // time being. -- ysr.
  return MAX2(_virtual_space.uncommitted_size(), unsafe_max_alloc_nogc());
}

size_t
ConcurrentMarkSweepGeneration::unsafe_max_alloc_nogc() const {
  return _cmsSpace->max_alloc_in_words() * HeapWordSize;
}

size_t ConcurrentMarkSweepGeneration::max_available() const {
  return free() + _virtual_space.uncommitted_size();
}

bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(
    size_t max_promotion_in_bytes,
    bool younger_handles_promotion_failure) const {

  // This is the most conservative test.  Full promotion is
  // guaranteed if this is used. The multiplicative factor is to
  // account for the worst case "dilatation".
  double adjusted_max_promo_bytes = _dilatation_factor * max_promotion_in_bytes;
  if (adjusted_max_promo_bytes > (double)max_uintx) { //  larger than size_t
    adjusted_max_promo_bytes = (double)max_uintx;
  }
  bool result = (max_contiguous_available() >= (size_t)adjusted_max_promo_bytes);

  if (younger_handles_promotion_failure && !result) {
    // Full promotion is not guaranteed because fragmentation
    // of the cms generation can prevent the full promotion.
    result = (max_available() >= (size_t)adjusted_max_promo_bytes);

    if (!result) {
      // With promotion failure handling the test for the ability
      // to support the promotion does not have to be guaranteed.
      // Use an average of the amount promoted.
      result = max_available() >= (size_t)
        gc_stats()->avg_promoted()->padded_average();
      if (PrintGC && Verbose && result) {
        gclog_or_tty->print_cr(
          "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe"
          " max_available: " SIZE_FORMAT
          " avg_promoted: " SIZE_FORMAT,
          max_available(), (size_t)
          gc_stats()->avg_promoted()->padded_average());
      }
    } else {
      if (PrintGC && Verbose) {
        gclog_or_tty->print_cr(
          "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe"
          " max_available: " SIZE_FORMAT
          " adj_max_promo_bytes: " SIZE_FORMAT,
          max_available(), (size_t)adjusted_max_promo_bytes);
      }
    }
  } else {
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr(
        "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe"
        " contiguous_available: " SIZE_FORMAT
        " adj_max_promo_bytes: " SIZE_FORMAT,
        max_contiguous_available(), (size_t)adjusted_max_promo_bytes);
    }
  }
  return result;
}
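
// Worked example (illustrative numbers): with _dilatation_factor = 2.0 and
// max_promotion_in_bytes = 10M, the conservative test requires
// max_contiguous_available() >= 20M; only if that fails (and the younger
// generation handles promotion failure) do we fall back to the weaker
// max_available() and padded-average tests.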

CompactibleSpace*
ConcurrentMarkSweepGeneration::first_compaction_space() const {
  return _cmsSpace;
}

void ConcurrentMarkSweepGeneration::reset_after_compaction() {
  // Clear the promotion information.  These pointers can be adjusted
  // along with all the other pointers into the heap but
  // compaction is expected to be a rare event with
  // a heap using cms so don't do it without seeing the need.
  if (ParallelGCThreads > 0) {
    for (uint i = 0; i < ParallelGCThreads; i++) {
      _par_gc_thread_states[i]->promo.reset();
    }
  }
}

void ConcurrentMarkSweepGeneration::space_iterate(SpaceClosure* blk, bool usedOnly) {
  blk->do_space(_cmsSpace);
}

void ConcurrentMarkSweepGeneration::compute_new_size() {
  assert_locked_or_safepoint(Heap_lock);

  // If incremental collection failed, we just want to expand
  // to the limit.
  if (incremental_collection_failed()) {
    clear_incremental_collection_failed();
    grow_to_reserved();
    return;
  }

  size_t expand_bytes = 0;
  double free_percentage = ((double) free()) / capacity();
  double desired_free_percentage = (double) MinHeapFreeRatio / 100;
  double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;

  // compute expansion delta needed for reaching desired free percentage
  if (free_percentage < desired_free_percentage) {
    size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
    assert(desired_capacity >= capacity(), "invalid expansion size");
    expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
  }
  if (expand_bytes > 0) {
    if (PrintGCDetails && Verbose) {
      size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
      gclog_or_tty->print_cr("\nFrom compute_new_size: ");
      gclog_or_tty->print_cr("  Free fraction %f", free_percentage);
      gclog_or_tty->print_cr("  Desired free fraction %f",
        desired_free_percentage);
      gclog_or_tty->print_cr("  Maximum free fraction %f",
        maximum_free_percentage);
      gclog_or_tty->print_cr("  Capacity "SIZE_FORMAT, capacity()/1000);
      gclog_or_tty->print_cr("  Desired capacity "SIZE_FORMAT,
        desired_capacity/1000);
      int prev_level = level() - 1;
      if (prev_level >= 0) {
        size_t prev_size = 0;
        GenCollectedHeap* gch = GenCollectedHeap::heap();
        Generation* prev_gen = gch->_gens[prev_level];
        prev_size = prev_gen->capacity();
        gclog_or_tty->print_cr("  Younger gen size "SIZE_FORMAT,
          prev_size/1000);
      }
      gclog_or_tty->print_cr("  unsafe_max_alloc_nogc "SIZE_FORMAT,
        unsafe_max_alloc_nogc()/1000);
      gclog_or_tty->print_cr("  contiguous available "SIZE_FORMAT,
        contiguous_available()/1000);
      gclog_or_tty->print_cr("  Expand by "SIZE_FORMAT" (bytes)",
        expand_bytes);
    }
    // safe if expansion fails
    expand(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr("  Expanded free fraction %f",
        ((double) free()) / capacity());
    }
  }
}

Mutex* ConcurrentMarkSweepGeneration::freelistLock() const {
  return cmsSpace()->freelistLock();
}

HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size,
                                                  bool   tlab) {
  CMSSynchronousYieldRequest yr;
  MutexLockerEx x(freelistLock(),
                  Mutex::_no_safepoint_check_flag);
  return have_lock_and_allocate(size, tlab);
}

HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
                                                  bool   tlab) {
  assert_lock_strong(freelistLock());
  size_t adjustedSize = CompactibleFreeListSpace::adjustObjectSize(size);
  HeapWord* res = cmsSpace()->allocate(adjustedSize);
  // Allocate the object live (grey) if the background collector has
  // started marking. This is necessary because the marker may
  // have passed this address and consequently this object will
  // not otherwise be greyed and would be incorrectly swept up.
  // Note that if this object contains references, the writing
  // of those references will dirty the card containing this object
  // allowing the object to be blackened (and its references scanned)
  // either during a preclean phase or at the final checkpoint.
  if (res != NULL) {
    collector()->direct_allocated(res, adjustedSize);
    _direct_allocated_words += adjustedSize;
    // allocation counters
    NOT_PRODUCT(
      _numObjectsAllocated++;
      _numWordsAllocated += (int)adjustedSize;
    )
  }
  return res;
}

// In the case of direct allocation by mutators in a generation that
// is being concurrently collected, the object must be allocated
// live (grey) if the background collector has started marking.
// This is necessary because the marker may
// have passed this address and consequently this object will
// not otherwise be greyed and would be incorrectly swept up.
// Note that if this object contains references, the writing
// of those references will dirty the card containing this object
// allowing the object to be blackened (and its references scanned)
// either during a preclean phase or at the final checkpoint.
void CMSCollector::direct_allocated(HeapWord* start, size_t size) {
  assert(_markBitMap.covers(start, size), "Out of bounds");
  if (_collectorState >= Marking) {
    MutexLockerEx y(_markBitMap.lock(),
                    Mutex::_no_safepoint_check_flag);
    // [see comments preceding SweepClosure::do_blk() below for details]
    // 1. need to mark the object as live so it isn't collected
    // 2. need to mark the 2nd bit to indicate the object may be uninitialized
    // 3. need to mark the end of the object so sweeper can skip over it
    //    if it's uninitialized when the sweeper reaches it.
    _markBitMap.mark(start);          // object is live
    _markBitMap.mark(start + 1);      // object is potentially uninitialized?
    _markBitMap.mark(start + size - 1);
                                      // mark end of object
  }
  // check that oop looks uninitialized
  assert(oop(start)->klass_or_null() == NULL, "_klass should be NULL");
}

void CMSCollector::promoted(bool par, HeapWord* start,
                            bool is_obj_array, size_t obj_size) {
  assert(_markBitMap.covers(start), "Out of bounds");
  // See comment in direct_allocated() about when objects should
  // be allocated live.
  if (_collectorState >= Marking) {
    // we already hold the marking bit map lock, taken in
    // the prologue
    if (par) {
      _markBitMap.par_mark(start);
    } else {
      _markBitMap.mark(start);
    }
    // We don't need to mark the object as uninitialized (as
    // in direct_allocated above) because this is being done with the
    // world stopped and the object will be initialized by the
    // time the sweeper gets to look at it.
    assert(SafepointSynchronize::is_at_safepoint(),
           "expect promotion only at safepoints");

    if (_collectorState < Sweeping) {
      // Mark the appropriate cards in the modUnionTable, so that
      // this object gets scanned before the sweep. If this is
      // not done, CMS generation references in the object might
      // not get marked.
      // For the case of arrays, which are otherwise precisely
      // marked, we need to dirty the entire array, not just its head.
      if (is_obj_array) {
        // The [par_]mark_range() method expects mr.end() below to
        // be aligned to the granularity of a bit's representation
        // in the heap. In the case of the MUT below, that's a
        // card size.
        MemRegion mr(start,
                     (HeapWord*)round_to((intptr_t)(start + obj_size),
                        CardTableModRefBS::card_size /* bytes */));
        if (par) {
          _modUnionTable.par_mark_range(mr);
        } else {
          _modUnionTable.mark_range(mr);
        }
      } else {  // not an obj array; we can just mark the head
        if (par) {
          _modUnionTable.par_mark(start);
        } else {
          _modUnionTable.mark(start);
        }
      }
    }
  }
}

static inline size_t percent_of_space(Space* space, HeapWord* addr)
{
  size_t delta = pointer_delta(addr, space->bottom());
  return (size_t)(delta * 100.0 / (space->capacity() / HeapWordSize));
}
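
// For example, if addr lies 30M words past bottom() in a 100M-word space,
// percent_of_space() returns 30, i.e. 30% of the space is below addr.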

void CMSCollector::icms_update_allocation_limits()
{
  Generation* gen0 = GenCollectedHeap::heap()->get_gen(0);
  EdenSpace* eden = gen0->as_DefNewGeneration()->eden();

  const unsigned int duty_cycle = stats().icms_update_duty_cycle();
  if (CMSTraceIncrementalPacing) {
    stats().print();
  }

  assert(duty_cycle <= 100, "invalid duty cycle");
  if (duty_cycle != 0) {
    // The duty_cycle is a percentage between 0 and 100; convert to words and
    // then compute the offset from the endpoints of the space.
    size_t free_words = eden->free() / HeapWordSize;
    double free_words_dbl = (double)free_words;
    size_t duty_cycle_words = (size_t)(free_words_dbl * duty_cycle / 100.0);
    size_t offset_words = (free_words - duty_cycle_words) / 2;

    _icms_start_limit = eden->top() + offset_words;
    _icms_stop_limit = eden->end() - offset_words;

    // The limits may be adjusted (shifted to the right) by
    // CMSIncrementalOffset, to allow the application more mutator time after a
    // young gen gc (when all mutators were stopped) and before CMS starts and
    // takes away one or more cpus.
    if (CMSIncrementalOffset != 0) {
      double adjustment_dbl = free_words_dbl * CMSIncrementalOffset / 100.0;
      size_t adjustment = (size_t)adjustment_dbl;
      HeapWord* tmp_stop = _icms_stop_limit + adjustment;
      if (tmp_stop > _icms_stop_limit && tmp_stop < eden->end()) {
        _icms_start_limit += adjustment;
        _icms_stop_limit = tmp_stop;
      }
    }
  }
  if (duty_cycle == 0 || (_icms_start_limit == _icms_stop_limit)) {
    _icms_start_limit = _icms_stop_limit = eden->end();
  }

  // Install the new start limit.
  eden->set_soft_end(_icms_start_limit);

  if (CMSTraceIncrementalMode) {
    gclog_or_tty->print(" icms alloc limits:  "
                           PTR_FORMAT "," PTR_FORMAT
                           " (" SIZE_FORMAT "%%," SIZE_FORMAT "%%) ",
                           _icms_start_limit, _icms_stop_limit,
                           percent_of_space(eden, _icms_start_limit),
                           percent_of_space(eden, _icms_stop_limit));
    if (Verbose) {
      gclog_or_tty->print("eden:  ");
      eden->print_on(gclog_or_tty);
    }
  }
}
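
// Worked example (illustrative numbers): with 1000 free words in eden and
// a duty cycle of 20%, duty_cycle_words = 200 and
// offset_words = (1000 - 200) / 2 = 400, so the icms window
// [top() + 400, end() - 400) covers the middle 200 words of the free
// space (before any CMSIncrementalOffset adjustment).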

// Any changes here should try to maintain the invariant
// that if this method is called with _icms_start_limit
// and _icms_stop_limit both NULL, then it should return NULL
// and not notify the icms thread.
HeapWord*
CMSCollector::allocation_limit_reached(Space* space, HeapWord* top,
                                       size_t word_size)
{
  // A start_limit equal to end() means the duty cycle is 0, so treat that as a
  // nop.
  if (CMSIncrementalMode && _icms_start_limit != space->end()) {
    if (top <= _icms_start_limit) {
      if (CMSTraceIncrementalMode) {
        space->print_on(gclog_or_tty);
        gclog_or_tty->stamp();
        gclog_or_tty->print_cr(" start limit top=" PTR_FORMAT
                               ", new limit=" PTR_FORMAT
                               " (" SIZE_FORMAT "%%)",
                               top, _icms_stop_limit,
                               percent_of_space(space, _icms_stop_limit));
      }
      ConcurrentMarkSweepThread::start_icms();
      assert(top < _icms_stop_limit, "Tautology");
      if (word_size < pointer_delta(_icms_stop_limit, top)) {
        return _icms_stop_limit;
      }

      // The allocation will cross both the _start and _stop limits, so do the
      // stop notification also and return end().
      if (CMSTraceIncrementalMode) {
        space->print_on(gclog_or_tty);
        gclog_or_tty->stamp();
        gclog_or_tty->print_cr(" +stop limit top=" PTR_FORMAT
                               ", new limit=" PTR_FORMAT
                               " (" SIZE_FORMAT "%%)",
                               top, space->end(),
                               percent_of_space(space, space->end()));
      }
      ConcurrentMarkSweepThread::stop_icms();
      return space->end();
    }

    if (top <= _icms_stop_limit) {
      if (CMSTraceIncrementalMode) {
        space->print_on(gclog_or_tty);
        gclog_or_tty->stamp();
        gclog_or_tty->print_cr(" stop limit top=" PTR_FORMAT
                               ", new limit=" PTR_FORMAT
                               " (" SIZE_FORMAT "%%)",
                               top, space->end(),
                               percent_of_space(space, space->end()));
      }
      ConcurrentMarkSweepThread::stop_icms();
      return space->end();
    }

    if (CMSTraceIncrementalMode) {
      space->print_on(gclog_or_tty);
      gclog_or_tty->stamp();
      gclog_or_tty->print_cr(" end limit top=" PTR_FORMAT
                             ", new limit=" PTR_FORMAT,
                             top, NULL);
    }
  }

  return NULL;
}

oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
  // allocate, copy and if necessary update promoinfo --
  // delegate to underlying space.
  assert_lock_strong(freelistLock());

#ifndef PRODUCT
  if (Universe::heap()->promotion_should_fail()) {
    return NULL;
  }
#endif  // #ifndef PRODUCT

  oop res = _cmsSpace->promote(obj, obj_size);
  if (res == NULL) {
    // expand and retry
    size_t s = _cmsSpace->expansionSpaceRequired(obj_size);  // HeapWords
    expand(s*HeapWordSize, MinHeapDeltaBytes,
      CMSExpansionCause::_satisfy_promotion);
    // Since there's currently no next generation, we don't try to promote
    // into a more senior generation.
    assert(next_gen() == NULL, "assumption, based upon which no attempt "
                               "is made to pass on a possibly failing "
                               "promotion to next generation");
    res = _cmsSpace->promote(obj, obj_size);
  }
  if (res != NULL) {
    // See comment in allocate() about when objects should
    // be allocated live.
    assert(obj->is_oop(), "Will dereference klass pointer below");
    collector()->promoted(false,           // Not parallel
                          (HeapWord*)res, obj->is_objArray(), obj_size);
    // promotion counters
    NOT_PRODUCT(
      _numObjectsPromoted++;
      _numWordsPromoted +=
        (int)(CompactibleFreeListSpace::adjustObjectSize(obj->size()));
    )
  }
  return res;
}

HeapWord*
ConcurrentMarkSweepGeneration::allocation_limit_reached(Space* space,
                                             HeapWord* top,
                                             size_t word_sz)
{
  return collector()->allocation_limit_reached(space, top, word_sz);
}

// Things to support parallel young-gen collection.
oop
ConcurrentMarkSweepGeneration::par_promote(int thread_num,
                                           oop old, markOop m,
                                           size_t word_sz) {
#ifndef PRODUCT
  if (Universe::heap()->promotion_should_fail()) {
    return NULL;
  }
#endif  // #ifndef PRODUCT

  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
  PromotionInfo* promoInfo = &ps->promo;
  // if we are tracking promotions, then first ensure space for
  // promotion (including spooling space for saving header if necessary).
  // then allocate and copy, then track promoted info if needed.
  // When tracking (see PromotionInfo::track()), the mark word may
  // be displaced and in this case restoration of the mark word
  // occurs in the (oop_since_save_marks_)iterate phase.
  if (promoInfo->tracking() && !promoInfo->ensure_spooling_space()) {
    // Out of space for allocating spooling buffers;
    // try expanding and allocating spooling buffers.
    if (!expand_and_ensure_spooling_space(promoInfo)) {
      return NULL;
    }
  }
  assert(promoInfo->has_spooling_space(), "Control point invariant");
  HeapWord* obj_ptr = ps->lab.alloc(word_sz);
  if (obj_ptr == NULL) {
     obj_ptr = expand_and_par_lab_allocate(ps, word_sz);
     if (obj_ptr == NULL) {
       return NULL;
     }
  }
  oop obj = oop(obj_ptr);
  assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
  // Otherwise, copy the object.  Here we must be careful to insert the
  // klass pointer last, since this marks the block as an allocated object.
  // Except with compressed oops it's the mark word.
  HeapWord* old_ptr = (HeapWord*)old;
  if (word_sz > (size_t)oopDesc::header_size()) {
    Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(),
                                 obj_ptr + oopDesc::header_size(),
                                 word_sz - oopDesc::header_size());
  }

  if (UseCompressedOops) {
    // Copy gap missed by (aligned) header size calculation above
    obj->set_klass_gap(old->klass_gap());
  }

  // Restore the mark word copied above.
  obj->set_mark(m);

  // Now we can track the promoted object, if necessary.  We take care
  // to delay the transition from uninitialized to full object
  // (i.e., insertion of klass pointer) until after, so that it
  // atomically becomes a promoted object.
  if (promoInfo->tracking()) {
    promoInfo->track((PromotedObject*)obj, old->klass());
  }

  // Finally, install the klass pointer (this should be volatile).
  obj->set_klass(old->klass());

  assert(old->is_oop(), "Will dereference klass ptr below");
  collector()->promoted(true,          // parallel
                        obj_ptr, old->is_objArray(), word_sz);

  NOT_PRODUCT(
    Atomic::inc(&_numObjectsPromoted);
    Atomic::add((jint)CompactibleFreeListSpace::adjustObjectSize(obj->size()),
                &_numWordsPromoted);
  )

  return obj;
}

void
ConcurrentMarkSweepGeneration::
par_promote_alloc_undo(int thread_num,
                       HeapWord* obj, size_t word_sz) {
  // CMS does not support promotion undo.
  ShouldNotReachHere();
}

void
ConcurrentMarkSweepGeneration::
par_promote_alloc_done(int thread_num) {
  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
  ps->lab.retire();
#if CFLS_LAB_REFILL_STATS
  if (thread_num == 0) {
    _cmsSpace->print_par_alloc_stats();
  }
#endif
}

void
ConcurrentMarkSweepGeneration::
par_oop_since_save_marks_iterate_done(int thread_num) {
  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
  ParScanWithoutBarrierClosure* dummy_cl = NULL;
  ps->promo.promoted_oops_iterate_nv(dummy_cl);
}

// XXXPERM
bool ConcurrentMarkSweepGeneration::should_collect(bool   full,
                                                   size_t size,
                                                   bool   tlab)
{
  // We allow a STW collection only if a full
  // collection was requested.
  return full || should_allocate(size, tlab); // FIX ME !!!
  // This and promotion failure handling are connected at the
  // hip and should be fixed by untying them.
}

bool CMSCollector::shouldConcurrentCollect() {
  if (_full_gc_requested) {
    assert(ExplicitGCInvokesConcurrent, "Unexpected state");
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print_cr("CMSCollector: collect because of explicit "
                             "gc request");
    }
    return true;
  }

  // For debugging purposes, change the type of collection.
  // If the rotation is not on the concurrent collection
  // type, don't start a concurrent collection.
  NOT_PRODUCT(
    if (RotateCMSCollectionTypes &&
        (_cmsGen->debug_collection_type() !=
         ConcurrentMarkSweepGeneration::Concurrent_collection_type)) {
      assert(_cmsGen->debug_collection_type() !=
        ConcurrentMarkSweepGeneration::Unknown_collection_type,
        "Bad cms collection type");
      return false;
    }
  )

  FreelistLocker x(this);
  // ------------------------------------------------------------------
  // Print out lots of information which affects the initiation of
  // a collection.
  if (PrintCMSInitiationStatistics && stats().valid()) {
    gclog_or_tty->print("CMSCollector shouldConcurrentCollect: ");
    gclog_or_tty->stamp();
    gclog_or_tty->print_cr("");
    stats().print_on(gclog_or_tty);
    gclog_or_tty->print_cr("time_until_cms_gen_full %3.7f",
      stats().time_until_cms_gen_full());
    gclog_or_tty->print_cr("free="SIZE_FORMAT, _cmsGen->free());
    gclog_or_tty->print_cr("contiguous_available="SIZE_FORMAT,
      _cmsGen->contiguous_available());
    gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate());
    gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate());
    gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy());
    gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
    gclog_or_tty->print_cr("initiatingPermOccupancy=%3.7f", _permGen->initiating_occupancy());
  }
  // ------------------------------------------------------------------

  // If the estimated time to complete a cms collection (cms_duration())
  // is less than the estimated time remaining until the cms generation
  // is full, start a collection.
  if (!UseCMSInitiatingOccupancyOnly) {
    if (stats().valid()) {
      if (stats().time_until_cms_start() == 0.0) {
        return true;
      }
    } else {
      // We want to conservatively collect somewhat early in order
      // to try and "bootstrap" our CMS/promotion statistics;
      // this branch will not fire after the first successful CMS
      // collection because the stats should then be valid.
      if (_cmsGen->occupancy() >= _bootstrap_occupancy) {
        if (Verbose && PrintGCDetails) {
          gclog_or_tty->print_cr(
            " CMSCollector: collect for bootstrapping statistics:"
            " occupancy = %f, boot occupancy = %f", _cmsGen->occupancy(),
            _bootstrap_occupancy);
        }
        return true;
      }
    }
  }

  // Otherwise, we start a collection cycle if either the perm gen or
  // old gen want a collection cycle started. Each may use
  // an appropriate criterion for making this decision.
  // XXX We need to make sure that the gen expansion
  // criterion dovetails well with this. XXX NEED TO FIX THIS
  if (_cmsGen->should_concurrent_collect()) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print_cr("CMS old gen initiated");
    }
    return true;
  }

  // We start a collection if we believe an incremental collection may fail;
  // this is not likely to be productive in practice because it's probably too
  // late anyway.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->collector_policy()->is_two_generation_policy(),
         "You may want to check the correctness of the following");
  if (gch->incremental_collection_will_fail()) {
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print("CMSCollector: collect because incremental collection will fail ");
    }
    return true;
  }

  if (CMSClassUnloadingEnabled && _permGen->should_concurrent_collect()) {
    bool res = update_should_unload_classes();
    if (res) {
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print_cr("CMS perm gen initiated");
      }
      return true;
    }
  }
  return false;
}
1506 // Clear _expansion_cause fields of constituent generations
1507 void CMSCollector::clear_expansion_cause() {
1508 _cmsGen->clear_expansion_cause();
1509 _permGen->clear_expansion_cause();
1510 }
1512 // We should be conservative in starting a collection cycle. Starting
1513 // too eagerly runs the risk of collecting too often in the
1514 // extreme; collecting too rarely falls back on full collections,
1515 // which works, even if not optimal in terms of concurrent work.
1516 // As a workaround for collecting too eagerly, use the flag
1517 // UseCMSInitiatingOccupancyOnly. This also has the advantage of
1518 // giving the user an easily understandable way of controlling the
1519 // collections.
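// For example (an illustrative setting, not a recommendation):
//   -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=75
// makes initiation depend solely on the occupancy tests below, starting
// a cycle once this generation is about 75% full instead of consulting
// the statistical predictors.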
1520 // We want to start a new collection cycle if any of the following
1521 // conditions hold:
1522 // . our current occupancy exceeds the configured initiating occupancy
1523 // for this generation, or
1524 // . we recently needed to expand this space and have not, since that
1525 // expansion, done a collection of this generation, or
1526 // . the underlying space believes that it may be a good idea to initiate
1527 // a concurrent collection (this may be based on criteria such as the
1528 // following: the space uses linear allocation and linear allocation is
1529 // going to fail, or there is believed to be excessive fragmentation in
1530 // the generation, etc... or ...
1531 // [.(currently done by CMSCollector::shouldConcurrentCollect() only for
1532 // the case of the old generation, not the perm generation; see CR 6543076):
1533 // we may be approaching a point at which allocation requests may fail because
1534 // we will be out of sufficient free space given allocation rate estimates.]
1535 bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const {
1537 assert_lock_strong(freelistLock());
1538 if (occupancy() > initiating_occupancy()) {
1539 if (PrintGCDetails && Verbose) {
1540 gclog_or_tty->print(" %s: collect because of occupancy %f / %f ",
1541 short_name(), occupancy(), initiating_occupancy());
1542 }
1543 return true;
1544 }
1545 if (UseCMSInitiatingOccupancyOnly) {
1546 return false;
1547 }
1548 if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
1549 if (PrintGCDetails && Verbose) {
1550 gclog_or_tty->print(" %s: collect because expanded for allocation ",
1551 short_name());
1552 }
1553 return true;
1554 }
1555 if (_cmsSpace->should_concurrent_collect()) {
1556 if (PrintGCDetails && Verbose) {
1557 gclog_or_tty->print(" %s: collect because cmsSpace says so ",
1558 short_name());
1559 }
1560 return true;
1561 }
1562 return false;
1563 }
1565 void ConcurrentMarkSweepGeneration::collect(bool full,
1566 bool clear_all_soft_refs,
1567 size_t size,
1568 bool tlab)
1569 {
1570 collector()->collect(full, clear_all_soft_refs, size, tlab);
1571 }
1573 void CMSCollector::collect(bool full,
1574 bool clear_all_soft_refs,
1575 size_t size,
1576 bool tlab)
1577 {
1578 if (!UseCMSCollectionPassing && _collectorState > Idling) {
1579 // For debugging purposes skip the collection if the state
1580 // is not currently idle
1581 if (TraceCMSState) {
1582 gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " skipped full:%d CMS state %d",
1583 Thread::current(), full, _collectorState);
1584 }
1585 return;
1586 }
1588 // The following "if" branch is present for defensive reasons.
1589 // In the current uses of this interface, it can be replaced with:
1590 // assert(!GC_locker::is_active(), "Can't be called otherwise");
1591 // But I am not placing that assert here to allow future
1592 // generality in invoking this interface.
1593 if (GC_locker::is_active()) {
1594 // A consistency test for GC_locker
1595 assert(GC_locker::needs_gc(), "Should have been set already");
1596 // Skip this foreground collection, instead
1597 // expanding the heap if necessary.
1598 // Need the free list locks for the call to free() in compute_new_size()
1599 compute_new_size();
1600 return;
1601 }
1602 acquire_control_and_collect(full, clear_all_soft_refs);
1603 _full_gcs_since_conc_gc++;
1605 }
1607 void CMSCollector::request_full_gc(unsigned int full_gc_count) {
1608 GenCollectedHeap* gch = GenCollectedHeap::heap();
1609 unsigned int gc_count = gch->total_full_collections();
1610 if (gc_count == full_gc_count) {
1611 MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
1612 _full_gc_requested = true;
1613 CGC_lock->notify(); // nudge CMS thread
1614 }
1615 }
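// (For context -- a sketch, not a specification: one path to this method
// is a System.gc() under -XX:+ExplicitGCInvokesConcurrent, where the
// requesting thread asks the CMS thread to run the cycle concurrently
// and then waits for the full-collection count to advance, instead of
// performing a stop-the-world collection itself.)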
1618 // The foreground and background collectors need to coordinate in order
1619 // to make sure that they do not mutually interfere with CMS collections.
1620 // When a background collection is active,
1621 // the foreground collector may need to take over (preempt) and
1622 // synchronously complete an ongoing collection. Depending on the
1623 // frequency of the background collections and the heap usage
1624 // of the application, this preemption can be rare or frequent.
1625 // There are only certain
1626 // points in the background collection at which the "collection-baton"
1627 // can be passed to the foreground collector.
1628 //
1629 // The foreground collector will wait for the baton before
1630 // starting any part of the collection. The foreground collector
1631 // will only wait at one location.
1632 //
1633 // The background collector will yield the baton before starting a new
1634 // phase of the collection (e.g., before initial marking, marking from roots,
1635 // precleaning, final re-mark, sweep etc.) This is normally done at the head
1636 // of the loop which switches the phases. The background collector does some
1637 // of the phases (initial mark, final re-mark) with the world stopped.
1638 // Because of locking involved in stopping the world,
1639 // the foreground collector should not block waiting for the background
1640 // collector when it is doing a stop-the-world phase. The background
1641 // collector will yield the baton at an additional point just before
1642 // it enters a stop-the-world phase. Once the world is stopped, the
1643 // background collector checks the phase of the collection. If the
1644 // phase has not changed, it proceeds with the collection. If the
1645 // phase has changed, it skips that phase of the collection. See
1646 // the comments on the use of the Heap_lock in collect_in_background().
1647 //
1648 // Variable used in baton passing.
1649 // _foregroundGCIsActive - Set to true by the foreground collector when
1650 // it wants the baton. The foreground clears it when it has finished
1651 // the collection.
1652 // _foregroundGCShouldWait - Set to true by the background collector
1653 // when it is running. The foreground collector waits while
1654 // _foregroundGCShouldWait is true.
1655 // CGC_lock - monitor used to protect access to the above variables
1656 // and to notify the foreground and background collectors.
1657 // _collectorState - current state of the CMS collection.
1658 //
1659 // The foreground collector
1660 // acquires the CGC_lock
1661 // sets _foregroundGCIsActive
1662 // waits on the CGC_lock for _foregroundGCShouldWait to be false
1663 // various locks acquired in preparation for the collection
1664 // are released so as not to block the background collector
1665 // that is in the midst of a collection
1666 // proceeds with the collection
1667 // clears _foregroundGCIsActive
1668 // returns
1669 //
1670 // The background collector in a loop iterating on the phases of the
1671 // collection
1672 // acquires the CGC_lock
1673 // sets _foregroundGCShouldWait
1674 // if _foregroundGCIsActive is set
1675 // clears _foregroundGCShouldWait, notifies CGC_lock
1676 // waits on CGC_lock for _foregroundGCIsActive to become false
1677 // and exits the loop.
1678 // otherwise
1679 // proceed with that phase of the collection
1680 // if the phase is a stop-the-world phase,
1681 // yield the baton once more just before enqueueing
1682 // the stop-world CMS operation (executed by the VM thread).
1683 // returns after all phases of the collection are done
1684 //
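// (For orientation: the foreground half of the protocol sketched above is
// implemented by acquire_control_and_collect() below; the background
// half -- setting _foregroundGCShouldWait and checking
// _foregroundGCIsActive -- lives in collect_in_background() and
// waitForForegroundGC().)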
1686 void CMSCollector::acquire_control_and_collect(bool full,
1687 bool clear_all_soft_refs) {
1688 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
1689 assert(!Thread::current()->is_ConcurrentGC_thread(),
1690 "shouldn't try to acquire control from self!");
1692 // Start the protocol for acquiring control of the
1693 // collection from the background collector (aka CMS thread).
1694 assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1695 "VM thread should have CMS token");
1696 // Remember the possibly interrupted state of an ongoing
1697 // concurrent collection
1698 CollectorState first_state = _collectorState;
1700 // Signal to a possibly ongoing concurrent collection that
1701 // we want to do a foreground collection.
1702 _foregroundGCIsActive = true;
1704 // Disable incremental mode during a foreground collection.
1705 ICMSDisabler icms_disabler;
1707 // release locks and wait for a notify from the background collector
1708 // releasing the locks is only necessary for phases which
1709 // do yields to improve the granularity of the collection.
1710 assert_lock_strong(bitMapLock());
1711 // We need to lock the Free list lock for the space that we are
1712 // currently collecting.
1713 assert(haveFreelistLocks(), "Must be holding free list locks");
1714 bitMapLock()->unlock();
1715 releaseFreelistLocks();
1716 {
1717 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1718 if (_foregroundGCShouldWait) {
1719 // We are going to be waiting for action for the CMS thread;
1720 // it had better not be gone (for instance at shutdown)!
1721 assert(ConcurrentMarkSweepThread::cmst() != NULL,
1722 "CMS thread must be running");
1723 // Wait here until the background collector gives us the go-ahead
1724 ConcurrentMarkSweepThread::clear_CMS_flag(
1725 ConcurrentMarkSweepThread::CMS_vm_has_token); // release token
1726 // Get a possibly blocked CMS thread going:
1727 // Note that we set _foregroundGCIsActive true above,
1728 // without protection of the CGC_lock.
1729 CGC_lock->notify();
1730 assert(!ConcurrentMarkSweepThread::vm_thread_wants_cms_token(),
1731 "Possible deadlock");
1732 while (_foregroundGCShouldWait) {
1733 // wait for notification
1734 CGC_lock->wait(Mutex::_no_safepoint_check_flag);
1735 // Possibility of delay/starvation here, since CMS token does
1736 // not know to give priority to VM thread? Actually, I think
1737 // there wouldn't be any delay/starvation, but the proof of
1738 // that "fact" (?) appears non-trivial. XXX 20011219YSR
1739 }
1740 ConcurrentMarkSweepThread::set_CMS_flag(
1741 ConcurrentMarkSweepThread::CMS_vm_has_token);
1742 }
1743 }
1744 // The CMS_token is already held. Get back the other locks.
1745 assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1746 "VM thread should have CMS token");
1747 getFreelistLocks();
1748 bitMapLock()->lock_without_safepoint_check();
1749 if (TraceCMSState) {
1750 gclog_or_tty->print_cr("CMS foreground collector has asked for control "
1751 INTPTR_FORMAT " with first state %d", Thread::current(), first_state);
1752 gclog_or_tty->print_cr(" gets control with state %d", _collectorState);
1753 }
1755 // Check if we need to do a compaction, or if not, whether
1756 // we need to start the mark-sweep from scratch.
1757 bool should_compact = false;
1758 bool should_start_over = false;
1759 decide_foreground_collection_type(clear_all_soft_refs,
1760 &should_compact, &should_start_over);
1762 NOT_PRODUCT(
1763 if (RotateCMSCollectionTypes) {
1764 if (_cmsGen->debug_collection_type() ==
1765 ConcurrentMarkSweepGeneration::MSC_foreground_collection_type) {
1766 should_compact = true;
1767 } else if (_cmsGen->debug_collection_type() ==
1768 ConcurrentMarkSweepGeneration::MS_foreground_collection_type) {
1769 should_compact = false;
1770 }
1771 }
1772 )
1774 if (PrintGCDetails && first_state > Idling) {
1775 GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause();
1776 if (GCCause::is_user_requested_gc(cause) ||
1777 GCCause::is_serviceability_requested_gc(cause)) {
1778 gclog_or_tty->print(" (concurrent mode interrupted)");
1779 } else {
1780 gclog_or_tty->print(" (concurrent mode failure)");
1781 }
1782 }
1784 if (should_compact) {
1785 // If the collection is being acquired from the background
1786 // collector, there may be references on the discovered
1787 // references lists that have NULL referents (being those
1788 // that were concurrently cleared by a mutator) or
1789 // that are no longer active (having been enqueued concurrently
1790 // by the mutator).
1791 // Scrub the list of those references because Mark-Sweep-Compact
1792 // code assumes referents are not NULL and that all discovered
1793 // Reference objects are active.
1794 ref_processor()->clean_up_discovered_references();
1796 do_compaction_work(clear_all_soft_refs);
1798 // Has the GC time limit been exceeded?
1799 check_gc_time_limit();
1801 } else {
1802 do_mark_sweep_work(clear_all_soft_refs, first_state,
1803 should_start_over);
1804 }
1805 // Reset the expansion cause, now that we just completed
1806 // a collection cycle.
1807 clear_expansion_cause();
1808 _foregroundGCIsActive = false;
1809 return;
1810 }
1812 void CMSCollector::check_gc_time_limit() {
1814 // Ignore explicit GC's. Exiting here does not set the flag and
1815 // does not reset the count. Updating of the averages for system
1816 // GC's is still controlled by UseAdaptiveSizePolicyWithSystemGC.
1817 GCCause::Cause gc_cause = GenCollectedHeap::heap()->gc_cause();
1818 if (GCCause::is_user_requested_gc(gc_cause) ||
1819 GCCause::is_serviceability_requested_gc(gc_cause)) {
1820 return;
1821 }
1823 // Calculate the fraction of the CMS generation that was freed during
1824 // the last collection.
1825 // Only consider the STW compacting cost for now.
1826 //
1827 // Note that the gc time limit test only works for the collections
1828 // of the young gen + tenured gen and not for collections of the
1829 // permanent gen. That is because the calculation of the space
1830 // freed by the collection is the free space in the young gen +
1831 // tenured gen.
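// Worked example (illustrative, with the usual defaults GCTimeLimit=98
// and GCHeapFreeLimit=2): if compacting_gc_cost() is 0.99 -- i.e. 99%
// of recent time spent in collection -- and only 1% of the CMS
// generation is free, then 99 > 98 and 1 < 2, so the checks below
// increment the gc time limit count.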
1833 double fraction_free =
1834 ((double)_cmsGen->free())/((double)_cmsGen->max_capacity());
1835 if ((100.0 * size_policy()->compacting_gc_cost()) >
1836 ((double) GCTimeLimit) &&
1837 ((fraction_free * 100) < GCHeapFreeLimit)) {
1838 size_policy()->inc_gc_time_limit_count();
1839 if (UseGCOverheadLimit &&
1840 (size_policy()->gc_time_limit_count() >
1841 AdaptiveSizePolicyGCTimeLimitThreshold)) {
1842 size_policy()->set_gc_time_limit_exceeded(true);
1843 // Avoid consecutive OOM due to the gc time limit by resetting
1844 // the counter.
1845 size_policy()->reset_gc_time_limit_count();
1846 if (PrintGCDetails) {
1847 gclog_or_tty->print_cr(" GC is exceeding overhead limit "
1848 "of %d%%", GCTimeLimit);
1849 }
1850 } else {
1851 if (PrintGCDetails) {
1852 gclog_or_tty->print_cr(" GC would exceed overhead limit "
1853 "of %d%%", GCTimeLimit);
1854 }
1855 }
1856 } else {
1857 size_policy()->reset_gc_time_limit_count();
1858 }
1859 }
1861 // Resize the perm generation and the tenured generation
1862 // after obtaining the free list locks for the
1863 // two generations.
1864 void CMSCollector::compute_new_size() {
1865 assert_locked_or_safepoint(Heap_lock);
1866 FreelistLocker z(this);
1867 _permGen->compute_new_size();
1868 _cmsGen->compute_new_size();
1869 }
1871 // A work method used by foreground collection to determine
1872 // what type of collection (compacting or not, continuing or fresh)
1873 // it should do.
1874 // NOTE: the intent is to make UseCMSCompactAtFullCollection
1875 // and CMSCompactWhenClearAllSoftRefs the default in the future
1876 // and do away with the flags after a suitable period.
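// (Illustrative tuning, assuming the flags keep the meanings used below:
//   -XX:+UseCMSCompactAtFullCollection -XX:CMSFullGCsBeforeCompaction=1
// compacts once at least one full collection has already occurred since
// the last concurrent cycle, and always for user-requested collections
// or when an incremental collection would fail.)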
1877 void CMSCollector::decide_foreground_collection_type(
1878 bool clear_all_soft_refs, bool* should_compact,
1879 bool* should_start_over) {
1880 // Normally, we'll compact only if the UseCMSCompactAtFullCollection
1881 // flag is set, and we have either requested a System.gc() or
1882 // the number of full gc's since the last concurrent cycle
1883 // has exceeded the threshold set by CMSFullGCsBeforeCompaction,
1884 // or if an incremental collection has failed
1885 GenCollectedHeap* gch = GenCollectedHeap::heap();
1886 assert(gch->collector_policy()->is_two_generation_policy(),
1887 "You may want to check the correctness of the following");
1888 // Inform cms gen if this was due to a partial collection failing.
1889 // The CMS gen may use this fact to determine its expansion policy.
1890 if (gch->incremental_collection_will_fail()) {
1891 assert(!_cmsGen->incremental_collection_failed(),
1892 "Should have been noticed, reacted to and cleared");
1893 _cmsGen->set_incremental_collection_failed();
1894 }
1895 *should_compact =
1896 UseCMSCompactAtFullCollection &&
1897 ((_full_gcs_since_conc_gc >= CMSFullGCsBeforeCompaction) ||
1898 GCCause::is_user_requested_gc(gch->gc_cause()) ||
1899 gch->incremental_collection_will_fail());
1900 *should_start_over = false;
1901 if (clear_all_soft_refs && !*should_compact) {
1902 // We are about to do a last ditch collection attempt
1903 // so it would normally make sense to do a compaction
1904 // to reclaim as much space as possible.
1905 if (CMSCompactWhenClearAllSoftRefs) {
1906 // Default: The rationale is that in this case either
1907 // we are past the final marking phase, in which case
1908 // we'd have to start over, or so little has been done
1909 // that there's little point in saving that work. Compaction
1910 // appears to be the sensible choice in either case.
1911 *should_compact = true;
1912 } else {
1913 // We have been asked to clear all soft refs, but not to
1914 // compact. Make sure that we aren't past the final checkpoint
1915 // phase, for that is where we process soft refs. If we are already
1916 // past that phase, we'll need to redo the refs discovery phase and
1917 // if necessary clear soft refs that weren't previously
1918 // cleared. We do so by remembering the phase in which
1919 // we came in, and if we are past the refs processing
1920 // phase, we'll choose to just redo the mark-sweep
1921 // collection from scratch.
1922 if (_collectorState > FinalMarking) {
1923 // We are past the refs processing phase;
1924 // start over and do a fresh synchronous CMS cycle
1925 _collectorState = Resetting; // skip to reset to start new cycle
1926 reset(false /* == !asynch */);
1927 *should_start_over = true;
1928 } // else we can continue a possibly ongoing current cycle
1929 }
1930 }
1931 }
1933 // A work method used by the foreground collector to do
1934 // a mark-sweep-compact.
1935 void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
1936 GenCollectedHeap* gch = GenCollectedHeap::heap();
1937 TraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, gclog_or_tty);
1938 if (PrintGC && Verbose && !(GCCause::is_user_requested_gc(gch->gc_cause()))) {
1939 gclog_or_tty->print_cr("Compact ConcurrentMarkSweepGeneration after %d "
1940 "collections passed to foreground collector", _full_gcs_since_conc_gc);
1941 }
1943 // Sample collection interval time and reset for collection pause.
1944 if (UseAdaptiveSizePolicy) {
1945 size_policy()->msc_collection_begin();
1946 }
1948 // Temporarily widen the span of the weak reference processing to
1949 // the entire heap.
1950 MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
1951 ReferenceProcessorSpanMutator x(ref_processor(), new_span);
1953 // Temporarily, clear the "is_alive_non_header" field of the
1954 // reference processor.
1955 ReferenceProcessorIsAliveMutator y(ref_processor(), NULL);
1957 // Temporarily make reference _processing_ single threaded (non-MT).
1958 ReferenceProcessorMTProcMutator z(ref_processor(), false);
1960 // Temporarily make refs discovery atomic
1961 ReferenceProcessorAtomicMutator w(ref_processor(), true);
1963 ref_processor()->set_enqueuing_is_done(false);
1964 ref_processor()->enable_discovery();
1965 ref_processor()->setup_policy(clear_all_soft_refs);
1966 // If an asynchronous collection finishes, the _modUnionTable is
1967 // all clear. If we are taking over the collection from an asynchronous
1968 // collection, clear the _modUnionTable.
1969 assert(_collectorState != Idling || _modUnionTable.isAllClear(),
1970 "_modUnionTable should be clear if the baton was not passed");
1971 _modUnionTable.clear_all();
1973 // We must adjust the allocation statistics being maintained
1974 // in the free list space. We do so by reading and clearing
1975 // the sweep timer and updating the block flux rate estimates below.
1976 assert(_sweep_timer.is_active(), "We should never see the timer inactive");
1977 _sweep_timer.stop();
1978 // Note that we do not use this sample to update the _sweep_estimate.
1979 _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_sweep_timer.seconds()),
1980 _sweep_estimate.padded_average());
1982 GenMarkSweep::invoke_at_safepoint(_cmsGen->level(),
1983 ref_processor(), clear_all_soft_refs);
1984 #ifdef ASSERT
1985 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
1986 size_t free_size = cms_space->free();
1987 assert(free_size ==
1988 pointer_delta(cms_space->end(), cms_space->compaction_top())
1989 * HeapWordSize,
1990 "All the free space should be compacted into one chunk at top");
1991 assert(cms_space->dictionary()->totalChunkSize(
1992 debug_only(cms_space->freelistLock())) == 0 ||
1993 cms_space->totalSizeInIndexedFreeLists() == 0,
1994 "All the free space should be in a single chunk");
1995 size_t num = cms_space->totalCount();
1996 assert((free_size == 0 && num == 0) ||
1997 (free_size > 0 && (num == 1 || num == 2)),
1998 "There should be at most 2 free chunks after compaction");
1999 #endif // ASSERT
2000 _collectorState = Resetting;
2001 assert(_restart_addr == NULL,
2002 "Should have been NULL'd before baton was passed");
2003 reset(false /* == !asynch */);
2004 _cmsGen->reset_after_compaction();
2005 _concurrent_cycles_since_last_unload = 0;
2007 if (verifying() && !should_unload_classes()) {
2008 perm_gen_verify_bit_map()->clear_all();
2009 }
2011 // Clear any data recorded in the PLAB chunk arrays.
2012 if (_survivor_plab_array != NULL) {
2013 reset_survivor_plab_arrays();
2014 }
2016 // Adjust the per-size allocation stats for the next epoch.
2017 _cmsGen->cmsSpace()->endSweepFLCensus(sweepCount() /* fake */);
2018 // Restart the "sweep timer" for next epoch.
2019 _sweep_timer.reset();
2020 _sweep_timer.start();
2022 // Sample collection pause time and reset for collection interval.
2023 if (UseAdaptiveSizePolicy) {
2024 size_policy()->msc_collection_end(gch->gc_cause());
2025 }
2027 // For a mark-sweep-compact, compute_new_size() will be called
2028 // in the heap's do_collection() method.
2029 }
2031 // A work method used by the foreground collector to do
2032 // a mark-sweep, after taking over from a possibly on-going
2033 // concurrent mark-sweep collection.
2034 void CMSCollector::do_mark_sweep_work(bool clear_all_soft_refs,
2035 CollectorState first_state, bool should_start_over) {
2036 if (PrintGC && Verbose) {
2037 gclog_or_tty->print_cr("Pass concurrent collection to foreground "
2038 "collector with count %d",
2039 _full_gcs_since_conc_gc);
2040 }
2041 switch (_collectorState) {
2042 case Idling:
2043 if (first_state == Idling || should_start_over) {
2044 // The background GC was not active, or should be
2045 // restarted from scratch; start the cycle.
2046 _collectorState = InitialMarking;
2047 }
2048 // If first_state was not Idling, then a background GC
2049 // was in progress and has now finished. No need to do it
2050 // again. Leave the state as Idling.
2051 break;
2052 case Precleaning:
2053 // In the foreground case don't do the precleaning since
2054 // it is not done concurrently and there is extra work
2055 // required.
2056 _collectorState = FinalMarking;
2057 }
2058 if (PrintGCDetails &&
2059 (_collectorState > Idling ||
2060 !GCCause::is_user_requested_gc(GenCollectedHeap::heap()->gc_cause()))) {
2061 gclog_or_tty->print(" (concurrent mode failure)");
2062 }
2063 collect_in_foreground(clear_all_soft_refs);
2065 // For a mark-sweep, compute_new_size() will be called
2066 // in the heap's do_collection() method.
2067 }
2070 void CMSCollector::getFreelistLocks() const {
2071 // Get locks for all free lists in all generations that this
2072 // collector is responsible for
2073 _cmsGen->freelistLock()->lock_without_safepoint_check();
2074 _permGen->freelistLock()->lock_without_safepoint_check();
2075 }
2077 void CMSCollector::releaseFreelistLocks() const {
2078 // Release locks for all free lists in all generations that this
2079 // collector is responsible for
2080 _cmsGen->freelistLock()->unlock();
2081 _permGen->freelistLock()->unlock();
2082 }
2084 bool CMSCollector::haveFreelistLocks() const {
2085 // Check locks for all free lists in all generations that this
2086 // collector is responsible for
2087 assert_lock_strong(_cmsGen->freelistLock());
2088 assert_lock_strong(_permGen->freelistLock());
2089 PRODUCT_ONLY(ShouldNotReachHere());
2090 return true;
2091 }
2093 // A utility class that is used by the CMS collector to
2094 // temporarily "release" the foreground collector from its
2095 // usual obligation to wait for the background collector to
2096 // complete an ongoing phase before proceeding.
2097 class ReleaseForegroundGC: public StackObj {
2098 private:
2099 CMSCollector* _c;
2100 public:
2101 ReleaseForegroundGC(CMSCollector* c) : _c(c) {
2102 assert(_c->_foregroundGCShouldWait, "Else should not need to call");
2103 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2104 // allow a potentially blocked foreground collector to proceed
2105 _c->_foregroundGCShouldWait = false;
2106 if (_c->_foregroundGCIsActive) {
2107 CGC_lock->notify();
2108 }
2109 assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2110 "Possible deadlock");
2111 }
2113 ~ReleaseForegroundGC() {
2114 assert(!_c->_foregroundGCShouldWait, "Usage protocol violation?");
2115 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2116 _c->_foregroundGCShouldWait = true;
2117 }
2118 };
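// A typical use, mirroring the stop-the-world phases driven from
// collect_in_background() below (a sketch of existing usage, not new API):
//   {
//     ReleaseForegroundGC x(this);    // let a blocked FG collector run
//     VM_CMS_Initial_Mark op(this);   // world-stopping phase
//     VMThread::execute(&op);
//   }                                 // destructor re-blocks the FG collector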
2120 // There are separate collect_in_background and collect_in_foreground because of
2121 // the different locking requirements of the background collector and the
2122 // foreground collector. There was originally an attempt to share
2123 // one "collect" method between the background collector and the foreground
2124 // collector, but the if-then-else logic required made it cleaner to
2125 // have separate methods.
2126 void CMSCollector::collect_in_background(bool clear_all_soft_refs) {
2127 assert(Thread::current()->is_ConcurrentGC_thread(),
2128 "A CMS asynchronous collection is only allowed on a CMS thread.");
2130 GenCollectedHeap* gch = GenCollectedHeap::heap();
2131 {
2132 bool safepoint_check = Mutex::_no_safepoint_check_flag;
2133 MutexLockerEx hl(Heap_lock, safepoint_check);
2134 FreelistLocker fll(this);
2135 MutexLockerEx x(CGC_lock, safepoint_check);
2136 if (_foregroundGCIsActive || !UseAsyncConcMarkSweepGC) {
2137 // The foreground collector is active or we're
2138 // not using asynchronous collections. Skip this
2139 // background collection.
2140 assert(!_foregroundGCShouldWait, "Should be clear");
2141 return;
2142 } else {
2143 assert(_collectorState == Idling, "Should be idling before start.");
2144 _collectorState = InitialMarking;
2145 // Reset the expansion cause, now that we are about to begin
2146 // a new cycle.
2147 clear_expansion_cause();
2148 }
2149 // Decide if we want to enable class unloading as part of the
2150 // ensuing concurrent GC cycle.
2151 update_should_unload_classes();
2152 _full_gc_requested = false; // acks all outstanding full gc requests
2153 // Signal that we are about to start a collection
2154 gch->increment_total_full_collections(); // ... starting a collection cycle
2155 _collection_count_start = gch->total_full_collections();
2156 }
2158 // Used for PrintGC
2159 size_t prev_used;
2160 if (PrintGC && Verbose) {
2161 prev_used = _cmsGen->used(); // XXXPERM
2162 }
2164 // The change of the collection state is normally done at this level;
2165 // the exceptions are phases that are executed while the world is
2166 // stopped. For those phases the change of state is done while the
2167 // world is stopped. For baton passing purposes this allows the
2168 // background collector to finish the phase and change state atomically.
2169 // The foreground collector cannot wait on a phase that is done
2170 // while the world is stopped because the foreground collector already
2171 // has the world stopped and would deadlock.
2172 while (_collectorState != Idling) {
2173 if (TraceCMSState) {
2174 gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
2175 Thread::current(), _collectorState);
2176 }
2177 // The foreground collector
2178 // holds the Heap_lock throughout its collection.
2179 // holds the CMS token (but not the lock)
2180 // except while it is waiting for the background collector to yield.
2181 //
2182 // The foreground collector should be blocked (not for long)
2183 // if the background collector is about to start a phase
2184 // executed with world stopped. If the background
2185 // collector has already started such a phase, the
2186 // foreground collector is blocked waiting for the
2187 // Heap_lock. The stop-world phases (InitialMarking and FinalMarking)
2188 // are executed in the VM thread.
2189 //
2190 // The locking order is
2191 // PendingListLock (PLL) -- if applicable (FinalMarking)
2192 // Heap_lock (both this & PLL locked in VM_CMS_Operation::prologue())
2193 // CMS token (claimed in
2194 // stop_world_and_do() -->
2195 // safepoint_synchronize() -->
2196 // CMSThread::synchronize())
2198 {
2199 // Check if the FG collector wants us to yield.
2200 CMSTokenSync x(true); // is cms thread
2201 if (waitForForegroundGC()) {
2202 // We yielded to a foreground GC, nothing more to be
2203 // done this round.
2204 assert(_foregroundGCShouldWait == false, "We set it to false in "
2205 "waitForForegroundGC()");
2206 if (TraceCMSState) {
2207 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2208 " exiting collection CMS state %d",
2209 Thread::current(), _collectorState);
2210 }
2211 return;
2212 } else {
2213 // The background collector can run but check to see if the
2214 // foreground collector has done a collection while the
2215 // background collector was waiting to get the CGC_lock
2216 // above. If yes, break so that _foregroundGCShouldWait
2217 // is cleared before returning.
2218 if (_collectorState == Idling) {
2219 break;
2220 }
2221 }
2222 }
2224 assert(_foregroundGCShouldWait, "Foreground collector, if active, "
2225 "should be waiting");
2227 switch (_collectorState) {
2228 case InitialMarking:
2229 {
2230 ReleaseForegroundGC x(this);
2231 stats().record_cms_begin();
2233 VM_CMS_Initial_Mark initial_mark_op(this);
2234 VMThread::execute(&initial_mark_op);
2235 }
2236 // The collector state may be any legal state at this point
2237 // since the background collector may have yielded to the
2238 // foreground collector.
2239 break;
2240 case Marking:
2241 // initial marking in checkpointRootsInitialWork has been completed
2242 if (markFromRoots(true)) { // we were successful
2243 assert(_collectorState == Precleaning, "Collector state should "
2244 "have changed");
2245 } else {
2246 assert(_foregroundGCIsActive, "Internal state inconsistency");
2247 }
2248 break;
2249 case Precleaning:
2250 if (UseAdaptiveSizePolicy) {
2251 size_policy()->concurrent_precleaning_begin();
2252 }
2253 // marking from roots in markFromRoots has been completed
2254 preclean();
2255 if (UseAdaptiveSizePolicy) {
2256 size_policy()->concurrent_precleaning_end();
2257 }
2258 assert(_collectorState == AbortablePreclean ||
2259 _collectorState == FinalMarking,
2260 "Collector state should have changed");
2261 break;
2262 case AbortablePreclean:
2263 if (UseAdaptiveSizePolicy) {
2264 size_policy()->concurrent_phases_resume();
2265 }
2266 abortable_preclean();
2267 if (UseAdaptiveSizePolicy) {
2268 size_policy()->concurrent_precleaning_end();
2269 }
2270 assert(_collectorState == FinalMarking, "Collector state should "
2271 "have changed");
2272 break;
2273 case FinalMarking:
2274 {
2275 ReleaseForegroundGC x(this);
2277 VM_CMS_Final_Remark final_remark_op(this);
2278 VMThread::execute(&final_remark_op);
2279 }
2280 assert(_foregroundGCShouldWait, "block post-condition");
2281 break;
2282 case Sweeping:
2283 if (UseAdaptiveSizePolicy) {
2284 size_policy()->concurrent_sweeping_begin();
2285 }
2286 // final marking in checkpointRootsFinal has been completed
2287 sweep(true);
2288 assert(_collectorState == Resizing, "Collector state change "
2289 "to Resizing must be done under the free_list_lock");
2290 _full_gcs_since_conc_gc = 0;
2292 // Stop the timers for adaptive size policy for the concurrent phases
2293 if (UseAdaptiveSizePolicy) {
2294 size_policy()->concurrent_sweeping_end();
2295 size_policy()->concurrent_phases_end(gch->gc_cause(),
2296 gch->prev_gen(_cmsGen)->capacity(),
2297 _cmsGen->free());
2298 }
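// Note: no break here -- sweep() leaves _collectorState == Resizing
// (asserted above), so control intentionally falls through into the
// Resizing case.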
2300 case Resizing: {
2301 // Sweeping has been completed...
2302 // At this point the background collection has completed.
2303 // Don't move the call to compute_new_size() down
2304 // into code that might be executed if the background
2305 // collection was preempted.
2306 {
2307 ReleaseForegroundGC x(this); // unblock FG collection
2308 MutexLockerEx y(Heap_lock, Mutex::_no_safepoint_check_flag);
2309 CMSTokenSync z(true); // not strictly needed.
2310 if (_collectorState == Resizing) {
2311 compute_new_size();
2312 _collectorState = Resetting;
2313 } else {
2314 assert(_collectorState == Idling, "The state should only change"
2315 " because the foreground collector has finished the collection");
2316 }
2317 }
2318 break;
2319 }
2320 case Resetting:
2321 // CMS heap resizing has been completed
2322 reset(true);
2323 assert(_collectorState == Idling, "Collector state should "
2324 "have changed");
2325 stats().record_cms_end();
2326 // Don't move the concurrent_phases_end() and compute_new_size()
2327 // calls to here because a preempted background collection
2328 // has its state set to "Resetting".
2329 break;
2330 case Idling:
2331 default:
2332 ShouldNotReachHere();
2333 break;
2334 }
2335 if (TraceCMSState) {
2336 gclog_or_tty->print_cr(" Thread " INTPTR_FORMAT " done - next CMS state %d",
2337 Thread::current(), _collectorState);
2338 }
2339 assert(_foregroundGCShouldWait, "block post-condition");
2340 }
2342 // Should this be in gc_epilogue?
2343 collector_policy()->counters()->update_counters();
2345 {
2346 // Clear _foregroundGCShouldWait and, in the event that the
2347 // foreground collector is waiting, notify it, before
2348 // returning.
2349 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2350 _foregroundGCShouldWait = false;
2351 if (_foregroundGCIsActive) {
2352 CGC_lock->notify();
2353 }
2354 assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2355 "Possible deadlock");
2356 }
2357 if (TraceCMSState) {
2358 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2359 " exiting collection CMS state %d",
2360 Thread::current(), _collectorState);
2361 }
2362 if (PrintGC && Verbose) {
2363 _cmsGen->print_heap_change(prev_used);
2364 }
2365 }
2367 void CMSCollector::collect_in_foreground(bool clear_all_soft_refs) {
2368 assert(_foregroundGCIsActive && !_foregroundGCShouldWait,
2369 "Foreground collector should be waiting, not executing");
2370 assert(Thread::current()->is_VM_thread(), "A foreground collection "
2371 "may only be done by the VM Thread with the world stopped");
2372 assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
2373 "VM thread should have CMS token");
2375 NOT_PRODUCT(TraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose,
2376 true, gclog_or_tty);)
2377 if (UseAdaptiveSizePolicy) {
2378 size_policy()->ms_collection_begin();
2379 }
2380 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
2382 HandleMark hm; // Discard invalid handles created during verification
2384 if (VerifyBeforeGC &&
2385 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2386 Universe::verify(true);
2387 }
2389 // Snapshot the soft reference policy to be used in this collection cycle.
2390 ref_processor()->setup_policy(clear_all_soft_refs);
2392 bool init_mark_was_synchronous = false; // until proven otherwise
2393 while (_collectorState != Idling) {
2394 if (TraceCMSState) {
2395 gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
2396 Thread::current(), _collectorState);
2397 }
2398 switch (_collectorState) {
2399 case InitialMarking:
2400 init_mark_was_synchronous = true; // fact to be exploited in re-mark
2401 checkpointRootsInitial(false);
2402 assert(_collectorState == Marking, "Collector state should have changed"
2403 " within checkpointRootsInitial()");
2404 break;
2405 case Marking:
2406 // initial marking in checkpointRootsInitialWork has been completed
2407 if (VerifyDuringGC &&
2408 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2409 gclog_or_tty->print("Verify before initial mark: ");
2410 Universe::verify(true);
2411 }
2412 {
2413 bool res = markFromRoots(false);
2414 assert(res && _collectorState == FinalMarking, "Collector state should "
2415 "have changed");
2416 break;
2417 }
2418 case FinalMarking:
2419 if (VerifyDuringGC &&
2420 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2421 gclog_or_tty->print("Verify before re-mark: ");
2422 Universe::verify(true);
2423 }
2424 checkpointRootsFinal(false, clear_all_soft_refs,
2425 init_mark_was_synchronous);
2426 assert(_collectorState == Sweeping, "Collector state should not "
2427 "have changed within checkpointRootsFinal()");
2428 break;
2429 case Sweeping:
2430 // final marking in checkpointRootsFinal has been completed
2431 if (VerifyDuringGC &&
2432 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2433 gclog_or_tty->print("Verify before sweep: ");
2434 Universe::verify(true);
2435 }
2436 sweep(false);
2437 assert(_collectorState == Resizing, "Incorrect state");
2438 break;
2439 case Resizing: {
2440 // Sweeping has been completed; the actual resize in this case
2441 // is done separately; nothing to be done in this state.
2442 _collectorState = Resetting;
2443 break;
2444 }
2445 case Resetting:
2446 // The heap has been resized.
2447 if (VerifyDuringGC &&
2448 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2449 gclog_or_tty->print("Verify before reset: ");
2450 Universe::verify(true);
2451 }
2452 reset(false);
2453 assert(_collectorState == Idling, "Collector state should "
2454 "have changed");
2455 break;
2456 case Precleaning:
2457 case AbortablePreclean:
2458 // Elide the preclean phase
2459 _collectorState = FinalMarking;
2460 break;
2461 default:
2462 ShouldNotReachHere();
2463 }
2464 if (TraceCMSState) {
2465 gclog_or_tty->print_cr(" Thread " INTPTR_FORMAT " done - next CMS state %d",
2466 Thread::current(), _collectorState);
2467 }
2468 }
2470 if (UseAdaptiveSizePolicy) {
2471 GenCollectedHeap* gch = GenCollectedHeap::heap();
2472 size_policy()->ms_collection_end(gch->gc_cause());
2473 }
2475 if (VerifyAfterGC &&
2476 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2477 Universe::verify(true);
2478 }
2479 if (TraceCMSState) {
2480 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2481 " exiting collection CMS state %d",
2482 Thread::current(), _collectorState);
2483 }
2484 }
2486 bool CMSCollector::waitForForegroundGC() {
2487 bool res = false;
2488 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2489 "CMS thread should have CMS token");
2490 // Block the foreground collector until the
2491 // background collector decides whether to
2492 // yield.
2493 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2494 _foregroundGCShouldWait = true;
2495 if (_foregroundGCIsActive) {
2496 // The background collector yields to the
2497 // foreground collector and returns a value
2498 // indicating that it has yielded. The foreground
2499 // collector can proceed.
2500 res = true;
2501 _foregroundGCShouldWait = false;
2502 ConcurrentMarkSweepThread::clear_CMS_flag(
2503 ConcurrentMarkSweepThread::CMS_cms_has_token);
2504 ConcurrentMarkSweepThread::set_CMS_flag(
2505 ConcurrentMarkSweepThread::CMS_cms_wants_token);
2506 // Get a possibly blocked foreground thread going
2507 CGC_lock->notify();
2508 if (TraceCMSState) {
2509 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " waiting at CMS state %d",
2510 Thread::current(), _collectorState);
2511 }
2512 while (_foregroundGCIsActive) {
2513 CGC_lock->wait(Mutex::_no_safepoint_check_flag);
2514 }
2515 ConcurrentMarkSweepThread::set_CMS_flag(
2516 ConcurrentMarkSweepThread::CMS_cms_has_token);
2517 ConcurrentMarkSweepThread::clear_CMS_flag(
2518 ConcurrentMarkSweepThread::CMS_cms_wants_token);
2519 }
2520 if (TraceCMSState) {
2521 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " continuing at CMS state %d",
2522 Thread::current(), _collectorState);
2523 }
2524 return res;
2525 }
2527 // Because of the need to lock the free lists and other structures in
2528 // the collector, common to all the generations that the collector is
2529 // collecting, we need the gc_prologues of individual CMS generations
2530 // to delegate to their collector. It may have been simpler had the
2531 // current infrastructure allowed one to call a prologue on a
2532 // collector. In the absence of that we have the generation's
2533 // prologue delegate to the collector, which delegates back
2534 // some "local" work to a worker method in the individual generations
2535 // that it's responsible for collecting, while itself doing any
2536 // work common to all generations it's responsible for. A similar
2537 // comment applies to the gc_epilogue()'s.
2538 // The role of the variable _between_prologue_and_epilogue is to
2539 // enforce the invocation protocol.
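// Sketch of the resulting call structure (illustrative):
//   GenCollectedHeap (world stopped)
//     -> ConcurrentMarkSweepGeneration::gc_prologue(full)
//        -> CMSCollector::gc_prologue(full)     // shared work, done once
//           -> _cmsGen->gc_prologue_work(...)   // per-generation work
//           -> _permGen->gc_prologue_work(...)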
2540 void CMSCollector::gc_prologue(bool full) {
2541 // Call gc_prologue_work() for each CMSGen and PermGen that
2542 // we are responsible for.
2544 // The following locking discipline assumes that we are only called
2545 // when the world is stopped.
2546 assert(SafepointSynchronize::is_at_safepoint(), "world is stopped assumption");
2548 // The CMSCollector prologue must call the gc_prologues for the
2549 // "generations" (including PermGen if any) that it's responsible
2550 // for.
2552 assert( Thread::current()->is_VM_thread()
2553 || ( CMSScavengeBeforeRemark
2554 && Thread::current()->is_ConcurrentGC_thread()),
2555 "Incorrect thread type for prologue execution");
2557 if (_between_prologue_and_epilogue) {
2558 // We have already been invoked; this is a gc_prologue delegation
2559 // from yet another CMS generation that we are responsible for, just
2560 // ignore it since all relevant work has already been done.
2561 return;
2562 }
2564 // set a bit saying prologue has been called; cleared in epilogue
2565 _between_prologue_and_epilogue = true;
2566 // Claim locks for common data structures, then call gc_prologue_work()
2567 // for each CMSGen and PermGen that we are responsible for.
2569 getFreelistLocks(); // gets free list locks on constituent spaces
2570 bitMapLock()->lock_without_safepoint_check();
2572 // Should call gc_prologue_work() for all cms gens we are responsible for
2573 bool registerClosure = _collectorState >= Marking
2574 && _collectorState < Sweeping;
2575 ModUnionClosure* muc = ParallelGCThreads > 0 ? &_modUnionClosurePar
2576 : &_modUnionClosure;
2577 _cmsGen->gc_prologue_work(full, registerClosure, muc);
2578 _permGen->gc_prologue_work(full, registerClosure, muc);
2580 if (!full) {
2581 stats().record_gc0_begin();
2582 }
2583 }
2585 void ConcurrentMarkSweepGeneration::gc_prologue(bool full) {
2586 // Delegate to CMScollector which knows how to coordinate between
2587 // this and any other CMS generations that it is responsible for
2588 // collecting.
2589 collector()->gc_prologue(full);
2590 }
2592 // This is a "private" interface for use by this generation's CMSCollector.
2593 // Not to be called directly by any other entity (for instance,
2594 // GenCollectedHeap, which calls the "public" gc_prologue method above).
2595 void ConcurrentMarkSweepGeneration::gc_prologue_work(bool full,
2596 bool registerClosure, ModUnionClosure* modUnionClosure) {
2597 assert(!incremental_collection_failed(), "Shouldn't be set yet");
2598 assert(cmsSpace()->preconsumptionDirtyCardClosure() == NULL,
2599 "Should be NULL");
2600 if (registerClosure) {
2601 cmsSpace()->setPreconsumptionDirtyCardClosure(modUnionClosure);
2602 }
2603 cmsSpace()->gc_prologue();
2604 // Clear stat counters
2605 NOT_PRODUCT(
2606 assert(_numObjectsPromoted == 0, "check");
2607 assert(_numWordsPromoted == 0, "check");
2608 if (Verbose && PrintGC) {
2609 gclog_or_tty->print("Allocated "SIZE_FORMAT" objects, "
2610 SIZE_FORMAT" bytes concurrently",
2611 _numObjectsAllocated, _numWordsAllocated*sizeof(HeapWord));
2612 }
2613 _numObjectsAllocated = 0;
2614 _numWordsAllocated = 0;
2615 )
2616 }
2618 void CMSCollector::gc_epilogue(bool full) {
2619 // The following locking discipline assumes that we are only called
2620 // when the world is stopped.
2621 assert(SafepointSynchronize::is_at_safepoint(),
2622 "world is stopped assumption");
2624 // Currently the CMS epilogue (see CompactibleFreeListSpace) merely checks
2625 // if linear allocation blocks need to be appropriately marked to allow
2626 // the blocks to be parsable. We also check here whether we need to nudge the
2627 // CMS collector thread to start a new cycle (if it's not already active).
2628 assert( Thread::current()->is_VM_thread()
2629 || ( CMSScavengeBeforeRemark
2630 && Thread::current()->is_ConcurrentGC_thread()),
2631 "Incorrect thread type for epilogue execution");
2633 if (!_between_prologue_and_epilogue) {
2634 // We have already been invoked; this is a gc_epilogue delegation
2635 // from yet another CMS generation that we are responsible for, just
2636 // ignore it since all relevant work has already been done.
2637 return;
2638 }
2639 assert(haveFreelistLocks(), "must have freelist locks");
2640 assert_lock_strong(bitMapLock());
2642 _cmsGen->gc_epilogue_work(full);
2643 _permGen->gc_epilogue_work(full);
2645 if (_collectorState == AbortablePreclean || _collectorState == Precleaning) {
2646 // in case sampling was not already enabled, enable it
2647 _start_sampling = true;
2648 }
2649 // reset _eden_chunk_array so sampling starts afresh
2650 _eden_chunk_index = 0;
2652 size_t cms_used = _cmsGen->cmsSpace()->used();
2653 size_t perm_used = _permGen->cmsSpace()->used();
2655 // update performance counters - this uses a special version of
2656 // update_counters() that allows the utilization to be passed as a
2657 // parameter, avoiding multiple calls to used().
2658 //
2659 _cmsGen->update_counters(cms_used);
2660 _permGen->update_counters(perm_used);
2662 if (CMSIncrementalMode) {
2663 icms_update_allocation_limits();
2664 }
2666 bitMapLock()->unlock();
2667 releaseFreelistLocks();
2669 _between_prologue_and_epilogue = false; // ready for next cycle
2670 }
2672 void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) {
2673 collector()->gc_epilogue(full);
2675 // Also reset promotion tracking in par gc thread states.
2676 if (ParallelGCThreads > 0) {
2677 for (uint i = 0; i < ParallelGCThreads; i++) {
2678 _par_gc_thread_states[i]->promo.stopTrackingPromotions();
2679 }
2680 }
2681 }
2683 void ConcurrentMarkSweepGeneration::gc_epilogue_work(bool full) {
2684 assert(!incremental_collection_failed(), "Should have been cleared");
2685 cmsSpace()->setPreconsumptionDirtyCardClosure(NULL);
2686 cmsSpace()->gc_epilogue();
2687 // Print stat counters
2688 NOT_PRODUCT(
2689 assert(_numObjectsAllocated == 0, "check");
2690 assert(_numWordsAllocated == 0, "check");
2691 if (Verbose && PrintGC) {
2692 gclog_or_tty->print("Promoted "SIZE_FORMAT" objects, "
2693 SIZE_FORMAT" bytes",
2694 _numObjectsPromoted, _numWordsPromoted*sizeof(HeapWord));
2695 }
2696 _numObjectsPromoted = 0;
2697 _numWordsPromoted = 0;
2698 )
2700 if (PrintGC && Verbose) {
2701 // The call down the chain in contiguous_available() needs the freelistLock,
2702 // so print this out before releasing the freelistLock.
2703 gclog_or_tty->print(" Contiguous available "SIZE_FORMAT" bytes ",
2704 contiguous_available());
2705 }
2706 }
2708 #ifndef PRODUCT
2709 bool CMSCollector::have_cms_token() {
2710 Thread* thr = Thread::current();
2711 if (thr->is_VM_thread()) {
2712 return ConcurrentMarkSweepThread::vm_thread_has_cms_token();
2713 } else if (thr->is_ConcurrentGC_thread()) {
2714 return ConcurrentMarkSweepThread::cms_thread_has_cms_token();
2715 } else if (thr->is_GC_task_thread()) {
2716 return ConcurrentMarkSweepThread::vm_thread_has_cms_token() &&
2717 ParGCRareEvent_lock->owned_by_self();
2718 }
2719 return false;
2720 }
2721 #endif
2723 // Check reachability of the given heap address in CMS generation,
2724 // treating all other generations as roots.
2725 bool CMSCollector::is_cms_reachable(HeapWord* addr) {
2726 // We could "guarantee" below, rather than assert, but I'll
2727 // leave these as "asserts" so that an adventurous debugger
2728 // could try this in the product build provided some subset of
2729 // the conditions were met, provided they were interested in the
2730 // results and knew that the computation below wouldn't interfere
2731 // with other concurrent computations mutating the structures
2732 // being read or written.
2733 assert(SafepointSynchronize::is_at_safepoint(),
2734 "Else mutations in object graph will make answer suspect");
2735 assert(have_cms_token(), "Should hold cms token");
2736 assert(haveFreelistLocks(), "must hold free list locks");
2737 assert_lock_strong(bitMapLock());
2739 // Clear the marking bit map array before starting, but, just
2740 // for kicks, first report if the given address is already marked
2741 gclog_or_tty->print_cr("Start: Address 0x%x is%s marked", addr,
2742 _markBitMap.isMarked(addr) ? "" : " not");
2744 if (verify_after_remark()) {
2745 MutexLockerEx x(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2746 bool result = verification_mark_bm()->isMarked(addr);
2747 gclog_or_tty->print_cr("TransitiveMark: Address 0x%x %s marked", addr,
2748 result ? "IS" : "is NOT");
2749 return result;
2750 } else {
2751 gclog_or_tty->print_cr("Could not compute result");
2752 return false;
2753 }
2754 }
2756 ////////////////////////////////////////////////////////
2757 // CMS Verification Support
2758 ////////////////////////////////////////////////////////
2759 // Following the remark phase, the following invariant
2760 // should hold -- each object in the CMS heap which is marked in the
2761 // verification_mark_bm() should also be marked in markBitMap().
2763 class VerifyMarkedClosure: public BitMapClosure {
2764 CMSBitMap* _marks;
2765 bool _failed;
2767 public:
2768 VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {}
2770 bool do_bit(size_t offset) {
2771 HeapWord* addr = _marks->offsetToHeapWord(offset);
2772 if (!_marks->isMarked(addr)) {
2773 oop(addr)->print();
2774 gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr);
2775 _failed = true;
2776 }
2777 return true;
2778 }
2780 bool failed() { return _failed; }
2781 };
2783 bool CMSCollector::verify_after_remark() {
2784 gclog_or_tty->print(" [Verifying CMS Marking... ");
2785 MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2786 static bool init = false;
2788 assert(SafepointSynchronize::is_at_safepoint(),
2789 "Else mutations in object graph will make answer suspect");
2790 assert(have_cms_token(),
2791 "Else there may be mutual interference in use of "
2792 " verification data structures");
2793 assert(_collectorState > Marking && _collectorState <= Sweeping,
2794 "Else marking info checked here may be obsolete");
2795 assert(haveFreelistLocks(), "must hold free list locks");
2796 assert_lock_strong(bitMapLock());
2799 // Allocate marking bit map if not already allocated
2800 if (!init) { // first time
2801 if (!verification_mark_bm()->allocate(_span)) {
2802 return false;
2803 }
2804 init = true;
2805 }
2807 assert(verification_mark_stack()->isEmpty(), "Should be empty");
2809 // Turn off refs discovery -- so we will be tracing through refs.
2810 // This is as intended, because by this time
2811 // GC must already have cleared any refs that need to be cleared,
2812 // and traced those that need to be marked; moreover,
2813 // the marking done here is not going to interfere in any
2814 // way with the marking information used by GC.
2815 NoRefDiscovery no_discovery(ref_processor());
2817 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
2819 // Clear any marks from a previous round
2820 verification_mark_bm()->clear_all();
2821 assert(verification_mark_stack()->isEmpty(), "markStack should be empty");
2822 assert(overflow_list_is_empty(), "overflow list should be empty");
2824 GenCollectedHeap* gch = GenCollectedHeap::heap();
2825 gch->ensure_parsability(false); // fill TLABs, but no need to retire them
2826 // Update the saved marks which may affect the root scans.
2827 gch->save_marks();
2829 if (CMSRemarkVerifyVariant == 1) {
2830 // In this first variant of verification, we complete
2831 // all marking, then check if the new marks-vector is
2832 // a subset of the CMS marks-vector.
2833 verify_after_remark_work_1();
2834 } else if (CMSRemarkVerifyVariant == 2) {
2835 // In this second variant of verification, we flag an error
2836 // (i.e. an object reachable in the new marks-vector not reachable
2837 // in the CMS marks-vector) immediately, also indicating the
2838 // identity of an object (A) that references the unmarked object (B) --
2839 // presumably, a mutation to A failed to be picked up by preclean/remark?
2840 verify_after_remark_work_2();
2841 } else {
2842 warning("Unrecognized value %d for CMSRemarkVerifyVariant",
2843 CMSRemarkVerifyVariant);
2844 }
2845 gclog_or_tty->print(" done] ");
2846 return true;
2847 }
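// (Illustrative: the variant is selected with -XX:CMSRemarkVerifyVariant=1
// or =2; whether the flag is settable on the command line depends on how
// it is declared in this build.)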
2849 void CMSCollector::verify_after_remark_work_1() {
2850 ResourceMark rm;
2851 HandleMark hm;
2852 GenCollectedHeap* gch = GenCollectedHeap::heap();
2854 // Mark from roots one level into CMS
2855 MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
2856 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2858 gch->gen_process_strong_roots(_cmsGen->level(),
2859 true, // younger gens are roots
2860 true, // activate StrongRootsScope
2861 true, // collecting perm gen
2862 SharedHeap::ScanningOption(roots_scanning_options()),
2863 &notOlder,
2864 true, // walk code active on stacks
2865 NULL);
2867 // Now mark from the roots
2868 assert(_revisitStack.isEmpty(), "Should be empty");
2869 MarkFromRootsClosure markFromRootsClosure(this, _span,
2870 verification_mark_bm(), verification_mark_stack(), &_revisitStack,
2871 false /* don't yield */, true /* verifying */);
2872 assert(_restart_addr == NULL, "Expected pre-condition");
2873 verification_mark_bm()->iterate(&markFromRootsClosure);
2874 while (_restart_addr != NULL) {
2875 // Deal with stack overflow: by restarting at the indicated
2876 // address.
2877 HeapWord* ra = _restart_addr;
2878 markFromRootsClosure.reset(ra);
2879 _restart_addr = NULL;
2880 verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2881 }
2882 assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2883 verify_work_stacks_empty();
2884 // Should reset the revisit stack above, since no class tree
2885 // surgery is forthcoming.
2886 _revisitStack.reset(); // throwing away all contents
2888 // Marking completed -- now verify that each bit marked in
2889 // verification_mark_bm() is also marked in markBitMap(); flag all
2890 // errors by printing corresponding objects.
2891 VerifyMarkedClosure vcl(markBitMap());
2892 verification_mark_bm()->iterate(&vcl);
2893 if (vcl.failed()) {
2894 gclog_or_tty->print("Verification failed");
2895 Universe::heap()->print();
2896 fatal(" ... aborting");
2897 }
2898 }
2900 void CMSCollector::verify_after_remark_work_2() {
2901 ResourceMark rm;
2902 HandleMark hm;
2903 GenCollectedHeap* gch = GenCollectedHeap::heap();
2905 // Mark from roots one level into CMS
2906 MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
2907 markBitMap());
2908 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2909 gch->gen_process_strong_roots(_cmsGen->level(),
2910 true, // younger gens are roots
2911 true, // activate StrongRootsScope
2912 true, // collecting perm gen
2913 SharedHeap::ScanningOption(roots_scanning_options()),
2914 &notOlder,
2915 true, // walk code active on stacks
2916 NULL);
2918 // Now mark from the roots
2919 assert(_revisitStack.isEmpty(), "Should be empty");
2920 MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
2921 verification_mark_bm(), markBitMap(), verification_mark_stack());
2922 assert(_restart_addr == NULL, "Expected pre-condition");
2923 verification_mark_bm()->iterate(&markFromRootsClosure);
2924 while (_restart_addr != NULL) {
2925 // Deal with stack overflow: by restarting at the indicated
2926 // address.
2927 HeapWord* ra = _restart_addr;
2928 markFromRootsClosure.reset(ra);
2929 _restart_addr = NULL;
2930 verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2931 }
2932 assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2933 verify_work_stacks_empty();
2934 // Should reset the revisit stack above, since no class tree
2935 // surgery is forthcoming.
2936 _revisitStack.reset(); // throwing away all contents
2938 // Marking completed -- now verify that each bit marked in
2939 // verification_mark_bm() is also marked in markBitMap(); flag all
2940 // errors by printing corresponding objects.
2941 VerifyMarkedClosure vcl(markBitMap());
2942 verification_mark_bm()->iterate(&vcl);
2943 assert(!vcl.failed(), "Else verification above should not have succeeded");
2944 }
2946 void ConcurrentMarkSweepGeneration::save_marks() {
2947 // delegate to CMS space
2948 cmsSpace()->save_marks();
2949 for (uint i = 0; i < ParallelGCThreads; i++) {
2950 _par_gc_thread_states[i]->promo.startTrackingPromotions();
2951 }
2952 }
2954 bool ConcurrentMarkSweepGeneration::no_allocs_since_save_marks() {
2955 return cmsSpace()->no_allocs_since_save_marks();
2956 }
2958 #define CMS_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
2959 \
2960 void ConcurrentMarkSweepGeneration:: \
2961 oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
2962 cl->set_generation(this); \
2963 cmsSpace()->oop_since_save_marks_iterate##nv_suffix(cl); \
2964 cl->reset_generation(); \
2965 save_marks(); \
2966 }
2968 ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DEFN)
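// For illustration: each application of the macro above stamps out one
// non-virtual iterator. Assuming ALL_SINCE_SAVE_MARKS_CLOSURES supplies
// (OopClosureType = ScanClosure, nv_suffix = _nv), the expansion would
// read, modulo line splicing, roughly:
//
//   void ConcurrentMarkSweepGeneration::
//   oop_since_save_marks_iterate_nv(ScanClosure* cl) {
//     cl->set_generation(this);
//     cmsSpace()->oop_since_save_marks_iterate_nv(cl);
//     cl->reset_generation();
//     save_marks();
//   }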
2970 void
2971 ConcurrentMarkSweepGeneration::object_iterate_since_last_GC(ObjectClosure* blk)
2972 {
2973 // Not currently implemented; need to do the following. -- ysr.
2974 // dld -- I think that is used for some sort of allocation profiler. So it
2975 // really means the objects allocated by the mutator since the last
2976 // GC. We could potentially implement this cheaply by recording only
2977 // the direct allocations in a side data structure.
2978 //
2979 // I think we probably ought not to be required to support these
2980 // iterations at any arbitrary point; I think there ought to be some
2981 // call to enable/disable allocation profiling in a generation/space,
2982 // and the iterator ought to return the objects allocated in the
2983 // gen/space since the enable call, or the last iterator call (which
2984 // will probably be at a GC.) That way, for gens like CM&S that would
2985 // require some extra data structure to support this, we only pay the
2986 // cost when it's in use...
2987 cmsSpace()->object_iterate_since_last_GC(blk);
2988 }
2990 void
2991 ConcurrentMarkSweepGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
2992 cl->set_generation(this);
2993 younger_refs_in_space_iterate(_cmsSpace, cl);
2994 cl->reset_generation();
2995 }
2997 void
2998 ConcurrentMarkSweepGeneration::oop_iterate(MemRegion mr, OopClosure* cl) {
2999 if (freelistLock()->owned_by_self()) {
3000 Generation::oop_iterate(mr, cl);
3001 } else {
3002 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3003 Generation::oop_iterate(mr, cl);
3004 }
3005 }
3007 void
3008 ConcurrentMarkSweepGeneration::oop_iterate(OopClosure* cl) {
3009 if (freelistLock()->owned_by_self()) {
3010 Generation::oop_iterate(cl);
3011 } else {
3012 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3013 Generation::oop_iterate(cl);
3014 }
3015 }
3017 void
3018 ConcurrentMarkSweepGeneration::object_iterate(ObjectClosure* cl) {
3019 if (freelistLock()->owned_by_self()) {
3020 Generation::object_iterate(cl);
3021 } else {
3022 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3023 Generation::object_iterate(cl);
3024 }
3025 }
3027 void
3028 ConcurrentMarkSweepGeneration::safe_object_iterate(ObjectClosure* cl) {
3029 if (freelistLock()->owned_by_self()) {
3030 Generation::safe_object_iterate(cl);
3031 } else {
3032 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3033 Generation::safe_object_iterate(cl);
3034 }
3035 }
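// The four iterators above share a "lock the free list unless this thread
// already holds it" idiom. A minimal stand-alone model of that pattern,
// using standard C++ primitives (all names here are hypothetical, for
// exposition only -- this is not the HotSpot Mutex API):
#if 0
#include <atomic>
#include <mutex>
#include <thread>

// A lock that knows whether the calling thread already owns it, modelling
// freelistLock()->owned_by_self() above.
class SelfAwareLock {
  std::mutex _mu;
  std::atomic<std::thread::id> _owner{};
 public:
  bool owned_by_self() const {
    return _owner.load() == std::this_thread::get_id();
  }
  void lock()   { _mu.lock(); _owner.store(std::this_thread::get_id()); }
  void unlock() { _owner.store(std::thread::id()); _mu.unlock(); }
};

// Run the iteration body under the lock, but avoid self-deadlock when the
// caller already holds it -- the shape shared by the four methods above.
template <typename Fn>
void iterate_with_lock(SelfAwareLock& l, Fn body) {
  if (l.owned_by_self()) {
    body();
  } else {
    l.lock();
    body();
    l.unlock();
  }
}
#endif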
3037 void
3038 ConcurrentMarkSweepGeneration::pre_adjust_pointers() {
3039 }
3041 void
3042 ConcurrentMarkSweepGeneration::post_compact() {
3043 }
3045 void
3046 ConcurrentMarkSweepGeneration::prepare_for_verify() {
3047 // Fix the linear allocation blocks to look like free blocks.
3049 // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
3050 // are not called when the heap is verified during universe initialization and
3051 // at vm shutdown.
3052 if (freelistLock()->owned_by_self()) {
3053 cmsSpace()->prepare_for_verify();
3054 } else {
3055 MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
3056 cmsSpace()->prepare_for_verify();
3057 }
3058 }
3060 void
3061 ConcurrentMarkSweepGeneration::verify(bool allow_dirty /* ignored */) {
3062 // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
3063 // are not called when the heap is verified during universe initialization and
3064 // at vm shutdown.
3065 if (freelistLock()->owned_by_self()) {
3066 cmsSpace()->verify(false /* ignored */);
3067 } else {
3068 MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
3069 cmsSpace()->verify(false /* ignored */);
3070 }
3071 }
3073 void CMSCollector::verify(bool allow_dirty /* ignored */) {
3074 _cmsGen->verify(allow_dirty);
3075 _permGen->verify(allow_dirty);
3076 }
3078 #ifndef PRODUCT
3079 bool CMSCollector::overflow_list_is_empty() const {
3080 assert(_num_par_pushes >= 0, "Inconsistency");
3081 if (_overflow_list == NULL) {
3082 assert(_num_par_pushes == 0, "Inconsistency");
3083 }
3084 return _overflow_list == NULL;
3085 }
3087 // The methods verify_work_stacks_empty() and verify_overflow_empty()
3088 // merely consolidate assertion checks that appear to occur together frequently.
3089 void CMSCollector::verify_work_stacks_empty() const {
3090 assert(_markStack.isEmpty(), "Marking stack should be empty");
3091 assert(overflow_list_is_empty(), "Overflow list should be empty");
3092 }
3094 void CMSCollector::verify_overflow_empty() const {
3095 assert(overflow_list_is_empty(), "Overflow list should be empty");
3096 assert(no_preserved_marks(), "No preserved marks");
3097 }
3098 #endif // PRODUCT
3100 // Decide if we want to enable class unloading as part of the
3101 // ensuing concurrent GC cycle. We will collect the perm gen and
3102 // unload classes if it's the case that:
3103 // (1) an explicit gc request has been made and the flag
3104 // ExplicitGCInvokesConcurrentAndUnloadsClasses is set, OR
3105 // (2) (a) class unloading is enabled at the command line, and
3106 // (b) (i) perm gen threshold has been crossed, or
3107 // (ii) old gen is getting really full, or
3108 // (iii) the previous N CMS collections did not collect the
3109 // perm gen
3110 // NOTE: Provided there is no change in the state of the heap between
3111 // calls to this method, it should have idempotent results. Moreover,
3112 // its results should be monotonically increasing (i.e. going from 0 to 1,
3113 // but not 1 to 0) between successive calls between which the heap was
3114 // not collected. For the implementation below, it must thus rely on
3115 // the property that concurrent_cycles_since_last_unload()
3116 // will not decrease unless a collection cycle happened and that
3117 // _permGen->should_concurrent_collect() and _cmsGen->is_too_full() are
3118 // themselves also monotonic in that sense. See check_monotonicity()
3119 // below.
3120 bool CMSCollector::update_should_unload_classes() {
3121 _should_unload_classes = false;
3122 // Condition 1 above
3123 if (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) {
3124 _should_unload_classes = true;
3125 } else if (CMSClassUnloadingEnabled) { // Condition 2.a above
3126 // Disjuncts 2.b.(i,ii,iii) above
3127 _should_unload_classes = (concurrent_cycles_since_last_unload() >=
3128 CMSClassUnloadingMaxInterval)
3129 || _permGen->should_concurrent_collect()
3130 || _cmsGen->is_too_full();
3131 }
3132 return _should_unload_classes;
3133 }
3135 bool ConcurrentMarkSweepGeneration::is_too_full() const {
3136 bool res = should_concurrent_collect();
3137 res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0);
3138 return res;
3139 }
3141 void CMSCollector::setup_cms_unloading_and_verification_state() {
3142 const bool should_verify = VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
3143 || VerifyBeforeExit;
3144 const int rso = SharedHeap::SO_Symbols | SharedHeap::SO_Strings
3145 | SharedHeap::SO_CodeCache;
3147 if (should_unload_classes()) { // Should unload classes this cycle
3148 remove_root_scanning_option(rso); // Shrink the root set appropriately
3149 set_verifying(should_verify); // Set verification state for this cycle
3150 return; // Nothing else needs to be done at this time
3151 }
3153 // Not unloading classes this cycle
3154 assert(!should_unload_classes(), "Inconsistency!");
3155 if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
3156 // We were not verifying, or we _were_ unloading classes in the last cycle,
3157 // AND some verification options are enabled this cycle; in this case,
3158 // we must make sure that the deadness map is allocated if not already so,
3159 // and cleared (if already allocated previously --
3160 // CMSBitMap::sizeInBits() is used to determine if it's allocated).
3161 if (perm_gen_verify_bit_map()->sizeInBits() == 0) {
3162 if (!perm_gen_verify_bit_map()->allocate(_permGen->reserved())) {
3163 warning("Failed to allocate permanent generation verification CMS Bit Map;\n"
3164 "permanent generation verification disabled");
3165 return; // Note that we leave verification disabled, so we'll retry this
3166 // allocation next cycle. We _could_ remember this failure
3167 // and skip further attempts and permanently disable verification
3168 // attempts if that is considered more desirable.
3169 }
3170 assert(perm_gen_verify_bit_map()->covers(_permGen->reserved()),
3171 "_perm_gen_ver_bit_map inconsistency?");
3172 } else {
3173 perm_gen_verify_bit_map()->clear_all();
3174 }
3175 // Include symbols, strings and code cache elements to prevent their resurrection.
3176 add_root_scanning_option(rso);
3177 set_verifying(true);
3178 } else if (verifying() && !should_verify) {
3179 // We were verifying, but some verification flags got disabled.
3180 set_verifying(false);
3181 // Exclude symbols, strings and code cache elements from root scanning to
3182 // reduce IM and RM pauses.
3183 remove_root_scanning_option(rso);
3184 }
3185 }
3188 #ifndef PRODUCT
3189 HeapWord* CMSCollector::block_start(const void* p) const {
3190 const HeapWord* addr = (HeapWord*)p;
3191 if (_span.contains(p)) {
3192 if (_cmsGen->cmsSpace()->is_in_reserved(addr)) {
3193 return _cmsGen->cmsSpace()->block_start(p);
3194 } else {
3195 assert(_permGen->cmsSpace()->is_in_reserved(addr),
3196 "Inconsistent _span?");
3197 return _permGen->cmsSpace()->block_start(p);
3198 }
3199 }
3200 return NULL;
3201 }
3202 #endif
3204 HeapWord*
3205 ConcurrentMarkSweepGeneration::expand_and_allocate(size_t word_size,
3206 bool tlab,
3207 bool parallel) {
3208 assert(!tlab, "Can't deal with TLAB allocation");
3209 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3210 expand(word_size*HeapWordSize, MinHeapDeltaBytes,
3211 CMSExpansionCause::_satisfy_allocation);
3212 if (GCExpandToAllocateDelayMillis > 0) {
3213 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3214 }
3215 return have_lock_and_allocate(word_size, tlab);
3216 }
3218 // YSR: All of this generation expansion/shrinking stuff is an exact copy of
3219 // OneContigSpaceCardGeneration, which makes me wonder if we should move this
3220 // to CardGeneration and share it...
3221 bool ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes) {
3222 return CardGeneration::expand(bytes, expand_bytes);
3223 }
3225 void ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes,
3226 CMSExpansionCause::Cause cause)
3227 {
3229 bool success = expand(bytes, expand_bytes);
3231 // remember why we expanded; this information is used
3232 // by shouldConcurrentCollect() when making decisions on whether to start
3233 // a new CMS cycle.
3234 if (success) {
3235 set_expansion_cause(cause);
3236 if (PrintGCDetails && Verbose) {
3237 gclog_or_tty->print_cr("Expanded CMS gen for %s",
3238 CMSExpansionCause::to_string(cause));
3239 }
3240 }
3241 }
3243 HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) {
3244 HeapWord* res = NULL;
3245 MutexLocker x(ParGCRareEvent_lock);
3246 while (true) {
3247 // Expansion by some other thread might make alloc OK now:
3248 res = ps->lab.alloc(word_sz);
3249 if (res != NULL) return res;
3250 // If there's not enough expansion space available, give up.
3251 if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) {
3252 return NULL;
3253 }
3254 // Otherwise, we try expansion.
3255 expand(word_sz*HeapWordSize, MinHeapDeltaBytes,
3256 CMSExpansionCause::_allocate_par_lab);
3257 // Now go around the loop and try alloc again;
3258 // A competing par_promote might beat us to the expansion space,
3259 // so we may go around the loop again if promotion fails again.
3260 if (GCExpandToAllocateDelayMillis > 0) {
3261 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3262 }
3263 }
3264 }
3267 bool ConcurrentMarkSweepGeneration::expand_and_ensure_spooling_space(
3268 PromotionInfo* promo) {
3269 MutexLocker x(ParGCRareEvent_lock);
3270 size_t refill_size_bytes = promo->refillSize() * HeapWordSize;
3271 while (true) {
3272 // Expansion by some other thread might make alloc OK now:
3273 if (promo->ensure_spooling_space()) {
3274 assert(promo->has_spooling_space(),
3275 "Post-condition of successful ensure_spooling_space()");
3276 return true;
3277 }
3278 // If there's not enough expansion space available, give up.
3279 if (_virtual_space.uncommitted_size() < refill_size_bytes) {
3280 return false;
3281 }
3282 // Otherwise, we try expansion.
3283 expand(refill_size_bytes, MinHeapDeltaBytes,
3284 CMSExpansionCause::_allocate_par_spooling_space);
3285 // Now go around the loop and try alloc again;
3286 // A competing allocation might beat us to the expansion space,
3287 // so we may go around the loop again if allocation fails again.
3288 if (GCExpandToAllocateDelayMillis > 0) {
3289 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3290 }
3291 }
3292 }
3296 void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
3297 assert_locked_or_safepoint(Heap_lock);
3298 size_t size = ReservedSpace::page_align_size_down(bytes);
3299 if (size > 0) {
3300 shrink_by(size);
3301 }
3302 }
3304 bool ConcurrentMarkSweepGeneration::grow_by(size_t bytes) {
3305 assert_locked_or_safepoint(Heap_lock);
3306 bool result = _virtual_space.expand_by(bytes);
3307 if (result) {
3308 HeapWord* old_end = _cmsSpace->end();
3309 size_t new_word_size =
3310 heap_word_size(_virtual_space.committed_size());
3311 MemRegion mr(_cmsSpace->bottom(), new_word_size);
3312 _bts->resize(new_word_size); // resize the block offset shared array
3313 Universe::heap()->barrier_set()->resize_covered_region(mr);
3314 // Hmmmm... why doesn't CFLS::set_end verify locking?
3315 // This is quite ugly; FIX ME XXX
3316 _cmsSpace->assert_locked();
3317 _cmsSpace->set_end((HeapWord*)_virtual_space.high());
3319 // update the space and generation capacity counters
3320 if (UsePerfData) {
3321 _space_counters->update_capacity();
3322 _gen_counters->update_all();
3323 }
3325 if (Verbose && PrintGC) {
3326 size_t new_mem_size = _virtual_space.committed_size();
3327 size_t old_mem_size = new_mem_size - bytes;
3328 gclog_or_tty->print_cr("Expanding %s from %ldK by %ldK to %ldK",
3329 name(), old_mem_size/K, bytes/K, new_mem_size/K);
3330 }
3331 }
3332 return result;
3333 }
3335 bool ConcurrentMarkSweepGeneration::grow_to_reserved() {
3336 assert_locked_or_safepoint(Heap_lock);
3337 bool success = true;
3338 const size_t remaining_bytes = _virtual_space.uncommitted_size();
3339 if (remaining_bytes > 0) {
3340 success = grow_by(remaining_bytes);
3341 DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
3342 }
3343 return success;
3344 }
3346 void ConcurrentMarkSweepGeneration::shrink_by(size_t bytes) {
3347 assert_locked_or_safepoint(Heap_lock);
3348 assert_lock_strong(freelistLock());
3349 // XXX Fix when compaction is implemented.
3350 warning("Shrinking of CMS not yet implemented");
3351 return;
3352 }
3355 // Simple ctor/dtor wrapper for accounting & timer chores around concurrent
3356 // phases.
3357 class CMSPhaseAccounting: public StackObj {
3358 public:
3359 CMSPhaseAccounting(CMSCollector *collector,
3360 const char *phase,
3361 bool print_cr = true);
3362 ~CMSPhaseAccounting();
3364 private:
3365 CMSCollector *_collector;
3366 const char *_phase;
3367 elapsedTimer _wallclock;
3368 bool _print_cr;
3370 public:
3371 // Not MT-safe; so do not pass around these StackObj's
3372 // where they may be accessed by other threads.
3373 jlong wallclock_millis() {
3374 assert(_wallclock.is_active(), "Wall clock should not stop");
3375 _wallclock.stop(); // to record time
3376 jlong ret = _wallclock.milliseconds();
3377 _wallclock.start(); // restart
3378 return ret;
3379 }
3380 };
3382 CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
3383 const char *phase,
3384 bool print_cr) :
3385 _collector(collector), _phase(phase), _print_cr(print_cr) {
3387 if (PrintCMSStatistics != 0) {
3388 _collector->resetYields();
3389 }
3390 if (PrintGCDetails && PrintGCTimeStamps) {
3391 gclog_or_tty->date_stamp(PrintGCDateStamps);
3392 gclog_or_tty->stamp();
3393 gclog_or_tty->print_cr(": [%s-concurrent-%s-start]",
3394 _collector->cmsGen()->short_name(), _phase);
3395 }
3396 _collector->resetTimer();
3397 _wallclock.start();
3398 _collector->startTimer();
3399 }
3401 CMSPhaseAccounting::~CMSPhaseAccounting() {
3402 assert(_wallclock.is_active(), "Wall clock should not have stopped");
3403 _collector->stopTimer();
3404 _wallclock.stop();
3405 if (PrintGCDetails) {
3406 gclog_or_tty->date_stamp(PrintGCDateStamps);
3407 if (PrintGCTimeStamps) {
3408 gclog_or_tty->stamp();
3409 gclog_or_tty->print(": ");
3410 }
3411 gclog_or_tty->print("[%s-concurrent-%s: %3.3f/%3.3f secs]",
3412 _collector->cmsGen()->short_name(),
3413 _phase, _collector->timerValue(), _wallclock.seconds());
3414 if (_print_cr) {
3415 gclog_or_tty->print_cr("");
3416 }
3417 if (PrintCMSStatistics != 0) {
3418 gclog_or_tty->print_cr(" (CMS-concurrent-%s yielded %d times)", _phase,
3419 _collector->yields());
3420 }
3421 }
3422 }
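// Typical use of CMSPhaseAccounting: since it is a StackObj whose
// constructor and destructor bracket a phase with timer starts/stops and
// log lines, a phase simply declares one in a scope, as the concurrent
// marking phase below does:
//
//   CMSTokenSyncWithLocks ts(true, bitMapLock());
//   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
//   CMSPhaseAccounting pa(this, "mark", !PrintGCDetails);
//   res = markFromRootsWork(asynch);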
3424 // CMS work
3426 // Checkpoint the roots into this generation from outside
3427 // this generation. [Note this initial checkpoint need only
3428 // be approximate -- we'll do a catch up phase subsequently.]
3429 void CMSCollector::checkpointRootsInitial(bool asynch) {
3430 assert(_collectorState == InitialMarking, "Wrong collector state");
3431 check_correct_thread_executing();
3432 ReferenceProcessor* rp = ref_processor();
3433 SpecializationStats::clear();
3434 assert(_restart_addr == NULL, "Control point invariant");
3435 if (asynch) {
3436 // acquire locks for subsequent manipulations
3437 MutexLockerEx x(bitMapLock(),
3438 Mutex::_no_safepoint_check_flag);
3439 checkpointRootsInitialWork(asynch);
3440 rp->verify_no_references_recorded();
3441 rp->enable_discovery(); // enable ("weak") refs discovery
3442 _collectorState = Marking;
3443 } else {
3444 // (Weak) Refs discovery: this is controlled from genCollectedHeap::do_collection
3445 // which recognizes if we are a CMS generation, and doesn't try to turn on
3446 // discovery; verify that they aren't meddling.
3447 assert(!rp->discovery_is_atomic(),
3448 "incorrect setting of discovery predicate");
3449 assert(!rp->discovery_enabled(), "genCollectedHeap shouldn't control "
3450 "ref discovery for this generation kind");
3451 // already have locks
3452 checkpointRootsInitialWork(asynch);
3453 rp->enable_discovery(); // now enable ("weak") refs discovery
3454 _collectorState = Marking;
3455 }
3456 SpecializationStats::print();
3457 }
3459 void CMSCollector::checkpointRootsInitialWork(bool asynch) {
3460 assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
3461 assert(_collectorState == InitialMarking, "just checking");
3463 // If there has not been a GC[n-1] since last GC[n] cycle completed,
3464 // precede our marking with a collection of all
3465 // younger generations to keep floating garbage to a minimum.
3466 // XXX: we won't do this for now -- it's an optimization to be done later.
3468 // already have locks
3469 assert_lock_strong(bitMapLock());
3470 assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");
3472 // Setup the verification and class unloading state for this
3473 // CMS collection cycle.
3474 setup_cms_unloading_and_verification_state();
3476 NOT_PRODUCT(TraceTime t("\ncheckpointRootsInitialWork",
3477 PrintGCDetails && Verbose, true, gclog_or_tty);)
3478 if (UseAdaptiveSizePolicy) {
3479 size_policy()->checkpoint_roots_initial_begin();
3480 }
3482 // Reset all the PLAB chunk arrays if necessary.
3483 if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
3484 reset_survivor_plab_arrays();
3485 }
3487 ResourceMark rm;
3488 HandleMark hm;
3490 FalseClosure falseClosure;
3491 // In the case of a synchronous collection, we will elide the
3492 // remark step, so it's important to catch all the nmethod oops
3493 // in this step.
3494 // The final 'true' flag to gen_process_strong_roots will ensure this.
3495 // If 'async' is true, we can relax the nmethod tracing.
3496 MarkRefsIntoClosure notOlder(_span, &_markBitMap);
3497 GenCollectedHeap* gch = GenCollectedHeap::heap();
3499 verify_work_stacks_empty();
3500 verify_overflow_empty();
3502 gch->ensure_parsability(false); // fill TLABs, but no need to retire them
3503 // Update the saved marks which may affect the root scans.
3504 gch->save_marks();
3506 // weak reference processing has not started yet.
3507 ref_processor()->set_enqueuing_is_done(false);
3509 {
3510 // This is not needed. DEBUG_ONLY(RememberKlassesChecker imx(true);)
3511 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
3512 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3513 gch->gen_process_strong_roots(_cmsGen->level(),
3514 true, // younger gens are roots
3515 true, // activate StrongRootsScope
3516 true, // collecting perm gen
3517 SharedHeap::ScanningOption(roots_scanning_options()),
3518 &notOlder,
3519 true, // walk all of code cache if (so & SO_CodeCache)
3520 NULL);
3521 }
3523 // Clear mod-union table; it will be dirtied in the prologue of
3524 // CMS generation per each younger generation collection.
3526 assert(_modUnionTable.isAllClear(),
3527 "Was cleared in most recent final checkpoint phase"
3528 " or no bits are set in the gc_prologue before the start of the next "
3529 "subsequent marking phase.");
3531 // Temporarily disabled, since pre/post-consumption closures don't
3532 // care about precleaned cards
3533 #if 0
3534 {
3535 MemRegion mr = MemRegion((HeapWord*)_virtual_space.low(),
3536 (HeapWord*)_virtual_space.high());
3537 _ct->ct_bs()->preclean_dirty_cards(mr);
3538 }
3539 #endif
3541 // Save the end of the used_region of the constituent generations
3542 // to be used to limit the extent of sweep in each generation.
3543 save_sweep_limits();
3544 if (UseAdaptiveSizePolicy) {
3545 size_policy()->checkpoint_roots_initial_end(gch->gc_cause());
3546 }
3547 verify_overflow_empty();
3548 }
3550 bool CMSCollector::markFromRoots(bool asynch) {
3551 // we might be tempted to assert that:
3552 // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
3553 // "inconsistent argument?");
3554 // However that wouldn't be right, because it's possible that
3555 // a safepoint is indeed in progress as a younger generation
3556 // stop-the-world GC happens even as we mark in this generation.
3557 assert(_collectorState == Marking, "inconsistent state?");
3558 check_correct_thread_executing();
3559 verify_overflow_empty();
3561 bool res;
3562 if (asynch) {
3564 // Start the timers for adaptive size policy for the concurrent phases
3565 // Do it here so that the foreground MS can use the concurrent
3566 // timer since a foreground MS might have the sweep done concurrently
3567 // or STW.
3568 if (UseAdaptiveSizePolicy) {
3569 size_policy()->concurrent_marking_begin();
3570 }
3572 // Weak ref discovery note: We may be discovering weak
3573 // refs in this generation concurrent (but interleaved) with
3574 // weak ref discovery by a younger generation collector.
3576 CMSTokenSyncWithLocks ts(true, bitMapLock());
3577 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
3578 CMSPhaseAccounting pa(this, "mark", !PrintGCDetails);
3579 res = markFromRootsWork(asynch);
3580 if (res) {
3581 _collectorState = Precleaning;
3582 } else { // We failed and a foreground collection wants to take over
3583 assert(_foregroundGCIsActive, "internal state inconsistency");
3584 assert(_restart_addr == NULL, "foreground will restart from scratch");
3585 if (PrintGCDetails) {
3586 gclog_or_tty->print_cr("bailing out to foreground collection");
3587 }
3588 }
3589 if (UseAdaptiveSizePolicy) {
3590 size_policy()->concurrent_marking_end();
3591 }
3592 } else {
3593 assert(SafepointSynchronize::is_at_safepoint(),
3594 "inconsistent with asynch == false");
3595 if (UseAdaptiveSizePolicy) {
3596 size_policy()->ms_collection_marking_begin();
3597 }
3598 // already have locks
3599 res = markFromRootsWork(asynch);
3600 _collectorState = FinalMarking;
3601 if (UseAdaptiveSizePolicy) {
3602 GenCollectedHeap* gch = GenCollectedHeap::heap();
3603 size_policy()->ms_collection_marking_end(gch->gc_cause());
3604 }
3605 }
3606 verify_overflow_empty();
3607 return res;
3608 }
3610 bool CMSCollector::markFromRootsWork(bool asynch) {
3611 // iterate over marked bits in bit map, doing a full scan and mark
3612 // from these roots using the following algorithm:
3613 // . if oop is to the right of the current scan pointer,
3614 // mark corresponding bit (we'll process it later)
3615 // . else (oop is to left of current scan pointer)
3616 // push oop on marking stack
3617 // . drain the marking stack
3619 // Note that when we do a marking step we need to hold the
3620 // bit map lock -- recall that direct allocation (by mutators)
3621 // and promotion (by younger generation collectors) is also
3622 // marking the bit map. [the so-called allocate live policy.]
3623 // Because the implementation of bit map marking is not
3624 // robust wrt simultaneous marking of bits in the same word,
3625 // we need to make sure that there is no such interference
3626 // between concurrent such updates.
3628 // already have locks
3629 assert_lock_strong(bitMapLock());
3631 // Clear the revisit stack, just in case there are any
3632 // obsolete contents from a short-circuited previous CMS cycle.
3633 _revisitStack.reset();
3634 verify_work_stacks_empty();
3635 verify_overflow_empty();
3636 assert(_revisitStack.isEmpty(), "tabula rasa");
3638 DEBUG_ONLY(RememberKlassesChecker cmx(CMSClassUnloadingEnabled);)
3640 bool result = false;
3641 if (CMSConcurrentMTEnabled && ParallelCMSThreads > 0) {
3642 result = do_marking_mt(asynch);
3643 } else {
3644 result = do_marking_st(asynch);
3645 }
3646 return result;
3647 }
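// Illustrative, self-contained model of the finger-based marking loop
// described at the top of markFromRootsWork() above (Obj and the map are
// stand-ins for heap objects and the mark bitmap, not HotSpot types;
// callers pre-mark the roots and enter them in 'bits'):
#if 0
#include <cstddef>
#include <map>
#include <vector>

struct Obj {
  std::size_t addr;             // position in the heap, lowest first
  std::vector<Obj*> refs;       // outgoing references
  bool marked = false;
};

static void mark_from_roots(std::map<std::size_t, Obj*>& bits) {
  std::vector<Obj*> stack;      // the marking stack
  auto visit = [&](Obj* o, std::size_t finger) {
    for (Obj* r : o->refs) {
      if (r->marked) continue;
      r->marked = true;
      if (r->addr > finger) {
        bits[r->addr] = r;      // right of finger: set bit, sweep reaches it
      } else {
        stack.push_back(r);     // left of finger: must push and drain
      }
    }
  };
  // The scan pointer ("finger") sweeps the bitmap left to right;
  // std::map iterators stay valid across the insertions made by visit().
  for (auto it = bits.begin(); it != bits.end(); ++it) {
    visit(it->second, it->first);
    while (!stack.empty()) {    // drain the stack before advancing the finger
      Obj* o = stack.back();
      stack.pop_back();
      visit(o, it->first);
    }
  }
}
#endif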
3649 // Forward decl
3650 class CMSConcMarkingTask;
3652 class CMSConcMarkingTerminator: public ParallelTaskTerminator {
3653 CMSCollector* _collector;
3654 CMSConcMarkingTask* _task;
3655 bool _yield;
3656 protected:
3657 virtual void yield();
3658 public:
3659 // "n_threads" is the number of threads to be terminated.
3660 // "queue_set" is a set of work queues of other threads.
3661 // "collector" is the CMS collector associated with this task terminator.
3662 // "yield" indicates whether we need the gang as a whole to yield.
3663 CMSConcMarkingTerminator(int n_threads, TaskQueueSetSuper* queue_set,
3664 CMSCollector* collector, bool yield) :
3665 ParallelTaskTerminator(n_threads, queue_set),
3666 _collector(collector),
3667 _yield(yield) { }
3669 void set_task(CMSConcMarkingTask* task) {
3670 _task = task;
3671 }
3672 };
3674 // MT Concurrent Marking Task
3675 class CMSConcMarkingTask: public YieldingFlexibleGangTask {
3676 CMSCollector* _collector;
3677 YieldingFlexibleWorkGang* _workers; // the whole gang
3678 int _n_workers; // requested/desired # workers
3679 bool _asynch;
3680 bool _result;
3681 CompactibleFreeListSpace* _cms_space;
3682 CompactibleFreeListSpace* _perm_space;
3683 HeapWord* _global_finger;
3684 HeapWord* _restart_addr;
3686 // Exposed here for yielding support
3687 Mutex* const _bit_map_lock;
3689 // The per thread work queues, available here for stealing
3690 OopTaskQueueSet* _task_queues;
3691 CMSConcMarkingTerminator _term;
3693 public:
3694 CMSConcMarkingTask(CMSCollector* collector,
3695 CompactibleFreeListSpace* cms_space,
3696 CompactibleFreeListSpace* perm_space,
3697 bool asynch, int n_workers,
3698 YieldingFlexibleWorkGang* workers,
3699 OopTaskQueueSet* task_queues):
3700 YieldingFlexibleGangTask("Concurrent marking done multi-threaded"),
3701 _collector(collector),
3702 _cms_space(cms_space),
3703 _perm_space(perm_space),
3704 _asynch(asynch), _n_workers(n_workers), _result(true),
3705 _workers(workers), _task_queues(task_queues),
3706 _term(n_workers, task_queues, _collector, asynch),
3707 _bit_map_lock(collector->bitMapLock())
3708 {
3709 assert(n_workers <= workers->total_workers(),
3710 "Else termination won't work correctly today"); // XXX FIX ME!
3711 _requested_size = n_workers;
3712 _term.set_task(this);
3713 assert(_cms_space->bottom() < _perm_space->bottom(),
3714 "Finger incorrectly initialized below");
3715 _restart_addr = _global_finger = _cms_space->bottom();
3716 }
3719 OopTaskQueueSet* task_queues() { return _task_queues; }
3721 OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
3723 HeapWord** global_finger_addr() { return &_global_finger; }
3725 CMSConcMarkingTerminator* terminator() { return &_term; }
3727 void work(int i);
3729 virtual void coordinator_yield(); // stuff done by coordinator
3730 bool result() { return _result; }
3732 void reset(HeapWord* ra) {
3733 assert(_global_finger >= _cms_space->end(), "Postcondition of ::work(i)");
3734 assert(_global_finger >= _perm_space->end(), "Postcondition of ::work(i)");
3735 assert(ra < _perm_space->end(), "ra too large");
3736 _restart_addr = _global_finger = ra;
3737 _term.reset_for_reuse();
3738 }
3740 static bool get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3741 OopTaskQueue* work_q);
3743 private:
3744 void do_scan_and_mark(int i, CompactibleFreeListSpace* sp);
3745 void do_work_steal(int i);
3746 void bump_global_finger(HeapWord* f);
3747 };
3749 void CMSConcMarkingTerminator::yield() {
3750 if (ConcurrentMarkSweepThread::should_yield() &&
3751 !_collector->foregroundGCIsActive() &&
3752 _yield) {
3753 _task->yield();
3754 } else {
3755 ParallelTaskTerminator::yield();
3756 }
3757 }
3759 ////////////////////////////////////////////////////////////////
3760 // Concurrent Marking Algorithm Sketch
3761 ////////////////////////////////////////////////////////////////
3762 // Until all tasks exhausted (both spaces):
3763 // -- claim next available chunk
3764 // -- bump global finger via CAS
3765 // -- find first object that starts in this chunk
3766 // and start scanning bitmap from that position
3767 // -- scan marked objects for oops
3768 // -- CAS-mark target, and if successful:
3769 // . if target oop is above global finger (volatile read)
3770 // nothing to do
3771 // . if target oop is in chunk and above local finger
3772 // then nothing to do
3773 // . else push on work-queue
3774 // -- Deal with possible overflow issues:
3775 // . local work-queue overflow causes stuff to be pushed on
3776 // global (common) overflow queue
3777 // . always first empty local work queue
3778 // . then get a batch of oops from global work queue if any
3779 // . then do work stealing
3780 // -- When all tasks claimed (both spaces)
3781 // and local work queue empty,
3782 // then in a loop do:
3783 // . check global overflow stack; steal a batch of oops and trace
3784 // . try to steal from other threads if GOS is empty
3785 // . if neither is available, offer termination
3786 // -- Terminate and return result
3787 //
3788 void CMSConcMarkingTask::work(int i) {
3789 elapsedTimer _timer;
3790 ResourceMark rm;
3791 HandleMark hm;
3793 DEBUG_ONLY(_collector->verify_overflow_empty();)
3795 // Before we begin work, our work queue should be empty
3796 assert(work_queue(i)->size() == 0, "Expected to be empty");
3797 // Scan the bitmap covering _cms_space, tracing through grey objects.
3798 _timer.start();
3799 do_scan_and_mark(i, _cms_space);
3800 _timer.stop();
3801 if (PrintCMSStatistics != 0) {
3802 gclog_or_tty->print_cr("Finished cms space scanning in %dth thread: %3.3f sec",
3803 i, _timer.seconds()); // XXX: need xxx/xxx type of notation, two timers
3804 }
3806 // ... do the same for the _perm_space
3807 _timer.reset();
3808 _timer.start();
3809 do_scan_and_mark(i, _perm_space);
3810 _timer.stop();
3811 if (PrintCMSStatistics != 0) {
3812 gclog_or_tty->print_cr("Finished perm space scanning in %dth thread: %3.3f sec",
3813 i, _timer.seconds()); // XXX: need xxx/xxx type of notation, two timers
3814 }
3816 // ... do work stealing
3817 _timer.reset();
3818 _timer.start();
3819 do_work_steal(i);
3820 _timer.stop();
3821 if (PrintCMSStatistics != 0) {
3822 gclog_or_tty->print_cr("Finished work stealing in %dth thread: %3.3f sec",
3823 i, _timer.seconds()); // XXX: need xxx/xxx type of notation, two timers
3824 }
3825 assert(_collector->_markStack.isEmpty(), "Should have been emptied");
3826 assert(work_queue(i)->size() == 0, "Should have been emptied");
3827 // Note that under the current task protocol, the
3828 // following assertion is true even of the spaces
3829 // expanded since the completion of the concurrent
3830 // marking. XXX This will likely change under a strict
3831 // ABORT semantics.
3832 assert(_global_finger > _cms_space->end() &&
3833 _global_finger >= _perm_space->end(),
3834 "All tasks have been completed");
3835 DEBUG_ONLY(_collector->verify_overflow_empty();)
3836 }
3838 void CMSConcMarkingTask::bump_global_finger(HeapWord* f) {
3839 HeapWord* read = _global_finger;
3840 HeapWord* cur = read;
3841 while (f > read) {
3842 cur = read;
3843 read = (HeapWord*) Atomic::cmpxchg_ptr(f, &_global_finger, cur);
3844 if (cur == read) {
3845 // our cas succeeded
3846 assert(_global_finger >= f, "protocol consistency");
3847 break;
3848 }
3849 }
3850 }
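// The same "advance if strictly greater, via CAS" idiom in stand-alone
// form, using std::atomic in place of Atomic::cmpxchg_ptr (illustrative
// only, not the HotSpot primitive):
#if 0
#include <atomic>
#include <cstdint>

static void bump_monotonic(std::atomic<std::uintptr_t>& finger,
                           std::uintptr_t f) {
  std::uintptr_t read = finger.load();
  while (f > read) {
    // On failure, compare_exchange refreshes 'read' with the current value,
    // so the loop re-tests whether our f still moves the finger forward.
    if (finger.compare_exchange_weak(read, f)) {
      break;                    // our CAS succeeded; finger is now >= f
    }
  }
}
#endif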
3852 // This is really inefficient, and should be redone by
3853 // using (not yet available) block-read and -write interfaces to the
3854 // stack and the work_queue. XXX FIX ME !!!
3855 bool CMSConcMarkingTask::get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3856 OopTaskQueue* work_q) {
3857 // Fast lock-free check
3858 if (ovflw_stk->length() == 0) {
3859 return false;
3860 }
3861 assert(work_q->size() == 0, "Shouldn't steal");
3862 MutexLockerEx ml(ovflw_stk->par_lock(),
3863 Mutex::_no_safepoint_check_flag);
3864 // Grab up to 1/4 the size of the work queue
3865 size_t num = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
3866 (size_t)ParGCDesiredObjsFromOverflowList);
3867 num = MIN2(num, ovflw_stk->length());
3868 for (int i = (int) num; i > 0; i--) {
3869 oop cur = ovflw_stk->pop();
3870 assert(cur != NULL, "Counted wrong?");
3871 work_q->push(cur);
3872 }
3873 return num > 0;
3874 }
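// Worked example of the batch size computed above (illustrative numbers,
// not defaults): with a work queue of capacity max_elems = 16384 that is
// currently empty and ParGCDesiredObjsFromOverflowList = 20, we get
// num = MIN2((16384 - 0)/4, 20) = 20, further clamped by the current
// length of the overflow stack.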
3876 void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
3877 SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
3878 int n_tasks = pst->n_tasks();
3879 // We allow that there may be no tasks to do here because
3880 // we are restarting after a stack overflow.
3881 assert(pst->valid() || n_tasks == 0, "Uninitialized use?");
3882 int nth_task = 0;
3884 HeapWord* aligned_start = sp->bottom();
3885 if (sp->used_region().contains(_restart_addr)) {
3886 // Align down to a card boundary for the start of 0th task
3887 // for this space.
3888 aligned_start =
3889 (HeapWord*)align_size_down((uintptr_t)_restart_addr,
3890 CardTableModRefBS::card_size);
3891 }
3893 size_t chunk_size = sp->marking_task_size();
3894 while (!pst->is_task_claimed(/* reference */ nth_task)) {
3895 // Having claimed the nth task in this space,
3896 // compute the chunk that it corresponds to:
3897 MemRegion span = MemRegion(aligned_start + nth_task*chunk_size,
3898 aligned_start + (nth_task+1)*chunk_size);
3899 // Try and bump the global finger via a CAS;
3900 // note that we need to do the global finger bump
3901 // _before_ taking the intersection below, because
3902 // the task corresponding to that region will be
3903 // deemed done even if the used_region() expands
3904 // because of allocation -- as it almost certainly will
3905 // during start-up while the threads yield in the
3906 // closure below.
3907 HeapWord* finger = span.end();
3908 bump_global_finger(finger); // atomically
3909 // There are null tasks here corresponding to chunks
3910 // beyond the "top" address of the space.
3911 span = span.intersection(sp->used_region());
3912 if (!span.is_empty()) { // Non-null task
3913 HeapWord* prev_obj;
3914 assert(!span.contains(_restart_addr) || nth_task == 0,
3915 "Inconsistency");
3916 if (nth_task == 0) {
3917 // For the 0th task, we'll not need to compute a block_start.
3918 if (span.contains(_restart_addr)) {
3919 // In the case of a restart because of stack overflow,
3920 // we might additionally skip a chunk prefix.
3921 prev_obj = _restart_addr;
3922 } else {
3923 prev_obj = span.start();
3924 }
3925 } else {
3926 // We want to skip the first object because
3927 // the protocol is to scan any object in its entirety
3928 // that _starts_ in this span; a fortiori, any
3929 // object starting in an earlier span is scanned
3930 // as part of an earlier claimed task.
3931 // Below we use the "careful" version of block_start
3932 // so we do not try to navigate uninitialized objects.
3933 prev_obj = sp->block_start_careful(span.start());
3934 // Below we use a variant of block_size that uses the
3935 // Printezis bits to avoid waiting for allocated
3936 // objects to become initialized/parsable.
3937 while (prev_obj < span.start()) {
3938 size_t sz = sp->block_size_no_stall(prev_obj, _collector);
3939 if (sz > 0) {
3940 prev_obj += sz;
3941 } else {
3942 // In this case we may end up doing a bit of redundant
3943 // scanning, but that appears unavoidable, short of
3944 // locking the free list locks; see bug 6324141.
3945 break;
3946 }
3947 }
3948 }
3949 if (prev_obj < span.end()) {
3950 MemRegion my_span = MemRegion(prev_obj, span.end());
3951 // Do the marking work within a non-empty span --
3952 // the last argument to the constructor indicates whether the
3953 // iteration should be incremental with periodic yields.
3954 Par_MarkFromRootsClosure cl(this, _collector, my_span,
3955 &_collector->_markBitMap,
3956 work_queue(i),
3957 &_collector->_markStack,
3958 &_collector->_revisitStack,
3959 _asynch);
3960 _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end());
3961 } // else nothing to do for this task
3962 } // else nothing to do for this task
3963 }
3964 // We'd be tempted to assert here that since there are no
3965 // more tasks left to claim in this space, the global_finger
3966 // must exceed space->top() and a fortiori space->end(). However,
3967 // that would not quite be correct because the bumping of
3968 // global_finger occurs strictly after the claiming of a task,
3969 // so by the time we reach here the global finger may not yet
3970 // have been bumped up by the thread that claimed the last
3971 // task.
3972 pst->all_tasks_completed();
3973 }
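// The chunk geometry in the claiming loop above, in isolation
// (illustrative sketch; addresses are modelled as plain integers):
#if 0
#include <algorithm>
#include <cstddef>
#include <utility>

// The [begin,end) slice of a space [bottom,top) covered by the nth task,
// clipped to the used region -- "null" tasks past 'top' come out empty.
static std::pair<std::size_t, std::size_t>
task_span(std::size_t bottom, std::size_t top,
          std::size_t chunk_size, std::size_t nth_task) {
  std::size_t begin = bottom + nth_task * chunk_size;
  std::size_t end   = bottom + (nth_task + 1) * chunk_size;
  return std::make_pair(std::min(begin, top), std::min(end, top));
}
#endif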
3975 class Par_ConcMarkingClosure: public Par_KlassRememberingOopClosure {
3976 private:
3977 MemRegion _span;
3978 CMSBitMap* _bit_map;
3979 CMSMarkStack* _overflow_stack;
3980 OopTaskQueue* _work_queue;
3981 protected:
3982 DO_OOP_WORK_DEFN
3983 public:
3984 Par_ConcMarkingClosure(CMSCollector* collector, OopTaskQueue* work_queue,
3985 CMSBitMap* bit_map, CMSMarkStack* overflow_stack,
3986 CMSMarkStack* revisit_stack):
3987 Par_KlassRememberingOopClosure(collector, NULL, revisit_stack),
3988 _span(_collector->_span),
3989 _work_queue(work_queue),
3990 _bit_map(bit_map),
3991 _overflow_stack(overflow_stack)
3992 { }
3993 virtual void do_oop(oop* p);
3994 virtual void do_oop(narrowOop* p);
3995 void trim_queue(size_t max);
3996 void handle_stack_overflow(HeapWord* lost);
3997 };
3999 // Grey object scanning during work stealing phase --
4000 // the salient assumption here is that any references
4001 // that are in these stolen objects being scanned must
4002 // already have been initialized (else they would not have
4003 // been published), so we do not need to check for
4004 // uninitialized objects before pushing here.
4005 void Par_ConcMarkingClosure::do_oop(oop obj) {
4006 assert(obj->is_oop_or_null(true), "expected an oop or NULL");
4007 HeapWord* addr = (HeapWord*)obj;
4008 // Check if oop points into the CMS generation
4009 // and is not marked
4010 if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
4011 // a white object ...
4012 // If we manage to "claim" the object, by being the
4013 // first thread to mark it, then we push it on our
4014 // marking stack
4015 if (_bit_map->par_mark(addr)) { // ... now grey
4016 // push on work queue (grey set)
4017 bool simulate_overflow = false;
4018 NOT_PRODUCT(
4019 if (CMSMarkStackOverflowALot &&
4020 _collector->simulate_overflow()) {
4021 // simulate a stack overflow
4022 simulate_overflow = true;
4023 }
4024 )
4025 if (simulate_overflow ||
4026 !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
4027 // stack overflow
4028 if (PrintCMSStatistics != 0) {
4029 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
4030 SIZE_FORMAT, _overflow_stack->capacity());
4031 }
4032 // We cannot assert that the overflow stack is full because
4033 // it may have been emptied since.
4034 assert(simulate_overflow ||
4035 _work_queue->size() == _work_queue->max_elems(),
4036 "Else push should have succeeded");
4037 handle_stack_overflow(addr);
4038 }
4039 } // Else, some other thread got there first
4040 }
4041 }
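// The push protocol above in isolation: try the bounded local queue first,
// fall back to the shared, lock-protected overflow stack, and report
// failure only if both are full, mirroring
// "_work_queue->push(obj) || _overflow_stack->par_push(obj)" above.
// A sketch with stand-in containers (not the HotSpot task-queue API):
#if 0
#include <cstddef>
#include <deque>
#include <mutex>
#include <vector>

template <typename T>
bool push_with_overflow(std::vector<T>& local, std::size_t local_cap,
                        std::deque<T>& shared, std::size_t shared_cap,
                        std::mutex& shared_lock, const T& v) {
  if (local.size() < local_cap) {
    local.push_back(v);         // common case: lock-free local push
    return true;
  }
  std::lock_guard<std::mutex> g(shared_lock);
  if (shared.size() < shared_cap) {
    shared.push_back(v);        // overflow to the shared stack
    return true;
  }
  return false;                 // both full: caller runs its overflow handler
}
#endif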
4043 void Par_ConcMarkingClosure::do_oop(oop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
4044 void Par_ConcMarkingClosure::do_oop(narrowOop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
4046 void Par_ConcMarkingClosure::trim_queue(size_t max) {
4047 while (_work_queue->size() > max) {
4048 oop new_oop;
4049 if (_work_queue->pop_local(new_oop)) {
4050 assert(new_oop->is_oop(), "Should be an oop");
4051 assert(_bit_map->isMarked((HeapWord*)new_oop), "Grey object");
4052 assert(_span.contains((HeapWord*)new_oop), "Not in span");
4053 assert(new_oop->is_parsable(), "Should be parsable");
4054 new_oop->oop_iterate(this); // do_oop() above
4055 }
4056 }
4057 }
4059 // Upon stack overflow, we discard (part of) the stack,
4060 // remembering the least address amongst those discarded
4061 // in CMSCollector's _restart_addr.
4062 void Par_ConcMarkingClosure::handle_stack_overflow(HeapWord* lost) {
4063 // We need to do this under a mutex to prevent other
4064 // workers from interfering with the work done below.
4065 MutexLockerEx ml(_overflow_stack->par_lock(),
4066 Mutex::_no_safepoint_check_flag);
4067 // Remember the least grey address discarded
4068 HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
4069 _collector->lower_restart_addr(ra);
4070 _overflow_stack->reset(); // discard stack contents
4071 _overflow_stack->expand(); // expand the stack if possible
4072 }
4075 void CMSConcMarkingTask::do_work_steal(int i) {
4076 OopTaskQueue* work_q = work_queue(i);
4077 oop obj_to_scan;
4078 CMSBitMap* bm = &(_collector->_markBitMap);
4079 CMSMarkStack* ovflw = &(_collector->_markStack);
4080 CMSMarkStack* revisit = &(_collector->_revisitStack);
4081 int* seed = _collector->hash_seed(i);
4082 Par_ConcMarkingClosure cl(_collector, work_q, bm, ovflw, revisit);
4083 while (true) {
4084 cl.trim_queue(0);
4085 assert(work_q->size() == 0, "Should have been emptied above");
4086 if (get_work_from_overflow_stack(ovflw, work_q)) {
4087 // Can't assert below because the work obtained from the
4088 // overflow stack may already have been stolen from us.
4089 // assert(work_q->size() > 0, "Work from overflow stack");
4090 continue;
4091 } else if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
4092 assert(obj_to_scan->is_oop(), "Should be an oop");
4093 assert(bm->isMarked((HeapWord*)obj_to_scan), "Grey object");
4094 obj_to_scan->oop_iterate(&cl);
4095 } else if (terminator()->offer_termination()) {
4096 assert(work_q->size() == 0, "Impossible!");
4097 break;
4098 }
4099 }
4100 }
4102 // This is run by the CMS (coordinator) thread.
4103 void CMSConcMarkingTask::coordinator_yield() {
4104 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
4105 "CMS thread should hold CMS token");
4107 DEBUG_ONLY(RememberKlassesChecker mux(false);)
4108 // First give up the locks, then yield, then re-lock
4109 // We should probably use a constructor/destructor idiom to
4110 // do this unlock/lock or modify the MutexUnlocker class to
4111 // serve our purpose. XXX
4112 assert_lock_strong(_bit_map_lock);
4113 _bit_map_lock->unlock();
4114 ConcurrentMarkSweepThread::desynchronize(true);
4115 ConcurrentMarkSweepThread::acknowledge_yield_request();
4116 _collector->stopTimer();
4117 if (PrintCMSStatistics != 0) {
4118 _collector->incrementYields();
4119 }
4120 _collector->icms_wait();
4122 // It is possible for whichever thread initiated the yield request
4123 // not to get a chance to wake up and take the bitmap lock between
4124 // this thread releasing it and reacquiring it. So, while the
4125 // should_yield() flag is on, let's sleep for a bit to give the
4126 // other thread a chance to wake up. The limit imposed on the number
4127 // of iterations is defensive, to avoid any unforeseen circumstances
4128 // putting us into an infinite loop. Since it's always been this
4129 // (coordinator_yield()) method that was observed to cause the
4130 // problem, we are using a parameter (CMSCoordinatorYieldSleepCount)
4131 // which is by default non-zero. For the other seven methods that
4132 // also perform the yield operation, we are using a different
4133 // parameter (CMSYieldSleepCount), which is by default zero. This way we
4134 // can enable the sleeping for those methods too, if necessary.
4135 // See 6442774.
4136 //
4137 // We really need to reconsider the synchronization between the GC
4138 // thread and the yield-requesting threads in the future and we
4139 // should really use wait/notify, which is the recommended
4140 // way of doing this type of interaction. Additionally, we should
4141 // consolidate the eight methods that do the yield operation, which
4142 // are almost identical, into one for better maintainability and
4143 // readability. See 6445193.
4144 //
4145 // Tony 2006.06.29
4146 for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
4147 ConcurrentMarkSweepThread::should_yield() &&
4148 !CMSCollector::foregroundGCIsActive(); ++i) {
4149 os::sleep(Thread::current(), 1, false);
4150 ConcurrentMarkSweepThread::acknowledge_yield_request();
4151 }
4153 ConcurrentMarkSweepThread::synchronize(true);
4154 _bit_map_lock->lock_without_safepoint_check();
4155 _collector->startTimer();
4156 }
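// For reference, the wait/notify structure alluded to in the comment
// inside coordinator_yield() might look like the following
// condition-variable sketch (purely illustrative; no such code exists in
// this file, and all names here are hypothetical):
#if 0
#include <condition_variable>
#include <mutex>

struct YieldPoint {
  std::mutex mu;
  std::condition_variable cv;
  bool yield_requested = false;

  void request_yield() {         // called by the yield-requesting thread
    std::lock_guard<std::mutex> g(mu);
    yield_requested = true;
  }
  void ack_and_wait() {          // called by the GC thread: block, don't spin
    std::unique_lock<std::mutex> g(mu);
    cv.wait(g, [this]{ return !yield_requested; });
  }
  void release() {               // requesting thread is done with its work
    { std::lock_guard<std::mutex> g(mu); yield_requested = false; }
    cv.notify_all();
  }
};
#endif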
4158 bool CMSCollector::do_marking_mt(bool asynch) {
4159 assert(ParallelCMSThreads > 0 && conc_workers() != NULL, "precondition");
4160 // In the future this would be determined ergonomically, based
4161 // on #cpu's, # active mutator threads (and load), and mutation rate.
4162 int num_workers = ParallelCMSThreads;
4164 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
4165 CompactibleFreeListSpace* perm_space = _permGen->cmsSpace();
4167 CMSConcMarkingTask tsk(this, cms_space, perm_space,
4168 asynch, num_workers /* number requested XXX */,
4169 conc_workers(), task_queues());
4171 // Since the actual number of workers we get may be different
4172 // from the number we requested above, do we need to do anything different
4173 // below? In particular, maybe we need to subclass the SequentialSubTasksDone
4174 // class?? XXX
4175 cms_space ->initialize_sequential_subtasks_for_marking(num_workers);
4176 perm_space->initialize_sequential_subtasks_for_marking(num_workers);
4178 // Refs discovery is already non-atomic.
4179 assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic");
4180 // Mutate the Refs discovery so it is MT during the
4181 // multi-threaded marking phase.
4182 ReferenceProcessorMTMutator mt(ref_processor(), num_workers > 1);
4184 DEBUG_ONLY(RememberKlassesChecker cmx(CMSClassUnloadingEnabled);)
4186 conc_workers()->start_task(&tsk);
4187 while (tsk.yielded()) {
4188 tsk.coordinator_yield();
4189 conc_workers()->continue_task(&tsk);
4190 }
4191 // If the task was aborted, _restart_addr will be non-NULL
4192 assert(tsk.completed() || _restart_addr != NULL, "Inconsistency");
4193 while (_restart_addr != NULL) {
4194 // XXX For now we do not make use of ABORTED state and have not
4195 // yet implemented the right abort semantics (even in the original
4196 // single-threaded CMS case). That needs some more investigation
4197 // and is deferred for now; see CR# TBF. 07252005YSR. XXX
4198 assert(!CMSAbortSemantics || tsk.aborted(), "Inconsistency");
4199 // If _restart_addr is non-NULL, a marking stack overflow
4200 // occurred; we need to do a fresh marking iteration from the
4201 // indicated restart address.
4202 if (_foregroundGCIsActive && asynch) {
4203 // We may be running into repeated stack overflows, having
4204 // reached the limit of the stack size, while making very
4205 // slow forward progress. It may be best to bail out and
4206 // let the foreground collector do its job.
4207 // Clear _restart_addr, so that foreground GC
4208 // works from scratch. This avoids the headache of
4209 // a "rescan" which would otherwise be needed because
4210 // of the dirty mod union table & card table.
4211 _restart_addr = NULL;
4212 return false;
4213 }
4214 // Adjust the task to restart from _restart_addr
4215 tsk.reset(_restart_addr);
4216 cms_space ->initialize_sequential_subtasks_for_marking(num_workers,
4217 _restart_addr);
4218 perm_space->initialize_sequential_subtasks_for_marking(num_workers,
4219 _restart_addr);
4220 _restart_addr = NULL;
4221 // Get the workers going again
4222 conc_workers()->start_task(&tsk);
4223 while (tsk.yielded()) {
4224 tsk.coordinator_yield();
4225 conc_workers()->continue_task(&tsk);
4226 }
4227 }
4228 assert(tsk.completed(), "Inconsistency");
4229 assert(tsk.result() == true, "Inconsistency");
4230 return true;
4231 }
4233 bool CMSCollector::do_marking_st(bool asynch) {
4234 ResourceMark rm;
4235 HandleMark hm;
4237 MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap,
4238 &_markStack, &_revisitStack, CMSYield && asynch);
4239 // the last argument to iterate indicates whether the iteration
4240 // should be incremental with periodic yields.
4241 _markBitMap.iterate(&markFromRootsClosure);
4242 // If _restart_addr is non-NULL, a marking stack overflow
4243 // occurred; we need to do a fresh iteration from the
4244 // indicated restart address.
4245 while (_restart_addr != NULL) {
4246 if (_foregroundGCIsActive && asynch) {
4247 // We may be running into repeated stack overflows, having
4248 // reached the limit of the stack size, while making very
4249 // slow forward progress. It may be best to bail out and
4250 // let the foreground collector do its job.
4251 // Clear _restart_addr, so that foreground GC
4252 // works from scratch. This avoids the headache of
4253 // a "rescan" which would otherwise be needed because
4254 // of the dirty mod union table & card table.
4255 _restart_addr = NULL;
4256 return false; // indicating failure to complete marking
4257 }
4258 // Deal with stack overflow:
4259 // we restart marking from _restart_addr
4260 HeapWord* ra = _restart_addr;
4261 markFromRootsClosure.reset(ra);
4262 _restart_addr = NULL;
4263 _markBitMap.iterate(&markFromRootsClosure, ra, _span.end());
4264 }
4265 return true;
4266 }
4268 void CMSCollector::preclean() {
4269 check_correct_thread_executing();
4270 assert(Thread::current()->is_ConcurrentGC_thread(), "Wrong thread");
4271 verify_work_stacks_empty();
4272 verify_overflow_empty();
4273 _abort_preclean = false;
4274 if (CMSPrecleaningEnabled) {
4275 _eden_chunk_index = 0;
4276 size_t used = get_eden_used();
4277 size_t capacity = get_eden_capacity();
4278 // Don't start sampling unless we will get sufficiently
4279 // many samples.
4280 if (used < (capacity/(CMSScheduleRemarkSamplingRatio * 100)
4281 * CMSScheduleRemarkEdenPenetration)) {
4282 _start_sampling = true;
4283 } else {
4284 _start_sampling = false;
4285 }
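4285 // Worked example of the sampling threshold above (illustrative values):
4285 // with CMSScheduleRemarkSamplingRatio = 5 and
4285 // CMSScheduleRemarkEdenPenetration = 50, sampling starts only while
4285 // used < capacity/(5*100)*50, i.e. while Eden is under 10% full --
4285 // any later and too few samples would accumulate before the remark
4285 // is scheduled.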
4286 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
4287 CMSPhaseAccounting pa(this, "preclean", !PrintGCDetails);
4288 preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
4289 }
4290 CMSTokenSync x(true); // is cms thread
4291 if (CMSPrecleaningEnabled) {
4292 sample_eden();
4293 _collectorState = AbortablePreclean;
4294 } else {
4295 _collectorState = FinalMarking;
4296 }
4297 verify_work_stacks_empty();
4298 verify_overflow_empty();
4299 }
4301 // Try and schedule the remark such that young gen
4302 // occupancy is CMSScheduleRemarkEdenPenetration %.
4303 void CMSCollector::abortable_preclean() {
4304 check_correct_thread_executing();
4305 assert(CMSPrecleaningEnabled, "Inconsistent control state");
4306 assert(_collectorState == AbortablePreclean, "Inconsistent control state");
4308 // If Eden's current occupancy is below this threshold,
4309 // immediately schedule the remark; else preclean
4310 // past the next scavenge in an effort to
4311 // schedule the pause as described above. By choosing
4312 // CMSScheduleRemarkEdenSizeThreshold >= max eden size
4313 // we will never do an actual abortable preclean cycle.
4314 if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
4315 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
4316 CMSPhaseAccounting pa(this, "abortable-preclean", !PrintGCDetails);
4317 // We need more smarts in the abortable preclean
4318 // loop below to deal with cases where allocation
4319 // in young gen is very very slow, and our precleaning
4320 // is running a losing race against a horde of
4321 // mutators intent on flooding us with CMS updates
4322 // (dirty cards).
4323 // One, admittedly dumb, strategy is to give up
4324 // after a certain number of abortable precleaning loops
4325 // or after a certain maximum time. We want to make
4326 // this smarter in the next iteration.
4327 // XXX FIX ME!!! YSR
4328 size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0;
4329 while (!(should_abort_preclean() ||
4330 ConcurrentMarkSweepThread::should_terminate())) {
4331 workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2);
4332 cumworkdone += workdone;
4333 loops++;
4334 // Voluntarily terminate abortable preclean phase if we have
4335 // been at it for too long.
4336 if ((CMSMaxAbortablePrecleanLoops != 0) &&
4337 loops >= CMSMaxAbortablePrecleanLoops) {
4338 if (PrintGCDetails) {
4339 gclog_or_tty->print(" CMS: abort preclean due to loops ");
4340 }
4341 break;
4342 }
4343 if (pa.wallclock_millis() > CMSMaxAbortablePrecleanTime) {
4344 if (PrintGCDetails) {
4345 gclog_or_tty->print(" CMS: abort preclean due to time ");
4346 }
4347 break;
4348 }
4349 // If we are doing little work each iteration, we should
4350 // take a short break.
4351 if (workdone < CMSAbortablePrecleanMinWorkPerIteration) {
4352 // Sleep for some time, waiting for work to accumulate
4353 stopTimer();
4354 cmsThread()->wait_on_cms_lock(CMSAbortablePrecleanWaitMillis);
4355 startTimer();
4356 waited++;
4357 }
4358 }
4359 if (PrintCMSStatistics > 0) {
4360       gclog_or_tty->print(" ["SIZE_FORMAT" iterations, "SIZE_FORMAT" waits, "SIZE_FORMAT" cards] ",
4361                           loops, waited, cumworkdone);
4362 }
4363 }
4364 CMSTokenSync x(true); // is cms thread
4365 if (_collectorState != Idling) {
4366 assert(_collectorState == AbortablePreclean,
4367 "Spontaneous state transition?");
4368 _collectorState = FinalMarking;
4369 } // Else, a foreground collection completed this CMS cycle.
4370 return;
4371 }
4373 // Respond to an Eden sampling opportunity
4374 void CMSCollector::sample_eden() {
4375 // Make sure a young gc cannot sneak in between our
4376 // reading and recording of a sample.
4377 assert(Thread::current()->is_ConcurrentGC_thread(),
4378 "Only the cms thread may collect Eden samples");
4379 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
4380 "Should collect samples while holding CMS token");
4381 if (!_start_sampling) {
4382 return;
4383 }
4384 if (_eden_chunk_array) {
4385 if (_eden_chunk_index < _eden_chunk_capacity) {
4386 _eden_chunk_array[_eden_chunk_index] = *_top_addr; // take sample
4387 assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
4388 "Unexpected state of Eden");
4389 // We'd like to check that what we just sampled is an oop-start address;
4390 // however, we cannot do that here since the object may not yet have been
4391 // initialized. So we'll instead do the check when we _use_ this sample
4392 // later.
4393 if (_eden_chunk_index == 0 ||
4394 (pointer_delta(_eden_chunk_array[_eden_chunk_index],
4395 _eden_chunk_array[_eden_chunk_index-1])
4396 >= CMSSamplingGrain)) {
4397 _eden_chunk_index++; // commit sample
4398 }
4399 }
4400 }
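  // Illustrative arithmetic for the abort check below (flag value
  // assumed for the example): with eden capacity 64M and
  // CMSScheduleRemarkEdenPenetration = 50, we set _abort_preclean
  // once eden occupancy exceeds 64M/100 * 50 = 32M, steering the
  // remark pause toward the desired eden penetration.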
4401 if ((_collectorState == AbortablePreclean) && !_abort_preclean) {
4402 size_t used = get_eden_used();
4403 size_t capacity = get_eden_capacity();
4404 assert(used <= capacity, "Unexpected state of Eden");
4405 if (used > (capacity/100 * CMSScheduleRemarkEdenPenetration)) {
4406 _abort_preclean = true;
4407 }
4408 }
4409 }
4412 size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
4413 assert(_collectorState == Precleaning ||
4414 _collectorState == AbortablePreclean, "incorrect state");
4415 ResourceMark rm;
4416 HandleMark hm;
4417 // Do one pass of scrubbing the discovered reference lists
4418 // to remove any reference objects with strongly-reachable
4419 // referents.
4420 if (clean_refs) {
4421 ReferenceProcessor* rp = ref_processor();
4422 CMSPrecleanRefsYieldClosure yield_cl(this);
4423 assert(rp->span().equals(_span), "Spans should be equal");
4424 CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
4425 &_markStack, &_revisitStack,
4426 true /* preclean */);
4427 CMSDrainMarkingStackClosure complete_trace(this,
4428 _span, &_markBitMap, &_markStack,
4429 &keep_alive, true /* preclean */);
4431 // We don't want this step to interfere with a young
4432 // collection because we don't want to take CPU
4433 // or memory bandwidth away from the young GC threads
4434 // (which may be as many as there are CPUs).
4435 // Note that we don't need to protect ourselves from
4436 // interference with mutators because they can't
4437 // manipulate the discovered reference lists nor affect
4438 // the computed reachability of the referents, the
4439 // only properties manipulated by the precleaning
4440 // of these reference lists.
4441 stopTimer();
4442 CMSTokenSyncWithLocks x(true /* is cms thread */,
4443 bitMapLock());
4444 startTimer();
4445 sample_eden();
4447 // The following will yield to allow foreground
4448 // collection to proceed promptly. XXX YSR:
4449 // The code in this method may need further
4450 // tweaking for better performance and some restructuring
4451 // for cleaner interfaces.
4452 rp->preclean_discovered_references(
4453 rp->is_alive_non_header(), &keep_alive, &complete_trace,
4454 &yield_cl);
4455 }
4457 if (clean_survivor) { // preclean the active survivor space(s)
4458 assert(_young_gen->kind() == Generation::DefNew ||
4459 _young_gen->kind() == Generation::ParNew ||
4460 _young_gen->kind() == Generation::ASParNew,
4461 "incorrect type for cast");
4462 DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
4463 PushAndMarkClosure pam_cl(this, _span, ref_processor(),
4464 &_markBitMap, &_modUnionTable,
4465 &_markStack, &_revisitStack,
4466 true /* precleaning phase */);
4467 stopTimer();
4468 CMSTokenSyncWithLocks ts(true /* is cms thread */,
4469 bitMapLock());
4470 startTimer();
4471 unsigned int before_count =
4472 GenCollectedHeap::heap()->total_collections();
4473 SurvivorSpacePrecleanClosure
4474 sss_cl(this, _span, &_markBitMap, &_markStack,
4475 &pam_cl, before_count, CMSYield);
4476 DEBUG_ONLY(RememberKlassesChecker mx(CMSClassUnloadingEnabled);)
4477 dng->from()->object_iterate_careful(&sss_cl);
4478 dng->to()->object_iterate_careful(&sss_cl);
4479 }
4480 MarkRefsIntoAndScanClosure
4481 mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
4482 &_markStack, &_revisitStack, this, CMSYield,
4483 true /* precleaning phase */);
4484 // CAUTION: The following closure has persistent state that may need to
4485 // be reset upon a decrease in the sequence of addresses it
4486 // processes.
4487 ScanMarkedObjectsAgainCarefullyClosure
4488 smoac_cl(this, _span,
4489 &_markBitMap, &_markStack, &_revisitStack, &mrias_cl, CMSYield);
4491 // Preclean dirty cards in ModUnionTable and CardTable using
4492 // appropriate convergence criterion;
4493 // repeat CMSPrecleanIter times unless we find that
4494 // we are losing.
4495 assert(CMSPrecleanIter < 10, "CMSPrecleanIter is too large");
4496 assert(CMSPrecleanNumerator < CMSPrecleanDenominator,
4497 "Bad convergence multiplier");
4498 assert(CMSPrecleanThreshold >= 100,
4499 "Unreasonably low CMSPrecleanThreshold");
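  // Illustrative arithmetic for the convergence test in the loop below
  // (flag values assumed for the example): with CMSPrecleanNumerator = 2
  // and CMSPrecleanDenominator = 3, we stop iterating as soon as
  // curNumCards * 3 > lastNumCards * 2, i.e. as soon as an iteration
  // fails to shrink the dirty card count below 2/3 of the previous
  // iteration's count.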
4501 size_t numIter, cumNumCards, lastNumCards, curNumCards;
4502 for (numIter = 0, cumNumCards = lastNumCards = curNumCards = 0;
4503 numIter < CMSPrecleanIter;
4504 numIter++, lastNumCards = curNumCards, cumNumCards += curNumCards) {
4505 curNumCards = preclean_mod_union_table(_cmsGen, &smoac_cl);
4506 if (CMSPermGenPrecleaningEnabled) {
4507 curNumCards += preclean_mod_union_table(_permGen, &smoac_cl);
4508 }
4509 if (Verbose && PrintGCDetails) {
4510       gclog_or_tty->print(" (modUnionTable: "SIZE_FORMAT" cards)", curNumCards);
4511 }
4512 // Either there are very few dirty cards, so re-mark
4513 // pause will be small anyway, or our pre-cleaning isn't
4514 // that much faster than the rate at which cards are being
4515 // dirtied, so we might as well stop and re-mark since
4516 // precleaning won't improve our re-mark time by much.
4517 if (curNumCards <= CMSPrecleanThreshold ||
4518 (numIter > 0 &&
4519 (curNumCards * CMSPrecleanDenominator >
4520 lastNumCards * CMSPrecleanNumerator))) {
4521 numIter++;
4522 cumNumCards += curNumCards;
4523 break;
4524 }
4525 }
4526 curNumCards = preclean_card_table(_cmsGen, &smoac_cl);
4527 if (CMSPermGenPrecleaningEnabled) {
4528 curNumCards += preclean_card_table(_permGen, &smoac_cl);
4529 }
4530 cumNumCards += curNumCards;
4531 if (PrintGCDetails && PrintCMSStatistics != 0) {
4532     gclog_or_tty->print_cr(" (cardTable: "SIZE_FORMAT" cards, re-scanned "SIZE_FORMAT" cards, "SIZE_FORMAT" iterations)",
4533                    curNumCards, cumNumCards, numIter);
4534 }
4535 return cumNumCards; // as a measure of useful work done
4536 }
4538 // PRECLEANING NOTES:
4539 // Precleaning involves:
4540 // . reading the bits of the modUnionTable and clearing the set bits.
4541 // . For the cards corresponding to the set bits, we scan the
4542 // objects on those cards. This means we need the free_list_lock
4543 // so that we can safely iterate over the CMS space when scanning
4544 // for oops.
4545 // . When we scan the objects, we'll be both reading and setting
4546 // marks in the marking bit map, so we'll need the marking bit map.
4547 // . For protecting _collector_state transitions, we take the CGC_lock.
4548 // Note that any races in the reading of card table entries by the
4549 // CMS thread on the one hand and the clearing of those entries by the
4550 // VM thread or the setting of those entries by the mutator threads on the
4551 // other are quite benign. However, for efficiency it makes sense to keep
4552 // the VM thread from racing with the CMS thread while the latter is
4553 // transferring dirty card info to the modUnionTable. We therefore also use the
4554 // CGC_lock to protect the reading of the card table and the mod union
4555 // table by the CMS thread.
4556 // . We run concurrently with mutator updates, so scanning
4557 // needs to be done carefully -- we should not try to scan
4558 // potentially uninitialized objects.
4559 //
4560 // Locking strategy: While holding the CGC_lock, we scan over and
4561 // reset a maximal dirty range of the mod union / card tables, then lock
4562 // the free_list_lock and bitmap lock to do a full marking, then
4563 // release these locks; and repeat the cycle. This allows for a
4564 // certain amount of fairness in the sharing of these locks between
4565 // the CMS collector on the one hand, and the VM thread and the
4566 // mutators on the other.
4568 // NOTE: preclean_mod_union_table() and preclean_card_table()
4569 // further below are largely identical; if you need to modify
4570 // one of these methods, please check the other method too.
4572 size_t CMSCollector::preclean_mod_union_table(
4573 ConcurrentMarkSweepGeneration* gen,
4574 ScanMarkedObjectsAgainCarefullyClosure* cl) {
4575 verify_work_stacks_empty();
4576 verify_overflow_empty();
4578 // Turn off checking for this method but turn it back on
4579 // selectively. There are yield points in this method
4580 // but it is difficult to turn the checking off just around
4581 // the yield points. It is simpler to selectively turn
4582 // it on.
4583 DEBUG_ONLY(RememberKlassesChecker mux(false);)
4585 // strategy: starting with the first card, accumulate contiguous
4586 // ranges of dirty cards; clear these cards, then scan the region
4587 // covered by these cards.
4589 // Since all of the MUT is committed ahead, we can just use
4590 // that, in case the generations expand while we are precleaning.
4591 // It might also be fine to just use the committed part of the
4592 // generation, but we might potentially miss cards when the
4593 // generation is rapidly expanding while we are in the midst
4594 // of precleaning.
4595 HeapWord* startAddr = gen->reserved().start();
4596 HeapWord* endAddr = gen->reserved().end();
4598 cl->setFreelistLock(gen->freelistLock()); // needed for yielding
4600 size_t numDirtyCards, cumNumDirtyCards;
4601 HeapWord *nextAddr, *lastAddr;
4602 for (cumNumDirtyCards = numDirtyCards = 0,
4603 nextAddr = lastAddr = startAddr;
4604 nextAddr < endAddr;
4605 nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
4607 ResourceMark rm;
4608 HandleMark hm;
4610 MemRegion dirtyRegion;
4611 {
4612 stopTimer();
4613 // Potential yield point
4614 CMSTokenSync ts(true);
4615 startTimer();
4616 sample_eden();
4617       // Get dirty region starting at nextAddr (inclusive),
4618 // simultaneously clearing it.
4619 dirtyRegion =
4620 _modUnionTable.getAndClearMarkedRegion(nextAddr, endAddr);
4621 assert(dirtyRegion.start() >= nextAddr,
4622 "returned region inconsistent?");
4623 }
4624 // Remember where the next search should begin.
4625 // The returned region (if non-empty) is a right open interval,
4626     // so lastAddr is obtained from the right end of that
4627 // interval.
4628 lastAddr = dirtyRegion.end();
4629 // Should do something more transparent and less hacky XXX
4630 numDirtyCards =
4631 _modUnionTable.heapWordDiffToOffsetDiff(dirtyRegion.word_size());
4633 // We'll scan the cards in the dirty region (with periodic
4634 // yields for foreground GC as needed).
4635 if (!dirtyRegion.is_empty()) {
4636 assert(numDirtyCards > 0, "consistency check");
4637 HeapWord* stop_point = NULL;
4638 stopTimer();
4639 // Potential yield point
4640 CMSTokenSyncWithLocks ts(true, gen->freelistLock(),
4641 bitMapLock());
4642 startTimer();
4643 {
4644 verify_work_stacks_empty();
4645 verify_overflow_empty();
4646 sample_eden();
4647 DEBUG_ONLY(RememberKlassesChecker mx(CMSClassUnloadingEnabled);)
4648 stop_point =
4649 gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4650 }
4651 if (stop_point != NULL) {
4652 // The careful iteration stopped early either because it found an
4653 // uninitialized object, or because we were in the midst of an
4654 // "abortable preclean", which should now be aborted. Redirty
4655 // the bits corresponding to the partially-scanned or unscanned
4656 // cards. We'll either restart at the next block boundary or
4657 // abort the preclean.
4658 assert((CMSPermGenPrecleaningEnabled && (gen == _permGen)) ||
4659 (_collectorState == AbortablePreclean && should_abort_preclean()),
4660 "Unparsable objects should only be in perm gen.");
4661 _modUnionTable.mark_range(MemRegion(stop_point, dirtyRegion.end()));
4662 if (should_abort_preclean()) {
4663 break; // out of preclean loop
4664 } else {
4665 // Compute the next address at which preclean should pick up;
4666 // might need bitMapLock in order to read P-bits.
4667 lastAddr = next_card_start_after_block(stop_point);
4668 }
4669 }
4670 } else {
4671 assert(lastAddr == endAddr, "consistency check");
4672 assert(numDirtyCards == 0, "consistency check");
4673 break;
4674 }
4675 }
4676 verify_work_stacks_empty();
4677 verify_overflow_empty();
4678 return cumNumDirtyCards;
4679 }
4681 // NOTE: preclean_mod_union_table() above and preclean_card_table()
4682 // below are largely identical; if you need to modify
4683 // one of these methods, please check the other method too.
4685 size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* gen,
4686 ScanMarkedObjectsAgainCarefullyClosure* cl) {
4687   // strategy: it's similar to preclean_mod_union_table above, in that
4688 // we accumulate contiguous ranges of dirty cards, mark these cards
4689 // precleaned, then scan the region covered by these cards.
4690 HeapWord* endAddr = (HeapWord*)(gen->_virtual_space.high());
4691 HeapWord* startAddr = (HeapWord*)(gen->_virtual_space.low());
4693 cl->setFreelistLock(gen->freelistLock()); // needed for yielding
4695 size_t numDirtyCards, cumNumDirtyCards;
4696 HeapWord *lastAddr, *nextAddr;
4698 for (cumNumDirtyCards = numDirtyCards = 0,
4699 nextAddr = lastAddr = startAddr;
4700 nextAddr < endAddr;
4701 nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
4703 ResourceMark rm;
4704 HandleMark hm;
4706 MemRegion dirtyRegion;
4707 {
4708 // See comments in "Precleaning notes" above on why we
4709 // do this locking. XXX Could the locking overheads be
4710 // too high when dirty cards are sparse? [I don't think so.]
4711 stopTimer();
4712 CMSTokenSync x(true); // is cms thread
4713 startTimer();
4714 sample_eden();
4715 // Get and clear dirty region from card table
4716 dirtyRegion = _ct->ct_bs()->dirty_card_range_after_reset(
4717 MemRegion(nextAddr, endAddr),
4718 true,
4719 CardTableModRefBS::precleaned_card_val());
4721 assert(dirtyRegion.start() >= nextAddr,
4722 "returned region inconsistent?");
4723 }
4724 lastAddr = dirtyRegion.end();
4725 numDirtyCards =
4726 dirtyRegion.word_size()/CardTableModRefBS::card_size_in_words;
4728 if (!dirtyRegion.is_empty()) {
4729 stopTimer();
4730 CMSTokenSyncWithLocks ts(true, gen->freelistLock(), bitMapLock());
4731 startTimer();
4732 sample_eden();
4733 verify_work_stacks_empty();
4734 verify_overflow_empty();
4735 DEBUG_ONLY(RememberKlassesChecker mx(CMSClassUnloadingEnabled);)
4736 HeapWord* stop_point =
4737 gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4738 if (stop_point != NULL) {
4739 // The careful iteration stopped early because it found an
4740 // uninitialized object. Redirty the bits corresponding to the
4741 // partially-scanned or unscanned cards, and start again at the
4742 // next block boundary.
4743 assert(CMSPermGenPrecleaningEnabled ||
4744 (_collectorState == AbortablePreclean && should_abort_preclean()),
4745 "Unparsable objects should only be in perm gen.");
4746 _ct->ct_bs()->invalidate(MemRegion(stop_point, dirtyRegion.end()));
4747 if (should_abort_preclean()) {
4748 break; // out of preclean loop
4749 } else {
4750 // Compute the next address at which preclean should pick up.
4751 lastAddr = next_card_start_after_block(stop_point);
4752 }
4753 }
4754 } else {
4755 break;
4756 }
4757 }
4758 verify_work_stacks_empty();
4759 verify_overflow_empty();
4760 return cumNumDirtyCards;
4761 }
4763 void CMSCollector::checkpointRootsFinal(bool asynch,
4764 bool clear_all_soft_refs, bool init_mark_was_synchronous) {
4765 assert(_collectorState == FinalMarking, "incorrect state transition?");
4766 check_correct_thread_executing();
4767 // world is stopped at this checkpoint
4768 assert(SafepointSynchronize::is_at_safepoint(),
4769 "world should be stopped");
4770 verify_work_stacks_empty();
4771 verify_overflow_empty();
4773 SpecializationStats::clear();
4774 if (PrintGCDetails) {
4775 gclog_or_tty->print("[YG occupancy: "SIZE_FORMAT" K ("SIZE_FORMAT" K)]",
4776 _young_gen->used() / K,
4777 _young_gen->capacity() / K);
4778 }
4779 if (asynch) {
4780 if (CMSScavengeBeforeRemark) {
4781 GenCollectedHeap* gch = GenCollectedHeap::heap();
4782       // Temporarily set flag to false; GCH->do_collection
4783       // expects it to be false and will set it to true.
4784 FlagSetting fl(gch->_is_gc_active, false);
4785 NOT_PRODUCT(TraceTime t("Scavenge-Before-Remark",
4786 PrintGCDetails && Verbose, true, gclog_or_tty);)
4787 int level = _cmsGen->level() - 1;
4788 if (level >= 0) {
4789 gch->do_collection(true, // full (i.e. force, see below)
4790 false, // !clear_all_soft_refs
4791 0, // size
4792 false, // is_tlab
4793 level // max_level
4794 );
4795 }
4796 }
4797 FreelistLocker x(this);
4798 MutexLockerEx y(bitMapLock(),
4799 Mutex::_no_safepoint_check_flag);
4800 assert(!init_mark_was_synchronous, "but that's impossible!");
4801 checkpointRootsFinalWork(asynch, clear_all_soft_refs, false);
4802 } else {
4803 // already have all the locks
4804 checkpointRootsFinalWork(asynch, clear_all_soft_refs,
4805 init_mark_was_synchronous);
4806 }
4807 verify_work_stacks_empty();
4808 verify_overflow_empty();
4809 SpecializationStats::print();
4810 }
4812 void CMSCollector::checkpointRootsFinalWork(bool asynch,
4813 bool clear_all_soft_refs, bool init_mark_was_synchronous) {
4815 NOT_PRODUCT(TraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, gclog_or_tty);)
4817 assert(haveFreelistLocks(), "must have free list locks");
4818 assert_lock_strong(bitMapLock());
4820 if (UseAdaptiveSizePolicy) {
4821 size_policy()->checkpoint_roots_final_begin();
4822 }
4824 ResourceMark rm;
4825 HandleMark hm;
4827 GenCollectedHeap* gch = GenCollectedHeap::heap();
4829 if (should_unload_classes()) {
4830 CodeCache::gc_prologue();
4831 }
4832 assert(haveFreelistLocks(), "must have free list locks");
4833 assert_lock_strong(bitMapLock());
4835 DEBUG_ONLY(RememberKlassesChecker fmx(CMSClassUnloadingEnabled);)
4836 if (!init_mark_was_synchronous) {
4837 // We might assume that we need not fill TLAB's when
4838 // CMSScavengeBeforeRemark is set, because we may have just done
4839 // a scavenge which would have filled all TLAB's -- and besides
4840 // Eden would be empty. This however may not always be the case --
4841 // for instance although we asked for a scavenge, it may not have
4842 // happened because of a JNI critical section. We probably need
4843 // a policy for deciding whether we can in that case wait until
4844 // the critical section releases and then do the remark following
4845 // the scavenge, and skip it here. In the absence of that policy,
4846 // or of an indication of whether the scavenge did indeed occur,
4847 // we cannot rely on TLAB's having been filled and must do
4848 // so here just in case a scavenge did not happen.
4849 gch->ensure_parsability(false); // fill TLAB's, but no need to retire them
4850 // Update the saved marks which may affect the root scans.
4851 gch->save_marks();
4853 {
4854 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
4856 // Note on the role of the mod union table:
4857 // Since the marker in "markFromRoots" marks concurrently with
4858 // mutators, it is possible for some reachable objects not to have been
4859       // scanned. For instance, the only reference to an object A was
4860 // placed in object B after the marker scanned B. Unless B is rescanned,
4861 // A would be collected. Such updates to references in marked objects
4862 // are detected via the mod union table which is the set of all cards
4863 // dirtied since the first checkpoint in this GC cycle and prior to
4864 // the most recent young generation GC, minus those cleaned up by the
4865 // concurrent precleaning.
4866 if (CMSParallelRemarkEnabled && ParallelGCThreads > 0) {
4867 TraceTime t("Rescan (parallel) ", PrintGCDetails, false, gclog_or_tty);
4868 do_remark_parallel();
4869 } else {
4870 TraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
4871 gclog_or_tty);
4872 do_remark_non_parallel();
4873 }
4874 }
4875 } else {
4876 assert(!asynch, "Can't have init_mark_was_synchronous in asynch mode");
4877 // The initial mark was stop-world, so there's no rescanning to
4878 // do; go straight on to the next step below.
4879 }
4880 verify_work_stacks_empty();
4881 verify_overflow_empty();
4883 {
4884 NOT_PRODUCT(TraceTime ts("refProcessingWork", PrintGCDetails, false, gclog_or_tty);)
4885 refProcessingWork(asynch, clear_all_soft_refs);
4886 }
4887 verify_work_stacks_empty();
4888 verify_overflow_empty();
4890 if (should_unload_classes()) {
4891 CodeCache::gc_epilogue();
4892 }
4894 // If we encountered any (marking stack / work queue) overflow
4895 // events during the current CMS cycle, take appropriate
4896 // remedial measures, where possible, so as to try and avoid
4897 // recurrence of that condition.
4898 assert(_markStack.isEmpty(), "No grey objects");
4899 size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
4900 _ser_kac_ovflw + _ser_kac_preclean_ovflw;
4901 if (ser_ovflw > 0) {
4902 if (PrintCMSStatistics != 0) {
4903 gclog_or_tty->print_cr("Marking stack overflow (benign) "
4904 "(pmc_pc="SIZE_FORMAT", pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT
4905 ", kac_preclean="SIZE_FORMAT")",
4906 _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw,
4907 _ser_kac_ovflw, _ser_kac_preclean_ovflw);
4908 }
4909 _markStack.expand();
4910 _ser_pmc_remark_ovflw = 0;
4911 _ser_pmc_preclean_ovflw = 0;
4912 _ser_kac_preclean_ovflw = 0;
4913 _ser_kac_ovflw = 0;
4914 }
4915 if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) {
4916 if (PrintCMSStatistics != 0) {
4917 gclog_or_tty->print_cr("Work queue overflow (benign) "
4918 "(pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT")",
4919 _par_pmc_remark_ovflw, _par_kac_ovflw);
4920 }
4921 _par_pmc_remark_ovflw = 0;
4922 _par_kac_ovflw = 0;
4923 }
4924 if (PrintCMSStatistics != 0) {
4925 if (_markStack._hit_limit > 0) {
4926 gclog_or_tty->print_cr(" (benign) Hit max stack size limit ("SIZE_FORMAT")",
4927 _markStack._hit_limit);
4928 }
4929 if (_markStack._failed_double > 0) {
4930 gclog_or_tty->print_cr(" (benign) Failed stack doubling ("SIZE_FORMAT"),"
4931 " current capacity "SIZE_FORMAT,
4932 _markStack._failed_double,
4933 _markStack.capacity());
4934 }
4935 }
4936 _markStack._hit_limit = 0;
4937 _markStack._failed_double = 0;
4939 // Check that all the klasses have been checked
4940 assert(_revisitStack.isEmpty(), "Not all klasses revisited");
4942 if ((VerifyAfterGC || VerifyDuringGC) &&
4943 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
4944 verify_after_remark();
4945 }
4947 // Change under the freelistLocks.
4948 _collectorState = Sweeping;
4949 // Call isAllClear() under bitMapLock
4950 assert(_modUnionTable.isAllClear(), "Should be clear by end of the"
4951 " final marking");
4952 if (UseAdaptiveSizePolicy) {
4953 size_policy()->checkpoint_roots_final_end(gch->gc_cause());
4954 }
4955 }
4957 // Parallel remark task
4958 class CMSParRemarkTask: public AbstractGangTask {
4959 CMSCollector* _collector;
4960 WorkGang* _workers;
4961 int _n_workers;
4962 CompactibleFreeListSpace* _cms_space;
4963 CompactibleFreeListSpace* _perm_space;
4965 // The per-thread work queues, available here for stealing.
4966 OopTaskQueueSet* _task_queues;
4967 ParallelTaskTerminator _term;
4969 public:
4970 CMSParRemarkTask(CMSCollector* collector,
4971 CompactibleFreeListSpace* cms_space,
4972 CompactibleFreeListSpace* perm_space,
4973 int n_workers, WorkGang* workers,
4974 OopTaskQueueSet* task_queues):
4975 AbstractGangTask("Rescan roots and grey objects in parallel"),
4976 _collector(collector),
4977 _cms_space(cms_space), _perm_space(perm_space),
4978 _n_workers(n_workers),
4979 _workers(workers),
4980 _task_queues(task_queues),
4981 _term(workers->total_workers(), task_queues) { }
4983 OopTaskQueueSet* task_queues() { return _task_queues; }
4985 OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
4987 ParallelTaskTerminator* terminator() { return &_term; }
4989 void work(int i);
4991 private:
4992 // Work method in support of parallel rescan ... of young gen spaces
4993 void do_young_space_rescan(int i, Par_MarkRefsIntoAndScanClosure* cl,
4994 ContiguousSpace* space,
4995 HeapWord** chunk_array, size_t chunk_top);
4997 // ... of dirty cards in old space
4998 void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i,
4999 Par_MarkRefsIntoAndScanClosure* cl);
5001 // ... work stealing for the above
5002 void do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl, int* seed);
5003 };
5005 void CMSParRemarkTask::work(int i) {
5006 elapsedTimer _timer;
5007 ResourceMark rm;
5008 HandleMark hm;
5010 // ---------- rescan from roots --------------
5011 _timer.start();
5012 GenCollectedHeap* gch = GenCollectedHeap::heap();
5013 Par_MarkRefsIntoAndScanClosure par_mrias_cl(_collector,
5014 _collector->_span, _collector->ref_processor(),
5015 &(_collector->_markBitMap),
5016 work_queue(i), &(_collector->_revisitStack));
5018 // Rescan young gen roots first since these are likely
5019 // coarsely partitioned and may, on that account, constitute
5020 // the critical path; thus, it's best to start off that
5021 // work first.
5022 // ---------- young gen roots --------------
5023 {
5024 DefNewGeneration* dng = _collector->_young_gen->as_DefNewGeneration();
5025 EdenSpace* eden_space = dng->eden();
5026 ContiguousSpace* from_space = dng->from();
5027 ContiguousSpace* to_space = dng->to();
5029 HeapWord** eca = _collector->_eden_chunk_array;
5030 size_t ect = _collector->_eden_chunk_index;
5031 HeapWord** sca = _collector->_survivor_chunk_array;
5032 size_t sct = _collector->_survivor_chunk_index;
5034 assert(ect <= _collector->_eden_chunk_capacity, "out of bounds");
5035 assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds");
5037 do_young_space_rescan(i, &par_mrias_cl, to_space, NULL, 0);
5038 do_young_space_rescan(i, &par_mrias_cl, from_space, sca, sct);
5039 do_young_space_rescan(i, &par_mrias_cl, eden_space, eca, ect);
5041 _timer.stop();
5042 if (PrintCMSStatistics != 0) {
5043 gclog_or_tty->print_cr(
5044 "Finished young gen rescan work in %dth thread: %3.3f sec",
5045 i, _timer.seconds());
5046 }
5047 }
5049 // ---------- remaining roots --------------
5050 _timer.reset();
5051 _timer.start();
5052 gch->gen_process_strong_roots(_collector->_cmsGen->level(),
5053 false, // yg was scanned above
5054 false, // this is parallel code
5055 true, // collecting perm gen
5056 SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
5057 &par_mrias_cl,
5058 true, // walk all of code cache if (so & SO_CodeCache)
5059 NULL);
5060 assert(_collector->should_unload_classes()
5061 || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_CodeCache),
5062 "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5063 _timer.stop();
5064 if (PrintCMSStatistics != 0) {
5065 gclog_or_tty->print_cr(
5066 "Finished remaining root rescan work in %dth thread: %3.3f sec",
5067 i, _timer.seconds());
5068 }
5070 // ---------- rescan dirty cards ------------
5071 _timer.reset();
5072 _timer.start();
5074 // Do the rescan tasks for each of the two spaces
5075 // (cms_space and perm_space) in turn.
5076 do_dirty_card_rescan_tasks(_cms_space, i, &par_mrias_cl);
5077 do_dirty_card_rescan_tasks(_perm_space, i, &par_mrias_cl);
5078 _timer.stop();
5079 if (PrintCMSStatistics != 0) {
5080 gclog_or_tty->print_cr(
5081 "Finished dirty card rescan work in %dth thread: %3.3f sec",
5082 i, _timer.seconds());
5083 }
5085 // ---------- steal work from other threads ...
5086 // ---------- ... and drain overflow list.
5087 _timer.reset();
5088 _timer.start();
5089 do_work_steal(i, &par_mrias_cl, _collector->hash_seed(i));
5090 _timer.stop();
5091 if (PrintCMSStatistics != 0) {
5092 gclog_or_tty->print_cr(
5093 "Finished work stealing in %dth thread: %3.3f sec",
5094 i, _timer.seconds());
5095 }
5096 }
5098 void
5099 CMSParRemarkTask::do_young_space_rescan(int i,
5100 Par_MarkRefsIntoAndScanClosure* cl, ContiguousSpace* space,
5101 HeapWord** chunk_array, size_t chunk_top) {
5102 // Until all tasks completed:
5103 // . claim an unclaimed task
5104 // . compute region boundaries corresponding to task claimed
5105 // using chunk_array
5106 // . par_oop_iterate(cl) over that region
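  // Illustrative example of the partitioning computed below: with
  // chunk_array = {c0, c1} and chunk_top == 2, the claimed tasks cover
  // [bottom, c0), [c0, c1) and [c1, top), i.e. chunk_top + 1 tasks
  // in all.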
5108 ResourceMark rm;
5109 HandleMark hm;
5111 SequentialSubTasksDone* pst = space->par_seq_tasks();
5112 assert(pst->valid(), "Uninitialized use?");
5114 int nth_task = 0;
5115 int n_tasks = pst->n_tasks();
5117 HeapWord *start, *end;
5118 while (!pst->is_task_claimed(/* reference */ nth_task)) {
5119 // We claimed task # nth_task; compute its boundaries.
5120 if (chunk_top == 0) { // no samples were taken
5121 assert(nth_task == 0 && n_tasks == 1, "Can have only 1 EdenSpace task");
5122 start = space->bottom();
5123 end = space->top();
5124 } else if (nth_task == 0) {
5125 start = space->bottom();
5126 end = chunk_array[nth_task];
5127 } else if (nth_task < (jint)chunk_top) {
5128 assert(nth_task >= 1, "Control point invariant");
5129 start = chunk_array[nth_task - 1];
5130 end = chunk_array[nth_task];
5131 } else {
5132 assert(nth_task == (jint)chunk_top, "Control point invariant");
5133 start = chunk_array[chunk_top - 1];
5134 end = space->top();
5135 }
5136 MemRegion mr(start, end);
5137 // Verify that mr is in space
5138 assert(mr.is_empty() || space->used_region().contains(mr),
5139 "Should be in space");
5140 // Verify that "start" is an object boundary
5141 assert(mr.is_empty() || oop(mr.start())->is_oop(),
5142 "Should be an oop");
5143 space->par_oop_iterate(mr, cl);
5144 }
5145 pst->all_tasks_completed();
5146 }
5148 void
5149 CMSParRemarkTask::do_dirty_card_rescan_tasks(
5150 CompactibleFreeListSpace* sp, int i,
5151 Par_MarkRefsIntoAndScanClosure* cl) {
5152 // Until all tasks completed:
5153 // . claim an unclaimed task
5154 // . compute region boundaries corresponding to task claimed
5155 // . transfer dirty bits ct->mut for that region
5156 // . apply rescanclosure to dirty mut bits for that region
5158 ResourceMark rm;
5159 HandleMark hm;
5161 OopTaskQueue* work_q = work_queue(i);
5162 ModUnionClosure modUnionClosure(&(_collector->_modUnionTable));
5163 // CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION!
5164 // CAUTION: This closure has state that persists across calls to
5165 // the work method dirty_range_iterate_clear() in that it has
5166   // embedded in it a (subtype of) UpwardsObjectClosure. The
5167   // use of that state in the embedded UpwardsObjectClosure instance
5168 // assumes that the cards are always iterated (even if in parallel
5169 // by several threads) in monotonically increasing order per each
5170 // thread. This is true of the implementation below which picks
5171 // card ranges (chunks) in monotonically increasing order globally
5172 // and, a-fortiori, in monotonically increasing order per thread
5173 // (the latter order being a subsequence of the former).
5174 // If the work code below is ever reorganized into a more chaotic
5175 // work-partitioning form than the current "sequential tasks"
5176 // paradigm, the use of that persistent state will have to be
5177   // revisited and modified appropriately. See also related
5178   // bug 4756801; work on that bug should examine this code to make
5179   // sure that the changes there do not run counter to the
5180   // assumptions made here, which are necessary for correctness and
5181   // efficiency. Note also that this code might yield inefficient
5182 // behaviour in the case of very large objects that span one or
5183 // more work chunks. Such objects would potentially be scanned
5184 // several times redundantly. Work on 4756801 should try and
5185 // address that performance anomaly if at all possible. XXX
5186 MemRegion full_span = _collector->_span;
5187 CMSBitMap* bm = &(_collector->_markBitMap); // shared
5188 CMSMarkStack* rs = &(_collector->_revisitStack); // shared
5189 MarkFromDirtyCardsClosure
5190 greyRescanClosure(_collector, full_span, // entire span of interest
5191 sp, bm, work_q, rs, cl);
5193 SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
5194 assert(pst->valid(), "Uninitialized use?");
5195 int nth_task = 0;
5196 const int alignment = CardTableModRefBS::card_size * BitsPerWord;
5197 MemRegion span = sp->used_region();
5198 HeapWord* start_addr = span.start();
5199 HeapWord* end_addr = (HeapWord*)round_to((intptr_t)span.end(),
5200 alignment);
5201 const size_t chunk_size = sp->rescan_task_size(); // in HeapWord units
5202 assert((HeapWord*)round_to((intptr_t)start_addr, alignment) ==
5203 start_addr, "Check alignment");
5204 assert((size_t)round_to((intptr_t)chunk_size, alignment) ==
5205 chunk_size, "Check alignment");
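  // Illustrative arithmetic (card size assumed for the example): one
  // mod-union-table word covers BitsPerWord cards; with 512-byte cards
  // on a 64-bit VM that is 512 * 64 = 32K of heap per MUT word.
  // Chunks aligned at "alignment" boundaries therefore never share a
  // MUT word across workers, which is why no synchronization is needed
  // in the claiming loop below.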
5207 while (!pst->is_task_claimed(/* reference */ nth_task)) {
5208 // Having claimed the nth_task, compute corresponding mem-region,
5209     // which is a-fortiori aligned correctly (i.e. at a MUT boundary).
5210     // The alignment restriction ensures that we do not need any
5211     // synchronization with other gang-workers while setting or
5212     // clearing bits in this chunk of the MUT.
5213 MemRegion this_span = MemRegion(start_addr + nth_task*chunk_size,
5214 start_addr + (nth_task+1)*chunk_size);
5215 // The last chunk's end might be way beyond end of the
5216 // used region. In that case pull back appropriately.
5217 if (this_span.end() > end_addr) {
5218 this_span.set_end(end_addr);
5219 assert(!this_span.is_empty(), "Program logic (calculation of n_tasks)");
5220 }
5221 // Iterate over the dirty cards covering this chunk, marking them
5222 // precleaned, and setting the corresponding bits in the mod union
5223 // table. Since we have been careful to partition at Card and MUT-word
5224 // boundaries no synchronization is needed between parallel threads.
5225 _collector->_ct->ct_bs()->dirty_card_iterate(this_span,
5226 &modUnionClosure);
5228 // Having transferred these marks into the modUnionTable,
5229 // rescan the marked objects on the dirty cards in the modUnionTable.
5230 // Even if this is at a synchronous collection, the initial marking
5231 // may have been done during an asynchronous collection so there
5232 // may be dirty bits in the mod-union table.
5233 _collector->_modUnionTable.dirty_range_iterate_clear(
5234 this_span, &greyRescanClosure);
5235 _collector->_modUnionTable.verifyNoOneBitsInRange(
5236 this_span.start(),
5237 this_span.end());
5238 }
5239 pst->all_tasks_completed(); // declare that i am done
5240 }
5242 // . see if we can share work_queues with ParNew? XXX
5243 void
5244 CMSParRemarkTask::do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl,
5245 int* seed) {
5246 OopTaskQueue* work_q = work_queue(i);
5247 NOT_PRODUCT(int num_steals = 0;)
5248 oop obj_to_scan;
5249 CMSBitMap* bm = &(_collector->_markBitMap);
5251 while (true) {
5252 // Completely finish any left over work from (an) earlier round(s)
5253 cl->trim_queue(0);
5254 size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
5255 (size_t)ParGCDesiredObjsFromOverflowList);
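    // Illustrative arithmetic (flag value assumed for the example):
    // with an empty work queue of capacity 16K entries and
    // ParGCDesiredObjsFromOverflowList = 20, this asks for
    // MIN2(16K/4, 20) = 20 objects from the global overflow list
    // per round.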
5256 // Now check if there's any work in the overflow list
5257 if (_collector->par_take_from_overflow_list(num_from_overflow_list,
5258 work_q)) {
5259 // found something in global overflow list;
5260 // not yet ready to go stealing work from others.
5261 // We'd like to assert(work_q->size() != 0, ...)
5262 // because we just took work from the overflow list,
5263 // but of course we can't since all of that could have
5264 // been already stolen from us.
5265 // "He giveth and He taketh away."
5266 continue;
5267 }
5268 // Verify that we have no work before we resort to stealing
5269 assert(work_q->size() == 0, "Have work, shouldn't steal");
5270 // Try to steal from other queues that have work
5271 if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
5272 NOT_PRODUCT(num_steals++;)
5273 assert(obj_to_scan->is_oop(), "Oops, not an oop!");
5274 assert(bm->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
5275 // Do scanning work
5276 obj_to_scan->oop_iterate(cl);
5277 // Loop around, finish this work, and try to steal some more
5278 } else if (terminator()->offer_termination()) {
5279 break; // nirvana from the infinite cycle
5280 }
5281 }
5282 NOT_PRODUCT(
5283 if (PrintCMSStatistics != 0) {
5284 gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
5285 }
5286 )
5287 assert(work_q->size() == 0 && _collector->overflow_list_is_empty(),
5288 "Else our work is not yet done");
5289 }
5291 // Return a thread-local PLAB recording array, as appropriate.
5292 void* CMSCollector::get_data_recorder(int thr_num) {
5293 if (_survivor_plab_array != NULL &&
5294 (CMSPLABRecordAlways ||
5295 (_collectorState > Marking && _collectorState < FinalMarking))) {
5296 assert(thr_num < (int)ParallelGCThreads, "thr_num is out of bounds");
5297 ChunkArray* ca = &_survivor_plab_array[thr_num];
5298 ca->reset(); // clear it so that fresh data is recorded
5299 return (void*) ca;
5300 } else {
5301 return NULL;
5302 }
5303 }
5305 // Reset all the thread-local PLAB recording arrays
5306 void CMSCollector::reset_survivor_plab_arrays() {
5307 for (uint i = 0; i < ParallelGCThreads; i++) {
5308 _survivor_plab_array[i].reset();
5309 }
5310 }
5312 // Merge the per-thread plab arrays into the global survivor chunk
5313 // array which will provide the partitioning of the survivor space
5314 // for CMS rescan.
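// The loop below is, in effect, an N-way merge (N = ParallelGCThreads)
// of the per-thread arrays, each of which records addresses in
// increasing order. Illustrative example only: two threads recording
// {a, c} and {b, d}, with a < b < c < d, merge into a
// _survivor_chunk_array of {a, b, c, d}.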
5315 void CMSCollector::merge_survivor_plab_arrays(ContiguousSpace* surv) {
5316 assert(_survivor_plab_array != NULL, "Error");
5317 assert(_survivor_chunk_array != NULL, "Error");
5318 assert(_collectorState == FinalMarking, "Error");
5319 for (uint j = 0; j < ParallelGCThreads; j++) {
5320 _cursor[j] = 0;
5321 }
5322 HeapWord* top = surv->top();
5323 size_t i;
5324 for (i = 0; i < _survivor_chunk_capacity; i++) { // all sca entries
5325 HeapWord* min_val = top; // Higher than any PLAB address
5326 uint min_tid = 0; // position of min_val this round
5327 for (uint j = 0; j < ParallelGCThreads; j++) {
5328 ChunkArray* cur_sca = &_survivor_plab_array[j];
5329 if (_cursor[j] == cur_sca->end()) {
5330 continue;
5331 }
5332 assert(_cursor[j] < cur_sca->end(), "ctl pt invariant");
5333 HeapWord* cur_val = cur_sca->nth(_cursor[j]);
5334 assert(surv->used_region().contains(cur_val), "Out of bounds value");
5335 if (cur_val < min_val) {
5336 min_tid = j;
5337 min_val = cur_val;
5338 } else {
5339 assert(cur_val < top, "All recorded addresses should be less");
5340 }
5341 }
5342 // At this point min_val and min_tid are respectively
5343 // the least address in _survivor_plab_array[j]->nth(_cursor[j])
5344 // and the thread (j) that witnesses that address.
5345 // We record this address in the _survivor_chunk_array[i]
5346 // and increment _cursor[min_tid] prior to the next round i.
5347 if (min_val == top) {
5348 break;
5349 }
5350 _survivor_chunk_array[i] = min_val;
5351 _cursor[min_tid]++;
5352 }
5353 // We are all done; record the size of the _survivor_chunk_array
5354 _survivor_chunk_index = i; // exclusive: [0, i)
5355 if (PrintCMSStatistics > 0) {
5356     gclog_or_tty->print(" (Survivor:" SIZE_FORMAT " chunks) ", i);
5357 }
5358 // Verify that we used up all the recorded entries
5359 #ifdef ASSERT
5360 size_t total = 0;
5361 for (uint j = 0; j < ParallelGCThreads; j++) {
5362 assert(_cursor[j] == _survivor_plab_array[j].end(), "Ctl pt invariant");
5363 total += _cursor[j];
5364 }
5365 assert(total == _survivor_chunk_index, "Ctl Pt Invariant");
5366 // Check that the merged array is in sorted order
5367 if (total > 0) {
5368 for (size_t i = 0; i < total - 1; i++) {
5369 if (PrintCMSStatistics > 0) {
5370 gclog_or_tty->print(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ",
5371 i, _survivor_chunk_array[i]);
5372 }
5373 assert(_survivor_chunk_array[i] < _survivor_chunk_array[i+1],
5374 "Not sorted");
5375 }
5376 }
5377 #endif // ASSERT
5378 }
5380 // Set up the space's par_seq_tasks structure for work claiming
5381 // for parallel rescan of young gen.
5382 // See ParRescanTask where this is currently used.
5383 void
5384 CMSCollector::
5385 initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
5386 assert(n_threads > 0, "Unexpected n_threads argument");
5387 DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
5389 // Eden space
5390 {
5391 SequentialSubTasksDone* pst = dng->eden()->par_seq_tasks();
5392 assert(!pst->valid(), "Clobbering existing data?");
5393 // Each valid entry in [0, _eden_chunk_index) represents a task.
5394 size_t n_tasks = _eden_chunk_index + 1;
5395 assert(n_tasks == 1 || _eden_chunk_array != NULL, "Error");
5396 pst->set_par_threads(n_threads);
5397 pst->set_n_tasks((int)n_tasks);
5398 }
5400 // Merge the survivor plab arrays into _survivor_chunk_array
5401 if (_survivor_plab_array != NULL) {
5402 merge_survivor_plab_arrays(dng->from());
5403 } else {
5404 assert(_survivor_chunk_index == 0, "Error");
5405 }
5407 // To space
5408 {
5409 SequentialSubTasksDone* pst = dng->to()->par_seq_tasks();
5410 assert(!pst->valid(), "Clobbering existing data?");
5411 pst->set_par_threads(n_threads);
5412 pst->set_n_tasks(1);
5413 assert(pst->valid(), "Error");
5414 }
5416 // From space
5417 {
5418 SequentialSubTasksDone* pst = dng->from()->par_seq_tasks();
5419 assert(!pst->valid(), "Clobbering existing data?");
5420 size_t n_tasks = _survivor_chunk_index + 1;
5421 assert(n_tasks == 1 || _survivor_chunk_array != NULL, "Error");
5422 pst->set_par_threads(n_threads);
5423 pst->set_n_tasks((int)n_tasks);
5424 assert(pst->valid(), "Error");
5425 }
5426 }
5428 // Parallel version of remark
5429 void CMSCollector::do_remark_parallel() {
5430 GenCollectedHeap* gch = GenCollectedHeap::heap();
5431 WorkGang* workers = gch->workers();
5432 assert(workers != NULL, "Need parallel worker threads.");
5433 int n_workers = workers->total_workers();
5434 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
5435 CompactibleFreeListSpace* perm_space = _permGen->cmsSpace();
5437 CMSParRemarkTask tsk(this,
5438 cms_space, perm_space,
5439 n_workers, workers, task_queues());
5441 // Set up for parallel process_strong_roots work.
5442 gch->set_par_threads(n_workers);
5443 // We won't be iterating over the cards in the card table updating
5444   // the younger_gen cards, so we shouldn't call the following; otherwise
5445   // the verification code, as well as subsequent younger_refs_iterate
5446   // code, would get confused. XXX
5447 // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
5449 // The young gen rescan work will not be done as part of
5450   // process_strong_roots (which currently doesn't know how to
5451   // parallelize such a scan), but rather will be broken up into
5452   // a set of parallel tasks (via the sampling that the [abortable]
5453   // preclean phase did of EdenSpace, plus the [two] tasks of
5454   // scanning the [two] survivor spaces). Further fine-grain
5455 // parallelization of the scanning of the survivor spaces
5456 // themselves, and of precleaning of the younger gen itself
5457 // is deferred to the future.
5458 initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
5460 // The dirty card rescan work is broken up into a "sequence"
5461 // of parallel tasks (per constituent space) that are dynamically
5462 // claimed by the parallel threads.
5463 cms_space->initialize_sequential_subtasks_for_rescan(n_workers);
5464 perm_space->initialize_sequential_subtasks_for_rescan(n_workers);
5466 // It turns out that even when we're using 1 thread, doing the work in a
5467 // separate thread causes wide variance in run times. We can't help this
5468 // in the multi-threaded case, but we special-case n=1 here to get
5469 // repeatable measurements of the 1-thread overhead of the parallel code.
5470 if (n_workers > 1) {
5471 // Make refs discovery MT-safe
5472 ReferenceProcessorMTMutator mt(ref_processor(), true);
5473 GenCollectedHeap::StrongRootsScope srs(gch);
5474 workers->run_task(&tsk);
5475 } else {
5476 GenCollectedHeap::StrongRootsScope srs(gch);
5477 tsk.work(0);
5478 }
5479 gch->set_par_threads(0); // 0 ==> non-parallel.
5480 // restore, single-threaded for now, any preserved marks
5481 // as a result of work_q overflow
5482 restore_preserved_marks_if_any();
5483 }
5485 // Non-parallel version of remark
5486 void CMSCollector::do_remark_non_parallel() {
5487 ResourceMark rm;
5488 HandleMark hm;
5489 GenCollectedHeap* gch = GenCollectedHeap::heap();
5490 MarkRefsIntoAndScanClosure
5491 mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
5492 &_markStack, &_revisitStack, this,
5493 false /* should_yield */, false /* not precleaning */);
5494 MarkFromDirtyCardsClosure
5495 markFromDirtyCardsClosure(this, _span,
5496 NULL, // space is set further below
5497 &_markBitMap, &_markStack, &_revisitStack,
5498 &mrias_cl);
5499 {
5500 TraceTime t("grey object rescan", PrintGCDetails, false, gclog_or_tty);
5501 // Iterate over the dirty cards, setting the corresponding bits in the
5502 // mod union table.
5503 {
5504 ModUnionClosure modUnionClosure(&_modUnionTable);
5505 _ct->ct_bs()->dirty_card_iterate(
5506 _cmsGen->used_region(),
5507 &modUnionClosure);
5508 _ct->ct_bs()->dirty_card_iterate(
5509 _permGen->used_region(),
5510 &modUnionClosure);
5511 }
5512 // Having transferred these marks into the modUnionTable, we just need
5513 // to rescan the marked objects on the dirty cards in the modUnionTable.
5514 // The initial marking may have been done during an asynchronous
5515 // collection so there may be dirty bits in the mod-union table.
5516 const int alignment =
5517 CardTableModRefBS::card_size * BitsPerWord;
5518 {
5519 // ... First handle dirty cards in CMS gen
5520 markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace());
5521 MemRegion ur = _cmsGen->used_region();
5522 HeapWord* lb = ur.start();
5523 HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
5524 MemRegion cms_span(lb, ub);
5525 _modUnionTable.dirty_range_iterate_clear(cms_span,
5526 &markFromDirtyCardsClosure);
5527 verify_work_stacks_empty();
5528 if (PrintCMSStatistics != 0) {
5529 gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in cms gen) ",
5530 markFromDirtyCardsClosure.num_dirty_cards());
5531 }
5532 }
5533 {
5534 // .. and then repeat for dirty cards in perm gen
5535 markFromDirtyCardsClosure.set_space(_permGen->cmsSpace());
5536 MemRegion ur = _permGen->used_region();
5537 HeapWord* lb = ur.start();
5538 HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
5539 MemRegion perm_span(lb, ub);
5540 _modUnionTable.dirty_range_iterate_clear(perm_span,
5541 &markFromDirtyCardsClosure);
5542 verify_work_stacks_empty();
5543 if (PrintCMSStatistics != 0) {
5544 gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in perm gen) ",
5545 markFromDirtyCardsClosure.num_dirty_cards());
5546 }
5547 }
5548 }
5549 if (VerifyDuringGC &&
5550 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5551 HandleMark hm; // Discard invalid handles created during verification
5552 Universe::verify(true);
5553 }
5554 {
5555 TraceTime t("root rescan", PrintGCDetails, false, gclog_or_tty);
5557 verify_work_stacks_empty();
5559 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
5560 GenCollectedHeap::StrongRootsScope srs(gch);
5561 gch->gen_process_strong_roots(_cmsGen->level(),
5562 true, // younger gens as roots
5563 false, // use the local StrongRootsScope
5564 true, // collecting perm gen
5565 SharedHeap::ScanningOption(roots_scanning_options()),
5566 &mrias_cl,
5567 true, // walk code active on stacks
5568 NULL);
5569 assert(should_unload_classes()
5570 || (roots_scanning_options() & SharedHeap::SO_CodeCache),
5571 "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5572 }
5573 verify_work_stacks_empty();
5574 // Restore evacuated mark words, if any, used for overflow list links
5575 if (!CMSOverflowEarlyRestoration) {
5576 restore_preserved_marks_if_any();
5577 }
5578 verify_overflow_empty();
5579 }
5581 ////////////////////////////////////////////////////////
5582 // Parallel Reference Processing Task Proxy Class
5583 ////////////////////////////////////////////////////////
5584 class CMSRefProcTaskProxy: public AbstractGangTask {
5585 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
5586 CMSCollector* _collector;
5587 CMSBitMap* _mark_bit_map;
5588 const MemRegion _span;
5589 OopTaskQueueSet* _task_queues;
5590 ParallelTaskTerminator _term;
5591 ProcessTask& _task;
5593 public:
5594 CMSRefProcTaskProxy(ProcessTask& task,
5595 CMSCollector* collector,
5596 const MemRegion& span,
5597 CMSBitMap* mark_bit_map,
5598 int total_workers,
5599 OopTaskQueueSet* task_queues):
5600 AbstractGangTask("Process referents by policy in parallel"),
5601 _task(task),
5602 _collector(collector), _span(span), _mark_bit_map(mark_bit_map),
5603 _task_queues(task_queues),
5604 _term(total_workers, task_queues)
5605 {
5606 assert(_collector->_span.equals(_span) && !_span.is_empty(),
5607 "Inconsistency in _span");
5608 }
5610 OopTaskQueueSet* task_queues() { return _task_queues; }
5612 OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
5614 ParallelTaskTerminator* terminator() { return &_term; }
5616 void do_work_steal(int i,
5617 CMSParDrainMarkingStackClosure* drain,
5618 CMSParKeepAliveClosure* keep_alive,
5619 int* seed);
5621 virtual void work(int i);
5622 };
5624 void CMSRefProcTaskProxy::work(int i) {
5625 assert(_collector->_span.equals(_span), "Inconsistency in _span");
5626 CMSParKeepAliveClosure par_keep_alive(_collector, _span,
5627 _mark_bit_map,
5628 &_collector->_revisitStack,
5629 work_queue(i));
5630 CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span,
5631 _mark_bit_map,
5632 &_collector->_revisitStack,
5633 work_queue(i));
5634 CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map);
5635 _task.work(i, is_alive_closure, par_keep_alive, par_drain_stack);
5636 if (_task.marks_oops_alive()) {
5637 do_work_steal(i, &par_drain_stack, &par_keep_alive,
5638 _collector->hash_seed(i));
5639 }
5640 assert(work_queue(i)->size() == 0, "work_queue should be empty");
5641 assert(_collector->_overflow_list == NULL, "non-empty _overflow_list");
5642 }
5644 class CMSRefEnqueueTaskProxy: public AbstractGangTask {
5645 typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
5646 EnqueueTask& _task;
5648 public:
5649 CMSRefEnqueueTaskProxy(EnqueueTask& task)
5650 : AbstractGangTask("Enqueue reference objects in parallel"),
5651 _task(task)
5652 { }
5654 virtual void work(int i)
5655 {
5656 _task.work(i);
5657 }
5658 };
5660 CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector,
5661 MemRegion span, CMSBitMap* bit_map, CMSMarkStack* revisit_stack,
5662 OopTaskQueue* work_queue):
5663 Par_KlassRememberingOopClosure(collector, NULL, revisit_stack),
5664 _span(span),
5665 _bit_map(bit_map),
5666 _work_queue(work_queue),
5667 _mark_and_push(collector, span, bit_map, revisit_stack, work_queue),
5668 _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
5669 (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads)))
5670 { }
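// Illustrative arithmetic for _low_water_mark above (flag values
// assumed for the example): with a work queue of capacity 16K entries,
// CMSWorkQueueDrainThreshold = 10 and ParallelGCThreads = 4, we get
// MIN2(16K/4, 10 * 4) = 40 entries.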
5672 // . see if we can share work_queues with ParNew? XXX
5673 void CMSRefProcTaskProxy::do_work_steal(int i,
5674 CMSParDrainMarkingStackClosure* drain,
5675 CMSParKeepAliveClosure* keep_alive,
5676 int* seed) {
5677 OopTaskQueue* work_q = work_queue(i);
5678 NOT_PRODUCT(int num_steals = 0;)
5679 oop obj_to_scan;
5681 while (true) {
5682 // Completely finish any left over work from (an) earlier round(s)
5683 drain->trim_queue(0);
5684 size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
5685 (size_t)ParGCDesiredObjsFromOverflowList);
5686 // Now check if there's any work in the overflow list
5687 if (_collector->par_take_from_overflow_list(num_from_overflow_list,
5688 work_q)) {
5689 // Found something in global overflow list;
5690 // not yet ready to go stealing work from others.
5691 // We'd like to assert(work_q->size() != 0, ...)
5692 // because we just took work from the overflow list,
5693 // but of course we can't, since all of that might have
5694 // been already stolen from us.
5695 continue;
5696 }
5697 // Verify that we have no work before we resort to stealing
5698 assert(work_q->size() == 0, "Have work, shouldn't steal");
5699 // Try to steal from other queues that have work
5700 if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
5701 NOT_PRODUCT(num_steals++;)
5702 assert(obj_to_scan->is_oop(), "Oops, not an oop!");
5703 assert(_mark_bit_map->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
5704 // Do scanning work
5705 obj_to_scan->oop_iterate(keep_alive);
5706 // Loop around, finish this work, and try to steal some more
5707 } else if (terminator()->offer_termination()) {
5708 break; // nirvana from the infinite cycle
5709 }
5710 }
5711 NOT_PRODUCT(
5712 if (PrintCMSStatistics != 0) {
5713 gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
5714 }
5715 )
5716 }
5718 void CMSRefProcTaskExecutor::execute(ProcessTask& task)
5719 {
5720 GenCollectedHeap* gch = GenCollectedHeap::heap();
5721 WorkGang* workers = gch->workers();
5722 assert(workers != NULL, "Need parallel worker threads.");
5723 int n_workers = workers->total_workers();
5724 CMSRefProcTaskProxy rp_task(task, &_collector,
5725 _collector.ref_processor()->span(),
5726 _collector.markBitMap(),
5727 n_workers, _collector.task_queues());
5728 workers->run_task(&rp_task);
5729 }
5731 void CMSRefProcTaskExecutor::execute(EnqueueTask& task)
5732 {
5734 GenCollectedHeap* gch = GenCollectedHeap::heap();
5735 WorkGang* workers = gch->workers();
5736 assert(workers != NULL, "Need parallel worker threads.");
5737 CMSRefEnqueueTaskProxy enq_task(task);
5738 workers->run_task(&enq_task);
5739 }
5741 void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
5743 ResourceMark rm;
5744 HandleMark hm;
5746 ReferenceProcessor* rp = ref_processor();
5747 assert(rp->span().equals(_span), "Spans should be equal");
5748 assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
5749 // Process weak references.
5750 rp->setup_policy(clear_all_soft_refs);
5751 verify_work_stacks_empty();
5753 CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
5754 &_markStack, &_revisitStack,
5755 false /* !preclean */);
5756 CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
5757 _span, &_markBitMap, &_markStack,
5758 &cmsKeepAliveClosure, false /* !preclean */);
5759 {
5760 TraceTime t("weak refs processing", PrintGCDetails, false, gclog_or_tty);
5761 if (rp->processing_is_mt()) {
5762 CMSRefProcTaskExecutor task_executor(*this);
5763 rp->process_discovered_references(&_is_alive_closure,
5764 &cmsKeepAliveClosure,
5765 &cmsDrainMarkingStackClosure,
5766 &task_executor);
5767 } else {
5768 rp->process_discovered_references(&_is_alive_closure,
5769 &cmsKeepAliveClosure,
5770 &cmsDrainMarkingStackClosure,
5771 NULL);
5772 }
5773 verify_work_stacks_empty();
5774 }
5776 if (should_unload_classes()) {
5777 {
5778 TraceTime t("class unloading", PrintGCDetails, false, gclog_or_tty);
5780 // Follow SystemDictionary roots and unload classes
5781 bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
5783 // Follow CodeCache roots and unload any methods marked for unloading
5784 CodeCache::do_unloading(&_is_alive_closure,
5785 &cmsKeepAliveClosure,
5786 purged_class);
5788 cmsDrainMarkingStackClosure.do_void();
5789 verify_work_stacks_empty();
5791 // Update subklass/sibling/implementor links in KlassKlass descendants
5792 assert(!_revisitStack.isEmpty(), "revisit stack should not be empty");
5793 oop k;
5794 while ((k = _revisitStack.pop()) != NULL) {
5795 ((Klass*)(oopDesc*)k)->follow_weak_klass_links(
5796 &_is_alive_closure,
5797 &cmsKeepAliveClosure);
5798 }
5799 assert(!ClassUnloading ||
5800 (_markStack.isEmpty() && overflow_list_is_empty()),
5801 "Should not have found new reachable objects");
5802 assert(_revisitStack.isEmpty(), "revisit stack should have been drained");
5803 cmsDrainMarkingStackClosure.do_void();
5804 verify_work_stacks_empty();
5805 }
5807 {
5808 TraceTime t("scrub symbol & string tables", PrintGCDetails, false, gclog_or_tty);
5809 // Now clean up stale oops in SymbolTable and StringTable
5810 SymbolTable::unlink(&_is_alive_closure);
5811 StringTable::unlink(&_is_alive_closure);
5812 }
5813 }
5815 verify_work_stacks_empty();
5816 // Restore any preserved marks as a result of mark stack or
5817 // work queue overflow
5818 restore_preserved_marks_if_any(); // done single-threaded for now
5820 rp->set_enqueuing_is_done(true);
5821 if (rp->processing_is_mt()) {
5822 CMSRefProcTaskExecutor task_executor(*this);
5823 rp->enqueue_discovered_references(&task_executor);
5824 } else {
5825 rp->enqueue_discovered_references(NULL);
5826 }
5827 rp->verify_no_references_recorded();
5828 assert(!rp->discovery_enabled(), "should have been disabled");
5830 // JVMTI object tagging is based on JNI weak refs. If any of these
5831 // refs were cleared then JVMTI needs to update its maps and
5832 // maybe post ObjectFrees to agents.
5833 JvmtiExport::cms_ref_processing_epilogue();
5834 }
5836 #ifndef PRODUCT
5837 void CMSCollector::check_correct_thread_executing() {
5838 Thread* t = Thread::current();
5839 // Only the VM thread or the CMS thread should be here.
5840 assert(t->is_ConcurrentGC_thread() || t->is_VM_thread(),
5841 "Unexpected thread type");
5842 // If this is the VM thread, the foreground collector
5843 // should not be waiting. Note that _foregroundGCIsActive is
5844 // true while the foreground collector is waiting.
5845 if (_foregroundGCShouldWait) {
5846 // We cannot be the VM thread
5847 assert(t->is_ConcurrentGC_thread(),
5848 "Should be CMS thread");
5849 } else {
5850 // We can be the CMS thread only if we are in a stop-world
5851 // phase of CMS collection.
5852 if (t->is_ConcurrentGC_thread()) {
5853 assert(_collectorState == InitialMarking ||
5854 _collectorState == FinalMarking,
5855 "Should be a stop-world phase");
5856 // The CMS thread should be holding the CMS_token.
5857 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5858 "Potential interference with concurrently "
5859 "executing VM thread");
5860 }
5861 }
5862 }
5863 #endif
5865 void CMSCollector::sweep(bool asynch) {
5866 assert(_collectorState == Sweeping, "just checking");
5867 check_correct_thread_executing();
5868 verify_work_stacks_empty();
5869 verify_overflow_empty();
5870 incrementSweepCount();
5871 _sweep_timer.stop();
5872 _sweep_estimate.sample(_sweep_timer.seconds());
5873 size_policy()->avg_cms_free_at_sweep()->sample(_cmsGen->free());
5875 // PermGen verification support: If perm gen sweeping is disabled in
5876 // this cycle, we preserve the perm gen object "deadness" information
5877 // in the perm_gen_verify_bit_map. In order to do that we traverse
5878 // all blocks in perm gen and mark all dead objects.
5879 if (verifying() && !should_unload_classes()) {
5880 assert(perm_gen_verify_bit_map()->sizeInBits() != 0,
5881 "Should have already been allocated");
5882 MarkDeadObjectsClosure mdo(this, _permGen->cmsSpace(),
5883 markBitMap(), perm_gen_verify_bit_map());
5884 if (asynch) {
5885 CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
5886 bitMapLock());
5887 _permGen->cmsSpace()->blk_iterate(&mdo);
5888 } else {
5889 // In the case of synchronous sweep, we already have
5890 // the requisite locks/tokens.
5891 _permGen->cmsSpace()->blk_iterate(&mdo);
5892 }
5893 }
5895 if (asynch) {
5896 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
5897 CMSPhaseAccounting pa(this, "sweep", !PrintGCDetails);
5898 // First sweep the old gen then the perm gen
5899 {
5900 CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
5901 bitMapLock());
5902 sweepWork(_cmsGen, asynch);
5903 }
5905 // Now repeat for perm gen
5906 if (should_unload_classes()) {
5907 CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
5908 bitMapLock());
5909 sweepWork(_permGen, asynch);
5910 }
5912 // Update Universe::_heap_*_at_gc figures.
5913 // We need all the free list locks to make the abstract state
5914 // transition from Sweeping to Resetting. See detailed note
5915 // further below.
5916 {
5917 CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
5918 _permGen->freelistLock());
5919 // Update heap occupancy information which is used as
5920 // input to soft ref clearing policy at the next gc.
5921 Universe::update_heap_info_at_gc();
5922 _collectorState = Resizing;
5923 }
5924 } else {
5925 // already have needed locks
5926 sweepWork(_cmsGen, asynch);
5928 if (should_unload_classes()) {
5929 sweepWork(_permGen, asynch);
5930 }
5931 // Update heap occupancy information which is used as
5932 // input to soft ref clearing policy at the next gc.
5933 Universe::update_heap_info_at_gc();
5934 _collectorState = Resizing;
5935 }
5936 verify_work_stacks_empty();
5937 verify_overflow_empty();
5939 _sweep_timer.reset();
5940 _sweep_timer.start();
5942 update_time_of_last_gc(os::javaTimeMillis());
5944 // NOTE on abstract state transitions:
5945 // Mutators allocate-live and/or mark the mod-union table dirty
5946 // based on the state of the collection. The former is done in
5947 // the interval [Marking, Sweeping] and the latter in the interval
5948 // [Marking, Sweeping). Thus the transitions into the Marking state
5949 // and out of the Sweeping state must be synchronously visible
5950 // globally to the mutators.
5951 // The transition into the Marking state happens with the world
5952 // stopped so the mutators will globally see it. Sweeping is
5953 // done asynchronously by the background collector so the transition
5954 // from the Sweeping state to the Resizing state must be done
5955 // under the freelistLock (as is the check for whether to
5956 // allocate-live and whether to dirty the mod-union table).
5957 assert(_collectorState == Resizing, "Change of collector state to"
5958 " Resizing must be done under the freelistLocks (plural)");
5960 // Now that sweeping has been completed, if the GCH's
5961 // incremental_collection_will_fail flag is set, clear it,
5962 // thus inviting a younger gen collection to promote into
5963 // this generation. If such a promotion may still fail,
5964 // the flag will be set again when a young collection is
5965 // attempted.
5966 // I think the incremental_collection_will_fail flag's use
5967 // is specific to a two-generation collection policy, so I'll
5968 // assert that that's the configuration we are operating within.
5969 // The use of the flag can and should be generalized appropriately
5970 // in the future to deal with a general n-generation system.
5972 GenCollectedHeap* gch = GenCollectedHeap::heap();
5973 assert(gch->collector_policy()->is_two_generation_policy(),
5974 "Resetting of incremental_collection_will_fail flag"
5975 " may be incorrect otherwise");
5976 gch->clear_incremental_collection_will_fail();
5977 gch->update_full_collections_completed(_collection_count_start);
5978 }
5980 // FIX ME!!! Looks like this belongs in CFLSpace, with
5981 // CMSGen merely delegating to it.
5982 void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
5983 double nearLargestPercent = 0.999;
5984 HeapWord* minAddr = _cmsSpace->bottom();
5985 HeapWord* largestAddr =
5986 (HeapWord*) _cmsSpace->dictionary()->findLargestDict();
5987 if (largestAddr == 0) {
5988 // The dictionary appears to be empty. In this case
5989 // try to coalesce at the end of the heap.
5990 largestAddr = _cmsSpace->end();
5991 }
5992 size_t largestOffset = pointer_delta(largestAddr, minAddr);
5993 size_t nearLargestOffset =
5994 (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
5995 _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
5996 }
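// A worked example of the arithmetic above (illustrative values only;
// MinChunkSize is taken as 7 words here purely for the example): with
// minAddr at offset 0 and largestAddr 1048576 (2^20) words up,
//   largestOffset     = 1048576
//   nearLargestOffset = (size_t)(1048576 * 0.999) - 7
//                     = 1047527 - 7 = 1047520
// so nearLargestChunk is set roughly 0.1% (plus MinChunkSize) short of
// the largest chunk, and isNearLargestChunk() answers true only at or
// beyond that point.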
5998 bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) {
5999 return addr >= _cmsSpace->nearLargestChunk();
6000 }
6002 FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
6003 return _cmsSpace->find_chunk_at_end();
6004 }
6006 void ConcurrentMarkSweepGeneration::update_gc_stats(int current_level,
6007 bool full) {
6008 // The next lower level has been collected. Gather any statistics
6009 // that are of interest at this point.
6010 if (!full && (current_level + 1) == level()) {
6011 // Gather statistics on the young generation collection.
6012 collector()->stats().record_gc0_end(used());
6013 }
6014 }
6016 CMSAdaptiveSizePolicy* ConcurrentMarkSweepGeneration::size_policy() {
6017 GenCollectedHeap* gch = GenCollectedHeap::heap();
6018 assert(gch->kind() == CollectedHeap::GenCollectedHeap,
6019 "Wrong type of heap");
6020 CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
6021 gch->gen_policy()->size_policy();
6022 assert(sp->is_gc_cms_adaptive_size_policy(),
6023 "Wrong type of size policy");
6024 return sp;
6025 }
6027 void ConcurrentMarkSweepGeneration::rotate_debug_collection_type() {
6028 if (PrintGCDetails && Verbose) {
6029 gclog_or_tty->print("Rotate from %d ", _debug_collection_type);
6030 }
6031 _debug_collection_type = (CollectionTypes) (_debug_collection_type + 1);
6032 _debug_collection_type =
6033 (CollectionTypes) (_debug_collection_type % Unknown_collection_type);
6034 if (PrintGCDetails && Verbose) {
6035 gclog_or_tty->print_cr("to %d ", _debug_collection_type);
6036 }
6037 }
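// For illustration (enum value assumed, purely for the example): with
// Unknown_collection_type == 3, the increment-and-mod above cycles the
// debug collection type 0 -> 1 -> 2 -> 0 -> ..., never yielding
// Unknown_collection_type itself.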
6039 void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen,
6040 bool asynch) {
6041 // We iterate over the space(s) underlying this generation,
6042 // checking the mark bit map to see if the bits corresponding
6043 // to specific blocks are marked or not. Blocks that are
6044 // marked are live and are not swept up. All remaining blocks
6045 // are swept up, with coalescing on-the-fly as we sweep up
6046 // contiguous free and/or garbage blocks:
6047 // We need to ensure that the sweeper synchronizes with allocators
6048 // and stop-the-world collectors. In particular, the following
6049 // locks are used:
6050 // . CMS token: if this is held, a stop the world collection cannot occur
6051 // . freelistLock: if this is held no allocation can occur from this
6052 // generation by another thread
6053 // . bitMapLock: if this is held, no other thread can access or update
6054 //   the marking bit map
6056 // Note that we need to hold the freelistLock if we use
6057 // block iterate below; else the iterator might go awry if
6058 // a mutator (or promotion) causes block contents to change
6059 // (for instance if the allocator divvies up a block).
6060 // If we hold the free list lock, for all practical purposes
6061 // young generation GC's can't occur (they'll usually need to
6062 // promote), so we might as well prevent all young generation
6063 // GC's while we do a sweeping step. For the same reason, we might
6064 // as well take the bit map lock for the entire duration.
6066 // check that we hold the requisite locks
6067 assert(have_cms_token(), "Should hold cms token");
6068 assert( (asynch && ConcurrentMarkSweepThread::cms_thread_has_cms_token())
6069 || (!asynch && ConcurrentMarkSweepThread::vm_thread_has_cms_token()),
6070 "Should possess CMS token to sweep");
6071 assert_lock_strong(gen->freelistLock());
6072 assert_lock_strong(bitMapLock());
6074 assert(!_sweep_timer.is_active(), "Was switched off in an outer context");
6075 gen->cmsSpace()->beginSweepFLCensus((float)(_sweep_timer.seconds()),
6076 _sweep_estimate.padded_average());
6077 gen->setNearLargestChunk();
6079 {
6080 SweepClosure sweepClosure(this, gen, &_markBitMap,
6081 CMSYield && asynch);
6082 gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
6083 // We need to free up/coalesce garbage blocks from a
6084 // co-terminal free run. This is done in the SweepClosure
6085 // destructor; so, do not remove this scope, else the
6086 // end-of-sweep-census below will be off by a little bit.
6087 }
6088 gen->cmsSpace()->sweep_completed();
6089 gen->cmsSpace()->endSweepFLCensus(sweepCount());
6090 if (should_unload_classes()) { // unloaded classes this cycle,
6091 _concurrent_cycles_since_last_unload = 0; // ... reset count
6092 } else { // did not unload classes,
6093 _concurrent_cycles_since_last_unload++; // ... increment count
6094 }
6095 }
6097 // Reset CMS data structures (for now just the marking bit map)
6098 // preparatory for the next cycle.
6099 void CMSCollector::reset(bool asynch) {
6100 GenCollectedHeap* gch = GenCollectedHeap::heap();
6101 CMSAdaptiveSizePolicy* sp = size_policy();
6102 AdaptiveSizePolicyOutput(sp, gch->total_collections());
6103 if (asynch) {
6104 CMSTokenSyncWithLocks ts(true, bitMapLock());
6106 // If the state is not "Resetting", the foreground thread
6107 // has already done the collection and the resetting.
6108 if (_collectorState != Resetting) {
6109 assert(_collectorState == Idling, "The state should only change"
6110 " because the foreground collector has finished the collection");
6111 return;
6112 }
6114 // Clear the mark bitmap (no grey objects to start with)
6115 // for the next cycle.
6116 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
6117 CMSPhaseAccounting cmspa(this, "reset", !PrintGCDetails);
6119 HeapWord* curAddr = _markBitMap.startWord();
6120 while (curAddr < _markBitMap.endWord()) {
6121 size_t remaining = pointer_delta(_markBitMap.endWord(), curAddr);
6122 MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
6123 _markBitMap.clear_large_range(chunk);
6124 if (ConcurrentMarkSweepThread::should_yield() &&
6125 !foregroundGCIsActive() &&
6126 CMSYield) {
6127 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6128 "CMS thread should hold CMS token");
6129 assert_lock_strong(bitMapLock());
6130 bitMapLock()->unlock();
6131 ConcurrentMarkSweepThread::desynchronize(true);
6132 ConcurrentMarkSweepThread::acknowledge_yield_request();
6133 stopTimer();
6134 if (PrintCMSStatistics != 0) {
6135 incrementYields();
6136 }
6137 icms_wait();
6139 // See the comment in coordinator_yield()
6140 for (unsigned i = 0; i < CMSYieldSleepCount &&
6141 ConcurrentMarkSweepThread::should_yield() &&
6142 !CMSCollector::foregroundGCIsActive(); ++i) {
6143 os::sleep(Thread::current(), 1, false);
6144 ConcurrentMarkSweepThread::acknowledge_yield_request();
6145 }
6147 ConcurrentMarkSweepThread::synchronize(true);
6148 bitMapLock()->lock_without_safepoint_check();
6149 startTimer();
6150 }
6151 curAddr = chunk.end();
6152 }
6153 _collectorState = Idling;
6154 } else {
6155 // already have the lock
6156 assert(_collectorState == Resetting, "just checking");
6157 assert_lock_strong(bitMapLock());
6158 _markBitMap.clear_all();
6159 _collectorState = Idling;
6160 }
6162 // Stop incremental mode after a cycle completes, so that any future cycles
6163 // are triggered by allocation.
6164 stop_icms();
6166 NOT_PRODUCT(
6167 if (RotateCMSCollectionTypes) {
6168 _cmsGen->rotate_debug_collection_type();
6169 }
6170 )
6171 }
6173 void CMSCollector::do_CMS_operation(CMS_op_type op) {
6174 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
6175 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
6176 TraceTime t("GC", PrintGC, !PrintGCDetails, gclog_or_tty);
6177 TraceCollectorStats tcs(counters());
6179 switch (op) {
6180 case CMS_op_checkpointRootsInitial: {
6181 checkpointRootsInitial(true); // asynch
6182 if (PrintGC) {
6183 _cmsGen->printOccupancy("initial-mark");
6184 }
6185 break;
6186 }
6187 case CMS_op_checkpointRootsFinal: {
6188 checkpointRootsFinal(true, // asynch
6189 false, // !clear_all_soft_refs
6190 false); // !init_mark_was_synchronous
6191 if (PrintGC) {
6192 _cmsGen->printOccupancy("remark");
6193 }
6194 break;
6195 }
6196 default:
6197 fatal("No such CMS_op");
6198 }
6199 }
6201 #ifndef PRODUCT
6202 size_t const CMSCollector::skip_header_HeapWords() {
6203 return FreeChunk::header_size();
6204 }
6206 // Try to collect here the conditions that should hold when
6207 // the CMS thread is exiting. The idea is that the foreground GC
6208 // thread should not be blocked if it wants to terminate
6209 // the CMS thread and yet continue to run the VM for a while
6210 // after that.
6211 void CMSCollector::verify_ok_to_terminate() const {
6212 assert(Thread::current()->is_ConcurrentGC_thread(),
6213 "should be called by CMS thread");
6214 assert(!_foregroundGCShouldWait, "should be false");
6215 // We could check here that all the various low-level locks
6216 // are not held by the CMS thread, but that is overkill; see
6217 // also CMSThread::verify_ok_to_terminate() where the CGC_lock
6218 // is checked.
6219 }
6220 #endif
6222 size_t CMSCollector::block_size_using_printezis_bits(HeapWord* addr) const {
6223 assert(_markBitMap.isMarked(addr) && _markBitMap.isMarked(addr + 1),
6224 "missing Printezis mark?");
6225 HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
6226 size_t size = pointer_delta(nextOneAddr + 1, addr);
6227 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6228 "alignment problem");
6229 assert(size >= 3, "Necessary for Printezis marks to work");
6230 return size;
6231 }
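// The Printezis-mark convention, on a small hedged example: suppose a
// 5-word block was direct-allocated at addr; the allocator marked addr,
// addr+1 (the "P-bits") and the last word, addr+4:
//   word:      addr  addr+1  addr+2  addr+3  addr+4
//   mark bit:   1      1       0       0       1
// getNextMarkedWordAddress(addr + 2) then returns addr+4, so
//   size = pointer_delta(addr+4 + 1, addr) = 5 words.
// This also shows why size >= 3 is necessary: the scheme needs addr,
// addr+1 and a distinct last word to encode the block extent.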
6233 // A variant of the above (block_size_using_printezis_bits()) except
6234 // that we return 0 if the P-bits are not yet set.
6235 size_t CMSCollector::block_size_if_printezis_bits(HeapWord* addr) const {
6236 if (_markBitMap.isMarked(addr)) {
6237 assert(_markBitMap.isMarked(addr + 1), "Missing Printezis bit?");
6238 HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
6239 size_t size = pointer_delta(nextOneAddr + 1, addr);
6240 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6241 "alignment problem");
6242 assert(size >= 3, "Necessary for Printezis marks to work");
6243 return size;
6244 } else {
6245 assert(!_markBitMap.isMarked(addr + 1), "Bit map inconsistency?");
6246 return 0;
6247 }
6248 }
6250 HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const {
6251 size_t sz = 0;
6252 oop p = (oop)addr;
6253 if (p->klass_or_null() != NULL && p->is_parsable()) {
6254 sz = CompactibleFreeListSpace::adjustObjectSize(p->size());
6255 } else {
6256 sz = block_size_using_printezis_bits(addr);
6257 }
6258 assert(sz > 0, "size must be nonzero");
6259 HeapWord* next_block = addr + sz;
6260 HeapWord* next_card = (HeapWord*)round_to((uintptr_t)next_block,
6261 CardTableModRefBS::card_size);
6262 assert(round_down((uintptr_t)addr, CardTableModRefBS::card_size) <
6263 round_down((uintptr_t)next_card, CardTableModRefBS::card_size),
6264 "must be different cards");
6265 return next_card;
6266 }
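// A worked example of the card arithmetic above, assuming the usual
// 512-byte cards and 8-byte HeapWords (64 words per card): for a block
// starting at word offset 100 of a card-aligned region (byte offset 800)
// with sz == 20 words, next_block is word 120 (byte 960), and
// round_to(960, 512) == 1024, so next_card starts at word 128. The
// assert holds: addr lies on the card [512, 1024) while next_card lies
// on the card [1024, 1536).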
6269 // CMS Bit Map Wrapper /////////////////////////////////////////
6271 // Construct a CMS bit map infrastructure, but don't create the
6272 // bit vector itself. That is done by a separate call to CMSBitMap::allocate()
6273 // further below.
6274 CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name):
6275 _bm(),
6276 _shifter(shifter),
6277 _lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true) : NULL)
6278 {
6279 _bmStartWord = 0;
6280 _bmWordSize = 0;
6281 }
6283 bool CMSBitMap::allocate(MemRegion mr) {
6284 _bmStartWord = mr.start();
6285 _bmWordSize = mr.word_size();
6286 ReservedSpace brs(ReservedSpace::allocation_align_size_up(
6287 (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
6288 if (!brs.is_reserved()) {
6289 warning("CMS bit map allocation failure");
6290 return false;
6291 }
6292 // For now we'll just commit all of the bit map up front.
6293 // Later on we'll try to be more parsimonious with swap.
6294 if (!_virtual_space.initialize(brs, brs.size())) {
6295 warning("CMS bit map backing store failure");
6296 return false;
6297 }
6298 assert(_virtual_space.committed_size() == brs.size(),
6299 "didn't reserve backing store for all of CMS bit map?");
6300 _bm.set_map((BitMap::bm_word_t*)_virtual_space.low());
6301 assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
6302 _bmWordSize, "inconsistency in bit map sizing");
6303 _bm.set_size(_bmWordSize >> _shifter);
6305 // bm.clear(); // can we rely on getting zero'd memory? verify below
6306 assert(isAllClear(),
6307 "Expected zero'd memory from ReservedSpace constructor");
6308 assert(_bm.size() == heapWordDiffToOffsetDiff(sizeInWords()),
6309 "consistency check");
6310 return true;
6311 }
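// The sizing arithmetic above, by way of a hedged example: for a 512 MB
// covered space with 8-byte HeapWords, _bmWordSize == 2^26. With
// _shifter == 0 (one bit per covered word) and LogBitsPerByte == 3, the
// request is (2^26 >> 3) + 1 == 8 MB + 1 bytes, that is, one bit of
// backing store per covered HeapWord, which allocation_align_size_up()
// then rounds to the platform's allocation granularity.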
6313 void CMSBitMap::dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl) {
6314 HeapWord *next_addr, *end_addr, *last_addr;
6315 assert_locked();
6316 assert(covers(mr), "out-of-range error");
6317 // XXX assert that start and end are appropriately aligned
6318 for (next_addr = mr.start(), end_addr = mr.end();
6319 next_addr < end_addr; next_addr = last_addr) {
6320 MemRegion dirty_region = getAndClearMarkedRegion(next_addr, end_addr);
6321 last_addr = dirty_region.end();
6322 if (!dirty_region.is_empty()) {
6323 cl->do_MemRegion(dirty_region);
6324 } else {
6325 assert(last_addr == end_addr, "program logic");
6326 return;
6327 }
6328 }
6329 }
6331 #ifndef PRODUCT
6332 void CMSBitMap::assert_locked() const {
6333 CMSLockVerifier::assert_locked(lock());
6334 }
6336 bool CMSBitMap::covers(MemRegion mr) const {
6337 // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
6338 assert((size_t)_bm.size() == (_bmWordSize >> _shifter),
6339 "size inconsistency");
6340 return (mr.start() >= _bmStartWord) &&
6341 (mr.end() <= endWord());
6342 }
6344 bool CMSBitMap::covers(HeapWord* start, size_t size) const {
6345 return (start >= _bmStartWord && (start + size) <= endWord());
6346 }
6348 void CMSBitMap::verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) {
6349 // verify that there are no 1 bits in the interval [left, right)
6350 FalseBitMapClosure falseBitMapClosure;
6351 iterate(&falseBitMapClosure, left, right);
6352 }
6354 void CMSBitMap::region_invariant(MemRegion mr)
6355 {
6356 assert_locked();
6357 // mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
6358 assert(!mr.is_empty(), "unexpected empty region");
6359 assert(covers(mr), "mr should be covered by bit map");
6360 // convert address range into offset range
6361 size_t start_ofs = heapWordToOffset(mr.start());
6362 // Make sure that end() is appropriately aligned
6363 assert(mr.end() == (HeapWord*)round_to((intptr_t)mr.end(),
6364 (1 << (_shifter+LogHeapWordSize))),
6365 "Misaligned mr.end()");
6366 size_t end_ofs = heapWordToOffset(mr.end());
6367 assert(end_ofs > start_ofs, "Should mark at least one bit");
6368 }
6370 #endif
6372 bool CMSMarkStack::allocate(size_t size) {
6373 // allocate a stack of the requisite depth
6374 ReservedSpace rs(ReservedSpace::allocation_align_size_up(
6375 size * sizeof(oop)));
6376 if (!rs.is_reserved()) {
6377 warning("CMSMarkStack allocation failure");
6378 return false;
6379 }
6380 if (!_virtual_space.initialize(rs, rs.size())) {
6381 warning("CMSMarkStack backing store failure");
6382 return false;
6383 }
6384 assert(_virtual_space.committed_size() == rs.size(),
6385 "didn't reserve backing store for all of CMS stack?");
6386 _base = (oop*)(_virtual_space.low());
6387 _index = 0;
6388 _capacity = size;
6389 NOT_PRODUCT(_max_depth = 0);
6390 return true;
6391 }
6393 // XXX FIX ME !!! In the MT case we come in here holding a
6394 // leaf lock. For printing we need to take a further lock
6395 // which has lower rank. We need to recalibrate the two
6396 // lock-ranks involved in order to be able to print the
6397 // messages below. (Or defer the printing to the caller.
6398 // For now we take the expedient path of just disabling the
6399 // messages for the problematic case.)
6400 void CMSMarkStack::expand() {
6401 assert(_capacity <= CMSMarkStackSizeMax, "stack bigger than permitted");
6402 if (_capacity == CMSMarkStackSizeMax) {
6403 if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
6404 // We print a warning message only once per CMS cycle.
6405 gclog_or_tty->print_cr(" (benign) Hit CMSMarkStack max size limit");
6406 }
6407 return;
6408 }
6409 // Double capacity if possible
6410 size_t new_capacity = MIN2(_capacity*2, CMSMarkStackSizeMax);
6411 // Do not give up the existing stack until we have managed to
6412 // get the doubled capacity that we want.
6413 ReservedSpace rs(ReservedSpace::allocation_align_size_up(
6414 new_capacity * sizeof(oop)));
6415 if (rs.is_reserved()) {
6416 // Release the backing store associated with old stack
6417 _virtual_space.release();
6418 // Reinitialize virtual space for new stack
6419 if (!_virtual_space.initialize(rs, rs.size())) {
6420 fatal("Not enough swap for expanded marking stack");
6421 }
6422 _base = (oop*)(_virtual_space.low());
6423 _index = 0;
6424 _capacity = new_capacity;
6425 } else if (_failed_double++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
6426 // Failed to double the capacity; continue with the existing stack.
6427 // We print a detail message only once per CMS cycle.
6428 gclog_or_tty->print(" (benign) Failed to expand marking stack from "SIZE_FORMAT"K to "
6429 SIZE_FORMAT"K",
6430 _capacity / K, new_capacity / K);
6431 }
6432 }
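// The expansion policy above, with made-up numbers: starting from
// _capacity == 32K oops and CMSMarkStackSizeMax == 4M oops, successful
// expansions yield 64K, 128K, ..., 2M, 4M, after which the capacity is
// pinned by new_capacity = MIN2(_capacity*2, CMSMarkStackSizeMax).
// Note the ordering in the code: the old backing store is released only
// after the new reservation has succeeded, so on failure marking simply
// continues on the existing (smaller) stack rather than losing it.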
6435 // Closures
6436 // XXX: there seems to be a lot of code duplication here;
6437 // should refactor and consolidate common code.
6439 // This closure is used to mark refs into the CMS generation in
6440 // the CMS bit map. Called at the first checkpoint. This closure
6441 // assumes that we do not need to re-mark dirty cards; if the CMS
6442 // generation on which this is used is not the oldest (modulo perm gen)
6443 // generation, then this will lose younger_gen cards!
6445 MarkRefsIntoClosure::MarkRefsIntoClosure(
6446 MemRegion span, CMSBitMap* bitMap):
6447 _span(span),
6448 _bitMap(bitMap)
6449 {
6450 assert(_ref_processor == NULL, "deliberately left NULL");
6451 assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
6452 }
6454 void MarkRefsIntoClosure::do_oop(oop obj) {
6455 // if obj points into _span, then mark the corresponding bit in _bitMap
6456 assert(obj->is_oop(), "expected an oop");
6457 HeapWord* addr = (HeapWord*)obj;
6458 if (_span.contains(addr)) {
6459 // this should be made more efficient
6460 _bitMap->mark(addr);
6461 }
6462 }
6464 void MarkRefsIntoClosure::do_oop(oop* p) { MarkRefsIntoClosure::do_oop_work(p); }
6465 void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
6467 // A variant of the above, used for CMS marking verification.
6468 MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
6469 MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm):
6470 _span(span),
6471 _verification_bm(verification_bm),
6472 _cms_bm(cms_bm)
6473 {
6474 assert(_ref_processor == NULL, "deliberately left NULL");
6475 assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
6476 }
6478 void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
6479 // if obj points into _span, then mark the corresponding bit in _verification_bm
6480 assert(obj->is_oop(), "expected an oop");
6481 HeapWord* addr = (HeapWord*)obj;
6482 if (_span.contains(addr)) {
6483 _verification_bm->mark(addr);
6484 if (!_cms_bm->isMarked(addr)) {
6485 oop(addr)->print();
6486 gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", addr);
6487 fatal("... aborting");
6488 }
6489 }
6490 }
6492 void MarkRefsIntoVerifyClosure::do_oop(oop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
6493 void MarkRefsIntoVerifyClosure::do_oop(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
6495 //////////////////////////////////////////////////
6496 // MarkRefsIntoAndScanClosure
6497 //////////////////////////////////////////////////
6499 MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
6500 ReferenceProcessor* rp,
6501 CMSBitMap* bit_map,
6502 CMSBitMap* mod_union_table,
6503 CMSMarkStack* mark_stack,
6504 CMSMarkStack* revisit_stack,
6505 CMSCollector* collector,
6506 bool should_yield,
6507 bool concurrent_precleaning):
6508 _collector(collector),
6509 _span(span),
6510 _bit_map(bit_map),
6511 _mark_stack(mark_stack),
6512 _pushAndMarkClosure(collector, span, rp, bit_map, mod_union_table,
6513 mark_stack, revisit_stack, concurrent_precleaning),
6514 _yield(should_yield),
6515 _concurrent_precleaning(concurrent_precleaning),
6516 _freelistLock(NULL)
6517 {
6518 _ref_processor = rp;
6519 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
6520 }
6522 // This closure is used to mark refs into the CMS generation at the
6523 // second (final) checkpoint, and to scan and transitively follow
6524 // the unmarked oops. It is also used during the concurrent precleaning
6525 // phase while scanning objects on dirty cards in the CMS generation.
6526 // The marks are made in the marking bit map and the marking stack is
6527 // used for keeping the (newly) grey objects during the scan.
6528 // The parallel version (Par_...) appears further below.
6529 void MarkRefsIntoAndScanClosure::do_oop(oop obj) {
6530 if (obj != NULL) {
6531 assert(obj->is_oop(), "expected an oop");
6532 HeapWord* addr = (HeapWord*)obj;
6533 assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
6534 assert(_collector->overflow_list_is_empty(),
6535 "overflow list should be empty");
6536 if (_span.contains(addr) &&
6537 !_bit_map->isMarked(addr)) {
6538 // mark bit map (object is now grey)
6539 _bit_map->mark(addr);
6540 // push on marking stack (stack should be empty), and drain the
6541 // stack by applying this closure to the oops in the oops popped
6542 // from the stack (i.e. blacken the grey objects)
6543 bool res = _mark_stack->push(obj);
6544 assert(res, "Should have space to push on empty stack");
6545 do {
6546 oop new_oop = _mark_stack->pop();
6547 assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
6548 assert(new_oop->is_parsable(), "Found unparsable oop");
6549 assert(_bit_map->isMarked((HeapWord*)new_oop),
6550 "only grey objects on this stack");
6551 // iterate over the oops in this oop, marking and pushing
6552 // the ones in CMS heap (i.e. in _span).
6553 new_oop->oop_iterate(&_pushAndMarkClosure);
6554 // check if it's time to yield
6555 do_yield_check();
6556 } while (!_mark_stack->isEmpty() ||
6557 (!_concurrent_precleaning && take_from_overflow_list()));
6558 // if marking stack is empty, and we are not doing this
6559 // during precleaning, then check the overflow list
6560 }
6561 assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
6562 assert(_collector->overflow_list_is_empty(),
6563 "overflow list was drained above");
6564 // We could restore evacuated mark words, if any, used for
6565 // overflow list links at this point, because the overflow list
6566 // is provably empty here.
6567 // size requirements for preserved_{oop,mark}_stack.
6568 // But we'll just postpone it until we are all done
6569 // so we can just stream through.
6570 if (!_concurrent_precleaning && CMSOverflowEarlyRestoration) {
6571 _collector->restore_preserved_marks_if_any();
6572 assert(_collector->no_preserved_marks(), "No preserved marks");
6573 }
6574 assert(!CMSOverflowEarlyRestoration || _collector->no_preserved_marks(),
6575 "All preserved marks should have been restored above");
6576 }
6577 }
6579 void MarkRefsIntoAndScanClosure::do_oop(oop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
6580 void MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
6582 void MarkRefsIntoAndScanClosure::do_yield_work() {
6583 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6584 "CMS thread should hold CMS token");
6585 assert_lock_strong(_freelistLock);
6586 assert_lock_strong(_bit_map->lock());
6587 // relinquish the freelistLock and the bitMapLock()
6588 DEBUG_ONLY(RememberKlassesChecker mux(false);)
6589 _bit_map->lock()->unlock();
6590 _freelistLock->unlock();
6591 ConcurrentMarkSweepThread::desynchronize(true);
6592 ConcurrentMarkSweepThread::acknowledge_yield_request();
6593 _collector->stopTimer();
6594 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
6595 if (PrintCMSStatistics != 0) {
6596 _collector->incrementYields();
6597 }
6598 _collector->icms_wait();
6600 // See the comment in coordinator_yield()
6601 for (unsigned i = 0;
6602 i < CMSYieldSleepCount &&
6603 ConcurrentMarkSweepThread::should_yield() &&
6604 !CMSCollector::foregroundGCIsActive();
6605 ++i) {
6606 os::sleep(Thread::current(), 1, false);
6607 ConcurrentMarkSweepThread::acknowledge_yield_request();
6608 }
6610 ConcurrentMarkSweepThread::synchronize(true);
6611 _freelistLock->lock_without_safepoint_check();
6612 _bit_map->lock()->lock_without_safepoint_check();
6613 _collector->startTimer();
6614 }
6616 ///////////////////////////////////////////////////////////
6617 // Par_MarkRefsIntoAndScanClosure: a parallel version of
6618 // MarkRefsIntoAndScanClosure
6619 ///////////////////////////////////////////////////////////
6620 Par_MarkRefsIntoAndScanClosure::Par_MarkRefsIntoAndScanClosure(
6621 CMSCollector* collector, MemRegion span, ReferenceProcessor* rp,
6622 CMSBitMap* bit_map, OopTaskQueue* work_queue, CMSMarkStack* revisit_stack):
6623 _span(span),
6624 _bit_map(bit_map),
6625 _work_queue(work_queue),
6626 _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
6627 (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads))),
6628 _par_pushAndMarkClosure(collector, span, rp, bit_map, work_queue,
6629 revisit_stack)
6630 {
6631 _ref_processor = rp;
6632 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
6633 }
6635 // This closure is used to mark refs into the CMS generation at the
6636 // second (final) checkpoint, and to scan and transitively follow
6637 // the unmarked oops. The marks are made in the marking bit map and
6638 // the work_queue is used for keeping the (newly) grey objects during
6639 // the scan phase whence they are also available for stealing by parallel
6640 // threads. Since the marking bit map is shared, updates are
6641 // synchronized (via CAS).
6642 void Par_MarkRefsIntoAndScanClosure::do_oop(oop obj) {
6643 if (obj != NULL) {
6644 // Ignore mark word because this could be an already marked oop
6645 // that may be chained at the end of the overflow list.
6646 assert(obj->is_oop(true), "expected an oop");
6647 HeapWord* addr = (HeapWord*)obj;
6648 if (_span.contains(addr) &&
6649 !_bit_map->isMarked(addr)) {
6650 // mark bit map (object will become grey):
6651 // It is possible for several threads to be
6652 // trying to "claim" this object concurrently;
6653 // the unique thread that succeeds in marking the
6654 // object first will do the subsequent push on
6655 // to the work queue (or overflow list).
6656 if (_bit_map->par_mark(addr)) {
6657 // push on work_queue (which may not be empty), and trim the
6658 // queue to an appropriate length by applying this closure to
6659 // the oops in the oops popped from the stack (i.e. blacken the
6660 // grey objects)
6661 bool res = _work_queue->push(obj);
6662 assert(res, "Low water mark should be less than capacity?");
6663 trim_queue(_low_water_mark);
6664 } // Else, another thread claimed the object
6665 }
6666 }
6667 }
6669 void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
6670 void Par_MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
6672 // This closure is used to rescan the marked objects on the dirty cards
6673 // in the mod union table and the card table proper.
6674 size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
6675 oop p, MemRegion mr) {
6677 size_t size = 0;
6678 HeapWord* addr = (HeapWord*)p;
6679 DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6680 assert(_span.contains(addr), "we are scanning the CMS generation");
6681 // check if it's time to yield
6682 if (do_yield_check()) {
6683 // We yielded for some foreground stop-world work,
6684 // and we have been asked to abort this ongoing preclean cycle.
6685 return 0;
6686 }
6687 if (_bitMap->isMarked(addr)) {
6688 // it's marked; is it potentially uninitialized?
6689 if (p->klass_or_null() != NULL) {
6690 // If is_conc_safe is false, the object may be undergoing
6691 // change by the VM outside a safepoint. Don't try to
6692 // scan it, but rather leave it for the remark phase.
6693 if (CMSPermGenPrecleaningEnabled &&
6694 (!p->is_conc_safe() || !p->is_parsable())) {
6695 // Signal precleaning to redirty the card since
6696 // the klass pointer is already installed.
6697 assert(size == 0, "Initial value");
6698 } else {
6699 assert(p->is_parsable(), "must be parsable.");
6700 // an initialized object; ignore mark word in verification below
6701 // since we are running concurrent with mutators
6702 assert(p->is_oop(true), "should be an oop");
6703 if (p->is_objArray()) {
6704 // objArrays are precisely marked; restrict scanning
6705 // to dirty cards only.
6706 size = CompactibleFreeListSpace::adjustObjectSize(
6707 p->oop_iterate(_scanningClosure, mr));
6708 } else {
6709 // A non-array may have been imprecisely marked; we need
6710 // to scan the object in its entirety.
6711 size = CompactibleFreeListSpace::adjustObjectSize(
6712 p->oop_iterate(_scanningClosure));
6713 }
6714 #ifdef DEBUG
6715 size_t direct_size =
6716 CompactibleFreeListSpace::adjustObjectSize(p->size());
6717 assert(size == direct_size, "Inconsistency in size");
6718 assert(size >= 3, "Necessary for Printezis marks to work");
6719 if (!_bitMap->isMarked(addr+1)) {
6720 _bitMap->verifyNoOneBitsInRange(addr+2, addr+size);
6721 } else {
6722 _bitMap->verifyNoOneBitsInRange(addr+2, addr+size-1);
6723 assert(_bitMap->isMarked(addr+size-1),
6724 "inconsistent Printezis mark");
6725 }
6726 #endif // DEBUG
6727 }
6728 } else {
6729 // an uninitialized object
6730 assert(_bitMap->isMarked(addr+1), "missing Printezis mark?");
6731 HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
6732 size = pointer_delta(nextOneAddr + 1, addr);
6733 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6734 "alignment problem");
6735 // Note that pre-cleaning needn't redirty the card. OopDesc::set_klass()
6736 // will dirty the card when the klass pointer is installed in the
6737 // object (signalling the completion of initialization).
6738 }
6739 } else {
6740 // Either a not yet marked object or an uninitialized object
6741 if (p->klass_or_null() == NULL || !p->is_parsable()) {
6742 // An uninitialized object, skip to the next card, since
6743 // we may not be able to read its P-bits yet.
6744 assert(size == 0, "Initial value");
6745 } else {
6746 // An object not (yet) reached by marking: we merely need to
6747 // compute its size so as to go look at the next block.
6748 assert(p->is_oop(true), "should be an oop");
6749 size = CompactibleFreeListSpace::adjustObjectSize(p->size());
6750 }
6751 }
6752 DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6753 return size;
6754 }
6756 void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
6757 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6758 "CMS thread should hold CMS token");
6759 assert_lock_strong(_freelistLock);
6760 assert_lock_strong(_bitMap->lock());
6761 DEBUG_ONLY(RememberKlassesChecker mux(false);)
6762 // relinquish the freelistLock and the bitMapLock()
6763 _bitMap->lock()->unlock();
6764 _freelistLock->unlock();
6765 ConcurrentMarkSweepThread::desynchronize(true);
6766 ConcurrentMarkSweepThread::acknowledge_yield_request();
6767 _collector->stopTimer();
6768 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
6769 if (PrintCMSStatistics != 0) {
6770 _collector->incrementYields();
6771 }
6772 _collector->icms_wait();
6774 // See the comment in coordinator_yield()
6775 for (unsigned i = 0; i < CMSYieldSleepCount &&
6776 ConcurrentMarkSweepThread::should_yield() &&
6777 !CMSCollector::foregroundGCIsActive(); ++i) {
6778 os::sleep(Thread::current(), 1, false);
6779 ConcurrentMarkSweepThread::acknowledge_yield_request();
6780 }
6782 ConcurrentMarkSweepThread::synchronize(true);
6783 _freelistLock->lock_without_safepoint_check();
6784 _bitMap->lock()->lock_without_safepoint_check();
6785 _collector->startTimer();
6786 }
6789 //////////////////////////////////////////////////////////////////
6790 // SurvivorSpacePrecleanClosure
6791 //////////////////////////////////////////////////////////////////
6792 // This (single-threaded) closure is used to preclean the oops in
6793 // the survivor spaces.
6794 size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {
6796 HeapWord* addr = (HeapWord*)p;
6797 DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6798 assert(!_span.contains(addr), "we are scanning the survivor spaces");
6799 assert(p->klass_or_null() != NULL, "object should be initialized");
6800 assert(p->is_parsable(), "must be parsable.");
6801 // an initialized object; ignore mark word in verification below
6802 // since we are running concurrent with mutators
6803 assert(p->is_oop(true), "should be an oop");
6804 // Note that we do not yield while we iterate over
6805 // the interior oops of p, pushing the relevant ones
6806 // on our marking stack.
6807 size_t size = p->oop_iterate(_scanning_closure);
6808 do_yield_check();
6809 // Observe that below, we do not abandon the preclean
6810 // phase as soon as we should; rather we empty the
6811 // marking stack before returning. This is to satisfy
6812 // some existing assertions. In general, it may be a
6813 // good idea to abort immediately and complete the marking
6814 // from the grey objects at a later time.
6815 while (!_mark_stack->isEmpty()) {
6816 oop new_oop = _mark_stack->pop();
6817 assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
6818 assert(new_oop->is_parsable(), "Found unparsable oop");
6819 assert(_bit_map->isMarked((HeapWord*)new_oop),
6820 "only grey objects on this stack");
6821 // iterate over the oops in this oop, marking and pushing
6822 // the ones in CMS heap (i.e. in _span).
6823 new_oop->oop_iterate(_scanning_closure);
6824 // check if it's time to yield
6825 do_yield_check();
6826 }
6827 unsigned int after_count =
6828 GenCollectedHeap::heap()->total_collections();
6829 bool abort = (_before_count != after_count) ||
6830 _collector->should_abort_preclean();
6831 return abort ? 0 : size;
6832 }
6834 void SurvivorSpacePrecleanClosure::do_yield_work() {
6835 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6836 "CMS thread should hold CMS token");
6837 assert_lock_strong(_bit_map->lock());
6838 DEBUG_ONLY(RememberKlassesChecker smx(false);)
6839 // Relinquish the bit map lock
6840 _bit_map->lock()->unlock();
6841 ConcurrentMarkSweepThread::desynchronize(true);
6842 ConcurrentMarkSweepThread::acknowledge_yield_request();
6843 _collector->stopTimer();
6844 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
6845 if (PrintCMSStatistics != 0) {
6846 _collector->incrementYields();
6847 }
6848 _collector->icms_wait();
6850 // See the comment in coordinator_yield()
6851 for (unsigned i = 0; i < CMSYieldSleepCount &&
6852 ConcurrentMarkSweepThread::should_yield() &&
6853 !CMSCollector::foregroundGCIsActive(); ++i) {
6854 os::sleep(Thread::current(), 1, false);
6855 ConcurrentMarkSweepThread::acknowledge_yield_request();
6856 }
6858 ConcurrentMarkSweepThread::synchronize(true);
6859 _bit_map->lock()->lock_without_safepoint_check();
6860 _collector->startTimer();
6861 }
6863 // This closure is used to rescan the marked objects on the dirty cards
6864 // in the mod union table and the card table proper. In the parallel
6865 // case, although the bitMap is shared, we do a single read so the
6866 // isMarked() query is "safe".
6867 bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
6868 // Ignore mark word because we are running concurrent with mutators
6869 assert(p->is_oop_or_null(true), "expected an oop or null");
6870 HeapWord* addr = (HeapWord*)p;
6871 assert(_span.contains(addr), "we are scanning the CMS generation");
6872 bool is_obj_array = false;
6873 #ifdef DEBUG
6874 if (!_parallel) {
6875 assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
6876 assert(_collector->overflow_list_is_empty(),
6877 "overflow list should be empty");
6879 }
6880 #endif // DEBUG
6881 if (_bit_map->isMarked(addr)) {
6882 // Obj arrays are precisely marked, non-arrays are not;
6883 // so we scan objArrays precisely and non-arrays in their
6884 // entirety.
6885 if (p->is_objArray()) {
6886 is_obj_array = true;
6887 if (_parallel) {
6888 p->oop_iterate(_par_scan_closure, mr);
6889 } else {
6890 p->oop_iterate(_scan_closure, mr);
6891 }
6892 } else {
6893 if (_parallel) {
6894 p->oop_iterate(_par_scan_closure);
6895 } else {
6896 p->oop_iterate(_scan_closure);
6897 }
6898 }
6899 }
6900 #ifdef DEBUG
6901 if (!_parallel) {
6902 assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
6903 assert(_collector->overflow_list_is_empty(),
6904 "overflow list should be empty");
6906 }
6907 #endif // DEBUG
6908 return is_obj_array;
6909 }
6911 MarkFromRootsClosure::MarkFromRootsClosure(CMSCollector* collector,
6912 MemRegion span,
6913 CMSBitMap* bitMap, CMSMarkStack* markStack,
6914 CMSMarkStack* revisitStack,
6915 bool should_yield, bool verifying):
6916 _collector(collector),
6917 _span(span),
6918 _bitMap(bitMap),
6919 _mut(&collector->_modUnionTable),
6920 _markStack(markStack),
6921 _revisitStack(revisitStack),
6922 _yield(should_yield),
6923 _skipBits(0)
6924 {
6925 assert(_markStack->isEmpty(), "stack should be empty");
6926 _finger = _bitMap->startWord();
6927 _threshold = _finger;
6928 assert(_collector->_restart_addr == NULL, "Sanity check");
6929 assert(_span.contains(_finger), "Out of bounds _finger?");
6930 DEBUG_ONLY(_verifying = verifying;)
6931 }
6933 void MarkFromRootsClosure::reset(HeapWord* addr) {
6934 assert(_markStack->isEmpty(), "would cause duplicates on stack");
6935 assert(_span.contains(addr), "Out of bounds _finger?");
6936 _finger = addr;
6937 _threshold = (HeapWord*)round_to(
6938 (intptr_t)_finger, CardTableModRefBS::card_size);
6939 }
6941 // Should revisit to see if this should be restructured for
6942 // greater efficiency.
6943 bool MarkFromRootsClosure::do_bit(size_t offset) {
6944 if (_skipBits > 0) {
6945 _skipBits--;
6946 return true;
6947 }
6948 // convert offset into a HeapWord*
6949 HeapWord* addr = _bitMap->startWord() + offset;
6950 assert(addr >= _bitMap->startWord() && addr < _bitMap->endWord(),
6951 "address out of range");
6952 assert(_bitMap->isMarked(addr), "tautology");
6953 if (_bitMap->isMarked(addr+1)) {
6954 // this is an allocated but not yet initialized object
6955 assert(_skipBits == 0, "tautology");
6956 _skipBits = 2; // skip next two marked bits ("Printezis-marks")
6957 oop p = oop(addr);
6958 if (p->klass_or_null() == NULL || !p->is_parsable()) {
6959 DEBUG_ONLY(if (!_verifying) {)
6960 // We re-dirty the cards on which this object lies and increase
6961 // the _threshold so that we'll come back to scan this object
6962 // during the preclean or remark phase. (CMSCleanOnEnter)
6963 if (CMSCleanOnEnter) {
6964 size_t sz = _collector->block_size_using_printezis_bits(addr);
6965 HeapWord* end_card_addr = (HeapWord*)round_to(
6966 (intptr_t)(addr+sz), CardTableModRefBS::card_size);
6967 MemRegion redirty_range = MemRegion(addr, end_card_addr);
6968 assert(!redirty_range.is_empty(), "Arithmetical tautology");
6969 // Bump _threshold to end_card_addr; note that
6970 // _threshold cannot possibly exceed end_card_addr, anyhow.
6971 // This prevents future clearing of the card as the scan proceeds
6972 // to the right.
6973 assert(_threshold <= end_card_addr,
6974 "Because we are just scanning into this object");
6975 if (_threshold < end_card_addr) {
6976 _threshold = end_card_addr;
6977 }
6978 if (p->klass_or_null() != NULL) {
6979 // Redirty the range of cards...
6980 _mut->mark_range(redirty_range);
6981 } // ...else the setting of klass will dirty the card anyway.
6982 }
6983 DEBUG_ONLY(})
6984 return true;
6985 }
6986 }
6987 scanOopsInOop(addr);
6988 return true;
6989 }
6991 // We take a break if we've been at this for a while,
6992 // so as to avoid monopolizing the locks involved.
6993 void MarkFromRootsClosure::do_yield_work() {
6994 // First give up the locks, then yield, then re-lock
6995 // We should probably use a constructor/destructor idiom to
6996 // do this unlock/lock or modify the MutexUnlocker class to
6997 // serve our purpose. XXX
6998 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6999 "CMS thread should hold CMS token");
7000 assert_lock_strong(_bitMap->lock());
7001 DEBUG_ONLY(RememberKlassesChecker mux(false);)
7002 _bitMap->lock()->unlock();
7003 ConcurrentMarkSweepThread::desynchronize(true);
7004 ConcurrentMarkSweepThread::acknowledge_yield_request();
7005 _collector->stopTimer();
7006 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
7007 if (PrintCMSStatistics != 0) {
7008 _collector->incrementYields();
7009 }
7010 _collector->icms_wait();
7012 // See the comment in coordinator_yield()
7013 for (unsigned i = 0; i < CMSYieldSleepCount &&
7014 ConcurrentMarkSweepThread::should_yield() &&
7015 !CMSCollector::foregroundGCIsActive(); ++i) {
7016 os::sleep(Thread::current(), 1, false);
7017 ConcurrentMarkSweepThread::acknowledge_yield_request();
7018 }
7020 ConcurrentMarkSweepThread::synchronize(true);
7021 _bitMap->lock()->lock_without_safepoint_check();
7022 _collector->startTimer();
7023 }
7025 void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
7026 assert(_bitMap->isMarked(ptr), "expected bit to be set");
7027 assert(_markStack->isEmpty(),
7028 "should drain stack to limit stack usage");
7029 // convert ptr to an oop preparatory to scanning
7030 oop obj = oop(ptr);
7031 // Ignore mark word in verification below, since we
7032 // may be running concurrent with mutators.
7033 assert(obj->is_oop(true), "should be an oop");
7034 assert(_finger <= ptr, "_finger runneth ahead");
7035 // advance the finger to right end of this object
7036 _finger = ptr + obj->size();
7037 assert(_finger > ptr, "we just incremented it above");
7038 // On large heaps, it may take us some time to get through
7039 // the marking phase (especially if running iCMS). During
7040 // this time it's possible that a lot of mutations have
7041 // accumulated in the card table and the mod union table --
7042 // these mutation records are redundant until we have
7043 // actually traced into the corresponding card.
7044 // Here, we check whether advancing the finger would make
7045 // us cross into a new card, and if so clear corresponding
7046 // cards in the MUT (preclean them in the card-table in the
7047 // future).
7049 DEBUG_ONLY(if (!_verifying) {)
7050 // The clean-on-enter optimization is disabled by default,
7051 // until we fix 6178663.
7052 if (CMSCleanOnEnter && (_finger > _threshold)) {
7053 // [_threshold, _finger) represents the interval
7054 // of cards to be cleared in MUT (or precleaned in card table).
7055 // The set of cards to be cleared is all those that overlap
7056 // with the interval [_threshold, _finger); note that
7057 // _threshold is always kept card-aligned but _finger isn't
7058 // always card-aligned.
7059 HeapWord* old_threshold = _threshold;
7060 assert(old_threshold == (HeapWord*)round_to(
7061 (intptr_t)old_threshold, CardTableModRefBS::card_size),
7062 "_threshold should always be card-aligned");
7063 _threshold = (HeapWord*)round_to(
7064 (intptr_t)_finger, CardTableModRefBS::card_size);
7065 MemRegion mr(old_threshold, _threshold);
7066 assert(!mr.is_empty(), "Control point invariant");
7067 assert(_span.contains(mr), "Should clear within span");
7068 // XXX When _finger crosses from old gen into perm gen
7069 // we may be doing unnecessary cleaning; do better in the
7070 // future by detecting that condition and clearing fewer
7071 // MUT/CT entries.
7072 _mut->clear_range(mr);
7073 }
7074 DEBUG_ONLY(})
7075 // Note: the finger doesn't advance while we drain
7076 // the stack below.
7077 PushOrMarkClosure pushOrMarkClosure(_collector,
7078 _span, _bitMap, _markStack,
7079 _revisitStack,
7080 _finger, this);
7081 bool res = _markStack->push(obj);
7082 assert(res, "Empty non-zero size stack should have space for single push");
7083 while (!_markStack->isEmpty()) {
7084 oop new_oop = _markStack->pop();
7085 // Skip verifying header mark word below because we are
7086 // running concurrent with mutators.
7087 assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
7088 // now scan this oop's oops
7089 new_oop->oop_iterate(&pushOrMarkClosure);
7090 do_yield_check();
7091 }
7092 assert(_markStack->isEmpty(), "tautology, emphasizing post-condition");
7093 }
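// The interval arithmetic above, on a hedged example (512-byte cards,
// 8-byte HeapWords, so 64 words per card): if the card-aligned
// _threshold sits at word offset 128 and scanning this object advances
// _finger to word offset 201, then the new _threshold is
//   round_to(201 * 8, 512) / 8 == 256,
// and mr covers words [128, 256), exactly the two cards overlapping
// [_threshold, _finger), which are then cleared in the MUT. The same
// arithmetic appears in the parallel version further below.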
7095 Par_MarkFromRootsClosure::Par_MarkFromRootsClosure(CMSConcMarkingTask* task,
7096 CMSCollector* collector, MemRegion span,
7097 CMSBitMap* bit_map,
7098 OopTaskQueue* work_queue,
7099 CMSMarkStack* overflow_stack,
7100 CMSMarkStack* revisit_stack,
7101 bool should_yield):
7102 _collector(collector),
7103 _whole_span(collector->_span),
7104 _span(span),
7105 _bit_map(bit_map),
7106 _mut(&collector->_modUnionTable),
7107 _work_queue(work_queue),
7108 _overflow_stack(overflow_stack),
7109 _revisit_stack(revisit_stack),
7110 _yield(should_yield),
7111 _skip_bits(0),
7112 _task(task)
7113 {
7114 assert(_work_queue->size() == 0, "work_queue should be empty");
7115 _finger = span.start();
7116 _threshold = _finger; // XXX Defer clear-on-enter optimization for now
7117 assert(_span.contains(_finger), "Out of bounds _finger?");
7118 }
7120 // Should revisit to see if this should be restructured for
7121 // greater efficiency.
7122 bool Par_MarkFromRootsClosure::do_bit(size_t offset) {
7123 if (_skip_bits > 0) {
7124 _skip_bits--;
7125 return true;
7126 }
7127 // convert offset into a HeapWord*
7128 HeapWord* addr = _bit_map->startWord() + offset;
7129 assert(addr >= _bit_map->startWord() && addr < _bit_map->endWord(),
7130 "address out of range");
7131 assert(_bit_map->isMarked(addr), "tautology");
7132 if (_bit_map->isMarked(addr+1)) {
7133 // this is an allocated object that might not yet be initialized
7134 assert(_skip_bits == 0, "tautology");
7135 _skip_bits = 2; // skip next two marked bits ("Printezis-marks")
7136 oop p = oop(addr);
7137 if (p->klass_or_null() == NULL || !p->is_parsable()) {
7138 // in the case of Clean-on-Enter optimization, redirty card
7139 // and avoid clearing card by increasing the threshold.
7140 return true;
7141 }
7142 }
7143 scan_oops_in_oop(addr);
7144 return true;
7145 }
7147 void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
7148 assert(_bit_map->isMarked(ptr), "expected bit to be set");
7149 // Should we assert that our work queue is empty or
7150 // below some drain limit?
7151 assert(_work_queue->size() == 0,
7152 "should drain stack to limit stack usage");
7153 // convert ptr to an oop preparatory to scanning
7154 oop obj = oop(ptr);
7155 // Ignore mark word in verification below, since we
7156 // may be running concurrent with mutators.
7157 assert(obj->is_oop(true), "should be an oop");
7158 assert(_finger <= ptr, "_finger runneth ahead");
7159 // advance the finger to right end of this object
7160 _finger = ptr + obj->size();
7161 assert(_finger > ptr, "we just incremented it above");
7162 // On large heaps, it may take us some time to get through
7163 // the marking phase (especially if running iCMS). During
7164 // this time it's possible that a lot of mutations have
7165 // accumulated in the card table and the mod union table --
7166 // these mutation records are redundant until we have
7167 // actually traced into the corresponding card.
7168 // Here, we check whether advancing the finger would make
7169 // us cross into a new card, and if so clear corresponding
7170 // cards in the MUT (preclean them in the card-table in the
7171 // future).
7173 // The clean-on-enter optimization is disabled by default,
7174 // until we fix 6178663.
7175 if (CMSCleanOnEnter && (_finger > _threshold)) {
7176 // [_threshold, _finger) represents the interval
7177 // of cards to be cleared in MUT (or precleaned in card table).
7178 // The set of cards to be cleared is all those that overlap
7179 // with the interval [_threshold, _finger); note that
7180 // _threshold is always kept card-aligned but _finger isn't
7181 // always card-aligned.
7182 HeapWord* old_threshold = _threshold;
7183 assert(old_threshold == (HeapWord*)round_to(
7184 (intptr_t)old_threshold, CardTableModRefBS::card_size),
7185 "_threshold should always be card-aligned");
7186 _threshold = (HeapWord*)round_to(
7187 (intptr_t)_finger, CardTableModRefBS::card_size);
7188 MemRegion mr(old_threshold, _threshold);
7189 assert(!mr.is_empty(), "Control point invariant");
7190 assert(_span.contains(mr), "Should clear within span"); // _whole_span ??
7191 // XXX When _finger crosses from old gen into perm gen
7192 // we may be doing unnecessary cleaning; do better in the
7193 // future by detecting that condition and clearing fewer
7194 // MUT/CT entries.
7195 _mut->clear_range(mr);
7196 }
7198 // Note: the local finger doesn't advance while we drain
7199 // the stack below, but the global finger sure can and will.
7200 HeapWord** gfa = _task->global_finger_addr();
7201 Par_PushOrMarkClosure pushOrMarkClosure(_collector,
7202 _span, _bit_map,
7203 _work_queue,
7204 _overflow_stack,
7205 _revisit_stack,
7206 _finger,
7207 gfa, this);
7208 bool res = _work_queue->push(obj); // overflow could occur here
7209 assert(res, "Will hold once we use workqueues");
7210 while (true) {
7211 oop new_oop;
7212 if (!_work_queue->pop_local(new_oop)) {
7213 // We emptied our work_queue; check whether anything can
7214 // be taken from the overflow stack.
7215 if (CMSConcMarkingTask::get_work_from_overflow_stack(
7216 _overflow_stack, _work_queue)) {
7217 do_yield_check();
7218 continue;
7219 } else { // done
7220 break;
7221 }
7222 }
7223 // Skip verifying header mark word below because we are
7224 // running concurrent with mutators.
7225 assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
7226 // now scan this oop's oops
7227 new_oop->oop_iterate(&pushOrMarkClosure);
7228 do_yield_check();
7229 }
7230 assert(_work_queue->size() == 0, "tautology, emphasizing post-condition");
7231 }
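// Illustrative sketch (not collector code): the round-up-to-card
// arithmetic that keeps _threshold card-aligned in scan_oops_in_oop()
// above. round_to() rounds an address up to the next multiple of
// CardTableModRefBS::card_size, so the MUT interval
// [old_threshold, _threshold) that gets cleared always covers whole
// cards. The 512-byte card size in the example is an assumption made
// purely for illustration.
static intptr_t sketch_round_to_card(intptr_t addr, intptr_t card_size) {
  // card_size is a power of 2: round up, then mask off the low-order bits
  return (addr + card_size - 1) & ~(card_size - 1);
}
// e.g. sketch_round_to_card(0x1001, 512) == 0x1200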
7233 // Yield in response to a request from VM Thread or
7234 // from mutators.
7235 void Par_MarkFromRootsClosure::do_yield_work() {
7236 assert(_task != NULL, "sanity");
7237 _task->yield();
7238 }
7240 // A variant of the above used for verifying CMS marking work.
7241 MarkFromRootsVerifyClosure::MarkFromRootsVerifyClosure(CMSCollector* collector,
7242 MemRegion span,
7243 CMSBitMap* verification_bm, CMSBitMap* cms_bm,
7244 CMSMarkStack* mark_stack):
7245 _collector(collector),
7246 _span(span),
7247 _verification_bm(verification_bm),
7248 _cms_bm(cms_bm),
7249 _mark_stack(mark_stack),
7250 _pam_verify_closure(collector, span, verification_bm, cms_bm,
7251 mark_stack)
7252 {
7253 assert(_mark_stack->isEmpty(), "stack should be empty");
7254 _finger = _verification_bm->startWord();
7255 assert(_collector->_restart_addr == NULL, "Sanity check");
7256 assert(_span.contains(_finger), "Out of bounds _finger?");
7257 }
7259 void MarkFromRootsVerifyClosure::reset(HeapWord* addr) {
7260 assert(_mark_stack->isEmpty(), "would cause duplicates on stack");
7261 assert(_span.contains(addr), "Out of bounds _finger?");
7262 _finger = addr;
7263 }
7265 // Should revisit to see if this should be restructured for
7266 // greater efficiency.
7267 bool MarkFromRootsVerifyClosure::do_bit(size_t offset) {
7268 // convert offset into a HeapWord*
7269 HeapWord* addr = _verification_bm->startWord() + offset;
7270 assert(_verification_bm->endWord() && addr < _verification_bm->endWord(),
7271 "address out of range");
7272 assert(_verification_bm->isMarked(addr), "tautology");
7273 assert(_cms_bm->isMarked(addr), "tautology");
7275 assert(_mark_stack->isEmpty(),
7276 "should drain stack to limit stack usage");
7277 // convert addr to an oop preparatory to scanning
7278 oop obj = oop(addr);
7279 assert(obj->is_oop(), "should be an oop");
7280 assert(_finger <= addr, "_finger runneth ahead");
7281 // advance the finger to right end of this object
7282 _finger = addr + obj->size();
7283 assert(_finger > addr, "we just incremented it above");
7284 // Note: the finger doesn't advance while we drain
7285 // the stack below.
7286 bool res = _mark_stack->push(obj);
7287 assert(res, "Empty non-zero size stack should have space for single push");
7288 while (!_mark_stack->isEmpty()) {
7289 oop new_oop = _mark_stack->pop();
7290 assert(new_oop->is_oop(), "Oops! expected to pop an oop");
7291 // now scan this oop's oops
7292 new_oop->oop_iterate(&_pam_verify_closure);
7293 }
7294 assert(_mark_stack->isEmpty(), "tautology, emphasizing post-condition");
7295 return true;
7296 }
7298 PushAndMarkVerifyClosure::PushAndMarkVerifyClosure(
7299 CMSCollector* collector, MemRegion span,
7300 CMSBitMap* verification_bm, CMSBitMap* cms_bm,
7301 CMSMarkStack* mark_stack):
7302 OopClosure(collector->ref_processor()),
7303 _collector(collector),
7304 _span(span),
7305 _verification_bm(verification_bm),
7306 _cms_bm(cms_bm),
7307 _mark_stack(mark_stack)
7308 { }
7310 void PushAndMarkVerifyClosure::do_oop(oop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
7311 void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
7313 // Upon stack overflow, we discard (part of) the stack,
7314 // remembering the least address amongst those discarded
7315 // in CMSCollector's _restart_addr.
7316 void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
7317 // Remember the least grey address discarded
7318 HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost);
7319 _collector->lower_restart_addr(ra);
7320 _mark_stack->reset(); // discard stack contents
7321 _mark_stack->expand(); // expand the stack if possible
7322 }
7324 void PushAndMarkVerifyClosure::do_oop(oop obj) {
7325 assert(obj->is_oop_or_null(), "expected an oop or NULL");
7326 HeapWord* addr = (HeapWord*)obj;
7327 if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
7328 // Oop lies in _span and isn't yet grey or black
7329 _verification_bm->mark(addr); // now grey
7330 if (!_cms_bm->isMarked(addr)) {
7331 oop(addr)->print();
7332 gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)",
7333 addr);
7334 fatal("... aborting");
7335 }
7337 if (!_mark_stack->push(obj)) { // stack overflow
7338 if (PrintCMSStatistics != 0) {
7339 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7340 SIZE_FORMAT, _mark_stack->capacity());
7341 }
7342 assert(_mark_stack->isFull(), "Else push should have succeeded");
7343 handle_stack_overflow(addr);
7344 }
7345 // anything including and to the right of _finger
7346 // will be scanned as we iterate over the remainder of the
7347 // bit map
7348 }
7349 }
7351 PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
7352 MemRegion span,
7353 CMSBitMap* bitMap, CMSMarkStack* markStack,
7354 CMSMarkStack* revisitStack,
7355 HeapWord* finger, MarkFromRootsClosure* parent) :
7356 KlassRememberingOopClosure(collector, collector->ref_processor(), revisitStack),
7357 _span(span),
7358 _bitMap(bitMap),
7359 _markStack(markStack),
7360 _finger(finger),
7361 _parent(parent)
7362 { }
7364 Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
7365 MemRegion span,
7366 CMSBitMap* bit_map,
7367 OopTaskQueue* work_queue,
7368 CMSMarkStack* overflow_stack,
7369 CMSMarkStack* revisit_stack,
7370 HeapWord* finger,
7371 HeapWord** global_finger_addr,
7372 Par_MarkFromRootsClosure* parent) :
7373 Par_KlassRememberingOopClosure(collector,
7374 collector->ref_processor(),
7375 revisit_stack),
7376 _whole_span(collector->_span),
7377 _span(span),
7378 _bit_map(bit_map),
7379 _work_queue(work_queue),
7380 _overflow_stack(overflow_stack),
7381 _finger(finger),
7382 _global_finger_addr(global_finger_addr),
7383 _parent(parent)
7384 { }
7386 // Assumes thread-safe access by callers, who are
7387 // responsible for mutual exclusion.
7388 void CMSCollector::lower_restart_addr(HeapWord* low) {
7389 assert(_span.contains(low), "Out of bounds addr");
7390 if (_restart_addr == NULL) {
7391 _restart_addr = low;
7392 } else {
7393 _restart_addr = MIN2(_restart_addr, low);
7394 }
7395 }
7397 // Upon stack overflow, we discard (part of) the stack,
7398 // remembering the least address amongst those discarded
7399 // in CMSCollector's _restart_addr.
7400 void PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
7401 // Remember the least grey address discarded
7402 HeapWord* ra = (HeapWord*)_markStack->least_value(lost);
7403 _collector->lower_restart_addr(ra);
7404 _markStack->reset(); // discard stack contents
7405 _markStack->expand(); // expand the stack if possible
7406 }
7408 // Upon stack overflow, we discard (part of) the stack,
7409 // remembering the least address amongst those discarded
7410 // in CMSCollector's _restart_addr.
7411 void Par_PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
7412 // We need to do this under a mutex to prevent other
7413 // workers from interfering with the work done below.
7414 MutexLockerEx ml(_overflow_stack->par_lock(),
7415 Mutex::_no_safepoint_check_flag);
7416 // Remember the least grey address discarded
7417 HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
7418 _collector->lower_restart_addr(ra);
7419 _overflow_stack->reset(); // discard stack contents
7420 _overflow_stack->expand(); // expand the stack if possible
7421 }
7423 void PushOrMarkClosure::do_oop(oop obj) {
7424 // Ignore mark word because we are running concurrent with mutators.
7425 assert(obj->is_oop_or_null(true), "expected an oop or NULL");
7426 HeapWord* addr = (HeapWord*)obj;
7427 if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
7428 // Oop lies in _span and isn't yet grey or black
7429 _bitMap->mark(addr); // now grey
7430 if (addr < _finger) {
7431 // the bit map iteration has already either passed, or
7432 // sampled, this bit in the bit map; we'll need to
7433 // use the marking stack to scan this oop's oops.
7434 bool simulate_overflow = false;
7435 NOT_PRODUCT(
7436 if (CMSMarkStackOverflowALot &&
7437 _collector->simulate_overflow()) {
7438 // simulate a stack overflow
7439 simulate_overflow = true;
7440 }
7441 )
7442 if (simulate_overflow || !_markStack->push(obj)) { // stack overflow
7443 if (PrintCMSStatistics != 0) {
7444 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7445 SIZE_FORMAT, _markStack->capacity());
7446 }
7447 assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded");
7448 handle_stack_overflow(addr);
7449 }
7450 }
7451 // anything including and to the right of _finger
7452 // will be scanned as we iterate over the remainder of the
7453 // bit map
7454 do_yield_check();
7455 }
7456 }
7458 void PushOrMarkClosure::do_oop(oop* p) { PushOrMarkClosure::do_oop_work(p); }
7459 void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
7461 void Par_PushOrMarkClosure::do_oop(oop obj) {
7462 // Ignore mark word because we are running concurrent with mutators.
7463 assert(obj->is_oop_or_null(true), "expected an oop or NULL");
7464 HeapWord* addr = (HeapWord*)obj;
7465 if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
7466 // Oop lies in _span and isn't yet grey or black
7467 // We read the global_finger (volatile read) strictly after marking the oop
7468 bool res = _bit_map->par_mark(addr); // now grey
7469 volatile HeapWord** gfa = (volatile HeapWord**)_global_finger_addr;
7470 // Should we push this marked oop on our stack?
7471 // -- if someone else marked it, nothing to do
7472 // -- if target oop is above global finger nothing to do
7473 // -- if target oop is in chunk and above local finger
7474 // then nothing to do
7475 // -- else push on work queue
7476 if ( !res // someone else marked it, they will deal with it
7477 || (addr >= *gfa) // will be scanned in a later task
7478 || (_span.contains(addr) && addr >= _finger)) { // later in this chunk
7479 return;
7480 }
7481 // the bit map iteration has already either passed, or
7482 // sampled, this bit in the bit map; we'll need to
7483 // use the marking stack to scan this oop's oops.
7484 bool simulate_overflow = false;
7485 NOT_PRODUCT(
7486 if (CMSMarkStackOverflowALot &&
7487 _collector->simulate_overflow()) {
7488 // simulate a stack overflow
7489 simulate_overflow = true;
7490 }
7491 )
7492 if (simulate_overflow ||
7493 !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
7494 // stack overflow
7495 if (PrintCMSStatistics != 0) {
7496 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7497 SIZE_FORMAT, _overflow_stack->capacity());
7498 }
7499 // We cannot assert that the overflow stack is full because
7500 // it may have been emptied since.
7501 assert(simulate_overflow ||
7502 _work_queue->size() == _work_queue->max_elems(),
7503 "Else push should have succeeded");
7504 handle_stack_overflow(addr);
7505 }
7506 do_yield_check();
7507 }
7508 }
7510 void Par_PushOrMarkClosure::do_oop(oop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
7511 void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
7513 KlassRememberingOopClosure::KlassRememberingOopClosure(CMSCollector* collector,
7514 ReferenceProcessor* rp,
7515 CMSMarkStack* revisit_stack) :
7516 OopClosure(rp),
7517 _collector(collector),
7518 _revisit_stack(revisit_stack),
7519 _should_remember_klasses(collector->should_unload_classes()) {}
7521 PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
7522 MemRegion span,
7523 ReferenceProcessor* rp,
7524 CMSBitMap* bit_map,
7525 CMSBitMap* mod_union_table,
7526 CMSMarkStack* mark_stack,
7527 CMSMarkStack* revisit_stack,
7528 bool concurrent_precleaning):
7529 KlassRememberingOopClosure(collector, rp, revisit_stack),
7530 _span(span),
7531 _bit_map(bit_map),
7532 _mod_union_table(mod_union_table),
7533 _mark_stack(mark_stack),
7534 _concurrent_precleaning(concurrent_precleaning)
7535 {
7536 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7537 }
7539 // Grey object rescan during pre-cleaning and second checkpoint phases --
7540 // the non-parallel version (the parallel version appears further below.)
7541 void PushAndMarkClosure::do_oop(oop obj) {
7542 // Ignore mark word verification. If during concurrent precleaning,
7543 // the object monitor may be locked. If during the checkpoint
7544 // phases, the object may already have been reached by a different
7545 // path and may be at the end of the global overflow list (so
7546 // the mark word may be NULL).
7547 assert(obj->is_oop_or_null(true /* ignore mark word */),
7548 "expected an oop or NULL");
7549 HeapWord* addr = (HeapWord*)obj;
7550 // Check if oop points into the CMS generation
7551 // and is not marked
7552 if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
7553 // a white object ...
7554 _bit_map->mark(addr); // ... now grey
7555 // push on the marking stack (grey set)
7556 bool simulate_overflow = false;
7557 NOT_PRODUCT(
7558 if (CMSMarkStackOverflowALot &&
7559 _collector->simulate_overflow()) {
7560 // simulate a stack overflow
7561 simulate_overflow = true;
7562 }
7563 )
7564 if (simulate_overflow || !_mark_stack->push(obj)) {
7565 if (_concurrent_precleaning) {
7566 // During precleaning we can just dirty the appropriate card(s)
7567 // in the mod union table, thus ensuring that the object remains
7568 // in the grey set and continue. In the case of object arrays
7569 // we need to dirty all of the cards that the object spans,
7570 // since the rescan of object arrays will be limited to the
7571 // dirty cards.
7572 // Note that no one can be interfering with us in this action
7573 // of dirtying the mod union table, so no locking or atomics
7574 // are required.
7575 if (obj->is_objArray()) {
7576 size_t sz = obj->size();
7577 HeapWord* end_card_addr = (HeapWord*)round_to(
7578 (intptr_t)(addr+sz), CardTableModRefBS::card_size);
7579 MemRegion redirty_range = MemRegion(addr, end_card_addr);
7580 assert(!redirty_range.is_empty(), "Arithmetical tautology");
7581 _mod_union_table->mark_range(redirty_range);
7582 } else {
7583 _mod_union_table->mark(addr);
7584 }
7585 _collector->_ser_pmc_preclean_ovflw++;
7586 } else {
7587 // During the remark phase, we need to remember this oop
7588 // in the overflow list.
7589 _collector->push_on_overflow_list(obj);
7590 _collector->_ser_pmc_remark_ovflw++;
7591 }
7592 }
7593 }
7594 }
7596 Par_PushAndMarkClosure::Par_PushAndMarkClosure(CMSCollector* collector,
7597 MemRegion span,
7598 ReferenceProcessor* rp,
7599 CMSBitMap* bit_map,
7600 OopTaskQueue* work_queue,
7601 CMSMarkStack* revisit_stack):
7602 Par_KlassRememberingOopClosure(collector, rp, revisit_stack),
7603 _span(span),
7604 _bit_map(bit_map),
7605 _work_queue(work_queue)
7606 {
7607 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7608 }
7610 void PushAndMarkClosure::do_oop(oop* p) { PushAndMarkClosure::do_oop_work(p); }
7611 void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
7613 // Grey object rescan during second checkpoint phase --
7614 // the parallel version.
7615 void Par_PushAndMarkClosure::do_oop(oop obj) {
7616 // In the assert below, we ignore the mark word because
7617 // this oop may point to an already visited object that is
7618 // on the overflow stack (in which case the mark word has
7619 // been hijacked for chaining into the overflow stack --
7620 // if this is the last object in the overflow stack then
7621 // its mark word will be NULL). Because this object may
7622 // have been subsequently popped off the global overflow
7623 // stack, and the mark word possibly restored to the prototypical
7624 // value, by the time we get to examine this failing assert in
7625 // the debugger, is_oop_or_null(false) may subsequently start
7626 // to hold.
7627 assert(obj->is_oop_or_null(true),
7628 "expected an oop or NULL");
7629 HeapWord* addr = (HeapWord*)obj;
7630 // Check if oop points into the CMS generation
7631 // and is not marked
7632 if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
7633 // a white object ...
7634 // If we manage to "claim" the object, by being the
7635 // first thread to mark it, then we push it on our
7636 // marking stack
7637 if (_bit_map->par_mark(addr)) { // ... now grey
7638 // push on work queue (grey set)
7639 bool simulate_overflow = false;
7640 NOT_PRODUCT(
7641 if (CMSMarkStackOverflowALot &&
7642 _collector->par_simulate_overflow()) {
7643 // simulate a stack overflow
7644 simulate_overflow = true;
7645 }
7646 )
7647 if (simulate_overflow || !_work_queue->push(obj)) {
7648 _collector->par_push_on_overflow_list(obj);
7649 _collector->_par_pmc_remark_ovflw++; // imprecise OK: no need to CAS
7650 }
7651 } // Else, some other thread got there first
7652 }
7653 }
7655 void Par_PushAndMarkClosure::do_oop(oop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
7656 void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
7658 void PushAndMarkClosure::remember_mdo(DataLayout* v) {
7659 // TBD
7660 }
7662 void Par_PushAndMarkClosure::remember_mdo(DataLayout* v) {
7663 // TBD
7664 }
7666 void CMSPrecleanRefsYieldClosure::do_yield_work() {
7667 DEBUG_ONLY(RememberKlassesChecker mux(false);)
7668 Mutex* bml = _collector->bitMapLock();
7669 assert_lock_strong(bml);
7670 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7671 "CMS thread should hold CMS token");
7673 bml->unlock();
7674 ConcurrentMarkSweepThread::desynchronize(true);
7676 ConcurrentMarkSweepThread::acknowledge_yield_request();
7678 _collector->stopTimer();
7679 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
7680 if (PrintCMSStatistics != 0) {
7681 _collector->incrementYields();
7682 }
7683 _collector->icms_wait();
7685 // See the comment in coordinator_yield()
7686 for (unsigned i = 0; i < CMSYieldSleepCount &&
7687 ConcurrentMarkSweepThread::should_yield() &&
7688 !CMSCollector::foregroundGCIsActive(); ++i) {
7689 os::sleep(Thread::current(), 1, false);
7690 ConcurrentMarkSweepThread::acknowledge_yield_request();
7691 }
7693 ConcurrentMarkSweepThread::synchronize(true);
7694 bml->lock();
7696 _collector->startTimer();
7697 }
7699 bool CMSPrecleanRefsYieldClosure::should_return() {
7700 if (ConcurrentMarkSweepThread::should_yield()) {
7701 do_yield_work();
7702 }
7703 return _collector->foregroundGCIsActive();
7704 }
7706 void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) {
7707 assert(((size_t)mr.start())%CardTableModRefBS::card_size_in_words == 0,
7708 "mr should be aligned to start at a card boundary");
7709 // We'd like to assert:
7710 // assert(mr.word_size()%CardTableModRefBS::card_size_in_words == 0,
7711 // "mr should be a range of cards");
7712 // However, that would be too strong in one case -- the last
7713 // partition ends at _unallocated_block which, in general, can be
7714 // an arbitrary boundary, not necessarily card aligned.
7715 if (PrintCMSStatistics != 0) {
7716 _num_dirty_cards +=
7717 mr.word_size()/CardTableModRefBS::card_size_in_words;
7718 }
7719 _space->object_iterate_mem(mr, &_scan_cl);
7720 }
7722 SweepClosure::SweepClosure(CMSCollector* collector,
7723 ConcurrentMarkSweepGeneration* g,
7724 CMSBitMap* bitMap, bool should_yield) :
7725 _collector(collector),
7726 _g(g),
7727 _sp(g->cmsSpace()),
7728 _limit(_sp->sweep_limit()),
7729 _freelistLock(_sp->freelistLock()),
7730 _bitMap(bitMap),
7731 _yield(should_yield),
7732 _inFreeRange(false), // No free range at beginning of sweep
7733 _freeRangeInFreeLists(false), // No free range at beginning of sweep
7734 _lastFreeRangeCoalesced(false),
7735 _freeFinger(g->used_region().start())
7736 {
7737 NOT_PRODUCT(
7738 _numObjectsFreed = 0;
7739 _numWordsFreed = 0;
7740 _numObjectsLive = 0;
7741 _numWordsLive = 0;
7742 _numObjectsAlreadyFree = 0;
7743 _numWordsAlreadyFree = 0;
7744 _last_fc = NULL;
7746 _sp->initializeIndexedFreeListArrayReturnedBytes();
7747 _sp->dictionary()->initializeDictReturnedBytes();
7748 )
7749 assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7750 "sweep _limit out of bounds");
7751 if (CMSTraceSweeper) {
7752 gclog_or_tty->print("\n====================\nStarting new sweep\n");
7753 }
7754 }
7756 // We need this destructor to reclaim any free range at the end
7757 // of the space, which do_blk below may not have added back to
7758 // the free lists. [basically dealing with the "fringe effect"]
7759 SweepClosure::~SweepClosure() {
7760 assert_lock_strong(_freelistLock);
7761 // this should be treated as the end of a free run if any
7762 // The current free range should be returned to the free lists
7763 // as one coalesced chunk.
7764 if (inFreeRange()) {
7765 flushCurFreeChunk(freeFinger(),
7766 pointer_delta(_limit, freeFinger()));
7767 assert(freeFinger() < _limit, "the finger pointeth off base");
7768 if (CMSTraceSweeper) {
7769 gclog_or_tty->print("destructor:");
7770 gclog_or_tty->print("Sweep:put_free_blk 0x%x ("SIZE_FORMAT") "
7771 "[coalesced:"SIZE_FORMAT"]\n",
7772 freeFinger(), pointer_delta(_limit, freeFinger()),
7773 lastFreeRangeCoalesced());
7774 }
7775 }
7776 NOT_PRODUCT(
7777 if (Verbose && PrintGC) {
7778 gclog_or_tty->print("Collected "SIZE_FORMAT" objects, "
7779 SIZE_FORMAT " bytes",
7780 _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
7781 gclog_or_tty->print_cr("\nLive "SIZE_FORMAT" objects, "
7782 SIZE_FORMAT" bytes "
7783 "Already free "SIZE_FORMAT" objects, "SIZE_FORMAT" bytes",
7784 _numObjectsLive, _numWordsLive*sizeof(HeapWord),
7785 _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
7786 size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree) *
7787 sizeof(HeapWord);
7788 gclog_or_tty->print_cr("Total sweep: "SIZE_FORMAT" bytes", totalBytes);
7790 if (PrintCMSStatistics && CMSVerifyReturnedBytes) {
7791 size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
7792 size_t dictReturnedBytes = _sp->dictionary()->sumDictReturnedBytes();
7793 size_t returnedBytes = indexListReturnedBytes + dictReturnedBytes;
7794 gclog_or_tty->print("Returned "SIZE_FORMAT" bytes", returnedBytes);
7795 gclog_or_tty->print(" Indexed List Returned "SIZE_FORMAT" bytes",
7796 indexListReturnedBytes);
7797 gclog_or_tty->print_cr(" Dictionary Returned "SIZE_FORMAT" bytes",
7798 dictReturnedBytes);
7799 }
7800 }
7801 )
7802 // Now, in debug mode, just null out the sweep_limit
7803 NOT_PRODUCT(_sp->clear_sweep_limit();)
7804 if (CMSTraceSweeper) {
7805 gclog_or_tty->print("end of sweep\n================\n");
7806 }
7807 }
7809 void SweepClosure::initialize_free_range(HeapWord* freeFinger,
7810 bool freeRangeInFreeLists) {
7811 if (CMSTraceSweeper) {
7812 gclog_or_tty->print("---- Start free range 0x%x with free block [%d] (%d)\n",
7813 freeFinger, _sp->block_size(freeFinger),
7814 freeRangeInFreeLists);
7815 }
7816 assert(!inFreeRange(), "Trampling existing free range");
7817 set_inFreeRange(true);
7818 set_lastFreeRangeCoalesced(false);
7820 set_freeFinger(freeFinger);
7821 set_freeRangeInFreeLists(freeRangeInFreeLists);
7822 if (CMSTestInFreeList) {
7823 if (freeRangeInFreeLists) {
7824 FreeChunk* fc = (FreeChunk*) freeFinger;
7825 assert(fc->isFree(), "A chunk on the free list should be free.");
7826 assert(fc->size() > 0, "Free range should have a size");
7827 assert(_sp->verifyChunkInFreeLists(fc), "Chunk is not in free lists");
7828 }
7829 }
7830 }
7832 // Note that the sweeper runs concurrently with mutators. Thus,
7833 // it is possible for direct allocation in this generation to happen
7834 // in the middle of the sweep. Note that the sweeper also coalesces
7835 // contiguous free blocks. Thus, unless the sweeper and the allocator
7836 // synchronize appropriately, freshly allocated blocks may get swept up.
7837 // This is accomplished by the sweeper locking the free lists while
7838 // it is sweeping. Thus blocks that are determined to be free are
7839 // indeed free. There is however one additional complication:
7840 // blocks that have been allocated since the final checkpoint and
7841 // mark will not have been marked and so would be treated as
7842 // unreachable and swept up. To prevent this, the allocator marks
7843 // the bit map when allocating during the sweep phase. This leads,
7844 // however, to a further complication -- objects may have been allocated
7845 // but not yet initialized -- in the sense that the header isn't yet
7846 // installed. The sweeper cannot then determine the size of the block
7847 // in order to skip over it. To deal with this case, we use a technique
7848 // (due to Printezis) to encode such uninitialized block sizes in the
7849 // bit map. Since the bit map uses one bit per HeapWord, and the
7850 // CMS generation has a minimum object size of 3 HeapWords, it follows
7851 // that "normal marks" won't be adjacent in the bit map (there will
7852 // always be at least two 0 bits between successive 1 bits). We make use
7853 // of these "unused" bits to represent uninitialized blocks -- the bit
7854 // corresponding to the start of the uninitialized object and the next
7855 // bit are both set. Finally, a 1 bit marks the end of the object that
7856 // started with the two consecutive 1 bits to indicate its potentially
7857 // uninitialized state.
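// Illustrative sketch (not collector code): decoding a Printezis-marked
// block under the bit encoding described above. bits[start] and
// bits[start+1] are both set for an uninitialized object; the next set
// bit at or after bits[start+2] marks the object's last word, so the
// size in HeapWords is (end - start + 1). This mirrors the use of
// getNextMarkedWordAddress(addr + 2) in doLiveChunk() below.
static size_t sketch_printezis_size(const bool* bits, size_t start, size_t len) {
  // precondition (caller-checked): bits[start] && bits[start + 1]
  size_t end = start + 2;
  while (end < len && !bits[end]) end++;  // find the terminating 1 bit
  return end - start + 1;                 // block size in HeapWords
}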
7859 size_t SweepClosure::do_blk_careful(HeapWord* addr) {
7860 FreeChunk* fc = (FreeChunk*)addr;
7861 size_t res;
7863 // check if we are done sweeping
7864 if (addr == _limit) { // we have swept up to the limit, do nothing more
7865 assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7866 "sweep _limit out of bounds");
7867 // help the closure application finish
7868 return pointer_delta(_sp->end(), _limit);
7869 }
7870 assert(addr <= _limit, "sweep invariant");
7872 // check if we should yield
7873 do_yield_check(addr);
7874 if (fc->isFree()) {
7875 // Chunk that is already free
7876 res = fc->size();
7877 doAlreadyFreeChunk(fc);
7878 debug_only(_sp->verifyFreeLists());
7879 assert(res == fc->size(), "Don't expect the size to change");
7880 NOT_PRODUCT(
7881 _numObjectsAlreadyFree++;
7882 _numWordsAlreadyFree += res;
7883 )
7884 NOT_PRODUCT(_last_fc = fc;)
7885 } else if (!_bitMap->isMarked(addr)) {
7886 // Chunk is fresh garbage
7887 res = doGarbageChunk(fc);
7888 debug_only(_sp->verifyFreeLists());
7889 NOT_PRODUCT(
7890 _numObjectsFreed++;
7891 _numWordsFreed += res;
7892 )
7893 } else {
7894 // Chunk that is alive.
7895 res = doLiveChunk(fc);
7896 debug_only(_sp->verifyFreeLists());
7897 NOT_PRODUCT(
7898 _numObjectsLive++;
7899 _numWordsLive += res;
7900 )
7901 }
7902 return res;
7903 }
7905 // For the smart allocation scheme, record the following:
7906 // split deaths - a free chunk is removed from its free list because
7907 // it is being split into two or more chunks.
7908 // split birth - a free chunk is being added to its free list because
7909 // a larger free chunk has been split and resulted in this free chunk.
7910 // coal death - a free chunk is being removed from its free list because
7911 // it is being coalesced into a larger free chunk.
7912 // coal birth - a free chunk is being added to its free list because
7913 // it was created when two or more free chunks were coalesced into
7914 // this free chunk.
7915 //
7916 // These statistics are used to determine the desired number of free
7917 // chunks of a given size. The desired number is chosen to be relative
7918 // to the end of a CMS sweep. The desired number at the end of a sweep
7919 // is the
7920 // count-at-end-of-previous-sweep (an amount that was enough)
7921 // - count-at-beginning-of-current-sweep (the excess)
7922 // + split-births (gains in this size during interval)
7923 // - split-deaths (demands on this size during interval)
7924 // where the interval is from the end of one sweep to the end of the
7925 // next.
7926 //
7927 // When sweeping, the sweeper maintains an accumulated chunk which is
7928 // the chunk that is made up of chunks that have been coalesced. That
7929 // will be termed the left-hand chunk. A new chunk of garbage that
7930 // is being considered for coalescing will be referred to as the
7931 // right-hand chunk.
7932 //
7933 // When making a decision on whether to coalesce a right-hand chunk with
7934 // the current left-hand chunk, the current count vs. the desired count
7935 // of the left-hand chunk is considered. Also if the right-hand chunk
7936 // is near the large chunk at the end of the heap (see
7937 // ConcurrentMarkSweepGeneration::isNearLargestChunk()), then the
7938 // left-hand chunk is coalesced.
7939 //
7940 // When making a decision about whether to split a chunk, the desired count
7941 // vs. the current count of the candidate to be split is also considered.
7942 // If the candidate is underpopulated (currently fewer chunks than desired)
7943 // a chunk of an overpopulated (currently more chunks than desired) size may
7944 // be chosen. The "hint" associated with a free list, if non-null, points
7945 // to a free list which may be overpopulated.
7946 //
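// Illustrative sketch (not collector code, hypothetical numbers): the
// desired-count formula above for one free-list size, evaluated over the
// interval from the end of one sweep to the end of the next.
static ssize_t sketch_desired_count(ssize_t at_prev_sweep_end,   // was enough
                                    ssize_t at_cur_sweep_begin,  // the excess
                                    ssize_t split_births,        // gains
                                    ssize_t split_deaths) {      // demands
  return at_prev_sweep_end - at_cur_sweep_begin
       + split_births - split_deaths;
}
// e.g. sketch_desired_count(100, 40, 25, 10) == 75 chunks desired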
7948 void SweepClosure::doAlreadyFreeChunk(FreeChunk* fc) {
7949 size_t size = fc->size();
7950 // Chunks that cannot be coalesced are not in the
7951 // free lists.
7952 if (CMSTestInFreeList && !fc->cantCoalesce()) {
7953 assert(_sp->verifyChunkInFreeLists(fc),
7954 "free chunk should be in free lists");
7955 }
7956 // a chunk that is already free should not have been
7957 // marked in the bit map
7958 HeapWord* addr = (HeapWord*) fc;
7959 assert(!_bitMap->isMarked(addr), "free chunk should be unmarked");
7960 // Verify that the bit map has no bits marked between
7961 // addr and purported end of this block.
7962 _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
7964 // Some chunks cannot be coalesced under any circumstances.
7965 // See the definition of cantCoalesce().
7966 if (!fc->cantCoalesce()) {
7967 // This chunk can potentially be coalesced.
7968 if (_sp->adaptive_freelists()) {
7969 // All the work is done in
7970 doPostIsFreeOrGarbageChunk(fc, size);
7971 } else { // Not adaptive free lists
7972 // this is a free chunk that can potentially be coalesced by the sweeper;
7973 if (!inFreeRange()) {
7974 // if the next chunk is a free block that can't be coalesced
7975 // it doesn't make sense to remove this chunk from the free lists
7976 FreeChunk* nextChunk = (FreeChunk*)(addr + size);
7977 assert((HeapWord*)nextChunk <= _limit, "sweep invariant");
7978 if ((HeapWord*)nextChunk < _limit && // there's a next chunk...
7979 nextChunk->isFree() && // which is free...
7980 nextChunk->cantCoalesce()) { // ... but can't be coalesced
7981 // nothing to do
7982 } else {
7983 // Potentially the start of a new free range:
7984 // Don't eagerly remove it from the free lists.
7985 // No need to remove it if it will just be put
7986 // back again. (Also from a pragmatic point of view
7987 // if it is a free block in a region that is beyond
7988 // any allocated blocks, an assertion will fail)
7989 // Remember the start of a free run.
7990 initialize_free_range(addr, true);
7991 // end - can coalesce with next chunk
7992 }
7993 } else {
7994 // the midst of a free range, we are coalescing
7995 debug_only(record_free_block_coalesced(fc);)
7996 if (CMSTraceSweeper) {
7997 gclog_or_tty->print(" -- pick up free block 0x%x (%d)\n", fc, size);
7998 }
7999 // remove it from the free lists
8000 _sp->removeFreeChunkFromFreeLists(fc);
8001 set_lastFreeRangeCoalesced(true);
8002 // If the chunk is being coalesced and the current free range is
8003 // in the free lists, remove the current free range so that it
8004 // will be returned to the free lists in its entirety - all
8005 // the coalesced pieces included.
8006 if (freeRangeInFreeLists()) {
8007 FreeChunk* ffc = (FreeChunk*) freeFinger();
8008 assert(ffc->size() == pointer_delta(addr, freeFinger()),
8009 "Size of free range is inconsistent with chunk size.");
8010 if (CMSTestInFreeList) {
8011 assert(_sp->verifyChunkInFreeLists(ffc),
8012 "free range is not in free lists");
8013 }
8014 _sp->removeFreeChunkFromFreeLists(ffc);
8015 set_freeRangeInFreeLists(false);
8016 }
8017 }
8018 }
8019 } else {
8020 // Code path common to both original and adaptive free lists.
8022 // can't coalesce with previous block; this should be treated
8023 // as the end of a free run if any
8024 if (inFreeRange()) {
8025 // we kicked some butt; time to pick up the garbage
8026 assert(freeFinger() < addr, "the finger pointeth off base");
8027 flushCurFreeChunk(freeFinger(), pointer_delta(addr, freeFinger()));
8028 }
8029 // else, nothing to do, just continue
8030 }
8031 }
8033 size_t SweepClosure::doGarbageChunk(FreeChunk* fc) {
8034 // This is a chunk of garbage. It is not in any free list.
8035 // Add it to a free list or let it possibly be coalesced into
8036 // a larger chunk.
8037 HeapWord* addr = (HeapWord*) fc;
8038 size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
8040 if (_sp->adaptive_freelists()) {
8041 // Verify that the bit map has no bits marked between
8042 // addr and purported end of just dead object.
8043 _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
8045 doPostIsFreeOrGarbageChunk(fc, size);
8046 } else {
8047 if (!inFreeRange()) {
8048 // start of a new free range
8049 assert(size > 0, "A free range should have a size");
8050 initialize_free_range(addr, false);
8052 } else {
8053 // this will be swept up when we hit the end of the
8054 // free range
8055 if (CMSTraceSweeper) {
8056 gclog_or_tty->print(" -- pick up garbage 0x%x (%d) \n", fc, size);
8057 }
8058 // If the chunk is being coalesced and the current free range is
8059 // in the free lists, remove the current free range so that it
8060 // will be returned to the free lists in its entirety - all
8061 // the coalesced pieces included.
8062 if (freeRangeInFreeLists()) {
8063 FreeChunk* ffc = (FreeChunk*)freeFinger();
8064 assert(ffc->size() == pointer_delta(addr, freeFinger()),
8065 "Size of free range is inconsistent with chunk size.");
8066 if (CMSTestInFreeList) {
8067 assert(_sp->verifyChunkInFreeLists(ffc),
8068 "free range is not in free lists");
8069 }
8070 _sp->removeFreeChunkFromFreeLists(ffc);
8071 set_freeRangeInFreeLists(false);
8072 }
8073 set_lastFreeRangeCoalesced(true);
8074 }
8077 // Verify that the bit map has no bits marked between
8078 // addr and purported end of just dead object.
8079 _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
8080 }
8081 return size;
8082 }
8084 size_t SweepClosure::doLiveChunk(FreeChunk* fc) {
8085 HeapWord* addr = (HeapWord*) fc;
8086 // The sweeper has just found a live object. Return any accumulated
8087 // left hand chunk to the free lists.
8088 if (inFreeRange()) {
8089 if (_sp->adaptive_freelists()) {
8090 flushCurFreeChunk(freeFinger(),
8091 pointer_delta(addr, freeFinger()));
8092 } else { // not adaptive freelists
8093 set_inFreeRange(false);
8094 // Add the free range back to the free list if it is not already
8095 // there.
8096 if (!freeRangeInFreeLists()) {
8097 assert(freeFinger() < addr, "the finger pointeth off base");
8098 if (CMSTraceSweeper) {
8099 gclog_or_tty->print("Sweep:put_free_blk 0x%x (%d) "
8100 "[coalesced:%d]\n",
8101 freeFinger(), pointer_delta(addr, freeFinger()),
8102 lastFreeRangeCoalesced());
8103 }
8104 _sp->addChunkAndRepairOffsetTable(freeFinger(),
8105 pointer_delta(addr, freeFinger()), lastFreeRangeCoalesced());
8106 }
8107 }
8108 }
8110 // Common code path for original and adaptive free lists.
8112 // this object is live: we'd normally expect this to be
8113 // an oop, and would like to assert the following:
8114 // assert(oop(addr)->is_oop(), "live block should be an oop");
8115 // However, as we commented above, this may be an object whose
8116 // header hasn't yet been initialized.
8117 size_t size;
8118 assert(_bitMap->isMarked(addr), "Tautology for this control point");
8119 if (_bitMap->isMarked(addr + 1)) {
8120 // Determine the size from the bit map, rather than trying to
8121 // compute it from the object header.
8122 HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
8123 size = pointer_delta(nextOneAddr + 1, addr);
8124 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
8125 "alignment problem");
8127 #ifdef DEBUG
8128 if (oop(addr)->klass_or_null() != NULL &&
8129 ( !_collector->should_unload_classes()
8130 || (oop(addr)->is_parsable()) &&
8131 oop(addr)->is_conc_safe())) {
8132 // Ignore mark word because we are running concurrent with mutators
8133 assert(oop(addr)->is_oop(true), "live block should be an oop");
8134 // is_conc_safe is checked before performing this assertion
8135 // because an object that is not is_conc_safe may yet have
8136 // the return from size() correct.
8137 assert(size ==
8138 CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
8139 "P-mark and computed size do not agree");
8140 }
8141 #endif
8143 } else {
8144 // This should be an initialized object that's alive.
8145 assert(oop(addr)->klass_or_null() != NULL &&
8146 (!_collector->should_unload_classes()
8147 || oop(addr)->is_parsable()),
8148 "Should be an initialized object");
8149 // Note that there are objects used during class redefinition
8150 // (e.g., merge_cp in VM_RedefineClasses::merge_cp_and_rewrite())
8151 // which are discarded with their is_conc_safe state still
8152 // false. These objects may be floating garbage and so may be
8153 // seen here. If they are floating garbage, their size
8154 // should be attainable from their klass. Note that
8155 // is_conc_safe() is true for oop(addr).
8156 // Ignore mark word because we are running concurrent with mutators
8157 assert(oop(addr)->is_oop(true), "live block should be an oop");
8158 // Verify that the bit map has no bits marked between
8159 // addr and purported end of this block.
8160 size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
8161 assert(size >= 3, "Necessary for Printezis marks to work");
8162 assert(!_bitMap->isMarked(addr+1), "Tautology for this control point");
8163 DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);)
8164 }
8165 return size;
8166 }
8168 void SweepClosure::doPostIsFreeOrGarbageChunk(FreeChunk* fc,
8169 size_t chunkSize) {
8170 // doPostIsFreeOrGarbageChunk() should only be called in the smart allocation
8171 // scheme.
8172 bool fcInFreeLists = fc->isFree();
8173 assert(_sp->adaptive_freelists(), "Should only be used in this case.");
8174 assert((HeapWord*)fc <= _limit, "sweep invariant");
8175 if (CMSTestInFreeList && fcInFreeLists) {
8176 assert(_sp->verifyChunkInFreeLists(fc),
8177 "free chunk is not in free lists");
8178 }
8181 if (CMSTraceSweeper) {
8182 gclog_or_tty->print_cr(" -- pick up another chunk at 0x%x (%d)", fc, chunkSize);
8183 }
8185 HeapWord* addr = (HeapWord*) fc;
8187 bool coalesce;
8188 size_t left = pointer_delta(addr, freeFinger());
8189 size_t right = chunkSize;
8190 switch (FLSCoalescePolicy) {
8191 // numeric value forms a coalescing aggressiveness metric
8192 case 0: { // never coalesce
8193 coalesce = false;
8194 break;
8195 }
8196 case 1: { // coalesce if left & right chunks on overpopulated lists
8197 coalesce = _sp->coalOverPopulated(left) &&
8198 _sp->coalOverPopulated(right);
8199 break;
8200 }
8201 case 2: { // coalesce if left chunk on overpopulated list (default)
8202 coalesce = _sp->coalOverPopulated(left);
8203 break;
8204 }
8205 case 3: { // coalesce if left OR right chunk on overpopulated list
8206 coalesce = _sp->coalOverPopulated(left) ||
8207 _sp->coalOverPopulated(right);
8208 break;
8209 }
8210 case 4: { // always coalesce
8211 coalesce = true;
8212 break;
8213 }
8214 default:
8215 ShouldNotReachHere();
8216 }
8218 // Should the current free range be coalesced?
8219 // If the chunk is in a free range and either we decided to coalesce above
8220 // or the chunk is near the large block at the end of the heap
8221 // (isNearLargestChunk() returns true), then coalesce this chunk.
8222 bool doCoalesce = inFreeRange() &&
8223 (coalesce || _g->isNearLargestChunk((HeapWord*)fc));
8224 if (doCoalesce) {
8225 // Coalesce the current free range on the left with the new
8226 // chunk on the right. If either is on a free list,
8227 // it must be removed from the list and stashed in the closure.
8228 if (freeRangeInFreeLists()) {
8229 FreeChunk* ffc = (FreeChunk*)freeFinger();
8230 assert(ffc->size() == pointer_delta(addr, freeFinger()),
8231 "Size of free range is inconsistent with chunk size.");
8232 if (CMSTestInFreeList) {
8233 assert(_sp->verifyChunkInFreeLists(ffc),
8234 "Chunk is not in free lists");
8235 }
8236 _sp->coalDeath(ffc->size());
8237 _sp->removeFreeChunkFromFreeLists(ffc);
8238 set_freeRangeInFreeLists(false);
8239 }
8240 if (fcInFreeLists) {
8241 _sp->coalDeath(chunkSize);
8242 assert(fc->size() == chunkSize,
8243 "The chunk has the wrong size or is not in the free lists");
8244 _sp->removeFreeChunkFromFreeLists(fc);
8245 }
8246 set_lastFreeRangeCoalesced(true);
8247 } else { // not in a free range and/or should not coalesce
8248 // Return the current free range and start a new one.
8249 if (inFreeRange()) {
8250 // In a free range but cannot coalesce with the right hand chunk.
8251 // Put the current free range into the free lists.
8252 flushCurFreeChunk(freeFinger(),
8253 pointer_delta(addr, freeFinger()));
8254 }
8255 // Set up for new free range. Pass along whether the right hand
8256 // chunk is in the free lists.
8257 initialize_free_range((HeapWord*)fc, fcInFreeLists);
8258 }
8259 }
8260 void SweepClosure::flushCurFreeChunk(HeapWord* chunk, size_t size) {
8261 assert(inFreeRange(), "Should only be called if currently in a free range.");
8262 assert(size > 0,
8263 "A zero sized chunk cannot be added to the free lists.");
8264 if (!freeRangeInFreeLists()) {
8265 if (CMSTestInFreeList) {
8266 FreeChunk* fc = (FreeChunk*) chunk;
8267 fc->setSize(size);
8268 assert(!_sp->verifyChunkInFreeLists(fc),
8269 "chunk should not be in free lists yet");
8270 }
8271 if (CMSTraceSweeper) {
8272 gclog_or_tty->print_cr(" -- add free block 0x%x (%d) to free lists",
8273 chunk, size);
8274 }
8275 // A new free range is about to start. The current
8276 // free range has not yet been added to the free lists, or
8277 // was removed, so add it back.
8278 // If the current free range was coalesced, then the death
8279 // of the free range was recorded. Record a birth now.
8280 if (lastFreeRangeCoalesced()) {
8281 _sp->coalBirth(size);
8282 }
8283 _sp->addChunkAndRepairOffsetTable(chunk, size,
8284 lastFreeRangeCoalesced());
8285 }
8286 set_inFreeRange(false);
8287 set_freeRangeInFreeLists(false);
8288 }
8290 // We take a break if we've been at this for a while,
8291 // so as to avoid monopolizing the locks involved.
8292 void SweepClosure::do_yield_work(HeapWord* addr) {
8293 // Return current free chunk being used for coalescing (if any)
8294 // to the appropriate freelist. After yielding, the next
8295 // free block encountered will start a coalescing range of
8296 // free blocks. If the next free block is adjacent to the
8297 // chunk just flushed, they will need to wait for the next
8298 // sweep to be coalesced.
8299 if (inFreeRange()) {
8300 flushCurFreeChunk(freeFinger(), pointer_delta(addr, freeFinger()));
8301 }
8303 // First give up the locks, then yield, then re-lock.
8304 // We should probably use a constructor/destructor idiom to
8305 // do this unlock/lock or modify the MutexUnlocker class to
8306 // serve our purpose. XXX
8307 assert_lock_strong(_bitMap->lock());
8308 assert_lock_strong(_freelistLock);
8309 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
8310 "CMS thread should hold CMS token");
8311 _bitMap->lock()->unlock();
8312 _freelistLock->unlock();
8313 ConcurrentMarkSweepThread::desynchronize(true);
8314 ConcurrentMarkSweepThread::acknowledge_yield_request();
8315 _collector->stopTimer();
8316 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
8317 if (PrintCMSStatistics != 0) {
8318 _collector->incrementYields();
8319 }
8320 _collector->icms_wait();
8322 // See the comment in coordinator_yield()
8323 for (unsigned i = 0; i < CMSYieldSleepCount &&
8324 ConcurrentMarkSweepThread::should_yield() &&
8325 !CMSCollector::foregroundGCIsActive(); ++i) {
8326 os::sleep(Thread::current(), 1, false);
8327 ConcurrentMarkSweepThread::acknowledge_yield_request();
8328 }
8330 ConcurrentMarkSweepThread::synchronize(true);
8331 _freelistLock->lock();
8332 _bitMap->lock()->lock_without_safepoint_check();
8333 _collector->startTimer();
8334 }
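// Illustrative sketch (not collector code): the constructor/destructor
// idiom the XXX note in do_yield_work() suggests, in the spirit of the
// existing MutexUnlocker class -- unlock on construction, re-lock on
// destruction, so the unlock/lock pairing cannot be missed on any path.
// A real version would also need the lock_without_safepoint_check()
// variant used above; this is a minimal sketch only.
class SketchMutexUnlockerEx {
  Mutex* _mutex;
 public:
  explicit SketchMutexUnlockerEx(Mutex* m) : _mutex(m) { _mutex->unlock(); }
  ~SketchMutexUnlockerEx()                             { _mutex->lock(); }
};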
8336 #ifndef PRODUCT
8337 // This is actually very useful in a product build if it can
8338 // be called from the debugger. Compile it into the product
8339 // as needed.
8340 bool debug_verifyChunkInFreeLists(FreeChunk* fc) {
8341 return debug_cms_space->verifyChunkInFreeLists(fc);
8342 }
8344 void SweepClosure::record_free_block_coalesced(FreeChunk* fc) const {
8345 if (CMSTraceSweeper) {
8346 gclog_or_tty->print("Sweep:coal_free_blk 0x%x (%d)\n", fc, fc->size());
8347 }
8348 }
8349 #endif
8351 // CMSIsAliveClosure
8352 bool CMSIsAliveClosure::do_object_b(oop obj) {
8353 HeapWord* addr = (HeapWord*)obj;
8354 return addr != NULL &&
8355 (!_span.contains(addr) || _bit_map->isMarked(addr));
8356 }
8358 CMSKeepAliveClosure::CMSKeepAliveClosure( CMSCollector* collector,
8359 MemRegion span,
8360 CMSBitMap* bit_map, CMSMarkStack* mark_stack,
8361 CMSMarkStack* revisit_stack, bool cpc):
8362 KlassRememberingOopClosure(collector, NULL, revisit_stack),
8363 _span(span),
8364 _bit_map(bit_map),
8365 _mark_stack(mark_stack),
8366 _concurrent_precleaning(cpc) {
8367 assert(!_span.is_empty(), "Empty span could spell trouble");
8368 }
8371 // CMSKeepAliveClosure: the serial version
8372 void CMSKeepAliveClosure::do_oop(oop obj) {
8373 HeapWord* addr = (HeapWord*)obj;
8374 if (_span.contains(addr) &&
8375 !_bit_map->isMarked(addr)) {
8376 _bit_map->mark(addr);
8377 bool simulate_overflow = false;
8378 NOT_PRODUCT(
8379 if (CMSMarkStackOverflowALot &&
8380 _collector->simulate_overflow()) {
8381 // simulate a stack overflow
8382 simulate_overflow = true;
8383 }
8384 )
8385 if (simulate_overflow || !_mark_stack->push(obj)) {
8386 if (_concurrent_precleaning) {
8387 // We dirty the overflown object and let the remark
8388 // phase deal with it.
8389 assert(_collector->overflow_list_is_empty(), "Error");
8390 // In the case of object arrays, we need to dirty all of
8391 // the cards that the object spans. No locking or atomics
8392 // are needed since no one else can be mutating the mod union
8393 // table.
8394 if (obj->is_objArray()) {
8395 size_t sz = obj->size();
8396 HeapWord* end_card_addr =
8397 (HeapWord*)round_to((intptr_t)(addr+sz), CardTableModRefBS::card_size);
8398 MemRegion redirty_range = MemRegion(addr, end_card_addr);
8399 assert(!redirty_range.is_empty(), "Arithmetical tautology");
8400 _collector->_modUnionTable.mark_range(redirty_range);
8401 } else {
8402 _collector->_modUnionTable.mark(addr);
8403 }
8404 _collector->_ser_kac_preclean_ovflw++;
8405 } else {
8406 _collector->push_on_overflow_list(obj);
8407 _collector->_ser_kac_ovflw++;
8408 }
8409 }
8410 }
8411 }
8413 void CMSKeepAliveClosure::do_oop(oop* p) { CMSKeepAliveClosure::do_oop_work(p); }
8414 void CMSKeepAliveClosure::do_oop(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
8416 // CMSParKeepAliveClosure: a parallel version of the above.
8417 // The work queues are private to each closure (thread),
8418 // but (may be) available for stealing by other threads.
8419 void CMSParKeepAliveClosure::do_oop(oop obj) {
8420 HeapWord* addr = (HeapWord*)obj;
8421 if (_span.contains(addr) &&
8422 !_bit_map->isMarked(addr)) {
8423 // In general, during recursive tracing, several threads
8424 // may be concurrently getting here; the first one to
8425 // "tag" it, claims it.
8426 if (_bit_map->par_mark(addr)) {
8427 bool res = _work_queue->push(obj);
8428 assert(res, "Low water mark should be much less than capacity");
8429 // Do a recursive trim in the hope that this will keep
8430 // stack usage lower, but leave some oops for potential stealers
8431 trim_queue(_low_water_mark);
8432 } // Else, another thread got there first
8433 }
8434 }
8436 void CMSParKeepAliveClosure::do_oop(oop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
8437 void CMSParKeepAliveClosure::do_oop(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
8439 void CMSParKeepAliveClosure::trim_queue(uint max) {
8440 while (_work_queue->size() > max) {
8441 oop new_oop;
8442 if (_work_queue->pop_local(new_oop)) {
8443 assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
8444 assert(_bit_map->isMarked((HeapWord*)new_oop),
8445 "no white objects on this stack!");
8446 assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
8447 // iterate over the oops in this oop, marking and pushing
8448 // the ones in CMS heap (i.e. in _span).
8449 new_oop->oop_iterate(&_mark_and_push);
8450 }
8451 }
8452 }
8454 CMSInnerParMarkAndPushClosure::CMSInnerParMarkAndPushClosure(
8455 CMSCollector* collector,
8456 MemRegion span, CMSBitMap* bit_map,
8457 CMSMarkStack* revisit_stack,
8458 OopTaskQueue* work_queue):
8459 Par_KlassRememberingOopClosure(collector, NULL, revisit_stack),
8460 _span(span),
8461 _bit_map(bit_map),
8462 _work_queue(work_queue) { }
8464 void CMSInnerParMarkAndPushClosure::do_oop(oop obj) {
8465 HeapWord* addr = (HeapWord*)obj;
8466 if (_span.contains(addr) &&
8467 !_bit_map->isMarked(addr)) {
8468 if (_bit_map->par_mark(addr)) {
8469 bool simulate_overflow = false;
8470 NOT_PRODUCT(
8471 if (CMSMarkStackOverflowALot &&
8472 _collector->par_simulate_overflow()) {
8473 // simulate a stack overflow
8474 simulate_overflow = true;
8475 }
8476 )
8477 if (simulate_overflow || !_work_queue->push(obj)) {
8478 _collector->par_push_on_overflow_list(obj);
8479 _collector->_par_kac_ovflw++;
8480 }
8481 } // Else another thread got there already
8482 }
8483 }
8485 void CMSInnerParMarkAndPushClosure::do_oop(oop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
8486 void CMSInnerParMarkAndPushClosure::do_oop(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
8488 //////////////////////////////////////////////////////////////////
8489 // CMSExpansionCause /////////////////////////////
8490 //////////////////////////////////////////////////////////////////
8491 const char* CMSExpansionCause::to_string(CMSExpansionCause::Cause cause) {
8492 switch (cause) {
8493 case _no_expansion:
8494 return "No expansion";
8495 case _satisfy_free_ratio:
8496 return "Free ratio";
8497 case _satisfy_promotion:
8498 return "Satisfy promotion";
8499 case _satisfy_allocation:
8500 return "allocation";
8501 case _allocate_par_lab:
8502 return "Par LAB";
8503 case _allocate_par_spooling_space:
8504 return "Par Spooling Space";
8505 case _adaptive_size_policy:
8506 return "Ergonomics";
8507 default:
8508 return "unknown";
8509 }
8510 }
8512 void CMSDrainMarkingStackClosure::do_void() {
8514 // the max number to take from the overflow list at a time
8514 const size_t num = _mark_stack->capacity()/4;
8515 assert(!_concurrent_precleaning || _collector->overflow_list_is_empty(),
8516 "Overflow list should be NULL during concurrent phases");
8517 while (!_mark_stack->isEmpty() ||
8518 // if stack is empty, check the overflow list
8519 _collector->take_from_overflow_list(num, _mark_stack)) {
8520 oop obj = _mark_stack->pop();
8521 HeapWord* addr = (HeapWord*)obj;
8522 assert(_span.contains(addr), "Should be within span");
8523 assert(_bit_map->isMarked(addr), "Should be marked");
8524 assert(obj->is_oop(), "Should be an oop");
8525 obj->oop_iterate(_keep_alive);
8526 }
8527 }
8529 void CMSParDrainMarkingStackClosure::do_void() {
8530 // drain queue
8531 trim_queue(0);
8532 }
8534 // Trim our work_queue so its length is below max at return
8535 void CMSParDrainMarkingStackClosure::trim_queue(uint max) {
8536 while (_work_queue->size() > max) {
8537 oop new_oop;
8538 if (_work_queue->pop_local(new_oop)) {
8539 assert(new_oop->is_oop(), "Expected an oop");
8540 assert(_bit_map->isMarked((HeapWord*)new_oop),
8541 "no white objects on this stack!");
8542 assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
8543 // iterate over the oops in this oop, marking and pushing
8544 // the ones in CMS heap (i.e. in _span).
8545 new_oop->oop_iterate(&_mark_and_push);
8546 }
8547 }
8548 }
8550 ////////////////////////////////////////////////////////////////////
8551 // Support for Marking Stack Overflow list handling and related code
8552 ////////////////////////////////////////////////////////////////////
8553 // Much of the following code is similar in shape and spirit to the
8554 // code used in ParNewGC. We should try and share that code
8555 // as much as possible in the future.
8557 #ifndef PRODUCT
8558 // Debugging support for CMSStackOverflowALot
8560 // It's OK to call this multi-threaded; the worst thing
8561 // that can happen is that we'll get a bunch of closely
8562 // spaced simulated overflows, but that's OK, in fact
8563 // probably good as it would exercise the overflow code
8564 // under contention.
8565 bool CMSCollector::simulate_overflow() {
8566 if (_overflow_counter-- <= 0) { // just being defensive
8567 _overflow_counter = CMSMarkStackOverflowInterval;
8568 return true;
8569 } else {
8570 return false;
8571 }
8572 }
8574 bool CMSCollector::par_simulate_overflow() {
8575 return simulate_overflow();
8576 }
8577 #endif
8579 // Single-threaded
8580 bool CMSCollector::take_from_overflow_list(size_t num, CMSMarkStack* stack) {
8581 assert(stack->isEmpty(), "Expected precondition");
8582 assert(stack->capacity() > num, "Shouldn't bite more than can chew");
8583 size_t i = num;
8584 oop cur = _overflow_list;
8585 const markOop proto = markOopDesc::prototype();
8586 NOT_PRODUCT(ssize_t n = 0;)
8587 for (oop next; i > 0 && cur != NULL; cur = next, i--) {
8588 next = oop(cur->mark());
8589 cur->set_mark(proto); // until proven otherwise
8590 assert(cur->is_oop(), "Should be an oop");
8591 bool res = stack->push(cur);
8592 assert(res, "Bit off more than can chew?");
8593 NOT_PRODUCT(n++;)
8594 }
8595 _overflow_list = cur;
8596 #ifndef PRODUCT
8597 assert(_num_par_pushes >= n, "Too many pops?");
8598 _num_par_pushes -= n;
8599 #endif
8600 return !stack->isEmpty();
8601 }
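// Illustrative sketch (not collector code, simplified stand-in types):
// how the overflow list is threaded through mark words, as in
// take_from_overflow_list() above. Each entry's mark slot points at the
// next entry; popping restores a prototypical mark value, just as
// cur->set_mark(proto) does above.
struct SketchNode { SketchNode* mark; };  // stand-in for an oop's mark word
static SketchNode* sketch_pop(SketchNode** list, SketchNode* proto) {
  SketchNode* cur = *list;
  if (cur != NULL) {
    *list = cur->mark;   // next = oop(cur->mark())
    cur->mark = proto;   // cur->set_mark(proto): "until proven otherwise"
  }
  return cur;
}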
8603 #define BUSY (oop(0x1aff1aff))
8604 // (MT-safe) Get a prefix of at most "num" from the list.
8605 // The overflow list is chained through the mark word of
8606 // each object in the list. We fetch the entire list,
8607 // break off a prefix of the right size and return the
8608 // remainder. If other threads try to take objects from
8609 // the overflow list at that time, they will wait for
8610 // some time to see if data becomes available. If (and
8611 // only if) another thread places one or more object(s)
8612 // on the global list before we have returned the suffix
8613 // to the global list, we will walk down our local list
8614 // to find its end and append the global list to
8615 // our suffix before returning it. This suffix walk can
8616 // prove to be expensive (quadratic in the amount of traffic)
8617 // when there are many objects in the overflow list and
8618 // there is much producer-consumer contention on the list.
8619 // *NOTE*: The overflow list manipulation code here and
8620 // in ParNewGeneration:: are very similar in shape,
8621 // except that in the ParNew case we use the old (from/eden)
8622 // copy of the object to thread the list via its klass word.
8623 // Because of the common code, if you make any changes in
8624 // the code below, please check the ParNew version to see if
8625 // similar changes might be needed.
8626 // CR 6797058 has been filed to consolidate the common code.
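// Illustrative sketch (not part of the algorithm itself): with three
// overflowed objects A, B, C the list is threaded through mark words as
//   _overflow_list -> A[mark=B] -> B[mark=C] -> C[mark=NULL]
// so par_take_from_overflow_list(2, q) pushes A and B on q (restoring
// their prototype mark words) and returns the suffix C to the global list.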
8627 bool CMSCollector::par_take_from_overflow_list(size_t num,
8628 OopTaskQueue* work_q) {
8629 assert(work_q->size() == 0, "First empty local work queue");
8630 assert(num < work_q->max_elems(), "Can't bite more than we can chew");
8631 if (_overflow_list == NULL) {
8632 return false;
8633 }
8634 // Grab the entire list; we'll put back a suffix
8635 oop prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
8636 Thread* tid = Thread::current();
8637 size_t CMSOverflowSpinCount = (size_t)ParallelGCThreads;
8638 size_t sleep_time_millis = MAX2((size_t)1, num/100);
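// Worked example (illustrative): for num == 400, sleep_time_millis is
// MAX2(1, 400/100) == 4, and with ParallelGCThreads == 8 the loop below
// makes at most 8 such sleep-and-retry attempts before giving up.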
8639 // If the list is busy, we spin for a short while,
8640 // sleeping between attempts to get the list.
8641 for (size_t spin = 0; prefix == BUSY && spin < CMSOverflowSpinCount; spin++) {
8642 os::sleep(tid, sleep_time_millis, false);
8643 if (_overflow_list == NULL) {
8644 // Nothing left to take
8645 return false;
8646 } else if (_overflow_list != BUSY) {
8647 // Try and grab the prefix
8648 prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
8649 }
8650 }
8651 // If the list was found to be empty, or we spun long
8652 // enough, we give up and return empty-handed. If we leave
8653 // the list in the BUSY state below, it must be the case that
8654 // some other thread holds the overflow list and will set it
8655 // to a non-BUSY state in the future.
8656 if (prefix == NULL || prefix == BUSY) {
8657 // Nothing to take or waited long enough
8658 if (prefix == NULL) {
8659 // Write back the NULL in case we overwrote it with BUSY above
8660 // and it is still the same value.
8661 (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
8662 }
8663 return false;
8664 }
8665 assert(prefix != NULL && prefix != BUSY, "Error");
8666 size_t i = num;
8667 oop cur = prefix;
8668 // Walk down the first "num" objects, unless we reach the end.
8669 for (; i > 1 && cur->mark() != NULL; cur = oop(cur->mark()), i--);
8670 if (cur->mark() == NULL) {
8671 // We have "num" or fewer elements in the list, so there
8672 // is nothing to return to the global list.
8673 // Write back the NULL in lieu of the BUSY we wrote
8674 // above, if it is still the same value.
8675 if (_overflow_list == BUSY) {
8676 (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
8677 }
8678 } else {
8679 // Chop off the suffix and return it to the global list.
8680 assert(cur->mark() != BUSY, "Error");
8681 oop suffix_head = cur->mark(); // suffix will be put back on global list
8682 cur->set_mark(NULL); // break off suffix
8683 // It's possible that the list is still in the empty (BUSY) state
8684 // we left it in a short while ago; in that case we may be
8685 // able to place back the suffix without incurring the cost
8686 // of a walk down the list.
8687 oop observed_overflow_list = _overflow_list;
8688 oop cur_overflow_list = observed_overflow_list;
8689 bool attached = false;
8690 while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
8691 observed_overflow_list =
8692 (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
8693 if (cur_overflow_list == observed_overflow_list) {
8694 attached = true;
8695 break;
8696 } else cur_overflow_list = observed_overflow_list;
8697 }
8698 if (!attached) {
8699 // Too bad, someone else sneaked in (at least) an element; we'll need
8700 // to do a splice. Find tail of suffix so we can prepend suffix to global
8701 // list.
8702 for (cur = suffix_head; cur->mark() != NULL; cur = (oop)(cur->mark()));
8703 oop suffix_tail = cur;
8704 assert(suffix_tail != NULL && suffix_tail->mark() == NULL,
8705 "Tautology");
8706 observed_overflow_list = _overflow_list;
8707 do {
8708 cur_overflow_list = observed_overflow_list;
8709 if (cur_overflow_list != BUSY) {
8710 // Do the splice ...
8711 suffix_tail->set_mark(markOop(cur_overflow_list));
8712 } else { // cur_overflow_list == BUSY
8713 suffix_tail->set_mark(NULL);
8714 }
8715 // ... and try to place spliced list back on overflow_list ...
8716 observed_overflow_list =
8717 (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
8718 } while (cur_overflow_list != observed_overflow_list);
8719 // ... until we have succeeded in doing so.
8720 }
8721 }
8723 // Push the prefix elements on work_q
8724 assert(prefix != NULL, "control point invariant");
8725 const markOop proto = markOopDesc::prototype();
8726 oop next;
8727 NOT_PRODUCT(ssize_t n = 0;)
8728 for (cur = prefix; cur != NULL; cur = next) {
8729 next = oop(cur->mark());
8730 cur->set_mark(proto); // until proven otherwise
8731 assert(cur->is_oop(), "Should be an oop");
8732 bool res = work_q->push(cur);
8733 assert(res, "Bit off more than we can chew?");
8734 NOT_PRODUCT(n++;)
8735 }
8736 #ifndef PRODUCT
8737 assert(_num_par_pushes >= n, "Too many pops?");
8738 Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
8739 #endif
8740 return true;
8741 }
8743 // Single-threaded
8744 void CMSCollector::push_on_overflow_list(oop p) {
8745 NOT_PRODUCT(_num_par_pushes++;)
8746 assert(p->is_oop(), "Not an oop");
8747 preserve_mark_if_necessary(p);
8748 p->set_mark((markOop)_overflow_list);
8749 _overflow_list = p;
8750 }
8752 // Multi-threaded; use CAS to prepend to overflow list
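// Note the interplay with par_take_from_overflow_list(): if the list is
// currently claimed (BUSY), the new element is linked to NULL instead,
// so a successful CAS makes it the head of a fresh, well-formed list;
// the claiming thread will splice any returned suffix behind it later.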
8753 void CMSCollector::par_push_on_overflow_list(oop p) {
8754 NOT_PRODUCT(Atomic::inc_ptr(&_num_par_pushes);)
8755 assert(p->is_oop(), "Not an oop");
8756 par_preserve_mark_if_necessary(p);
8757 oop observed_overflow_list = _overflow_list;
8758 oop cur_overflow_list;
8759 do {
8760 cur_overflow_list = observed_overflow_list;
8761 if (cur_overflow_list != BUSY) {
8762 p->set_mark(markOop(cur_overflow_list));
8763 } else {
8764 p->set_mark(NULL);
8765 }
8766 observed_overflow_list =
8767 (oop) Atomic::cmpxchg_ptr(p, &_overflow_list, cur_overflow_list);
8768 } while (cur_overflow_list != observed_overflow_list);
8769 }
8770 #undef BUSY
8772 // Single threaded
8773 // General Note on GrowableArray: pushes may silently fail
8774 // because we are (temporarily) out of C-heap for expanding
8775 // the stack. The problem is quite ubiquitous and affects
8776 // a lot of code in the JVM. The prudent thing for GrowableArray
8777 // to do (for now) is to exit with an error. However, that may
8778 // be too draconian in some cases because the caller may be
8779 // able to recover without much harm. For such cases, we
8780 // should probably introduce a "soft_push" method which returns
8781 // an indication of success or failure with the assumption that
8782 // the caller may be able to recover from a failure; code in
8783 // the VM can then be changed, incrementally, to deal with such
8784 // failures where possible, thus, incrementally hardening the VM
8785 // in such low resource situations.
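// A hypothetical soft_push for GrowableArray might look like the sketch
// below (illustrative only; the field names _data/_len/_max and the
// non-aborting helper try_grow() are assumptions, not the current API):
//   template <class E> bool GrowableArray<E>::soft_push(const E& elem) {
//     if (_len == _max && !try_grow()) {
//       return false;   // out of C-heap: let the caller attempt recovery
//     }
//     _data[_len++] = elem;
//     return true;
//   }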
8786 void CMSCollector::preserve_mark_work(oop p, markOop m) {
8787 if (_preserved_oop_stack == NULL) {
8788 assert(_preserved_mark_stack == NULL,
8789 "bijection with preserved_oop_stack");
8790 // Allocate the stacks
8791 _preserved_oop_stack = new (ResourceObj::C_HEAP)
8792 GrowableArray<oop>(PreserveMarkStackSize, true);
8793 _preserved_mark_stack = new (ResourceObj::C_HEAP)
8794 GrowableArray<markOop>(PreserveMarkStackSize, true);
8795 if (_preserved_oop_stack == NULL || _preserved_mark_stack == NULL) {
8796 vm_exit_out_of_memory(2 * PreserveMarkStackSize * sizeof(oop) /* punt */,
8797 "Preserved Mark/Oop Stack for CMS (C-heap)");
8798 }
8799 }
8800 _preserved_oop_stack->push(p);
8801 _preserved_mark_stack->push(m);
8802 assert(m == p->mark(), "Mark word changed");
8803 assert(_preserved_oop_stack->length() == _preserved_mark_stack->length(),
8804 "bijection");
8805 }
8807 // Single threaded
8808 void CMSCollector::preserve_mark_if_necessary(oop p) {
8809 markOop m = p->mark();
8810 if (m->must_be_preserved(p)) {
8811 preserve_mark_work(p, m);
8812 }
8813 }
8815 void CMSCollector::par_preserve_mark_if_necessary(oop p) {
8816 markOop m = p->mark();
8817 if (m->must_be_preserved(p)) {
8818 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
8819 // Even though we read the mark word without holding
8820 // the lock, we are assured that it will not change
8821 // because we "own" this oop, so no other thread can
8822 // be trying to push it on the overflow list; see
8823 // the assertion in preserve_mark_work() that checks
8824 // that m == p->mark().
8825 preserve_mark_work(p, m);
8826 }
8827 }
8829 // We should be able to do this multi-threaded,
8830 // a chunk of stack being a task (this is
8831 // correct because each oop only ever appears
8832 // once in the overflow list). However, it's
8833 // not very easy to completely overlap this with
8834 // other operations, so will generally not be done
8835 // until all work's been completed. Because we
8836 // expect the preserved oop stack (set) to be small,
8837 // it's probably fine to do this single-threaded.
8838 // We can explore cleverer concurrent/overlapped/parallel
8839 // processing of preserved marks if we feel the
8840 // need for this in the future. Stack overflow should
8841 // be so rare in practice and, when it happens, its
8842 // effect on performance so great that this will
8843 // likely just be in the noise anyway.
8844 void CMSCollector::restore_preserved_marks_if_any() {
8845 if (_preserved_oop_stack == NULL) {
8846 assert(_preserved_mark_stack == NULL,
8847 "bijection with preserved_oop_stack");
8848 return;
8849 }
8851 assert(SafepointSynchronize::is_at_safepoint(),
8852 "world should be stopped");
8853 assert(Thread::current()->is_ConcurrentGC_thread() ||
8854 Thread::current()->is_VM_thread(),
8855 "should be single-threaded");
8857 int length = _preserved_oop_stack->length();
8858 assert(_preserved_mark_stack->length() == length, "bijection");
8859 for (int i = 0; i < length; i++) {
8860 oop p = _preserved_oop_stack->at(i);
8861 assert(p->is_oop(), "Should be an oop");
8862 assert(_span.contains(p), "oop should be in _span");
8863 assert(p->mark() == markOopDesc::prototype(),
8864 "Set when taken from overflow list");
8865 markOop m = _preserved_mark_stack->at(i);
8866 p->set_mark(m);
8867 }
8868 _preserved_mark_stack->clear();
8869 _preserved_oop_stack->clear();
8870 assert(_preserved_mark_stack->is_empty() &&
8871 _preserved_oop_stack->is_empty(),
8872 "stacks were cleared above");
8873 }
8875 #ifndef PRODUCT
8876 bool CMSCollector::no_preserved_marks() const {
8877 return ( ( _preserved_mark_stack == NULL
8878 && _preserved_oop_stack == NULL)
8879 || ( _preserved_mark_stack->is_empty()
8880 && _preserved_oop_stack->is_empty()));
8881 }
8882 #endif
8884 CMSAdaptiveSizePolicy* ASConcurrentMarkSweepGeneration::cms_size_policy() const
8885 {
8886 GenCollectedHeap* gch = (GenCollectedHeap*) GenCollectedHeap::heap();
8887 CMSAdaptiveSizePolicy* size_policy =
8888 (CMSAdaptiveSizePolicy*) gch->gen_policy()->size_policy();
8889 assert(size_policy->is_gc_cms_adaptive_size_policy(),
8890 "Wrong type for size policy");
8891 return size_policy;
8892 }
8894 void ASConcurrentMarkSweepGeneration::resize(size_t cur_promo_size,
8895 size_t desired_promo_size) {
8896 if (cur_promo_size < desired_promo_size) {
8897 size_t expand_bytes = desired_promo_size - cur_promo_size;
8898 if (PrintAdaptiveSizePolicy && Verbose) {
8899 gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
8900 "Expanding tenured generation by " SIZE_FORMAT " (bytes)",
8901 expand_bytes);
8902 }
8903 expand(expand_bytes,
8904 MinHeapDeltaBytes,
8905 CMSExpansionCause::_adaptive_size_policy);
8906 } else if (desired_promo_size < cur_promo_size) {
8907 size_t shrink_bytes = cur_promo_size - desired_promo_size;
8908 if (PrintAdaptiveSizePolicy && Verbose) {
8909 gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
8910 "Shrinking tenured generation by " SIZE_FORMAT " (bytes)",
8911 shrink_bytes);
8912 }
8913 shrink(shrink_bytes);
8914 }
8915 }
8917 CMSGCAdaptivePolicyCounters* ASConcurrentMarkSweepGeneration::gc_adaptive_policy_counters() {
8918 GenCollectedHeap* gch = GenCollectedHeap::heap();
8919 CMSGCAdaptivePolicyCounters* counters =
8920 (CMSGCAdaptivePolicyCounters*) gch->collector_policy()->counters();
8921 assert(counters->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
8922 "Wrong kind of counters");
8923 return counters;
8924 }
8927 void ASConcurrentMarkSweepGeneration::update_counters() {
8928 if (UsePerfData) {
8929 _space_counters->update_all();
8930 _gen_counters->update_all();
8931 CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
8932 GenCollectedHeap* gch = GenCollectedHeap::heap();
8933 CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
8934 assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
8935 "Wrong gc statistics type");
8936 counters->update_counters(gc_stats_l);
8937 }
8938 }
8940 void ASConcurrentMarkSweepGeneration::update_counters(size_t used) {
8941 if (UsePerfData) {
8942 _space_counters->update_used(used);
8943 _space_counters->update_capacity();
8944 _gen_counters->update_all();
8946 CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
8947 GenCollectedHeap* gch = GenCollectedHeap::heap();
8948 CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
8949 assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
8950 "Wrong gc statistics type");
8951 counters->update_counters(gc_stats_l);
8952 }
8953 }
8955 // The desired expansion delta is computed so that
8956 // the desired free percentage (or greater) is used.
8957 void ASConcurrentMarkSweepGeneration::compute_new_size() {
8958 assert_locked_or_safepoint(Heap_lock);
8960 GenCollectedHeap* gch = (GenCollectedHeap*) GenCollectedHeap::heap();
8962 // If incremental collection failed, we just want to expand
8963 // to the limit.
8964 if (incremental_collection_failed()) {
8965 clear_incremental_collection_failed();
8966 grow_to_reserved();
8967 return;
8968 }
8970 assert(UseAdaptiveSizePolicy, "Should be using adaptive sizing");
8972 assert(gch->kind() == CollectedHeap::GenCollectedHeap,
8973 "Wrong type of heap");
8974 int prev_level = level() - 1;
8975 assert(prev_level >= 0, "The cms generation is the lowest generation");
8976 Generation* prev_gen = gch->get_gen(prev_level);
8977 assert(prev_gen->kind() == Generation::ASParNew,
8978 "Wrong type of young generation");
8979 ParNewGeneration* younger_gen = (ParNewGeneration*) prev_gen;
8980 size_t cur_eden = younger_gen->eden()->capacity();
8981 CMSAdaptiveSizePolicy* size_policy = cms_size_policy();
8982 size_t cur_promo = free();
8983 size_policy->compute_tenured_generation_free_space(cur_promo,
8984 max_available(),
8985 cur_eden);
8986 resize(cur_promo, size_policy->promo_size());
8988 // Record the new size of the space in the cms generation
8989 // that is available for promotions. This is temporary.
8990 // It should be the desired promo size.
8991 size_policy->avg_cms_promo()->sample(free());
8992 size_policy->avg_old_live()->sample(used());
8994 if (UsePerfData) {
8995 CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
8996 counters->update_cms_capacity_counter(capacity());
8997 }
8998 }
9000 void ASConcurrentMarkSweepGeneration::shrink_by(size_t desired_bytes) {
9001 assert_locked_or_safepoint(Heap_lock);
9002 assert_lock_strong(freelistLock());
9003 HeapWord* old_end = _cmsSpace->end();
9004 HeapWord* unallocated_start = _cmsSpace->unallocated_block();
9005 assert(old_end >= unallocated_start, "Miscalculation of unallocated_start");
9006 FreeChunk* chunk_at_end = find_chunk_at_end();
9007 if (chunk_at_end == NULL) {
9008 // No room to shrink
9009 if (PrintGCDetails && Verbose) {
9010 gclog_or_tty->print_cr("No room to shrink: old_end "
9011 PTR_FORMAT " unallocated_start " PTR_FORMAT
9012 " chunk_at_end " PTR_FORMAT,
9013 old_end, unallocated_start, chunk_at_end);
9014 }
9015 return;
9016 } else {
9018 // Find the chunk at the end of the space and determine
9019 // how much it can be shrunk.
9020 size_t shrinkable_size_in_bytes = chunk_at_end->size();
9021 size_t aligned_shrinkable_size_in_bytes =
9022 align_size_down(shrinkable_size_in_bytes, os::vm_page_size());
9023 assert(unallocated_start <= chunk_at_end->end(),
9024 "Inconsistent chunk at end of space");
9025 size_t bytes = MIN2(desired_bytes, aligned_shrinkable_size_in_bytes);
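// For example: if desired_bytes is 1M but the end chunk holds only 512K
// after page alignment, we shrink by 512K, never more than the chunk.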
9026 size_t word_size_before = heap_word_size(_virtual_space.committed_size());
9028 // Shrink the underlying space
9029 _virtual_space.shrink_by(bytes);
9030 if (PrintGCDetails && Verbose) {
9031 gclog_or_tty->print_cr("ConcurrentMarkSweepGeneration::shrink_by:"
9032 " desired_bytes " SIZE_FORMAT
9033 " shrinkable_size_in_bytes " SIZE_FORMAT
9034 " aligned_shrinkable_size_in_bytes " SIZE_FORMAT
9035 " bytes " SIZE_FORMAT,
9036 desired_bytes, shrinkable_size_in_bytes,
9037 aligned_shrinkable_size_in_bytes, bytes);
9038 gclog_or_tty->print_cr(" old_end " PTR_FORMAT
9039 " unallocated_start " PTR_FORMAT,
9040 old_end, unallocated_start);
9041 }
9043 // If the space did shrink (shrinking is not guaranteed),
9044 // shrink the chunk at the end by the appropriate amount.
9045 if (((HeapWord*)_virtual_space.high()) < old_end) {
9046 size_t new_word_size =
9047 heap_word_size(_virtual_space.committed_size());
9049 // Have to remove the chunk from the dictionary because it is changing
9050 // size and might be somewhere else in the dictionary.
9052 // Get the chunk at end, shrink it, and put it
9053 // back.
9054 _cmsSpace->removeChunkFromDictionary(chunk_at_end);
9055 size_t word_size_change = word_size_before - new_word_size;
9056 size_t chunk_at_end_old_size = chunk_at_end->size();
9057 assert(chunk_at_end_old_size >= word_size_change,
9058 "Shrink is too large");
9059 chunk_at_end->setSize(chunk_at_end_old_size -
9060 word_size_change);
9061 _cmsSpace->freed((HeapWord*) chunk_at_end->end(),
9062 word_size_change);
9064 _cmsSpace->returnChunkToDictionary(chunk_at_end);
9066 MemRegion mr(_cmsSpace->bottom(), new_word_size);
9067 _bts->resize(new_word_size); // resize the block offset shared array
9068 Universe::heap()->barrier_set()->resize_covered_region(mr);
9069 _cmsSpace->assert_locked();
9070 _cmsSpace->set_end((HeapWord*)_virtual_space.high());
9072 NOT_PRODUCT(_cmsSpace->dictionary()->verify());
9074 // update the space and generation capacity counters
9075 if (UsePerfData) {
9076 _space_counters->update_capacity();
9077 _gen_counters->update_all();
9078 }
9080 if (Verbose && PrintGCDetails) {
9081 size_t new_mem_size = _virtual_space.committed_size();
9082 size_t old_mem_size = new_mem_size + bytes;
9083 gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
9084 name(), old_mem_size/K, bytes/K, new_mem_size/K);
9085 }
9086 }
9088 assert(_cmsSpace->unallocated_block() <= _cmsSpace->end(),
9089 "Inconsistency at end of space");
9090 assert(chunk_at_end->end() == _cmsSpace->end(),
9091 "Shrinking is inconsistent");
9092 return;
9093 }
9094 }
9096 // Transfer some number of overflowed objects to the usual marking
9097 // stack. Return true if some objects were transferred.
9098 bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
9099 size_t num = MIN2((size_t)(_mark_stack->capacity() - _mark_stack->length())/4,
9100 (size_t)ParGCDesiredObjsFromOverflowList);
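// Worked example (illustrative): with 4000 free slots on the mark stack,
// num == MIN2(4000/4, ParGCDesiredObjsFromOverflowList), so at most a
// quarter of the remaining headroom is consumed in one transfer.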
9102 bool res = _collector->take_from_overflow_list(num, _mark_stack);
9103 assert(_collector->overflow_list_is_empty() || res,
9104 "If list is not empty, we should have taken something");
9105 assert(!res || !_mark_stack->isEmpty(),
9106 "If we took something, it should now be on our stack");
9107 return res;
9108 }
9110 size_t MarkDeadObjectsClosure::do_blk(HeapWord* addr) {
9111 size_t res = _sp->block_size_no_stall(addr, _collector);
9112 assert(res != 0, "Should always be able to compute a size");
9113 if (_sp->block_is_obj(addr)) {
9114 if (_live_bit_map->isMarked(addr)) {
9115 // It can't have been dead in a previous cycle
9116 guarantee(!_dead_bit_map->isMarked(addr), "No resurrection!");
9117 } else {
9118 _dead_bit_map->mark(addr); // mark the dead object
9119 }
9120 }
9121 return res;
9122 }