Mon, 01 Dec 2014 15:24:56 +0100
8075210: Refactor strong root processing in order to allow G1 to evolve separately from GenCollectedHeap
Summary: Create a G1RootProcessor and move SharedHeap root processing to GenCollectedHeap
Reviewed-by: brutisso, tschatzl, ehelin
1 /*
2 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "classfile/classLoaderData.hpp"
27 #include "classfile/symbolTable.hpp"
28 #include "classfile/systemDictionary.hpp"
29 #include "code/codeCache.hpp"
30 #include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
31 #include "gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp"
32 #include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
33 #include "gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp"
34 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
35 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
36 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
37 #include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp"
38 #include "gc_implementation/parNew/parNewGeneration.hpp"
39 #include "gc_implementation/shared/collectorCounters.hpp"
40 #include "gc_implementation/shared/gcTimer.hpp"
41 #include "gc_implementation/shared/gcTrace.hpp"
42 #include "gc_implementation/shared/gcTraceTime.hpp"
43 #include "gc_implementation/shared/isGCActiveMark.hpp"
44 #include "gc_interface/collectedHeap.inline.hpp"
45 #include "memory/allocation.hpp"
46 #include "memory/cardTableRS.hpp"
47 #include "memory/collectorPolicy.hpp"
48 #include "memory/gcLocker.inline.hpp"
49 #include "memory/genCollectedHeap.hpp"
50 #include "memory/genMarkSweep.hpp"
51 #include "memory/genOopClosures.inline.hpp"
52 #include "memory/iterator.inline.hpp"
53 #include "memory/padded.hpp"
54 #include "memory/referencePolicy.hpp"
55 #include "memory/resourceArea.hpp"
56 #include "memory/tenuredGeneration.hpp"
57 #include "oops/oop.inline.hpp"
58 #include "prims/jvmtiExport.hpp"
59 #include "runtime/globals_extension.hpp"
60 #include "runtime/handles.inline.hpp"
61 #include "runtime/java.hpp"
62 #include "runtime/orderAccess.inline.hpp"
63 #include "runtime/vmThread.hpp"
64 #include "services/memoryService.hpp"
65 #include "services/runtimeService.hpp"
67 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
69 // statics
70 CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
71 bool CMSCollector::_full_gc_requested = false;
72 GCCause::Cause CMSCollector::_full_gc_cause = GCCause::_no_gc;
74 //////////////////////////////////////////////////////////////////
75 // In support of CMS/VM thread synchronization
76 //////////////////////////////////////////////////////////////////
77 // We split use of the CGC_lock into 2 "levels".
78 // The low-level locking is of the usual CGC_lock monitor. We introduce
79 // a higher level "token" (hereafter "CMS token") built on top of the
80 // low level monitor (hereafter "CGC lock").
81 // The token-passing protocol gives priority to the VM thread. The
82 // CMS-lock doesn't provide any fairness guarantees, but clients
83 // should ensure that it is only held for very short, bounded
84 // durations.
85 //
86 // When either of the CMS thread or the VM thread is involved in
87 // collection operations during which it does not want the other
88 // thread to interfere, it obtains the CMS token.
89 //
90 // If either thread tries to get the token while the other has
91 // it, that thread waits. However, if the VM thread and CMS thread
92 // both want the token, then the VM thread gets priority while the
93 // CMS thread waits. This ensures, for instance, that the "concurrent"
94 // phases of the CMS thread's work do not block out the VM thread
95 // for long periods of time as the CMS thread continues to hog
96 // the token. (See bug 4616232).
97 //
98 // The baton-passing functions are, however, controlled by the
99 // flags _foregroundGCShouldWait and _foregroundGCIsActive,
100 // and here the low-level CMS lock, not the high level token,
101 // ensures mutual exclusion.
102 //
103 // Two important conditions that we have to satisfy:
104 // 1. if a thread does a low-level wait on the CMS lock, then it
105 // relinquishes the CMS token if it was holding that token
106 // when it acquired the low-level CMS lock.
107 // 2. any low-level notifications on the low-level lock
108 // should only be sent when a thread has relinquished the token.
109 //
110 // In the absence of either property, we'd have potential deadlock.
111 //
112 // We protect each of the CMS (concurrent and sequential) phases
113 // with the CMS _token_, not the CMS _lock_.
114 //
115 // The only code protected by CMS lock is the token acquisition code
116 // itself, see ConcurrentMarkSweepThread::[de]synchronize(), and the
117 // baton-passing code.
118 //
119 // Unfortunately, I couldn't come up with a good abstraction to factor and
120 // hide the naked CGC_lock manipulation in the baton-passing code
121 // further below. That's something we should try to do. Also, the proof
122 // of correctness of this 2-level locking scheme is far from obvious,
123 // and potentially quite slippery. We have an uneasy suspicion, for instance,
124 // that there may be a theoretical possibility of delay/starvation in the
125 // low-level lock/wait/notify scheme used for the baton-passing because of
126 // potential interference with the priority scheme embodied in the
127 // CMS-token-passing protocol. See related comments at a CGC_lock->wait()
128 // invocation further below and marked with "XXX 20011219YSR".
129 // Indeed, as we note elsewhere, this may become yet more slippery
130 // in the presence of multiple CMS and/or multiple VM threads. XXX
132 class CMSTokenSync: public StackObj {
133 private:
134 bool _is_cms_thread;
135 public:
136 CMSTokenSync(bool is_cms_thread):
137 _is_cms_thread(is_cms_thread) {
138 assert(is_cms_thread == Thread::current()->is_ConcurrentGC_thread(),
139 "Incorrect argument to constructor");
140 ConcurrentMarkSweepThread::synchronize(_is_cms_thread);
141 }
143 ~CMSTokenSync() {
144 assert(_is_cms_thread ?
145 ConcurrentMarkSweepThread::cms_thread_has_cms_token() :
146 ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
147 "Incorrect state");
148 ConcurrentMarkSweepThread::desynchronize(_is_cms_thread);
149 }
150 };
152 // Convenience class that does a CMSTokenSync, and then acquires
153 // up to three locks.
154 class CMSTokenSyncWithLocks: public CMSTokenSync {
155 private:
156 // Note: locks are acquired in textual declaration order
157 // and released in the opposite order
158 MutexLockerEx _locker1, _locker2, _locker3;
159 public:
160 CMSTokenSyncWithLocks(bool is_cms_thread, Mutex* mutex1,
161 Mutex* mutex2 = NULL, Mutex* mutex3 = NULL):
162 CMSTokenSync(is_cms_thread),
163 _locker1(mutex1, Mutex::_no_safepoint_check_flag),
164 _locker2(mutex2, Mutex::_no_safepoint_check_flag),
165 _locker3(mutex3, Mutex::_no_safepoint_check_flag)
166 { }
167 };
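// Editorial sketch (not part of the original file): a typical, hypothetical
// use of the helper above. A CMS-thread phase that must also exclude
// free-list mutators takes the token and the lock in one scoped object;
// the locks are released first, then the token, on scope exit:
//
//   {
//     CMSTokenSyncWithLocks ts(true /* is_cms_thread */,
//                              _cmsGen->freelistLock());
//     // ... work that must not race with the VM thread or allocators ...
//   }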
170 // Wrapper class to temporarily disable icms during a foreground cms collection.
171 class ICMSDisabler: public StackObj {
172 public:
173 // The ctor disables icms and wakes up the thread so it notices the change;
174 // the dtor re-enables icms. Note that the CMSCollector methods will check
175 // CMSIncrementalMode.
176 ICMSDisabler() { CMSCollector::disable_icms(); CMSCollector::start_icms(); }
177 ~ICMSDisabler() { CMSCollector::enable_icms(); }
178 };
180 //////////////////////////////////////////////////////////////////
181 // Concurrent Mark-Sweep Generation /////////////////////////////
182 //////////////////////////////////////////////////////////////////
184 NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;)
186 // This struct contains per-thread things necessary to support parallel
187 // young-gen collection.
188 class CMSParGCThreadState: public CHeapObj<mtGC> {
189 public:
190 CFLS_LAB lab;
191 PromotionInfo promo;
193 // Constructor.
194 CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
195 promo.setSpace(cfls);
196 }
197 };
199 ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
200 ReservedSpace rs, size_t initial_byte_size, int level,
201 CardTableRS* ct, bool use_adaptive_freelists,
202 FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
203 CardGeneration(rs, initial_byte_size, level, ct),
204 _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
205 _debug_collection_type(Concurrent_collection_type),
206 _did_compact(false)
207 {
208 HeapWord* bottom = (HeapWord*) _virtual_space.low();
209 HeapWord* end = (HeapWord*) _virtual_space.high();
211 _direct_allocated_words = 0;
212 NOT_PRODUCT(
213 _numObjectsPromoted = 0;
214 _numWordsPromoted = 0;
215 _numObjectsAllocated = 0;
216 _numWordsAllocated = 0;
217 )
219 _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end),
220 use_adaptive_freelists,
221 dictionaryChoice);
222 NOT_PRODUCT(debug_cms_space = _cmsSpace;)
223 if (_cmsSpace == NULL) {
224 vm_exit_during_initialization(
225 "CompactibleFreeListSpace allocation failure");
226 }
227 _cmsSpace->_gen = this;
229 _gc_stats = new CMSGCStats();
231 // Verify the assumption that FreeChunk::_prev and OopDesc::_klass
232 // offsets match. The ability to tell free chunks from objects
233 // depends on this property.
234 debug_only(
235 FreeChunk* junk = NULL;
236 assert(UseCompressedClassPointers ||
237 junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
238 "Offset of FreeChunk::_prev within FreeChunk must match"
239 " that of OopDesc::_klass within OopDesc");
240 )
241 if (CollectedHeap::use_parallel_gc_threads()) {
242 typedef CMSParGCThreadState* CMSParGCThreadStatePtr;
243 _par_gc_thread_states =
244 NEW_C_HEAP_ARRAY(CMSParGCThreadStatePtr, ParallelGCThreads, mtGC);
245 if (_par_gc_thread_states == NULL) {
246 vm_exit_during_initialization("Could not allocate par gc structs");
247 }
248 for (uint i = 0; i < ParallelGCThreads; i++) {
249 _par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace());
250 if (_par_gc_thread_states[i] == NULL) {
251 vm_exit_during_initialization("Could not allocate par gc structs");
252 }
253 }
254 } else {
255 _par_gc_thread_states = NULL;
256 }
257 _incremental_collection_failed = false;
258 // The "dilatation_factor" is the expansion that can occur on
259 // account of the fact that the minimum object size in the CMS
260 // generation may be larger than that in, say, a contiguous young
261 // generation.
262 // Ideally, in the calculation below, we'd compute the dilatation
263 // factor as: MinChunkSize/(promoting_gen's min object size)
264 // Since we do not have such a general query interface for the
265 // promoting generation, we'll instead just use the minimum
266 // object size (which today is a header's worth of space);
267 // note that all arithmetic is in units of HeapWords.
268 assert(MinChunkSize >= CollectedHeap::min_fill_size(), "just checking");
269 assert(_dilatation_factor >= 1.0, "from previous assert");
270 }
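// Editorial sketch (hypothetical word counts): the dilatation factor above is
// simply MinChunkSize / min_fill_size, both in HeapWords. If the smallest CMS
// chunk were 4 words and the promoting generation's smallest object a 2-word
// bare header, then
//
//   _dilatation_factor = 4.0 / 2.0 = 2.0
//
// i.e. promoted data could occupy up to twice its young-gen footprint, which
// is why the factor is asserted to be >= 1.0 below.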
273 // The field "_initiating_occupancy" represents the occupancy percentage
274 // at which we trigger a new collection cycle. Unless explicitly specified
275 // via CMSInitiatingOccupancyFraction (argument "io" below), it
276 // is calculated by:
277 //
278 // Let "f" be MinHeapFreeRatio in
279 //
280 // _initiating_occupancy = 100-f +
281 // f * (CMSTriggerRatio/100)
282 // where CMSTriggerRatio is the argument "tr" below.
283 //
284 // That is, if we assume the heap is at its desired maximum occupancy at the
285 // end of a collection, we let CMSTriggerRatio of the (purported) free
286 // space be allocated before initiating a new collection cycle.
287 //
288 void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, uintx tr) {
289 assert(io <= 100 && tr <= 100, "Check the arguments");
290 if (io >= 0) {
291 _initiating_occupancy = (double)io / 100.0;
292 } else {
293 _initiating_occupancy = ((100 - MinHeapFreeRatio) +
294 (double)(tr * MinHeapFreeRatio) / 100.0)
295 / 100.0;
296 }
297 }
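// Editorial sketch (hypothetical flag values): with io < 0 (no explicit
// CMSInitiatingOccupancyFraction), MinHeapFreeRatio f = 40 and
// CMSTriggerRatio tr = 80, the else-branch above computes
//
//   _initiating_occupancy = ((100 - 40) + (80 * 40) / 100) / 100
//                         = (60 + 32) / 100
//                         = 0.92
//
// i.e. a new cycle starts once the generation reaches 92% occupancy, after
// 80% of the desired free space has been consumed.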
299 void ConcurrentMarkSweepGeneration::ref_processor_init() {
300 assert(collector() != NULL, "no collector");
301 collector()->ref_processor_init();
302 }
304 void CMSCollector::ref_processor_init() {
305 if (_ref_processor == NULL) {
306 // Allocate and initialize a reference processor
307 _ref_processor =
308 new ReferenceProcessor(_span, // span
309 (ParallelGCThreads > 1) && ParallelRefProcEnabled, // mt processing
310 (int) ParallelGCThreads, // mt processing degree
311 _cmsGen->refs_discovery_is_mt(), // mt discovery
312 (int) MAX2(ConcGCThreads, ParallelGCThreads), // mt discovery degree
313 _cmsGen->refs_discovery_is_atomic(), // whether discovery is atomic (false for CMS)
314 &_is_alive_closure); // closure for liveness info
315 // Initialize the _ref_processor field of CMSGen
316 _cmsGen->set_ref_processor(_ref_processor);
318 }
319 }
321 CMSAdaptiveSizePolicy* CMSCollector::size_policy() {
322 GenCollectedHeap* gch = GenCollectedHeap::heap();
323 assert(gch->kind() == CollectedHeap::GenCollectedHeap,
324 "Wrong type of heap");
325 CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
326 gch->gen_policy()->size_policy();
327 assert(sp->is_gc_cms_adaptive_size_policy(),
328 "Wrong type of size policy");
329 return sp;
330 }
332 CMSGCAdaptivePolicyCounters* CMSCollector::gc_adaptive_policy_counters() {
333 CMSGCAdaptivePolicyCounters* results =
334 (CMSGCAdaptivePolicyCounters*) collector_policy()->counters();
335 assert(
336 results->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
337 "Wrong gc policy counter kind");
338 return results;
339 }
342 void ConcurrentMarkSweepGeneration::initialize_performance_counters() {
344 const char* gen_name = "old";
346 // Generation Counters - generation 1, 1 subspace
347 _gen_counters = new GenerationCounters(gen_name, 1, 1, &_virtual_space);
349 _space_counters = new GSpaceCounters(gen_name, 0,
350 _virtual_space.reserved_size(),
351 this, _gen_counters);
352 }
354 CMSStats::CMSStats(ConcurrentMarkSweepGeneration* cms_gen, unsigned int alpha):
355 _cms_gen(cms_gen)
356 {
357 assert(alpha <= 100, "bad value");
358 _saved_alpha = alpha;
360 // Initialize the alphas to the bootstrap value of 100.
361 _gc0_alpha = _cms_alpha = 100;
363 _cms_begin_time.update();
364 _cms_end_time.update();
366 _gc0_duration = 0.0;
367 _gc0_period = 0.0;
368 _gc0_promoted = 0;
370 _cms_duration = 0.0;
371 _cms_period = 0.0;
372 _cms_allocated = 0;
374 _cms_used_at_gc0_begin = 0;
375 _cms_used_at_gc0_end = 0;
376 _allow_duty_cycle_reduction = false;
377 _valid_bits = 0;
378 _icms_duty_cycle = CMSIncrementalDutyCycle;
379 }
381 double CMSStats::cms_free_adjustment_factor(size_t free) const {
382 // TBD: CR 6909490
383 return 1.0;
384 }
386 void CMSStats::adjust_cms_free_adjustment_factor(bool fail, size_t free) {
387 }
389 // If promotion failure handling is on, use
390 // the padded average size of the promotion for each
391 // young generation collection.
392 double CMSStats::time_until_cms_gen_full() const {
393 size_t cms_free = _cms_gen->cmsSpace()->free();
394 GenCollectedHeap* gch = GenCollectedHeap::heap();
395 size_t expected_promotion = MIN2(gch->get_gen(0)->capacity(),
396 (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
397 if (cms_free > expected_promotion) {
398 // Start a cms collection if there isn't enough space to promote
399 // for the next minor collection. Use the padded average as
400 // a safety factor.
401 cms_free -= expected_promotion;
403 // Adjust by the safety factor.
404 double cms_free_dbl = (double)cms_free;
405 double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor)/100.0;
406 // Apply a further correction factor which tries to adjust
407 // for recent occurrence of concurrent mode failures.
408 cms_adjustment = cms_adjustment * cms_free_adjustment_factor(cms_free);
409 cms_free_dbl = cms_free_dbl * cms_adjustment;
411 if (PrintGCDetails && Verbose) {
412 gclog_or_tty->print_cr("CMSStats::time_until_cms_gen_full: cms_free "
413 SIZE_FORMAT " expected_promotion " SIZE_FORMAT,
414 cms_free, expected_promotion);
415 gclog_or_tty->print_cr(" cms_free_dbl %f cms_consumption_rate %f",
416 cms_free_dbl, cms_consumption_rate() + 1.0);
417 }
418 // Add 1 in case the consumption rate goes to zero.
419 return cms_free_dbl / (cms_consumption_rate() + 1.0);
420 }
421 return 0.0;
422 }
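// Editorial sketch (hypothetical numbers): suppose cms_free = 120M words,
// expected_promotion = 20M words, CMSIncrementalSafetyFactor = 10 and an
// adjustment factor of 1.0 (the CR 6909490 stub above). Then
//
//   headroom        = (120M - 20M) * (100 - 10) / 100 = 90M words
//   time_until_full = 90M / (cms_consumption_rate() + 1.0)
//
// The "+ 1.0" keeps the division defined if the measured consumption rate
// drops to zero.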
424 // Compare the duration of the cms collection to the
425 // time remaining before the cms generation is empty.
426 // Note that the time from the start of the cms collection
427 // to the start of the cms sweep (less than the total
428 // duration of the cms collection) can be used. This
429 // has been tried and some applications experienced
430 // promotion failures early in execution. This was
431 // possibly because the averages were not accurate
432 // enough at the beginning.
433 double CMSStats::time_until_cms_start() const {
434 // We add "gc0_period" to the "work" calculation
435 // below because this query is done (mostly) at the
436 // end of a scavenge, so we need to conservatively
437 // account for that much possible delay
438 // in the query so as to avoid concurrent mode failures
439 // due to starting the collection just a wee bit too
440 // late.
441 double work = cms_duration() + gc0_period();
442 double deadline = time_until_cms_gen_full();
443 // If a concurrent mode failure occurred recently, we want to be
444 // more conservative and halve our expected time_until_cms_gen_full()
445 if (work > deadline) {
446 if (Verbose && PrintGCDetails) {
447 gclog_or_tty->print(
448 " CMSCollector: collect because of anticipated promotion "
449 "before full %3.7f + %3.7f > %3.7f ", cms_duration(),
450 gc0_period(), time_until_cms_gen_full());
451 }
452 return 0.0;
453 }
454 return work - deadline;
455 }
457 // Return a duty cycle based on old_duty_cycle and new_duty_cycle, limiting the
458 // amount of change to prevent wild oscillation.
459 unsigned int CMSStats::icms_damped_duty_cycle(unsigned int old_duty_cycle,
460 unsigned int new_duty_cycle) {
461 assert(old_duty_cycle <= 100, "bad input value");
462 assert(new_duty_cycle <= 100, "bad input value");
464 // Note: use subtraction with caution since it may underflow (values are
465 // unsigned). Addition is safe since we're in the range 0-100.
466 unsigned int damped_duty_cycle = new_duty_cycle;
467 if (new_duty_cycle < old_duty_cycle) {
468 const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 5U);
469 if (new_duty_cycle + largest_delta < old_duty_cycle) {
470 damped_duty_cycle = old_duty_cycle - largest_delta;
471 }
472 } else if (new_duty_cycle > old_duty_cycle) {
473 const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 15U);
474 if (new_duty_cycle > old_duty_cycle + largest_delta) {
475 damped_duty_cycle = MIN2(old_duty_cycle + largest_delta, 100U);
476 }
477 }
478 assert(damped_duty_cycle <= 100, "invalid duty cycle computed");
480 if (CMSTraceIncrementalPacing) {
481 gclog_or_tty->print(" [icms_damped_duty_cycle(%d,%d) = %d] ",
482 old_duty_cycle, new_duty_cycle, damped_duty_cycle);
483 }
484 return damped_duty_cycle;
485 }
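// Editorial sketch (hypothetical inputs) of the damping above. Decreasing,
// old = 60, new = 10: largest_delta = MAX2(60/4, 5) = 15 and 10 + 15 < 60,
// so the result is clipped to 60 - 15 = 45. Increasing, old = 20, new = 90:
// largest_delta = MAX2(20/4, 15) = 15 and 90 > 20 + 15, so the result is
// clipped to MIN2(20 + 15, 100) = 35. Note the asymmetric floors (5 down,
// 15 up): a small duty cycle can grow faster than it is allowed to shrink.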
487 unsigned int CMSStats::icms_update_duty_cycle_impl() {
488 assert(CMSIncrementalPacing && valid(),
489 "should be handled in icms_update_duty_cycle()");
491 double cms_time_so_far = cms_timer().seconds();
492 double scaled_duration = cms_duration_per_mb() * _cms_used_at_gc0_end / M;
493 double scaled_duration_remaining = fabsd(scaled_duration - cms_time_so_far);
495 // Avoid division by 0.
496 double time_until_full = MAX2(time_until_cms_gen_full(), 0.01);
497 double duty_cycle_dbl = 100.0 * scaled_duration_remaining / time_until_full;
499 unsigned int new_duty_cycle = MIN2((unsigned int)duty_cycle_dbl, 100U);
500 if (new_duty_cycle > _icms_duty_cycle) {
501 // Avoid very small duty cycles (1 or 2); 0 is allowed.
502 if (new_duty_cycle > 2) {
503 _icms_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle,
504 new_duty_cycle);
505 }
506 } else if (_allow_duty_cycle_reduction) {
507 // The duty cycle is reduced only once per cms cycle (see record_cms_end()).
508 new_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle, new_duty_cycle);
509 // Respect the minimum duty cycle.
510 unsigned int min_duty_cycle = (unsigned int)CMSIncrementalDutyCycleMin;
511 _icms_duty_cycle = MAX2(new_duty_cycle, min_duty_cycle);
512 }
514 if (PrintGCDetails || CMSTraceIncrementalPacing) {
515 gclog_or_tty->print(" icms_dc=%d ", _icms_duty_cycle);
516 }
518 _allow_duty_cycle_reduction = false;
519 return _icms_duty_cycle;
520 }
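// Editorial sketch (hypothetical numbers): if the scaled CMS duration for the
// current occupancy is 4.0s and 1.0s has already elapsed, then
// scaled_duration_remaining = 3.0s; with time_until_full = 6.0s the raw duty
// cycle above is 100 * 3.0 / 6.0 = 50, i.e. icms requests the CPU for half
// of the time remaining before the generation fills.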
522 #ifndef PRODUCT
523 void CMSStats::print_on(outputStream *st) const {
524 st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
525 st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
526 gc0_duration(), gc0_period(), gc0_promoted());
527 st->print(",cms_dur=%g,cms_dur_per_mb=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
528 cms_duration(), cms_duration_per_mb(),
529 cms_period(), cms_allocated());
530 st->print(",cms_since_beg=%g,cms_since_end=%g",
531 cms_time_since_begin(), cms_time_since_end());
532 st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
533 _cms_used_at_gc0_begin, _cms_used_at_gc0_end);
534 if (CMSIncrementalMode) {
535 st->print(",dc=%d", icms_duty_cycle());
536 }
538 if (valid()) {
539 st->print(",promo_rate=%g,cms_alloc_rate=%g",
540 promotion_rate(), cms_allocation_rate());
541 st->print(",cms_consumption_rate=%g,time_until_full=%g",
542 cms_consumption_rate(), time_until_cms_gen_full());
543 }
544 st->print(" ");
545 }
546 #endif // #ifndef PRODUCT
548 CMSCollector::CollectorState CMSCollector::_collectorState =
549 CMSCollector::Idling;
550 bool CMSCollector::_foregroundGCIsActive = false;
551 bool CMSCollector::_foregroundGCShouldWait = false;
553 CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
554 CardTableRS* ct,
555 ConcurrentMarkSweepPolicy* cp):
556 _cmsGen(cmsGen),
557 _ct(ct),
558 _ref_processor(NULL), // will be set later
559 _conc_workers(NULL), // may be set later
560 _abort_preclean(false),
561 _start_sampling(false),
562 _between_prologue_and_epilogue(false),
563 _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
564 _modUnionTable((CardTableModRefBS::card_shift - LogHeapWordSize),
565 -1 /* lock-free */, "No_lock" /* dummy */),
566 _modUnionClosure(&_modUnionTable),
567 _modUnionClosurePar(&_modUnionTable),
568 // Adjust my span to cover old (cms) gen
569 _span(cmsGen->reserved()),
570 // Construct the is_alive_closure with _span & markBitMap
571 _is_alive_closure(_span, &_markBitMap),
572 _restart_addr(NULL),
573 _overflow_list(NULL),
574 _stats(cmsGen),
575 _eden_chunk_lock(new Mutex(Mutex::leaf + 1, "CMS_eden_chunk_lock", true)),
576 _eden_chunk_array(NULL), // may be set in ctor body
577 _eden_chunk_capacity(0), // -- ditto --
578 _eden_chunk_index(0), // -- ditto --
579 _survivor_plab_array(NULL), // -- ditto --
580 _survivor_chunk_array(NULL), // -- ditto --
581 _survivor_chunk_capacity(0), // -- ditto --
582 _survivor_chunk_index(0), // -- ditto --
583 _ser_pmc_preclean_ovflw(0),
584 _ser_kac_preclean_ovflw(0),
585 _ser_pmc_remark_ovflw(0),
586 _par_pmc_remark_ovflw(0),
587 _ser_kac_ovflw(0),
588 _par_kac_ovflw(0),
589 #ifndef PRODUCT
590 _num_par_pushes(0),
591 #endif
592 _collection_count_start(0),
593 _verifying(false),
594 _icms_start_limit(NULL),
595 _icms_stop_limit(NULL),
596 _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
597 _completed_initialization(false),
598 _collector_policy(cp),
599 _should_unload_classes(CMSClassUnloadingEnabled),
600 _concurrent_cycles_since_last_unload(0),
601 _roots_scanning_options(GenCollectedHeap::SO_None),
602 _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
603 _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
604 _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) CMSTracer()),
605 _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
606 _cms_start_registered(false)
607 {
608 if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
609 ExplicitGCInvokesConcurrent = true;
610 }
611 // Now expand the span and allocate the collection support structures
612 // (MUT, marking bit map etc.) to cover both generations subject to
613 // collection.
615 // For use by dirty card to oop closures.
616 _cmsGen->cmsSpace()->set_collector(this);
618 // Allocate MUT and marking bit map
619 {
620 MutexLockerEx x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag);
621 if (!_markBitMap.allocate(_span)) {
622 warning("Failed to allocate CMS Bit Map");
623 return;
624 }
625 assert(_markBitMap.covers(_span), "_markBitMap inconsistency?");
626 }
627 {
628 _modUnionTable.allocate(_span);
629 assert(_modUnionTable.covers(_span), "_modUnionTable inconsistency?");
630 }
632 if (!_markStack.allocate(MarkStackSize)) {
633 warning("Failed to allocate CMS Marking Stack");
634 return;
635 }
637 // Support for multi-threaded concurrent phases
638 if (CMSConcurrentMTEnabled) {
639 if (FLAG_IS_DEFAULT(ConcGCThreads)) {
640 // just for now
641 FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3)/4);
642 }
643 if (ConcGCThreads > 1) {
644 _conc_workers = new YieldingFlexibleWorkGang("Parallel CMS Threads",
645 ConcGCThreads, true);
646 if (_conc_workers == NULL) {
647 warning("GC/CMS: _conc_workers allocation failure: "
648 "forcing -CMSConcurrentMTEnabled");
649 CMSConcurrentMTEnabled = false;
650 } else {
651 _conc_workers->initialize_workers();
652 }
653 } else {
654 CMSConcurrentMTEnabled = false;
655 }
656 }
657 if (!CMSConcurrentMTEnabled) {
658 ConcGCThreads = 0;
659 } else {
660 // Turn off CMSCleanOnEnter optimization temporarily for
661 // the MT case where it's not fixed yet; see 6178663.
662 CMSCleanOnEnter = false;
663 }
664 assert((_conc_workers != NULL) == (ConcGCThreads > 1),
665 "Inconsistency");
667 // Parallel task queues; these are shared for the
668 // concurrent and stop-world phases of CMS, but
669 // are not shared with parallel scavenge (ParNew).
670 {
671 uint i;
672 uint num_queues = (uint) MAX2(ParallelGCThreads, ConcGCThreads);
674 if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled
675 || ParallelRefProcEnabled)
676 && num_queues > 0) {
677 _task_queues = new OopTaskQueueSet(num_queues);
678 if (_task_queues == NULL) {
679 warning("task_queues allocation failure.");
680 return;
681 }
682 _hash_seed = NEW_C_HEAP_ARRAY(int, num_queues, mtGC);
683 if (_hash_seed == NULL) {
684 warning("_hash_seed array allocation failure");
685 return;
686 }
688 typedef Padded<OopTaskQueue> PaddedOopTaskQueue;
689 for (i = 0; i < num_queues; i++) {
690 PaddedOopTaskQueue *q = new PaddedOopTaskQueue();
691 if (q == NULL) {
692 warning("work_queue allocation failure.");
693 return;
694 }
695 _task_queues->register_queue(i, q);
696 }
697 for (i = 0; i < num_queues; i++) {
698 _task_queues->queue(i)->initialize();
699 _hash_seed[i] = 17; // copied from ParNew
700 }
701 }
702 }
704 _cmsGen->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);
706 // Clip CMSBootstrapOccupancy between 0 and 100.
707 _bootstrap_occupancy = ((double)CMSBootstrapOccupancy)/(double)100;
709 _full_gcs_since_conc_gc = 0;
711 // Now tell CMS generations the identity of their collector
712 ConcurrentMarkSweepGeneration::set_collector(this);
714 // Create & start a CMS thread for this CMS collector
715 _cmsThread = ConcurrentMarkSweepThread::start(this);
716 assert(cmsThread() != NULL, "CMS Thread should have been created");
717 assert(cmsThread()->collector() == this,
718 "CMS Thread should refer to this gen");
719 assert(CGC_lock != NULL, "Where's the CGC_lock?");
721 // Support for parallelizing young gen rescan
722 GenCollectedHeap* gch = GenCollectedHeap::heap();
723 _young_gen = gch->prev_gen(_cmsGen);
724 if (gch->supports_inline_contig_alloc()) {
725 _top_addr = gch->top_addr();
726 _end_addr = gch->end_addr();
727 assert(_young_gen != NULL, "no _young_gen");
728 _eden_chunk_index = 0;
729 _eden_chunk_capacity = (_young_gen->max_capacity()+CMSSamplingGrain)/CMSSamplingGrain;
730 _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity, mtGC);
731 if (_eden_chunk_array == NULL) {
732 _eden_chunk_capacity = 0;
733 warning("GC/CMS: _eden_chunk_array allocation failure");
734 }
735 }
736 assert(_eden_chunk_array != NULL || _eden_chunk_capacity == 0, "Error");
738 // Support for parallelizing survivor space rescan
739 if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) {
740 const size_t max_plab_samples =
741 ((DefNewGeneration*)_young_gen)->max_survivor_size() / plab_sample_minimum_size();
743 _survivor_plab_array = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);
744 _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, 2*max_plab_samples, mtGC);
745 _cursor = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads, mtGC);
746 if (_survivor_plab_array == NULL || _survivor_chunk_array == NULL
747 || _cursor == NULL) {
748 warning("Failed to allocate survivor plab/chunk array");
749 if (_survivor_plab_array != NULL) {
750 FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array, mtGC);
751 _survivor_plab_array = NULL;
752 }
753 if (_survivor_chunk_array != NULL) {
754 FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array, mtGC);
755 _survivor_chunk_array = NULL;
756 }
757 if (_cursor != NULL) {
758 FREE_C_HEAP_ARRAY(size_t, _cursor, mtGC);
759 _cursor = NULL;
760 }
761 } else {
762 _survivor_chunk_capacity = 2*max_plab_samples;
763 for (uint i = 0; i < ParallelGCThreads; i++) {
764 HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
765 if (vec == NULL) {
766 warning("Failed to allocate survivor plab array");
767 for (int j = i; j > 0; j--) {
768 FREE_C_HEAP_ARRAY(HeapWord*, _survivor_plab_array[j-1].array(), mtGC);
769 }
770 FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array, mtGC);
771 FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array, mtGC);
772 _survivor_plab_array = NULL;
773 _survivor_chunk_array = NULL;
774 _survivor_chunk_capacity = 0;
775 break;
776 } else {
777 ChunkArray* cur =
778 ::new (&_survivor_plab_array[i]) ChunkArray(vec,
779 max_plab_samples);
780 assert(cur->end() == 0, "Should be 0");
781 assert(cur->array() == vec, "Should be vec");
782 assert(cur->capacity() == max_plab_samples, "Error");
783 }
784 }
785 }
786 }
787 assert( ( _survivor_plab_array != NULL
788 && _survivor_chunk_array != NULL)
789 || ( _survivor_chunk_capacity == 0
790 && _survivor_chunk_index == 0),
791 "Error");
793 NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
794 _gc_counters = new CollectorCounters("CMS", 1);
795 _completed_initialization = true;
796 _inter_sweep_timer.start(); // start of time
797 }
799 size_t CMSCollector::plab_sample_minimum_size() {
800 // The default value of MinTLABSize is 2k, but there is
801 // no way to get the default value if the flag has been overridden.
802 return MAX2(ThreadLocalAllocBuffer::min_size() * HeapWordSize, 2 * K);
803 }
805 const char* ConcurrentMarkSweepGeneration::name() const {
806 return "concurrent mark-sweep generation";
807 }
808 void ConcurrentMarkSweepGeneration::update_counters() {
809 if (UsePerfData) {
810 _space_counters->update_all();
811 _gen_counters->update_all();
812 }
813 }
815 // This is an optimized version of update_counters(). It takes the
816 // used value as a parameter rather than computing it.
817 //
818 void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
819 if (UsePerfData) {
820 _space_counters->update_used(used);
821 _space_counters->update_capacity();
822 _gen_counters->update_all();
823 }
824 }
826 void ConcurrentMarkSweepGeneration::print() const {
827 Generation::print();
828 cmsSpace()->print();
829 }
831 #ifndef PRODUCT
832 void ConcurrentMarkSweepGeneration::print_statistics() {
833 cmsSpace()->printFLCensus(0);
834 }
835 #endif
837 void ConcurrentMarkSweepGeneration::printOccupancy(const char *s) {
838 GenCollectedHeap* gch = GenCollectedHeap::heap();
839 if (PrintGCDetails) {
840 if (Verbose) {
841 gclog_or_tty->print("[%d %s-%s: "SIZE_FORMAT"("SIZE_FORMAT")]",
842 level(), short_name(), s, used(), capacity());
843 } else {
844 gclog_or_tty->print("[%d %s-%s: "SIZE_FORMAT"K("SIZE_FORMAT"K)]",
845 level(), short_name(), s, used() / K, capacity() / K);
846 }
847 }
848 if (Verbose) {
849 gclog_or_tty->print(" "SIZE_FORMAT"("SIZE_FORMAT")",
850 gch->used(), gch->capacity());
851 } else {
852 gclog_or_tty->print(" "SIZE_FORMAT"K("SIZE_FORMAT"K)",
853 gch->used() / K, gch->capacity() / K);
854 }
855 }
857 size_t
858 ConcurrentMarkSweepGeneration::contiguous_available() const {
859 // dld proposes an improvement in precision here. If the committed
860 // part of the space ends in a free block we should add that to
861 // uncommitted size in the calculation below. Will make this
862 // change later, staying with the approximation below for the
863 // time being. -- ysr.
864 return MAX2(_virtual_space.uncommitted_size(), unsafe_max_alloc_nogc());
865 }
867 size_t
868 ConcurrentMarkSweepGeneration::unsafe_max_alloc_nogc() const {
869 return _cmsSpace->max_alloc_in_words() * HeapWordSize;
870 }
872 size_t ConcurrentMarkSweepGeneration::max_available() const {
873 return free() + _virtual_space.uncommitted_size();
874 }
876 bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
877 size_t available = max_available();
878 size_t av_promo = (size_t)gc_stats()->avg_promoted()->padded_average();
879 bool res = (available >= av_promo) || (available >= max_promotion_in_bytes);
880 if (Verbose && PrintGCDetails) {
881 gclog_or_tty->print_cr(
882 "CMS: promo attempt is%s safe: available("SIZE_FORMAT") %s av_promo("SIZE_FORMAT"),"
883 "max_promo("SIZE_FORMAT")",
884 res? "":" not", available, res? ">=":"<",
885 av_promo, max_promotion_in_bytes);
886 }
887 return res;
888 }
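// Editorial sketch (hypothetical numbers): available = 50M, padded average
// promotion av_promo = 30M, max_promotion_in_bytes = 60M. The attempt is
// deemed safe because available >= av_promo, even though available is below
// the worst-case max_promotion_in_bytes; the padded average is trusted as
// the realistic estimate.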
890 // At a promotion failure dump information on block layout in heap
891 // (cms old generation).
892 void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
893 if (CMSDumpAtPromotionFailure) {
894 cmsSpace()->dump_at_safepoint_with_locks(collector(), gclog_or_tty);
895 }
896 }
898 CompactibleSpace*
899 ConcurrentMarkSweepGeneration::first_compaction_space() const {
900 return _cmsSpace;
901 }
903 void ConcurrentMarkSweepGeneration::reset_after_compaction() {
904 // Clear the promotion information. These pointers can be adjusted
905 // along with all the other pointers into the heap but
906 // compaction is expected to be a rare event with
907 // a heap using cms so don't do it without seeing the need.
908 if (CollectedHeap::use_parallel_gc_threads()) {
909 for (uint i = 0; i < ParallelGCThreads; i++) {
910 _par_gc_thread_states[i]->promo.reset();
911 }
912 }
913 }
915 void ConcurrentMarkSweepGeneration::space_iterate(SpaceClosure* blk, bool usedOnly) {
916 blk->do_space(_cmsSpace);
917 }
919 void ConcurrentMarkSweepGeneration::compute_new_size() {
920 assert_locked_or_safepoint(Heap_lock);
922 // If incremental collection failed, we just want to expand
923 // to the limit.
924 if (incremental_collection_failed()) {
925 clear_incremental_collection_failed();
926 grow_to_reserved();
927 return;
928 }
930 // The heap has been compacted but not reset yet.
931 // Any metric such as free() or used() will be incorrect.
933 CardGeneration::compute_new_size();
935 // Reset again after a possible resizing
936 if (did_compact()) {
937 cmsSpace()->reset_after_compaction();
938 }
939 }
941 void ConcurrentMarkSweepGeneration::compute_new_size_free_list() {
942 assert_locked_or_safepoint(Heap_lock);
944 // If incremental collection failed, we just want to expand
945 // to the limit.
946 if (incremental_collection_failed()) {
947 clear_incremental_collection_failed();
948 grow_to_reserved();
949 return;
950 }
952 double free_percentage = ((double) free()) / capacity();
953 double desired_free_percentage = (double) MinHeapFreeRatio / 100;
954 double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;
956 // compute expansion delta needed for reaching desired free percentage
957 if (free_percentage < desired_free_percentage) {
958 size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
959 assert(desired_capacity >= capacity(), "invalid expansion size");
960 size_t expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
961 if (PrintGCDetails && Verbose) {
962 size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
963 gclog_or_tty->print_cr("\nFrom compute_new_size: ");
964 gclog_or_tty->print_cr(" Free fraction %f", free_percentage);
965 gclog_or_tty->print_cr(" Desired free fraction %f",
966 desired_free_percentage);
967 gclog_or_tty->print_cr(" Maximum free fraction %f",
968 maximum_free_percentage);
969 gclog_or_tty->print_cr(" Capacity "SIZE_FORMAT, capacity()/1000);
970 gclog_or_tty->print_cr(" Desired capacity "SIZE_FORMAT,
971 desired_capacity/1000);
972 int prev_level = level() - 1;
973 if (prev_level >= 0) {
974 size_t prev_size = 0;
975 GenCollectedHeap* gch = GenCollectedHeap::heap();
976 Generation* prev_gen = gch->_gens[prev_level];
977 prev_size = prev_gen->capacity();
978 gclog_or_tty->print_cr(" Younger gen size "SIZE_FORMAT,
979 prev_size/1000);
980 }
981 gclog_or_tty->print_cr(" unsafe_max_alloc_nogc "SIZE_FORMAT,
982 unsafe_max_alloc_nogc()/1000);
983 gclog_or_tty->print_cr(" contiguous available "SIZE_FORMAT,
984 contiguous_available()/1000);
985 gclog_or_tty->print_cr(" Expand by "SIZE_FORMAT" (bytes)",
986 expand_bytes);
987 }
988 // safe if expansion fails
989 expand(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
990 if (PrintGCDetails && Verbose) {
991 gclog_or_tty->print_cr(" Expanded free fraction %f",
992 ((double) free()) / capacity());
993 }
994 } else {
995 size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
996 assert(desired_capacity <= capacity(), "invalid expansion size");
997 size_t shrink_bytes = capacity() - desired_capacity;
998 // Don't shrink unless the delta is greater than the minimum shrink we want
999 if (shrink_bytes >= MinHeapDeltaBytes) {
1000 shrink_free_list_by(shrink_bytes);
1001 }
1002 }
1003 }
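// Editorial sketch (hypothetical numbers): used() = 600M, capacity() = 800M,
// MinHeapFreeRatio = 40. Then free_percentage = 200M / 800M = 0.25 < 0.40 and
//
//   desired_capacity = 600M / (1 - 0.40) = 1000M
//   expand_bytes     = MAX2(1000M - 800M, MinHeapDeltaBytes) = 200M
//
// Conversely, when free_percentage exceeds the desired ratio, the generation
// shrinks by capacity() - desired_capacity, but only if that delta is at
// least MinHeapDeltaBytes.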
1005 Mutex* ConcurrentMarkSweepGeneration::freelistLock() const {
1006 return cmsSpace()->freelistLock();
1007 }
1009 HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size,
1010 bool tlab) {
1011 CMSSynchronousYieldRequest yr;
1012 MutexLockerEx x(freelistLock(),
1013 Mutex::_no_safepoint_check_flag);
1014 return have_lock_and_allocate(size, tlab);
1015 }
1017 HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
1018 bool tlab /* ignored */) {
1019 assert_lock_strong(freelistLock());
1020 size_t adjustedSize = CompactibleFreeListSpace::adjustObjectSize(size);
1021 HeapWord* res = cmsSpace()->allocate(adjustedSize);
1022 // Allocate the object live (grey) if the background collector has
1023 // started marking. This is necessary because the marker may
1024 // have passed this address and consequently this object will
1025 // not otherwise be greyed and would be incorrectly swept up.
1026 // Note that if this object contains references, the writing
1027 // of those references will dirty the card containing this object
1028 // allowing the object to be blackened (and its references scanned)
1029 // either during a preclean phase or at the final checkpoint.
1030 if (res != NULL) {
1031 // We may block here with an uninitialized object with
1032 // its mark-bit or P-bits not yet set. Such objects need
1033 // to be safely navigable by block_start().
1034 assert(oop(res)->klass_or_null() == NULL, "Object should be uninitialized here.");
1035 assert(!((FreeChunk*)res)->is_free(), "Error, block will look free but show wrong size");
1036 collector()->direct_allocated(res, adjustedSize);
1037 _direct_allocated_words += adjustedSize;
1038 // allocation counters
1039 NOT_PRODUCT(
1040 _numObjectsAllocated++;
1041 _numWordsAllocated += (int)adjustedSize;
1042 )
1043 }
1044 return res;
1045 }
1047 // In the case of direct allocation by mutators in a generation that
1048 // is being concurrently collected, the object must be allocated
1049 // live (grey) if the background collector has started marking.
1050 // This is necessary because the marker may
1051 // have passed this address and consequently this object will
1052 // not otherwise be greyed and would be incorrectly swept up.
1053 // Note that if this object contains references, the writing
1054 // of those references will dirty the card containing this object
1055 // allowing the object to be blackened (and its references scanned)
1056 // either during a preclean phase or at the final checkpoint.
1057 void CMSCollector::direct_allocated(HeapWord* start, size_t size) {
1058 assert(_markBitMap.covers(start, size), "Out of bounds");
1059 if (_collectorState >= Marking) {
1060 MutexLockerEx y(_markBitMap.lock(),
1061 Mutex::_no_safepoint_check_flag);
1062 // [see comments preceding SweepClosure::do_blk() below for details]
1063 //
1064 // Can the P-bits be deleted now? JJJ
1065 //
1066 // 1. need to mark the object as live so it isn't collected
1067 // 2. need to mark the 2nd bit to indicate the object may be uninitialized
1068 // 3. need to mark the end of the object so marking, precleaning or sweeping
1069 // can skip over uninitialized or unparsable objects. An allocated
1070 // object is considered uninitialized for our purposes as long as
1071 // its klass word is NULL. All old gen objects are parsable
1072 // as soon as they are initialized.
1073 _markBitMap.mark(start); // object is live
1074 _markBitMap.mark(start + 1); // object is potentially uninitialized?
1075 _markBitMap.mark(start + size - 1);
1076 // mark end of object
1077 }
1078 // check that oop looks uninitialized
1079 assert(oop(start)->klass_or_null() == NULL, "_klass should be NULL");
1080 }
1082 void CMSCollector::promoted(bool par, HeapWord* start,
1083 bool is_obj_array, size_t obj_size) {
1084 assert(_markBitMap.covers(start), "Out of bounds");
1085 // See comment in direct_allocated() about when objects should
1086 // be allocated live.
1087 if (_collectorState >= Marking) {
1088 // we already hold the marking bit map lock, taken in
1089 // the prologue
1090 if (par) {
1091 _markBitMap.par_mark(start);
1092 } else {
1093 _markBitMap.mark(start);
1094 }
1095 // We don't need to mark the object as uninitialized (as
1096 // in direct_allocated above) because this is being done with the
1097 // world stopped and the object will be initialized by the
1098 // time the marking, precleaning or sweeping get to look at it.
1099 // But see the code for copying objects into the CMS generation,
1100 // where we need to ensure that concurrent readers of the
1101 // block offset table are able to safely navigate a block that
1102 // is in flux from being free to being allocated (and in
1103 // transition while being copied into) and subsequently
1104 // becoming a bona-fide object when the copy/promotion is complete.
1105 assert(SafepointSynchronize::is_at_safepoint(),
1106 "expect promotion only at safepoints");
1108 if (_collectorState < Sweeping) {
1109 // Mark the appropriate cards in the modUnionTable, so that
1110 // this object gets scanned before the sweep. If this is
1111 // not done, CMS generation references in the object might
1112 // not get marked.
1113 // For the case of arrays, which are otherwise precisely
1114 // marked, we need to dirty the entire array, not just its head.
1115 if (is_obj_array) {
1116 // The [par_]mark_range() method expects mr.end() below to
1117 // be aligned to the granularity of a bit's representation
1118 // in the heap. In the case of the MUT below, that's a
1119 // card size.
1120 MemRegion mr(start,
1121 (HeapWord*)round_to((intptr_t)(start + obj_size),
1122 CardTableModRefBS::card_size /* bytes */));
1123 if (par) {
1124 _modUnionTable.par_mark_range(mr);
1125 } else {
1126 _modUnionTable.mark_range(mr);
1127 }
1128 } else { // not an obj array; we can just mark the head
1129 if (par) {
1130 _modUnionTable.par_mark(start);
1131 } else {
1132 _modUnionTable.mark(start);
1133 }
1134 }
1135 }
1136 }
1137 }
1139 static inline size_t percent_of_space(Space* space, HeapWord* addr)
1140 {
1141 size_t delta = pointer_delta(addr, space->bottom());
1142 return (size_t)(delta * 100.0 / (space->capacity() / HeapWordSize));
1143 }
1145 void CMSCollector::icms_update_allocation_limits()
1146 {
1147 Generation* gen0 = GenCollectedHeap::heap()->get_gen(0);
1148 EdenSpace* eden = gen0->as_DefNewGeneration()->eden();
1150 const unsigned int duty_cycle = stats().icms_update_duty_cycle();
1151 if (CMSTraceIncrementalPacing) {
1152 stats().print();
1153 }
1155 assert(duty_cycle <= 100, "invalid duty cycle");
1156 if (duty_cycle != 0) {
1157 // The duty_cycle is a percentage between 0 and 100; convert to words and
1158 // then compute the offset from the endpoints of the space.
1159 size_t free_words = eden->free() / HeapWordSize;
1160 double free_words_dbl = (double)free_words;
1161 size_t duty_cycle_words = (size_t)(free_words_dbl * duty_cycle / 100.0);
1162 size_t offset_words = (free_words - duty_cycle_words) / 2;
1164 _icms_start_limit = eden->top() + offset_words;
1165 _icms_stop_limit = eden->end() - offset_words;
1167 // The limits may be adjusted (shifted to the right) by
1168 // CMSIncrementalOffset, to allow the application more mutator time after a
1169 // young gen gc (when all mutators were stopped) and before CMS starts and
1170 // takes away one or more cpus.
1171 if (CMSIncrementalOffset != 0) {
1172 double adjustment_dbl = free_words_dbl * CMSIncrementalOffset / 100.0;
1173 size_t adjustment = (size_t)adjustment_dbl;
1174 HeapWord* tmp_stop = _icms_stop_limit + adjustment;
1175 if (tmp_stop > _icms_stop_limit && tmp_stop < eden->end()) {
1176 _icms_start_limit += adjustment;
1177 _icms_stop_limit = tmp_stop;
1178 }
1179 }
1180 }
1181 if (duty_cycle == 0 || (_icms_start_limit == _icms_stop_limit)) {
1182 _icms_start_limit = _icms_stop_limit = eden->end();
1183 }
1185 // Install the new start limit.
1186 eden->set_soft_end(_icms_start_limit);
1188 if (CMSTraceIncrementalMode) {
1189 gclog_or_tty->print(" icms alloc limits: "
1190 PTR_FORMAT "," PTR_FORMAT
1191 " (" SIZE_FORMAT "%%," SIZE_FORMAT "%%) ",
1192 p2i(_icms_start_limit), p2i(_icms_stop_limit),
1193 percent_of_space(eden, _icms_start_limit),
1194 percent_of_space(eden, _icms_stop_limit));
1195 if (Verbose) {
1196 gclog_or_tty->print("eden: ");
1197 eden->print_on(gclog_or_tty);
1198 }
1199 }
1200 }
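// Editorial sketch (hypothetical numbers): with 1000 free words in eden and a
// duty cycle of 25, duty_cycle_words = 250 and offset_words = (1000 - 250)/2
// = 375, so the limits bracket the middle quarter of the free space:
//
//   _icms_start_limit = eden->top() + 375
//   _icms_stop_limit  = eden->end() - 375
//
// CMS is started when allocation crosses the start limit and stopped when it
// crosses the stop limit (see allocation_limit_reached() below).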
1202 // Any changes here should try to maintain the invariant
1203 // that if this method is called with _icms_start_limit
1204 // and _icms_stop_limit both NULL, then it should return NULL
1205 // and not notify the icms thread.
1206 HeapWord*
1207 CMSCollector::allocation_limit_reached(Space* space, HeapWord* top,
1208 size_t word_size)
1209 {
1210 // A start_limit equal to end() means the duty cycle is 0, so treat that as a
1211 // nop.
1212 if (CMSIncrementalMode && _icms_start_limit != space->end()) {
1213 if (top <= _icms_start_limit) {
1214 if (CMSTraceIncrementalMode) {
1215 space->print_on(gclog_or_tty);
1216 gclog_or_tty->stamp();
1217 gclog_or_tty->print_cr(" start limit top=" PTR_FORMAT
1218 ", new limit=" PTR_FORMAT
1219 " (" SIZE_FORMAT "%%)",
1220 p2i(top), p2i(_icms_stop_limit),
1221 percent_of_space(space, _icms_stop_limit));
1222 }
1223 ConcurrentMarkSweepThread::start_icms();
1224 assert(top < _icms_stop_limit, "Tautology");
1225 if (word_size < pointer_delta(_icms_stop_limit, top)) {
1226 return _icms_stop_limit;
1227 }
1229 // The allocation will cross both the _start and _stop limits, so do the
1230 // stop notification also and return end().
1231 if (CMSTraceIncrementalMode) {
1232 space->print_on(gclog_or_tty);
1233 gclog_or_tty->stamp();
1234 gclog_or_tty->print_cr(" +stop limit top=" PTR_FORMAT
1235 ", new limit=" PTR_FORMAT
1236 " (" SIZE_FORMAT "%%)",
1237 p2i(top), p2i(space->end()),
1238 percent_of_space(space, space->end()));
1239 }
1240 ConcurrentMarkSweepThread::stop_icms();
1241 return space->end();
1242 }
1244 if (top <= _icms_stop_limit) {
1245 if (CMSTraceIncrementalMode) {
1246 space->print_on(gclog_or_tty);
1247 gclog_or_tty->stamp();
1248 gclog_or_tty->print_cr(" stop limit top=" PTR_FORMAT
1249 ", new limit=" PTR_FORMAT
1250 " (" SIZE_FORMAT "%%)",
1251 p2i(top), p2i(space->end()),
1252 percent_of_space(space, space->end()));
1253 }
1254 ConcurrentMarkSweepThread::stop_icms();
1255 return space->end();
1256 }
1258 if (CMSTraceIncrementalMode) {
1259 space->print_on(gclog_or_tty);
1260 gclog_or_tty->stamp();
1261 gclog_or_tty->print_cr(" end limit top=" PTR_FORMAT
1262 ", new limit=" PTR_FORMAT,
1263 p2i(top), p2i(NULL));
1264 }
1265 }
1267 return NULL;
1268 }
1270 oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
1271 assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1272 // allocate, copy and if necessary update promoinfo --
1273 // delegate to underlying space.
1274 assert_lock_strong(freelistLock());
1276 #ifndef PRODUCT
1277 if (Universe::heap()->promotion_should_fail()) {
1278 return NULL;
1279 }
1280 #endif // #ifndef PRODUCT
1282 oop res = _cmsSpace->promote(obj, obj_size);
1283 if (res == NULL) {
1284 // expand and retry
1285 size_t s = _cmsSpace->expansionSpaceRequired(obj_size); // HeapWords
1286 expand(s*HeapWordSize, MinHeapDeltaBytes,
1287 CMSExpansionCause::_satisfy_promotion);
1288 // Since there's currently no next generation, we don't try to promote
1289 // into a more senior generation.
1290 assert(next_gen() == NULL, "assumption, based upon which no attempt "
1291 "is made to pass on a possibly failing "
1292 "promotion to next generation");
1293 res = _cmsSpace->promote(obj, obj_size);
1294 }
1295 if (res != NULL) {
1296 // See comment in allocate() about when objects should
1297 // be allocated live.
1298 assert(obj->is_oop(), "Will dereference klass pointer below");
1299 collector()->promoted(false, // Not parallel
1300 (HeapWord*)res, obj->is_objArray(), obj_size);
1301 // promotion counters
1302 NOT_PRODUCT(
1303 _numObjectsPromoted++;
1304 _numWordsPromoted +=
1305 (int)(CompactibleFreeListSpace::adjustObjectSize(obj->size()));
1306 )
1307 }
1308 return res;
1309 }
1312 HeapWord*
1313 ConcurrentMarkSweepGeneration::allocation_limit_reached(Space* space,
1314 HeapWord* top,
1315 size_t word_sz)
1316 {
1317 return collector()->allocation_limit_reached(space, top, word_sz);
1318 }
1320 // IMPORTANT: Notes on object size recognition in CMS.
1321 // ---------------------------------------------------
1322 // A block of storage in the CMS generation is always in
1323 // one of three states. A free block (FREE), an allocated
1324 // object (OBJECT) whose size() method reports the correct size,
1325 // and an intermediate state (TRANSIENT) in which its size cannot
1326 // be accurately determined.
1327 // STATE IDENTIFICATION: (32 bit and 64 bit w/o COOPS)
1328 // -----------------------------------------------------
1329 // FREE: klass_word & 1 == 1; mark_word holds block size
1330 //
1331 // OBJECT: klass_word installed; klass_word != 0 && klass_word & 1 == 0;
1332 // obj->size() computes correct size
1333 //
1334 // TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
1335 //
1336 // STATE IDENTIFICATION: (64 bit+COOPS)
1337 // ------------------------------------
1338 // FREE: mark_word & CMS_FREE_BIT == 1; mark_word & ~CMS_FREE_BIT gives block_size
1339 //
1340 // OBJECT: klass_word installed; klass_word != 0;
1341 // obj->size() computes correct size
1342 //
1343 // TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
1344 //
1345 //
1346 // STATE TRANSITION DIAGRAM
1347 //
1348 // mut / parnew mut / parnew
1349 // FREE --------------------> TRANSIENT ---------------------> OBJECT --|
1350 // ^ |
1351 // |------------------------ DEAD <------------------------------------|
1352 // sweep mut
1353 //
1354 // While a block is in TRANSIENT state its size cannot be determined
1355 // so readers will either need to come back later or stall until
1356 // the size can be determined. Note that for the case of direct
1357 // allocation, P-bits, when available, may be used to determine the
1358 // size of an object that may not yet have been initialized.
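// Editorial sketch: how a parsing reader could classify a block under the
// tables above (illustrative only; the real logic lives in
// CompactibleFreeListSpace::block_size()/block_is_obj()):
//
//   if (FreeChunk::indicatesFreeChunk(p)) {
//     size = ((FreeChunk*)p)->size();       // FREE: size is in the mark word
//   } else if (oop(p)->klass_or_null() != NULL) {
//     size = oop(p)->size();                // OBJECT: size() is now reliable
//   } else {
//     // TRANSIENT: come back later, or consult P-bits for direct allocations
//   }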
1360 // Things to support parallel young-gen collection.
1361 oop
1362 ConcurrentMarkSweepGeneration::par_promote(int thread_num,
1363 oop old, markOop m,
1364 size_t word_sz) {
1365 #ifndef PRODUCT
1366 if (Universe::heap()->promotion_should_fail()) {
1367 return NULL;
1368 }
1369 #endif // #ifndef PRODUCT
1371 CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1372 PromotionInfo* promoInfo = &ps->promo;
1373 // if we are tracking promotions, then first ensure space for
1374 // promotion (including spooling space for saving header if necessary).
1375 // then allocate and copy, then track promoted info if needed.
1376 // When tracking (see PromotionInfo::track()), the mark word may
1377 // be displaced and in this case restoration of the mark word
1378 // occurs in the (oop_since_save_marks_)iterate phase.
1379 if (promoInfo->tracking() && !promoInfo->ensure_spooling_space()) {
1380 // Out of space for allocating spooling buffers;
1381 // try expanding and allocating spooling buffers.
1382 if (!expand_and_ensure_spooling_space(promoInfo)) {
1383 return NULL;
1384 }
1385 }
1386 assert(promoInfo->has_spooling_space(), "Control point invariant");
1387 const size_t alloc_sz = CompactibleFreeListSpace::adjustObjectSize(word_sz);
1388 HeapWord* obj_ptr = ps->lab.alloc(alloc_sz);
1389 if (obj_ptr == NULL) {
1390 obj_ptr = expand_and_par_lab_allocate(ps, alloc_sz);
1391 if (obj_ptr == NULL) {
1392 return NULL;
1393 }
1394 }
1395 oop obj = oop(obj_ptr);
1396 OrderAccess::storestore();
1397 assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1398 assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1399 // IMPORTANT: See note on object initialization for CMS above.
1400 // Otherwise, copy the object. Here we must be careful to insert the
1401 // klass pointer last, since this marks the block as an allocated object.
1402 // Except with compressed oops it's the mark word.
1403 HeapWord* old_ptr = (HeapWord*)old;
1404 // Restore the mark word copied above.
1405 obj->set_mark(m);
1406 assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1407 assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1408 OrderAccess::storestore();
1410 if (UseCompressedClassPointers) {
1411 // Copy gap missed by (aligned) header size calculation below
1412 obj->set_klass_gap(old->klass_gap());
1413 }
1414 if (word_sz > (size_t)oopDesc::header_size()) {
1415 Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(),
1416 obj_ptr + oopDesc::header_size(),
1417 word_sz - oopDesc::header_size());
1418 }
1420 // Now we can track the promoted object, if necessary. We take care
1421 // to delay the transition from uninitialized to full object
1422 // (i.e., insertion of klass pointer) until after, so that it
1423 // atomically becomes a promoted object.
1424 if (promoInfo->tracking()) {
1425 promoInfo->track((PromotedObject*)obj, old->klass());
1426 }
1427 assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1428 assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1429 assert(old->is_oop(), "Will use and dereference old klass ptr below");
1431 // Finally, install the klass pointer (this should be volatile).
1432 OrderAccess::storestore();
1433 obj->set_klass(old->klass());
1434 // We should now be able to calculate the right size for this object
1435 assert(obj->is_oop() && obj->size() == (int)word_sz, "Error, incorrect size computed for promoted object");
1437 collector()->promoted(true, // parallel
1438 obj_ptr, old->is_objArray(), word_sz);
1440 NOT_PRODUCT(
1441 Atomic::inc_ptr(&_numObjectsPromoted);
1442 Atomic::add_ptr(alloc_sz, &_numWordsPromoted);
1443 )
1445 return obj;
1446 }
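// A condensed, illustrative sketch (not product code) of the publication
// protocol used by par_promote() above. The essential invariant is that
// the klass pointer is installed last, after a store-store fence, so a
// concurrent reader that observes a non-NULL klass is guaranteed to see
// a fully initialized object:
//
//   HeapWord* p = ps->lab.alloc(alloc_sz);  // uninitialized block
//   oop q = oop(p);
//   OrderAccess::storestore();
//   q->set_mark(m);                         // restore the saved mark word
//   ... copy body, track promotion ...
//   OrderAccess::storestore();              // order body/mark stores first
//   q->set_klass(old->klass());             // publish: block is now an object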
1448 void
1449 ConcurrentMarkSweepGeneration::
1450 par_promote_alloc_undo(int thread_num,
1451 HeapWord* obj, size_t word_sz) {
1452 // CMS does not support promotion undo.
1453 ShouldNotReachHere();
1454 }
1456 void
1457 ConcurrentMarkSweepGeneration::
1458 par_promote_alloc_done(int thread_num) {
1459 CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1460 ps->lab.retire(thread_num);
1461 }
1463 void
1464 ConcurrentMarkSweepGeneration::
1465 par_oop_since_save_marks_iterate_done(int thread_num) {
1466 CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1467 ParScanWithoutBarrierClosure* dummy_cl = NULL;
1468 ps->promo.promoted_oops_iterate_nv(dummy_cl);
1469 }
1471 bool ConcurrentMarkSweepGeneration::should_collect(bool full,
1472 size_t size,
1473 bool tlab)
1474 {
1475 // We allow an STW collection only if a full
1476 // collection was requested.
1477 return full || should_allocate(size, tlab); // FIX ME !!!
1478 // This and promotion failure handling are connected at the
1479 // hip and should be fixed by untying them.
1480 }
1482 bool CMSCollector::shouldConcurrentCollect() {
1483 if (_full_gc_requested) {
1484 if (Verbose && PrintGCDetails) {
1485 gclog_or_tty->print_cr("CMSCollector: collect because of explicit "
1486 " gc request (or gc_locker)");
1487 }
1488 return true;
1489 }
1491 // For debugging purposes, change the type of collection.
1492 // If the rotation is not on the concurrent collection
1493 // type, don't start a concurrent collection.
1494 NOT_PRODUCT(
1495 if (RotateCMSCollectionTypes &&
1496 (_cmsGen->debug_collection_type() !=
1497 ConcurrentMarkSweepGeneration::Concurrent_collection_type)) {
1498 assert(_cmsGen->debug_collection_type() !=
1499 ConcurrentMarkSweepGeneration::Unknown_collection_type,
1500 "Bad cms collection type");
1501 return false;
1502 }
1503 )
1505 FreelistLocker x(this);
1506 // ------------------------------------------------------------------
1507 // Print out lots of information which affects the initiation of
1508 // a collection.
1509 if (PrintCMSInitiationStatistics && stats().valid()) {
1510 gclog_or_tty->print("CMSCollector shouldConcurrentCollect: ");
1511 gclog_or_tty->stamp();
1512 gclog_or_tty->cr();
1513 stats().print_on(gclog_or_tty);
1514 gclog_or_tty->print_cr("time_until_cms_gen_full %3.7f",
1515 stats().time_until_cms_gen_full());
1516 gclog_or_tty->print_cr("free="SIZE_FORMAT, _cmsGen->free());
1517 gclog_or_tty->print_cr("contiguous_available="SIZE_FORMAT,
1518 _cmsGen->contiguous_available());
1519 gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate());
1520 gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate());
1521 gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy());
1522 gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
1523 gclog_or_tty->print_cr("cms_time_since_begin=%3.7f", stats().cms_time_since_begin());
1524 gclog_or_tty->print_cr("cms_time_since_end=%3.7f", stats().cms_time_since_end());
1525 gclog_or_tty->print_cr("metadata initialized %d",
1526 MetaspaceGC::should_concurrent_collect());
1527 }
1528 // ------------------------------------------------------------------
1530 // If the estimated time to complete a cms collection (cms_duration())
1531 // is less than the estimated time remaining until the cms generation
1532 // is full, start a collection.
1533 if (!UseCMSInitiatingOccupancyOnly) {
1534 if (stats().valid()) {
1535 if (stats().time_until_cms_start() == 0.0) {
1536 return true;
1537 }
1538 } else {
1539 // We want to conservatively collect somewhat early in order
1540 // to try to "bootstrap" our CMS/promotion statistics;
1541 // this branch will not fire after the first successful CMS
1542 // collection because the stats should then be valid.
1543 if (_cmsGen->occupancy() >= _bootstrap_occupancy) {
1544 if (Verbose && PrintGCDetails) {
1545 gclog_or_tty->print_cr(
1546 " CMSCollector: collect for bootstrapping statistics:"
1547 " occupancy = %f, boot occupancy = %f", _cmsGen->occupancy(),
1548 _bootstrap_occupancy);
1549 }
1550 return true;
1551 }
1552 }
1553 }
1555 // Otherwise, we start a collection cycle if the
1556 // old gen wants a collection cycle started, using
1557 // its own criterion for making this decision.
1558 // XXX We need to make sure that the gen expansion
1559 // criterion dovetails well with this. XXX NEED TO FIX THIS
1560 if (_cmsGen->should_concurrent_collect()) {
1561 if (Verbose && PrintGCDetails) {
1562 gclog_or_tty->print_cr("CMS old gen initiated");
1563 }
1564 return true;
1565 }
1567 // We start a collection if we believe an incremental collection may fail;
1568 // this is not likely to be productive in practice because it's probably too
1569 // late anyway.
1570 GenCollectedHeap* gch = GenCollectedHeap::heap();
1571 assert(gch->collector_policy()->is_two_generation_policy(),
1572 "You may want to check the correctness of the following");
1573 if (gch->incremental_collection_will_fail(true /* consult_young */)) {
1574 if (Verbose && PrintGCDetails) {
1575 gclog_or_tty->print("CMSCollector: collect because incremental collection will fail ");
1576 }
1577 return true;
1578 }
1580 if (MetaspaceGC::should_concurrent_collect()) {
1581 if (Verbose && PrintGCDetails) {
1582 gclog_or_tty->print("CMSCollector: collect for metadata allocation ");
1583 }
1584 return true;
1585 }
1587 // CMSTriggerInterval starts a CMS cycle if enough time has passed.
1588 if (CMSTriggerInterval >= 0) {
1589 if (CMSTriggerInterval == 0) {
1590 // Trigger always
1591 return true;
1592 }
1594 // Check the CMS time since begin (we do not check the stats validity
1595 // as we want to be able to trigger the first CMS cycle as well)
1596 if (stats().cms_time_since_begin() >= (CMSTriggerInterval / ((double) MILLIUNITS))) {
1597 if (Verbose && PrintGCDetails) {
1598 if (stats().valid()) {
1599 gclog_or_tty->print_cr("CMSCollector: collect because of trigger interval (time since last begin %3.7f secs)",
1600 stats().cms_time_since_begin());
1601 } else {
1602 gclog_or_tty->print_cr("CMSCollector: collect because of trigger interval (first collection)");
1603 }
1604 }
1605 return true;
1606 }
1607 }
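// For example (illustrative arithmetic only): with CMSTriggerInterval set
// to 600000 (milliseconds) and MILLIUNITS == 1000, the test above fires
// once cms_time_since_begin() reaches 600000 / 1000.0 = 600.0 seconds.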
1609 return false;
1610 }
1612 void CMSCollector::set_did_compact(bool v) { _cmsGen->set_did_compact(v); }
1614 // Clear _expansion_cause fields of constituent generations
1615 void CMSCollector::clear_expansion_cause() {
1616 _cmsGen->clear_expansion_cause();
1617 }
1619 // We should be conservative in starting a collection cycle. Starting
1620 // too eagerly runs the risk of collecting too often in the
1621 // extreme. Collecting too rarely falls back on full collections,
1622 // which works, even if not optimal in terms of concurrent work.
1623 // As a workaround for collecting too eagerly, use the flag
1624 // UseCMSInitiatingOccupancyOnly. This also has the advantage of
1625 // giving the user an easily understandable way of controlling the
1626 // collections.
1627 // We want to start a new collection cycle if any of the following
1628 // conditions hold:
1629 // . our current occupancy exceeds the configured initiating occupancy
1630 // for this generation, or
1631 // . we recently needed to expand this space and have not, since that
1632 // expansion, done a collection of this generation, or
1633 // . the underlying space believes that it may be a good idea to initiate
1634 // a concurrent collection (this may be based on criteria such as the
1635 // following: the space uses linear allocation and linear allocation is
1636 // going to fail, or there is believed to be excessive fragmentation in
1637 // the generation, etc... or ...
1638 // [.(currently done by CMSCollector::shouldConcurrentCollect() only for
1639 // the case of the old generation; see CR 6543076):
1640 // we may be approaching a point at which allocation requests may fail because
1641 // we will be out of sufficient free space given allocation rate estimates.]
1642 bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const {
1644 assert_lock_strong(freelistLock());
1645 if (occupancy() > initiating_occupancy()) {
1646 if (PrintGCDetails && Verbose) {
1647 gclog_or_tty->print(" %s: collect because of occupancy %f / %f ",
1648 short_name(), occupancy(), initiating_occupancy());
1649 }
1650 return true;
1651 }
1652 if (UseCMSInitiatingOccupancyOnly) {
1653 return false;
1654 }
1655 if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
1656 if (PrintGCDetails && Verbose) {
1657 gclog_or_tty->print(" %s: collect because expanded for allocation ",
1658 short_name());
1659 }
1660 return true;
1661 }
1662 if (_cmsSpace->should_concurrent_collect()) {
1663 if (PrintGCDetails && Verbose) {
1664 gclog_or_tty->print(" %s: collect because cmsSpace says so ",
1665 short_name());
1666 }
1667 return true;
1668 }
1669 return false;
1670 }
1672 void ConcurrentMarkSweepGeneration::collect(bool full,
1673 bool clear_all_soft_refs,
1674 size_t size,
1675 bool tlab)
1676 {
1677 collector()->collect(full, clear_all_soft_refs, size, tlab);
1678 }
1680 void CMSCollector::collect(bool full,
1681 bool clear_all_soft_refs,
1682 size_t size,
1683 bool tlab)
1684 {
1685 if (!UseCMSCollectionPassing && _collectorState > Idling) {
1686 // For debugging purposes skip the collection if the state
1687 // is not currently idle
1688 if (TraceCMSState) {
1689 gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " skipped full:%d CMS state %d",
1690 Thread::current(), full, _collectorState);
1691 }
1692 return;
1693 }
1695 // The following "if" branch is present for defensive reasons.
1696 // In the current uses of this interface, it can be replaced with:
1697 // assert(!GC_locker::is_active(), "Can't be called otherwise");
1698 // But I am not placing that assert here to allow future
1699 // generality in invoking this interface.
1700 if (GC_locker::is_active()) {
1701 // A consistency test for GC_locker
1702 assert(GC_locker::needs_gc(), "Should have been set already");
1703 // Skip this foreground collection, instead
1704 // expanding the heap if necessary.
1705 // Need the free list locks for the call to free() in compute_new_size()
1706 compute_new_size();
1707 return;
1708 }
1709 acquire_control_and_collect(full, clear_all_soft_refs);
1710 _full_gcs_since_conc_gc++;
1711 }
1713 void CMSCollector::request_full_gc(unsigned int full_gc_count, GCCause::Cause cause) {
1714 GenCollectedHeap* gch = GenCollectedHeap::heap();
1715 unsigned int gc_count = gch->total_full_collections();
1716 if (gc_count == full_gc_count) {
1717 MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
1718 _full_gc_requested = true;
1719 _full_gc_cause = cause;
1720 CGC_lock->notify(); // nudge CMS thread
1721 } else {
1722 assert(gc_count > full_gc_count, "Error: causal loop");
1723 }
1724 }
1726 bool CMSCollector::is_external_interruption() {
1727 GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause();
1728 return GCCause::is_user_requested_gc(cause) ||
1729 GCCause::is_serviceability_requested_gc(cause);
1730 }
1732 void CMSCollector::report_concurrent_mode_interruption() {
1733 if (is_external_interruption()) {
1734 if (PrintGCDetails) {
1735 gclog_or_tty->print(" (concurrent mode interrupted)");
1736 }
1737 } else {
1738 if (PrintGCDetails) {
1739 gclog_or_tty->print(" (concurrent mode failure)");
1740 }
1741 _gc_tracer_cm->report_concurrent_mode_failure();
1742 }
1743 }
1746 // The foreground and background collectors need to coordinate in order
1747 // to make sure that they do not mutually interfere with CMS collections.
1748 // When a background collection is active,
1749 // the foreground collector may need to take over (preempt) and
1750 // synchronously complete an ongoing collection. Depending on the
1751 // frequency of the background collections and the heap usage
1752 // of the application, this preemption can be rare or frequent.
1753 // There are only certain
1754 // points in the background collection at which the "collection-baton"
1755 // can be passed to the foreground collector.
1756 //
1757 // The foreground collector will wait for the baton before
1758 // starting any part of the collection. The foreground collector
1759 // will only wait at one location.
1760 //
1761 // The background collector will yield the baton before starting a new
1762 // phase of the collection (e.g., before initial marking, marking from roots,
1763 // precleaning, final re-mark, sweep, etc.). This is normally done at the head
1764 // of the loop which switches the phases. The background collector does some
1765 // of the phases (initial mark, final re-mark) with the world stopped.
1766 // Because of locking involved in stopping the world,
1767 // the foreground collector should not block waiting for the background
1768 // collector when it is doing a stop-the-world phase. The background
1769 // collector will yield the baton at an additional point just before
1770 // it enters a stop-the-world phase. Once the world is stopped, the
1771 // background collector checks the phase of the collection. If the
1772 // phase has not changed, it proceeds with the collection. If the
1773 // phase has changed, it skips that phase of the collection. See
1774 // the comments on the use of the Heap_lock in collect_in_background().
1775 //
1776 // Variables used in baton passing.
1777 // _foregroundGCIsActive - Set to true by the foreground collector when
1778 // it wants the baton. The foreground clears it when it has finished
1779 // the collection.
1780 // _foregroundGCShouldWait - Set to true by the background collector
1781 // when it is running. The foreground collector waits while
1782 // _foregroundGCShouldWait is true.
1783 // CGC_lock - monitor used to protect access to the above variables
1784 // and to notify the foreground and background collectors.
1785 // _collectorState - current state of the CMS collection.
1786 //
1787 // The foreground collector
1788 // acquires the CGC_lock
1789 // sets _foregroundGCIsActive
1790 // waits on the CGC_lock for _foregroundGCShouldWait to be false
1791 // various locks acquired in preparation for the collection
1792 // are released so as not to block the background collector
1793 // that is in the midst of a collection
1794 // proceeds with the collection
1795 // clears _foregroundGCIsActive
1796 // returns
1797 //
1798 // The background collector in a loop iterating on the phases of the
1799 // collection
1800 // acquires the CGC_lock
1801 // sets _foregroundGCShouldWait
1802 // if _foregroundGCIsActive is set
1803 // clears _foregroundGCShouldWait, notifies CGC_lock
1804 // waits on CGC_lock for _foregroundGCIsActive to become false
1805 // and exits the loop.
1806 // otherwise
1807 // proceed with that phase of the collection
1808 // if the phase is a stop-the-world phase,
1809 // yield the baton once more just before enqueueing
1810 // the stop-world CMS operation (executed by the VM thread).
1811 // returns after all phases of the collection are done
1812 //
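// A minimal sketch of the handshake, in terms of the variables above. This
// is a distillation of acquire_control_and_collect() and
// waitForForegroundGC() below, not additional product code:
//
//   // Foreground (VM thread, at a safepoint):
//   _foregroundGCIsActive = true;
//   { MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
//     CGC_lock->notify();                    // wake a blocked CMS thread
//     while (_foregroundGCShouldWait) {
//       CGC_lock->wait(Mutex::_no_safepoint_check_flag);
//     }
//   }
//   ... proceed with the collection ...
//   _foregroundGCIsActive = false;
//
//   // Background (CMS thread), at each yield point:
//   { MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
//     _foregroundGCShouldWait = true;
//     if (_foregroundGCIsActive) {           // yield the baton
//       _foregroundGCShouldWait = false;
//       CGC_lock->notify();
//       while (_foregroundGCIsActive) {
//         CGC_lock->wait(Mutex::_no_safepoint_check_flag);
//       }
//     }
//   }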
1814 void CMSCollector::acquire_control_and_collect(bool full,
1815 bool clear_all_soft_refs) {
1816 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
1817 assert(!Thread::current()->is_ConcurrentGC_thread(),
1818 "shouldn't try to acquire control from self!");
1820 // Start the protocol for acquiring control of the
1821 // collection from the background collector (aka CMS thread).
1822 assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1823 "VM thread should have CMS token");
1824 // Remember the possibly interrupted state of an ongoing
1825 // concurrent collection
1826 CollectorState first_state = _collectorState;
1828 // Signal to a possibly ongoing concurrent collection that
1829 // we want to do a foreground collection.
1830 _foregroundGCIsActive = true;
1832 // Disable incremental mode during a foreground collection.
1833 ICMSDisabler icms_disabler;
1835 // Release locks and wait for a notify from the background collector;
1836 // releasing the locks is only necessary for phases which
1837 // yield, to improve the granularity of the collection.
1838 assert_lock_strong(bitMapLock());
1839 // We need to lock the Free list lock for the space that we are
1840 // currently collecting.
1841 assert(haveFreelistLocks(), "Must be holding free list locks");
1842 bitMapLock()->unlock();
1843 releaseFreelistLocks();
1844 {
1845 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1846 if (_foregroundGCShouldWait) {
1847 // We are going to be waiting for action from the CMS thread;
1848 // it had better not be gone (for instance at shutdown)!
1849 assert(ConcurrentMarkSweepThread::cmst() != NULL,
1850 "CMS thread must be running");
1851 // Wait here until the background collector gives us the go-ahead
1852 ConcurrentMarkSweepThread::clear_CMS_flag(
1853 ConcurrentMarkSweepThread::CMS_vm_has_token); // release token
1854 // Get a possibly blocked CMS thread going:
1855 // Note that we set _foregroundGCIsActive true above,
1856 // without protection of the CGC_lock.
1857 CGC_lock->notify();
1858 assert(!ConcurrentMarkSweepThread::vm_thread_wants_cms_token(),
1859 "Possible deadlock");
1860 while (_foregroundGCShouldWait) {
1861 // wait for notification
1862 CGC_lock->wait(Mutex::_no_safepoint_check_flag);
1863 // Possibility of delay/starvation here, since the CMS token does
1864 // not know to give priority to the VM thread? Actually, I think
1865 // there wouldn't be any delay/starvation, but the proof of
1866 // that "fact" (?) appears non-trivial. XXX 20011219YSR
1867 }
1868 ConcurrentMarkSweepThread::set_CMS_flag(
1869 ConcurrentMarkSweepThread::CMS_vm_has_token);
1870 }
1871 }
1872 // The CMS_token is already held. Get back the other locks.
1873 assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1874 "VM thread should have CMS token");
1875 getFreelistLocks();
1876 bitMapLock()->lock_without_safepoint_check();
1877 if (TraceCMSState) {
1878 gclog_or_tty->print_cr("CMS foreground collector has asked for control "
1879 INTPTR_FORMAT " with first state %d", Thread::current(), first_state);
1880 gclog_or_tty->print_cr(" gets control with state %d", _collectorState);
1881 }
1883 // Check if we need to do a compaction, or if not, whether
1884 // we need to start the mark-sweep from scratch.
1885 bool should_compact = false;
1886 bool should_start_over = false;
1887 decide_foreground_collection_type(clear_all_soft_refs,
1888 &should_compact, &should_start_over);
1890 NOT_PRODUCT(
1891 if (RotateCMSCollectionTypes) {
1892 if (_cmsGen->debug_collection_type() ==
1893 ConcurrentMarkSweepGeneration::MSC_foreground_collection_type) {
1894 should_compact = true;
1895 } else if (_cmsGen->debug_collection_type() ==
1896 ConcurrentMarkSweepGeneration::MS_foreground_collection_type) {
1897 should_compact = false;
1898 }
1899 }
1900 )
1902 if (first_state > Idling) {
1903 report_concurrent_mode_interruption();
1904 }
1906 set_did_compact(should_compact);
1907 if (should_compact) {
1908 // If the collection is being acquired from the background
1909 // collector, there may be references on the discovered
1910 // references lists that have NULL referents (being those
1911 // that were concurrently cleared by a mutator) or
1912 // that are no longer active (having been enqueued concurrently
1913 // by the mutator).
1914 // Scrub the list of those references because Mark-Sweep-Compact
1915 // code assumes referents are not NULL and that all discovered
1916 // Reference objects are active.
1917 ref_processor()->clean_up_discovered_references();
1919 if (first_state > Idling) {
1920 save_heap_summary();
1921 }
1923 do_compaction_work(clear_all_soft_refs);
1925 // Has the GC time limit been exceeded?
1926 DefNewGeneration* young_gen = _young_gen->as_DefNewGeneration();
1927 size_t max_eden_size = young_gen->max_capacity() -
1928 young_gen->to()->capacity() -
1929 young_gen->from()->capacity();
1930 GenCollectedHeap* gch = GenCollectedHeap::heap();
1931 GCCause::Cause gc_cause = gch->gc_cause();
1932 size_policy()->check_gc_overhead_limit(_young_gen->used(),
1933 young_gen->eden()->used(),
1934 _cmsGen->max_capacity(),
1935 max_eden_size,
1936 full,
1937 gc_cause,
1938 gch->collector_policy());
1939 } else {
1940 do_mark_sweep_work(clear_all_soft_refs, first_state,
1941 should_start_over);
1942 }
1943 // Reset the expansion cause, now that we just completed
1944 // a collection cycle.
1945 clear_expansion_cause();
1946 _foregroundGCIsActive = false;
1947 return;
1948 }
1950 // Resize the tenured generation
1951 // after obtaining the free list locks for the
1952 // constituent generations.
1953 void CMSCollector::compute_new_size() {
1954 assert_locked_or_safepoint(Heap_lock);
1955 FreelistLocker z(this);
1956 MetaspaceGC::compute_new_size();
1957 _cmsGen->compute_new_size_free_list();
1958 }
1960 // A work method used by foreground collection to determine
1961 // what type of collection (compacting or not, continuing or fresh)
1962 // it should do.
1963 // NOTE: the intent is to make UseCMSCompactAtFullCollection
1964 // and CMSCompactWhenClearAllSoftRefs the default in the future
1965 // and do away with the flags after a suitable period.
1966 void CMSCollector::decide_foreground_collection_type(
1967 bool clear_all_soft_refs, bool* should_compact,
1968 bool* should_start_over) {
1969 // Normally, we'll compact only if the UseCMSCompactAtFullCollection
1970 // flag is set, and we have either requested a System.gc() or
1971 // the number of full gc's since the last concurrent cycle
1972 // has exceeded the threshold set by CMSFullGCsBeforeCompaction,
1973 // or if an incremental collection has failed
1974 GenCollectedHeap* gch = GenCollectedHeap::heap();
1975 assert(gch->collector_policy()->is_two_generation_policy(),
1976 "You may want to check the correctness of the following");
1977 // Inform the cms gen if this was due to a partial collection failing.
1978 // The CMS gen may use this fact to determine its expansion policy.
1979 if (gch->incremental_collection_will_fail(false /* don't consult_young */)) {
1980 assert(!_cmsGen->incremental_collection_failed(),
1981 "Should have been noticed, reacted to and cleared");
1982 _cmsGen->set_incremental_collection_failed();
1983 }
1984 *should_compact =
1985 UseCMSCompactAtFullCollection &&
1986 ((_full_gcs_since_conc_gc >= CMSFullGCsBeforeCompaction) ||
1987 GCCause::is_user_requested_gc(gch->gc_cause()) ||
1988 gch->incremental_collection_will_fail(true /* consult_young */));
1989 *should_start_over = false;
1990 if (clear_all_soft_refs && !*should_compact) {
1991 // We are about to do a last ditch collection attempt
1992 // so it would normally make sense to do a compaction
1993 // to reclaim as much space as possible.
1994 if (CMSCompactWhenClearAllSoftRefs) {
1995 // Default: The rationale is that in this case either
1996 // we are past the final marking phase, in which case
1997 // we'd have to start over, or so little has been done
1998 // that there's little point in saving that work. Compaction
1999 // appears to be the sensible choice in either case.
2000 *should_compact = true;
2001 } else {
2002 // We have been asked to clear all soft refs, but not to
2003 // compact. Make sure that we aren't past the final checkpoint
2004 // phase, for that is where we process soft refs. If we are already
2005 // past that phase, we'll need to redo the refs discovery phase and
2006 // if necessary clear soft refs that weren't previously
2007 // cleared. We do so by remembering the phase in which
2008 // we came in, and if we are past the refs processing
2009 // phase, we'll choose to just redo the mark-sweep
2010 // collection from scratch.
2011 if (_collectorState > FinalMarking) {
2012 // We are past the refs processing phase;
2013 // start over and do a fresh synchronous CMS cycle
2014 _collectorState = Resetting; // skip to reset to start new cycle
2015 reset(false /* == !asynch */);
2016 *should_start_over = true;
2017 } // else we can continue a possibly ongoing current cycle
2018 }
2019 }
2020 }
2022 // A work method used by the foreground collector to do
2023 // a mark-sweep-compact.
2024 void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
2025 GenCollectedHeap* gch = GenCollectedHeap::heap();
2027 STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
2028 gc_timer->register_gc_start();
2030 SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
2031 gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
2033 GCTraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, NULL, gc_tracer->gc_id());
2034 if (PrintGC && Verbose && !(GCCause::is_user_requested_gc(gch->gc_cause()))) {
2035 gclog_or_tty->print_cr("Compact ConcurrentMarkSweepGeneration after %d "
2036 "collections passed to foreground collector", _full_gcs_since_conc_gc);
2037 }
2039 // Sample collection interval time and reset for collection pause.
2040 if (UseAdaptiveSizePolicy) {
2041 size_policy()->msc_collection_begin();
2042 }
2044 // Temporarily widen the span of the weak reference processing to
2045 // the entire heap.
2046 MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
2047 ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
2048 // Temporarily, clear the "is_alive_non_header" field of the
2049 // reference processor.
2050 ReferenceProcessorIsAliveMutator rp_mut_closure(ref_processor(), NULL);
2051 // Temporarily make reference _processing_ single threaded (non-MT).
2052 ReferenceProcessorMTProcMutator rp_mut_mt_processing(ref_processor(), false);
2053 // Temporarily make refs discovery atomic
2054 ReferenceProcessorAtomicMutator rp_mut_atomic(ref_processor(), true);
2055 // Temporarily make reference _discovery_ single threaded (non-MT)
2056 ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
2058 ref_processor()->set_enqueuing_is_done(false);
2059 ref_processor()->enable_discovery(false /*verify_disabled*/, false /*check_no_refs*/);
2060 ref_processor()->setup_policy(clear_all_soft_refs);
2061 // If an asynchronous collection finishes, the _modUnionTable is
2062 // all clear. If we are taking over from an asynchronous
2063 // collection, clear the _modUnionTable.
2064 assert(_collectorState != Idling || _modUnionTable.isAllClear(),
2065 "_modUnionTable should be clear if the baton was not passed");
2066 _modUnionTable.clear_all();
2067 assert(_collectorState != Idling || _ct->klass_rem_set()->mod_union_is_clear(),
2068 "mod union for klasses should be clear if the baton was passed");
2069 _ct->klass_rem_set()->clear_mod_union();
2071 // We must adjust the allocation statistics being maintained
2072 // in the free list space. We do so by reading and clearing
2073 // the sweep timer and updating the block flux rate estimates below.
2074 assert(!_intra_sweep_timer.is_active(), "_intra_sweep_timer should be inactive");
2075 if (_inter_sweep_timer.is_active()) {
2076 _inter_sweep_timer.stop();
2077 // Note that we do not use this sample to update the _inter_sweep_estimate.
2078 _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
2079 _inter_sweep_estimate.padded_average(),
2080 _intra_sweep_estimate.padded_average());
2081 }
2083 GenMarkSweep::invoke_at_safepoint(_cmsGen->level(),
2084 ref_processor(), clear_all_soft_refs);
2085 #ifdef ASSERT
2086 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
2087 size_t free_size = cms_space->free();
2088 assert(free_size ==
2089 pointer_delta(cms_space->end(), cms_space->compaction_top())
2090 * HeapWordSize,
2091 "All the free space should be compacted into one chunk at top");
2092 assert(cms_space->dictionary()->total_chunk_size(
2093 debug_only(cms_space->freelistLock())) == 0 ||
2094 cms_space->totalSizeInIndexedFreeLists() == 0,
2095 "All the free space should be in a single chunk");
2096 size_t num = cms_space->totalCount();
2097 assert((free_size == 0 && num == 0) ||
2098 (free_size > 0 && (num == 1 || num == 2)),
2099 "There should be at most 2 free chunks after compaction");
2100 #endif // ASSERT
2101 _collectorState = Resetting;
2102 assert(_restart_addr == NULL,
2103 "Should have been NULL'd before baton was passed");
2104 reset(false /* == !asynch */);
2105 _cmsGen->reset_after_compaction();
2106 _concurrent_cycles_since_last_unload = 0;
2108 // Clear any data recorded in the PLAB chunk arrays.
2109 if (_survivor_plab_array != NULL) {
2110 reset_survivor_plab_arrays();
2111 }
2113 // Adjust the per-size allocation stats for the next epoch.
2114 _cmsGen->cmsSpace()->endSweepFLCensus(sweep_count() /* fake */);
2115 // Restart the "inter sweep timer" for the next epoch.
2116 _inter_sweep_timer.reset();
2117 _inter_sweep_timer.start();
2119 // Sample collection pause time and reset for collection interval.
2120 if (UseAdaptiveSizePolicy) {
2121 size_policy()->msc_collection_end(gch->gc_cause());
2122 }
2124 gc_timer->register_gc_end();
2126 gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
2128 // For a mark-sweep-compact, compute_new_size() will be called
2129 // in the heap's do_collection() method.
2130 }
2132 // A work method used by the foreground collector to do
2133 // a mark-sweep, after taking over from a possibly on-going
2134 // concurrent mark-sweep collection.
2135 void CMSCollector::do_mark_sweep_work(bool clear_all_soft_refs,
2136 CollectorState first_state, bool should_start_over) {
2137 if (PrintGC && Verbose) {
2138 gclog_or_tty->print_cr("Pass concurrent collection to foreground "
2139 "collector with count %d",
2140 _full_gcs_since_conc_gc);
2141 }
2142 switch (_collectorState) {
2143 case Idling:
2144 if (first_state == Idling || should_start_over) {
2145 // The background GC was not active, or should be
2146 // restarted from scratch; start the cycle.
2147 _collectorState = InitialMarking;
2148 }
2149 // If first_state was not Idling, then a background GC
2150 // was in progress and has now finished. No need to do it
2151 // again. Leave the state as Idling.
2152 break;
2153 case Precleaning:
2154 // In the foreground case don't do the precleaning since
2155 // it is not done concurrently and there is extra work
2156 // required.
2157 _collectorState = FinalMarking;
2158 }
2159 collect_in_foreground(clear_all_soft_refs, GenCollectedHeap::heap()->gc_cause());
2161 // For a mark-sweep, compute_new_size() will be called
2162 // in the heap's do_collection() method.
2163 }
2166 void CMSCollector::print_eden_and_survivor_chunk_arrays() {
2167 DefNewGeneration* dng = _young_gen->as_DefNewGeneration();
2168 EdenSpace* eden_space = dng->eden();
2169 ContiguousSpace* from_space = dng->from();
2170 ContiguousSpace* to_space = dng->to();
2171 // Eden
2172 if (_eden_chunk_array != NULL) {
2173 gclog_or_tty->print_cr("eden " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
2174 eden_space->bottom(), eden_space->top(),
2175 eden_space->end(), eden_space->capacity());
2176 gclog_or_tty->print_cr("_eden_chunk_index=" SIZE_FORMAT ", "
2177 "_eden_chunk_capacity=" SIZE_FORMAT,
2178 _eden_chunk_index, _eden_chunk_capacity);
2179 for (size_t i = 0; i < _eden_chunk_index; i++) {
2180 gclog_or_tty->print_cr("_eden_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT,
2181 i, _eden_chunk_array[i]);
2182 }
2183 }
2184 // Survivor
2185 if (_survivor_chunk_array != NULL) {
2186 gclog_or_tty->print_cr("survivor " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
2187 from_space->bottom(), from_space->top(),
2188 from_space->end(), from_space->capacity());
2189 gclog_or_tty->print_cr("_survivor_chunk_index=" SIZE_FORMAT ", "
2190 "_survivor_chunk_capacity=" SIZE_FORMAT,
2191 _survivor_chunk_index, _survivor_chunk_capacity);
2192 for (size_t i = 0; i < _survivor_chunk_index; i++) {
2193 gclog_or_tty->print_cr("_survivor_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT,
2194 i, _survivor_chunk_array[i]);
2195 }
2196 }
2197 }
2199 void CMSCollector::getFreelistLocks() const {
2200 // Get locks for all free lists in all generations that this
2201 // collector is responsible for
2202 _cmsGen->freelistLock()->lock_without_safepoint_check();
2203 }
2205 void CMSCollector::releaseFreelistLocks() const {
2206 // Release locks for all free lists in all generations that this
2207 // collector is responsible for
2208 _cmsGen->freelistLock()->unlock();
2209 }
2211 bool CMSCollector::haveFreelistLocks() const {
2212 // Check locks for all free lists in all generations that this
2213 // collector is responsible for
2214 assert_lock_strong(_cmsGen->freelistLock());
2215 PRODUCT_ONLY(ShouldNotReachHere());
2216 return true;
2217 }
2219 // A utility class that is used by the CMS collector to
2220 // temporarily "release" the foreground collector from its
2221 // usual obligation to wait for the background collector to
2222 // complete an ongoing phase before proceeding.
2223 class ReleaseForegroundGC: public StackObj {
2224 private:
2225 CMSCollector* _c;
2226 public:
2227 ReleaseForegroundGC(CMSCollector* c) : _c(c) {
2228 assert(_c->_foregroundGCShouldWait, "Else should not need to call");
2229 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2230 // allow a potentially blocked foreground collector to proceed
2231 _c->_foregroundGCShouldWait = false;
2232 if (_c->_foregroundGCIsActive) {
2233 CGC_lock->notify();
2234 }
2235 assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2236 "Possible deadlock");
2237 }
2239 ~ReleaseForegroundGC() {
2240 assert(!_c->_foregroundGCShouldWait, "Usage protocol violation?");
2241 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2242 _c->_foregroundGCShouldWait = true;
2243 }
2244 };
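// Typical (illustrative) use of ReleaseForegroundGC as a scoped guard around
// a stop-the-world operation, mirroring its use in collect_in_background()
// below: the constructor clears _foregroundGCShouldWait and the destructor
// re-asserts it.
//
//   {
//     ReleaseForegroundGC x(this);           // unblock a waiting FG collector
//     VM_CMS_Initial_Mark initial_mark_op(this);
//     VMThread::execute(&initial_mark_op);
//   }                                        // destructor restores the wait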
2246 // There are separate collect_in_background and collect_in_foreground because of
2247 // the different locking requirements of the background collector and the
2248 // foreground collector. There was originally an attempt to share
2249 // one "collect" method between the background collector and the foreground
2250 // collector, but the if-then-else logic that would have been required made it cleaner to have
2251 // separate methods.
2252 void CMSCollector::collect_in_background(bool clear_all_soft_refs, GCCause::Cause cause) {
2253 assert(Thread::current()->is_ConcurrentGC_thread(),
2254 "A CMS asynchronous collection is only allowed on a CMS thread.");
2256 GenCollectedHeap* gch = GenCollectedHeap::heap();
2257 {
2258 bool safepoint_check = Mutex::_no_safepoint_check_flag;
2259 MutexLockerEx hl(Heap_lock, safepoint_check);
2260 FreelistLocker fll(this);
2261 MutexLockerEx x(CGC_lock, safepoint_check);
2262 if (_foregroundGCIsActive || !UseAsyncConcMarkSweepGC) {
2263 // The foreground collector is active or we're
2264 // not using asynchronous collections. Skip this
2265 // background collection.
2266 assert(!_foregroundGCShouldWait, "Should be clear");
2267 return;
2268 } else {
2269 assert(_collectorState == Idling, "Should be idling before start.");
2270 _collectorState = InitialMarking;
2271 register_gc_start(cause);
2272 // Reset the expansion cause, now that we are about to begin
2273 // a new cycle.
2274 clear_expansion_cause();
2276 // Clear the MetaspaceGC flag since a concurrent collection
2277 // is starting but also clear it after the collection.
2278 MetaspaceGC::set_should_concurrent_collect(false);
2279 }
2280 // Decide if we want to enable class unloading as part of the
2281 // ensuing concurrent GC cycle.
2282 update_should_unload_classes();
2283 _full_gc_requested = false; // acks all outstanding full gc requests
2284 _full_gc_cause = GCCause::_no_gc;
2285 // Signal that we are about to start a collection
2286 gch->increment_total_full_collections(); // ... starting a collection cycle
2287 _collection_count_start = gch->total_full_collections();
2288 }
2290 // Used for PrintGC
2291 size_t prev_used;
2292 if (PrintGC && Verbose) {
2293 prev_used = _cmsGen->used(); // XXXPERM
2294 }
2296 // The change of the collection state is normally done at this level;
2297 // the exceptions are phases that are executed while the world is
2298 // stopped. For those phases the change of state is done while the
2299 // world is stopped. For baton passing purposes this allows the
2300 // background collector to finish the phase and change state atomically.
2301 // The foreground collector cannot wait on a phase that is done
2302 // while the world is stopped because the foreground collector already
2303 // has the world stopped and would deadlock.
2304 while (_collectorState != Idling) {
2305 if (TraceCMSState) {
2306 gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
2307 Thread::current(), _collectorState);
2308 }
2309 // The foreground collector
2310 // holds the Heap_lock throughout its collection.
2311 // holds the CMS token (but not the lock)
2312 // except while it is waiting for the background collector to yield.
2313 //
2314 // The foreground collector should be blocked (not for long)
2315 // if the background collector is about to start a phase
2316 // executed with world stopped. If the background
2317 // collector has already started such a phase, the
2318 // foreground collector is blocked waiting for the
2319 // Heap_lock. The stop-world phases (InitialMarking and FinalMarking)
2320 // are executed in the VM thread.
2321 //
2322 // The locking order is
2323 // PendingListLock (PLL) -- if applicable (FinalMarking)
2324 // Heap_lock (both this & PLL locked in VM_CMS_Operation::prologue())
2325 // CMS token (claimed in
2326 // stop_world_and_do() -->
2327 // safepoint_synchronize() -->
2328 // CMSThread::synchronize())
2330 {
2331 // Check if the FG collector wants us to yield.
2332 CMSTokenSync x(true); // is cms thread
2333 if (waitForForegroundGC()) {
2334 // We yielded to a foreground GC, nothing more to be
2335 // done this round.
2336 assert(_foregroundGCShouldWait == false, "We set it to false in "
2337 "waitForForegroundGC()");
2338 if (TraceCMSState) {
2339 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2340 " exiting collection CMS state %d",
2341 Thread::current(), _collectorState);
2342 }
2343 return;
2344 } else {
2345 // The background collector can run but check to see if the
2346 // foreground collector has done a collection while the
2347 // background collector was waiting to get the CGC_lock
2348 // above. If yes, break so that _foregroundGCShouldWait
2349 // is cleared before returning.
2350 if (_collectorState == Idling) {
2351 break;
2352 }
2353 }
2354 }
2356 assert(_foregroundGCShouldWait, "Foreground collector, if active, "
2357 "should be waiting");
2359 switch (_collectorState) {
2360 case InitialMarking:
2361 {
2362 ReleaseForegroundGC x(this);
2363 stats().record_cms_begin();
2364 VM_CMS_Initial_Mark initial_mark_op(this);
2365 VMThread::execute(&initial_mark_op);
2366 }
2367 // The collector state may be any legal state at this point
2368 // since the background collector may have yielded to the
2369 // foreground collector.
2370 break;
2371 case Marking:
2372 // initial marking in checkpointRootsInitialWork has been completed
2373 if (markFromRoots(true)) { // we were successful
2374 assert(_collectorState == Precleaning, "Collector state should "
2375 "have changed");
2376 } else {
2377 assert(_foregroundGCIsActive, "Internal state inconsistency");
2378 }
2379 break;
2380 case Precleaning:
2381 if (UseAdaptiveSizePolicy) {
2382 size_policy()->concurrent_precleaning_begin();
2383 }
2384 // marking from roots in markFromRoots has been completed
2385 preclean();
2386 if (UseAdaptiveSizePolicy) {
2387 size_policy()->concurrent_precleaning_end();
2388 }
2389 assert(_collectorState == AbortablePreclean ||
2390 _collectorState == FinalMarking,
2391 "Collector state should have changed");
2392 break;
2393 case AbortablePreclean:
2394 if (UseAdaptiveSizePolicy) {
2395 size_policy()->concurrent_phases_resume();
2396 }
2397 abortable_preclean();
2398 if (UseAdaptiveSizePolicy) {
2399 size_policy()->concurrent_precleaning_end();
2400 }
2401 assert(_collectorState == FinalMarking, "Collector state should "
2402 "have changed");
2403 break;
2404 case FinalMarking:
2405 {
2406 ReleaseForegroundGC x(this);
2408 VM_CMS_Final_Remark final_remark_op(this);
2409 VMThread::execute(&final_remark_op);
2410 }
2411 assert(_foregroundGCShouldWait, "block post-condition");
2412 break;
2413 case Sweeping:
2414 if (UseAdaptiveSizePolicy) {
2415 size_policy()->concurrent_sweeping_begin();
2416 }
2417 // final marking in checkpointRootsFinal has been completed
2418 sweep(true);
2419 assert(_collectorState == Resizing, "Collector state change "
2420 "to Resizing must be done under the free_list_lock");
2421 _full_gcs_since_conc_gc = 0;
2423 // Stop the timers for adaptive size policy for the concurrent phases
2424 if (UseAdaptiveSizePolicy) {
2425 size_policy()->concurrent_sweeping_end();
2426 size_policy()->concurrent_phases_end(gch->gc_cause(),
2427 gch->prev_gen(_cmsGen)->capacity(),
2428 _cmsGen->free());
2429 }
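// Note: no break here. Sweeping has set _collectorState to Resizing
// (see the assert above), so control intentionally falls through
// into the Resizing case.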
2431 case Resizing: {
2432 // Sweeping has been completed...
2433 // At this point the background collection has completed.
2434 // Don't move the call to compute_new_size() down
2435 // into code that might be executed if the background
2436 // collection was preempted.
2437 {
2438 ReleaseForegroundGC x(this); // unblock FG collection
2439 MutexLockerEx y(Heap_lock, Mutex::_no_safepoint_check_flag);
2440 CMSTokenSync z(true); // not strictly needed.
2441 if (_collectorState == Resizing) {
2442 compute_new_size();
2443 save_heap_summary();
2444 _collectorState = Resetting;
2445 } else {
2446 assert(_collectorState == Idling, "The state should only change"
2447 " because the foreground collector has finished the collection");
2448 }
2449 }
2450 break;
2451 }
2452 case Resetting:
2453 // CMS heap resizing has been completed
2454 reset(true);
2455 assert(_collectorState == Idling, "Collector state should "
2456 "have changed");
2458 MetaspaceGC::set_should_concurrent_collect(false);
2460 stats().record_cms_end();
2461 // Don't move the concurrent_phases_end() and compute_new_size()
2462 // calls to here because a preempted background collection
2463 // has its state set to "Resetting".
2464 break;
2465 case Idling:
2466 default:
2467 ShouldNotReachHere();
2468 break;
2469 }
2470 if (TraceCMSState) {
2471 gclog_or_tty->print_cr(" Thread " INTPTR_FORMAT " done - next CMS state %d",
2472 Thread::current(), _collectorState);
2473 }
2474 assert(_foregroundGCShouldWait, "block post-condition");
2475 }
2477 // Should this be in gc_epilogue?
2478 collector_policy()->counters()->update_counters();
2480 {
2481 // Clear _foregroundGCShouldWait and, in the event that the
2482 // foreground collector is waiting, notify it, before
2483 // returning.
2484 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2485 _foregroundGCShouldWait = false;
2486 if (_foregroundGCIsActive) {
2487 CGC_lock->notify();
2488 }
2489 assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2490 "Possible deadlock");
2491 }
2492 if (TraceCMSState) {
2493 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2494 " exiting collection CMS state %d",
2495 Thread::current(), _collectorState);
2496 }
2497 if (PrintGC && Verbose) {
2498 _cmsGen->print_heap_change(prev_used);
2499 }
2500 }
2502 void CMSCollector::register_foreground_gc_start(GCCause::Cause cause) {
2503 if (!_cms_start_registered) {
2504 register_gc_start(cause);
2505 }
2506 }
2508 void CMSCollector::register_gc_start(GCCause::Cause cause) {
2509 _cms_start_registered = true;
2510 _gc_timer_cm->register_gc_start();
2511 _gc_tracer_cm->report_gc_start(cause, _gc_timer_cm->gc_start());
2512 }
2514 void CMSCollector::register_gc_end() {
2515 if (_cms_start_registered) {
2516 report_heap_summary(GCWhen::AfterGC);
2518 _gc_timer_cm->register_gc_end();
2519 _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
2520 _cms_start_registered = false;
2521 }
2522 }
2524 void CMSCollector::save_heap_summary() {
2525 GenCollectedHeap* gch = GenCollectedHeap::heap();
2526 _last_heap_summary = gch->create_heap_summary();
2527 _last_metaspace_summary = gch->create_metaspace_summary();
2528 }
2530 void CMSCollector::report_heap_summary(GCWhen::Type when) {
2531 _gc_tracer_cm->report_gc_heap_summary(when, _last_heap_summary);
2532 _gc_tracer_cm->report_metaspace_summary(when, _last_metaspace_summary);
2533 }
2535 void CMSCollector::collect_in_foreground(bool clear_all_soft_refs, GCCause::Cause cause) {
2536 assert(_foregroundGCIsActive && !_foregroundGCShouldWait,
2537 "Foreground collector should be waiting, not executing");
2538 assert(Thread::current()->is_VM_thread(), "A foreground collection "
2539 "may only be done by the VM Thread with the world stopped");
2540 assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
2541 "VM thread should have CMS token");
2543 // The gc id is created in register_foreground_gc_start if this collection is synchronous
2544 const GCId gc_id = _collectorState == InitialMarking ? GCId::peek() : _gc_tracer_cm->gc_id();
2545 NOT_PRODUCT(GCTraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose,
2546 true, NULL, gc_id);)
2547 if (UseAdaptiveSizePolicy) {
2548 size_policy()->ms_collection_begin();
2549 }
2550 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
2552 HandleMark hm; // Discard invalid handles created during verification
2554 if (VerifyBeforeGC &&
2555 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2556 Universe::verify();
2557 }
2559 // Snapshot the soft reference policy to be used in this collection cycle.
2560 ref_processor()->setup_policy(clear_all_soft_refs);
2562 // Decide if class unloading should be done
2563 update_should_unload_classes();
2565 bool init_mark_was_synchronous = false; // until proven otherwise
2566 while (_collectorState != Idling) {
2567 if (TraceCMSState) {
2568 gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
2569 Thread::current(), _collectorState);
2570 }
2571 switch (_collectorState) {
2572 case InitialMarking:
2573 register_foreground_gc_start(cause);
2574 init_mark_was_synchronous = true; // fact to be exploited in re-mark
2575 checkpointRootsInitial(false);
2576 assert(_collectorState == Marking, "Collector state should have changed"
2577 " within checkpointRootsInitial()");
2578 break;
2579 case Marking:
2580 // initial marking in checkpointRootsInitialWork has been completed
2581 if (VerifyDuringGC &&
2582 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2583 Universe::verify("Verify before initial mark: ");
2584 }
2585 {
2586 bool res = markFromRoots(false);
2587 assert(res && _collectorState == FinalMarking, "Collector state should "
2588 "have changed");
2589 break;
2590 }
2591 case FinalMarking:
2592 if (VerifyDuringGC &&
2593 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2594 Universe::verify("Verify before re-mark: ");
2595 }
2596 checkpointRootsFinal(false, clear_all_soft_refs,
2597 init_mark_was_synchronous);
2598 assert(_collectorState == Sweeping, "Collector state should not "
2599 "have changed within checkpointRootsFinal()");
2600 break;
2601 case Sweeping:
2602 // final marking in checkpointRootsFinal has been completed
2603 if (VerifyDuringGC &&
2604 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2605 Universe::verify("Verify before sweep: ");
2606 }
2607 sweep(false);
2608 assert(_collectorState == Resizing, "Incorrect state");
2609 break;
2610 case Resizing: {
2611 // Sweeping has been completed; the actual resize in this case
2612 // is done separately; nothing to be done in this state.
2613 _collectorState = Resetting;
2614 break;
2615 }
2616 case Resetting:
2617 // The heap has been resized.
2618 if (VerifyDuringGC &&
2619 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2620 Universe::verify("Verify before reset: ");
2621 }
2622 save_heap_summary();
2623 reset(false);
2624 assert(_collectorState == Idling, "Collector state should "
2625 "have changed");
2626 break;
2627 case Precleaning:
2628 case AbortablePreclean:
2629 // Elide the preclean phase
2630 _collectorState = FinalMarking;
2631 break;
2632 default:
2633 ShouldNotReachHere();
2634 }
2635 if (TraceCMSState) {
2636 gclog_or_tty->print_cr(" Thread " INTPTR_FORMAT " done - next CMS state %d",
2637 Thread::current(), _collectorState);
2638 }
2639 }
2641 if (UseAdaptiveSizePolicy) {
2642 GenCollectedHeap* gch = GenCollectedHeap::heap();
2643 size_policy()->ms_collection_end(gch->gc_cause());
2644 }
2646 if (VerifyAfterGC &&
2647 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2648 Universe::verify();
2649 }
2650 if (TraceCMSState) {
2651 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2652 " exiting collection CMS state %d",
2653 Thread::current(), _collectorState);
2654 }
2655 }
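// In summary, a synchronous (foreground) cycle driven by the state loop above
// normally steps through:
//   InitialMarking -> Marking -> FinalMarking -> Sweeping -> Resizing
//     -> Resetting -> Idling
// with Precleaning and AbortablePreclean elided (they collapse into
// FinalMarking), and with the actual heap resizing done separately.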
2657 bool CMSCollector::waitForForegroundGC() {
2658 bool res = false;
2659 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2660 "CMS thread should have CMS token");
2661 // Block the foreground collector until the
2662 // background collector decides whether to
2663 // yield.
2664 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2665 _foregroundGCShouldWait = true;
2666 if (_foregroundGCIsActive) {
2667 // The background collector yields to the
2668 // foreground collector and returns a value
2669 // indicating that it has yielded. The foreground
2670 // collector can proceed.
2671 res = true;
2672 _foregroundGCShouldWait = false;
2673 ConcurrentMarkSweepThread::clear_CMS_flag(
2674 ConcurrentMarkSweepThread::CMS_cms_has_token);
2675 ConcurrentMarkSweepThread::set_CMS_flag(
2676 ConcurrentMarkSweepThread::CMS_cms_wants_token);
2677 // Get a possibly blocked foreground thread going
2678 CGC_lock->notify();
2679 if (TraceCMSState) {
2680 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " waiting at CMS state %d",
2681 Thread::current(), _collectorState);
2682 }
2683 while (_foregroundGCIsActive) {
2684 CGC_lock->wait(Mutex::_no_safepoint_check_flag);
2685 }
2686 ConcurrentMarkSweepThread::set_CMS_flag(
2687 ConcurrentMarkSweepThread::CMS_cms_has_token);
2688 ConcurrentMarkSweepThread::clear_CMS_flag(
2689 ConcurrentMarkSweepThread::CMS_cms_wants_token);
2690 }
2691 if (TraceCMSState) {
2692 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " continuing at CMS state %d",
2693 Thread::current(), _collectorState);
2694 }
2695 return res;
2696 }
2698 // Because of the need to lock the free lists and other structures in
2699 // the collector, common to all the generations that the collector is
2700 // collecting, we need the gc_prologues of individual CMS generations
2701 // to delegate to their collector. It may have been simpler had the
2702 // current infrastructure allowed one to call a prologue on a
2703 // collector. In the absence of that we have the generation's
2704 // prologue delegate to the collector, which delegates back
2705 // some "local" work to a worker method in the individual generations
2706 // that it's responsible for collecting, while itself doing any
2707 // work common to all generations it's responsible for. A similar
2708 // comment applies to the gc_epilogue()'s.
2709 // The role of the variable _between_prologue_and_epilogue is to
2710 // enforce the invocation protocol.
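// Sketch of the resulting call chain (a summary of the methods below, not
// additional product code):
//
//   GenCollectedHeap -> ConcurrentMarkSweepGeneration::gc_prologue(full)
//     -> CMSCollector::gc_prologue(full)        // common work; takes locks
//          -> ConcurrentMarkSweepGeneration::gc_prologue_work(...)
//                                               // per-generation "local" work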
2711 void CMSCollector::gc_prologue(bool full) {
2712 // Call gc_prologue_work() for the CMSGen
2713 // we are responsible for.
2715 // The following locking discipline assumes that we are only called
2716 // when the world is stopped.
2717 assert(SafepointSynchronize::is_at_safepoint(), "world is stopped assumption");
2719 // The CMSCollector prologue must call the gc_prologues for the
2720 // "generations" that it's responsible
2721 // for.
2723 assert( Thread::current()->is_VM_thread()
2724 || ( CMSScavengeBeforeRemark
2725 && Thread::current()->is_ConcurrentGC_thread()),
2726 "Incorrect thread type for prologue execution");
2728 if (_between_prologue_and_epilogue) {
2729 // We have already been invoked; this is a gc_prologue delegation
2730 // from yet another CMS generation that we are responsible for, just
2731 // ignore it since all relevant work has already been done.
2732 return;
2733 }
2735 // set a bit saying prologue has been called; cleared in epilogue
2736 _between_prologue_and_epilogue = true;
2737 // Claim locks for common data structures, then call gc_prologue_work()
2738 // for each CMSGen.
2740 getFreelistLocks(); // gets free list locks on constituent spaces
2741 bitMapLock()->lock_without_safepoint_check();
2743 // Should call gc_prologue_work() for all cms gens we are responsible for
2744 bool duringMarking = _collectorState >= Marking
2745 && _collectorState < Sweeping;
2747 // The young collections clear the modified oops state, which tells if
2748 // there are any modified oops in the class. The remark phase also needs
2749 // that information. Tell the young collection to save the union of all
2750 // modified klasses.
2751 if (duringMarking) {
2752 _ct->klass_rem_set()->set_accumulate_modified_oops(true);
2753 }
2755 bool registerClosure = duringMarking;
2757 ModUnionClosure* muc = CollectedHeap::use_parallel_gc_threads() ?
2758 &_modUnionClosurePar
2759 : &_modUnionClosure;
2760 _cmsGen->gc_prologue_work(full, registerClosure, muc);
2762 if (!full) {
2763 stats().record_gc0_begin();
2764 }
2765 }
2767 void ConcurrentMarkSweepGeneration::gc_prologue(bool full) {
2769 _capacity_at_prologue = capacity();
2770 _used_at_prologue = used();
2772 // Delegate to CMScollector which knows how to coordinate between
2773 // this and any other CMS generations that it is responsible for
2774 // collecting.
2775 collector()->gc_prologue(full);
2776 }
2778 // This is a "private" interface for use by this generation's CMSCollector.
2779 // Not to be called directly by any other entity (for instance,
2780 // GenCollectedHeap, which calls the "public" gc_prologue method above).
2781 void ConcurrentMarkSweepGeneration::gc_prologue_work(bool full,
2782 bool registerClosure, ModUnionClosure* modUnionClosure) {
2783 assert(!incremental_collection_failed(), "Shouldn't be set yet");
2784 assert(cmsSpace()->preconsumptionDirtyCardClosure() == NULL,
2785 "Should be NULL");
2786 if (registerClosure) {
2787 cmsSpace()->setPreconsumptionDirtyCardClosure(modUnionClosure);
2788 }
2789 cmsSpace()->gc_prologue();
2790 // Clear stat counters
2791 NOT_PRODUCT(
2792 assert(_numObjectsPromoted == 0, "check");
2793 assert(_numWordsPromoted == 0, "check");
2794 if (Verbose && PrintGC) {
2795 gclog_or_tty->print("Allocated "SIZE_FORMAT" objects, "
2796 SIZE_FORMAT" bytes concurrently",
2797 _numObjectsAllocated, _numWordsAllocated*sizeof(HeapWord));
2798 }
2799 _numObjectsAllocated = 0;
2800 _numWordsAllocated = 0;
2801 )
2802 }
2804 void CMSCollector::gc_epilogue(bool full) {
2805 // The following locking discipline assumes that we are only called
2806 // when the world is stopped.
2807 assert(SafepointSynchronize::is_at_safepoint(),
2808 "world is stopped assumption");
2810 // Currently the CMS epilogue (see CompactibleFreeListSpace) merely checks
2811 // if linear allocation blocks need to be appropriately marked to allow
2812 // the blocks to be parsable. We also check here whether we need to nudge the
2813 // CMS collector thread to start a new cycle (if it's not already active).
2814 assert( Thread::current()->is_VM_thread()
2815 || ( CMSScavengeBeforeRemark
2816 && Thread::current()->is_ConcurrentGC_thread()),
2817 "Incorrect thread type for epilogue execution");
2819 if (!_between_prologue_and_epilogue) {
2820 // We have already been invoked; this is a gc_epilogue delegation
2821 // from yet another CMS generation that we are responsible for, just
2822 // ignore it since all relevant work has already been done.
2823 return;
2824 }
2825 assert(haveFreelistLocks(), "must have freelist locks");
2826 assert_lock_strong(bitMapLock());
2828 _ct->klass_rem_set()->set_accumulate_modified_oops(false);
2830 _cmsGen->gc_epilogue_work(full);
2832 if (_collectorState == AbortablePreclean || _collectorState == Precleaning) {
2833 // in case sampling was not already enabled, enable it
2834 _start_sampling = true;
2835 }
2836 // reset _eden_chunk_array so sampling starts afresh
2837 _eden_chunk_index = 0;
2839 size_t cms_used = _cmsGen->cmsSpace()->used();
2841 // update performance counters - this uses a special version of
2842 // update_counters() that allows the utilization to be passed as a
2843 // parameter, avoiding multiple calls to used().
2844 //
2845 _cmsGen->update_counters(cms_used);
2847 if (CMSIncrementalMode) {
2848 icms_update_allocation_limits();
2849 }
2851 bitMapLock()->unlock();
2852 releaseFreelistLocks();
2854 if (!CleanChunkPoolAsync) {
2855 Chunk::clean_chunk_pool();
2856 }
2858 set_did_compact(false);
2859 _between_prologue_and_epilogue = false; // ready for next cycle
2860 }
2862 void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) {
2863 collector()->gc_epilogue(full);
2865 // Also reset promotion tracking in par gc thread states.
2866 if (CollectedHeap::use_parallel_gc_threads()) {
2867 for (uint i = 0; i < ParallelGCThreads; i++) {
2868 _par_gc_thread_states[i]->promo.stopTrackingPromotions(i);
2869 }
2870 }
2871 }
2873 void ConcurrentMarkSweepGeneration::gc_epilogue_work(bool full) {
2874 assert(!incremental_collection_failed(), "Should have been cleared");
2875 cmsSpace()->setPreconsumptionDirtyCardClosure(NULL);
2876 cmsSpace()->gc_epilogue();
2877 // Print stat counters
2878 NOT_PRODUCT(
2879 assert(_numObjectsAllocated == 0, "check");
2880 assert(_numWordsAllocated == 0, "check");
2881 if (Verbose && PrintGC) {
2882 gclog_or_tty->print("Promoted "SIZE_FORMAT" objects, "
2883 SIZE_FORMAT" bytes",
2884 _numObjectsPromoted, _numWordsPromoted*sizeof(HeapWord));
2885 }
2886 _numObjectsPromoted = 0;
2887 _numWordsPromoted = 0;
2888 )
2890 if (PrintGC && Verbose) {
2891     // The call down the chain in contiguous_available needs the freelistLock,
2892     // so print this out before releasing the freelistLock.
2893 gclog_or_tty->print(" Contiguous available "SIZE_FORMAT" bytes ",
2894 contiguous_available());
2895 }
2896 }
2898 #ifndef PRODUCT
2899 bool CMSCollector::have_cms_token() {
2900 Thread* thr = Thread::current();
2901 if (thr->is_VM_thread()) {
2902 return ConcurrentMarkSweepThread::vm_thread_has_cms_token();
2903 } else if (thr->is_ConcurrentGC_thread()) {
2904 return ConcurrentMarkSweepThread::cms_thread_has_cms_token();
2905 } else if (thr->is_GC_task_thread()) {
2906 return ConcurrentMarkSweepThread::vm_thread_has_cms_token() &&
2907 ParGCRareEvent_lock->owned_by_self();
2908 }
2909 return false;
2910 }
2911 #endif
2913 // Check reachability of the given heap address in CMS generation,
2914 // treating all other generations as roots.
2915 bool CMSCollector::is_cms_reachable(HeapWord* addr) {
2916   // We could "guarantee" below, rather than assert, but I'll
2917 // leave these as "asserts" so that an adventurous debugger
2918 // could try this in the product build provided some subset of
2919   // the conditions were met, provided they were interested in the
2920 // results and knew that the computation below wouldn't interfere
2921 // with other concurrent computations mutating the structures
2922 // being read or written.
2923 assert(SafepointSynchronize::is_at_safepoint(),
2924 "Else mutations in object graph will make answer suspect");
2925 assert(have_cms_token(), "Should hold cms token");
2926 assert(haveFreelistLocks(), "must hold free list locks");
2927 assert_lock_strong(bitMapLock());
2929   // The verification bit map will be cleared before we start; but, just
2930 // for kicks, first report if the given address is already marked
2931 gclog_or_tty->print_cr("Start: Address 0x%x is%s marked", addr,
2932 _markBitMap.isMarked(addr) ? "" : " not");
2934 if (verify_after_remark()) {
2935 MutexLockerEx x(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2936 bool result = verification_mark_bm()->isMarked(addr);
2937 gclog_or_tty->print_cr("TransitiveMark: Address 0x%x %s marked", addr,
2938 result ? "IS" : "is NOT");
2939 return result;
2940 } else {
2941 gclog_or_tty->print_cr("Could not compute result");
2942 return false;
2943 }
2944 }
2947 void
2948 CMSCollector::print_on_error(outputStream* st) {
2949 CMSCollector* collector = ConcurrentMarkSweepGeneration::_collector;
2950 if (collector != NULL) {
2951 CMSBitMap* bitmap = &collector->_markBitMap;
2952 st->print_cr("Marking Bits: (CMSBitMap*) " PTR_FORMAT, bitmap);
2953 bitmap->print_on_error(st, " Bits: ");
2955 st->cr();
2957 CMSBitMap* mut_bitmap = &collector->_modUnionTable;
2958 st->print_cr("Mod Union Table: (CMSBitMap*) " PTR_FORMAT, mut_bitmap);
2959 mut_bitmap->print_on_error(st, " Bits: ");
2960 }
2961 }
2963 ////////////////////////////////////////////////////////
2964 // CMS Verification Support
2965 ////////////////////////////////////////////////////////
2966 // Following the remark phase, the following invariant
2967 // should hold -- each object in the CMS heap which is
2968 // marked in the verification_mark_bm() should also be marked in markBitMap().
2970 class VerifyMarkedClosure: public BitMapClosure {
2971 CMSBitMap* _marks;
2972 bool _failed;
2974 public:
2975 VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {}
2977 bool do_bit(size_t offset) {
2978 HeapWord* addr = _marks->offsetToHeapWord(offset);
2979 if (!_marks->isMarked(addr)) {
2980 oop(addr)->print_on(gclog_or_tty);
2981 gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr);
2982 _failed = true;
2983 }
2984 return true;
2985 }
2987 bool failed() { return _failed; }
2988 };
2990 bool CMSCollector::verify_after_remark(bool silent) {
2991 if (!silent) gclog_or_tty->print(" [Verifying CMS Marking... ");
2992 MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2993 static bool init = false;
2995 assert(SafepointSynchronize::is_at_safepoint(),
2996 "Else mutations in object graph will make answer suspect");
2997 assert(have_cms_token(),
2998 "Else there may be mutual interference in use of "
2999 " verification data structures");
3000 assert(_collectorState > Marking && _collectorState <= Sweeping,
3001 "Else marking info checked here may be obsolete");
3002 assert(haveFreelistLocks(), "must hold free list locks");
3003 assert_lock_strong(bitMapLock());
3006 // Allocate marking bit map if not already allocated
3007 if (!init) { // first time
3008 if (!verification_mark_bm()->allocate(_span)) {
3009 return false;
3010 }
3011 init = true;
3012 }
3014 assert(verification_mark_stack()->isEmpty(), "Should be empty");
3016 // Turn off refs discovery -- so we will be tracing through refs.
3017 // This is as intended, because by this time
3018 // GC must already have cleared any refs that need to be cleared,
3019 // and traced those that need to be marked; moreover,
3020   // the marking done here is not going to interfere in any
3021 // way with the marking information used by GC.
3022 NoRefDiscovery no_discovery(ref_processor());
3024 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
3026 // Clear any marks from a previous round
3027 verification_mark_bm()->clear_all();
3028 assert(verification_mark_stack()->isEmpty(), "markStack should be empty");
3029 verify_work_stacks_empty();
3031 GenCollectedHeap* gch = GenCollectedHeap::heap();
3032 gch->ensure_parsability(false); // fill TLABs, but no need to retire them
3033 // Update the saved marks which may affect the root scans.
3034 gch->save_marks();
3036 if (CMSRemarkVerifyVariant == 1) {
3037 // In this first variant of verification, we complete
3038     // all marking, then check if the new marks-vector is
3039 // a subset of the CMS marks-vector.
3040 verify_after_remark_work_1();
3041 } else if (CMSRemarkVerifyVariant == 2) {
3042 // In this second variant of verification, we flag an error
3043 // (i.e. an object reachable in the new marks-vector not reachable
3044 // in the CMS marks-vector) immediately, also indicating the
3045     // identity of an object (A) that references the unmarked object (B) --
3046 // presumably, a mutation to A failed to be picked up by preclean/remark?
3047 verify_after_remark_work_2();
3048 } else {
3049 warning("Unrecognized value %d for CMSRemarkVerifyVariant",
3050 CMSRemarkVerifyVariant);
3051 }
3052 if (!silent) gclog_or_tty->print(" done] ");
3053 return true;
3054 }
3056 void CMSCollector::verify_after_remark_work_1() {
3057 ResourceMark rm;
3058 HandleMark hm;
3059 GenCollectedHeap* gch = GenCollectedHeap::heap();
3061 // Get a clear set of claim bits for the roots processing to work with.
3062 ClassLoaderDataGraph::clear_claimed_marks();
3064 // Mark from roots one level into CMS
3065 MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
3066 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3068 gch->gen_process_roots(_cmsGen->level(),
3069 true, // younger gens are roots
3070 true, // activate StrongRootsScope
3071 GenCollectedHeap::ScanningOption(roots_scanning_options()),
3072 should_unload_classes(),
3073                          &notOlder,
3074 NULL,
3075 NULL); // SSS: Provide correct closure
3077 // Now mark from the roots
3078 MarkFromRootsClosure markFromRootsClosure(this, _span,
3079 verification_mark_bm(), verification_mark_stack(),
3080 false /* don't yield */, true /* verifying */);
3081 assert(_restart_addr == NULL, "Expected pre-condition");
3082 verification_mark_bm()->iterate(&markFromRootsClosure);
3083 while (_restart_addr != NULL) {
3084     // Deal with stack overflow by restarting at the indicated
3085 // address.
3086 HeapWord* ra = _restart_addr;
3087 markFromRootsClosure.reset(ra);
3088 _restart_addr = NULL;
3089 verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
3090 }
3091 assert(verification_mark_stack()->isEmpty(), "Should have been drained");
3092 verify_work_stacks_empty();
3094 // Marking completed -- now verify that each bit marked in
3095 // verification_mark_bm() is also marked in markBitMap(); flag all
3096 // errors by printing corresponding objects.
3097 VerifyMarkedClosure vcl(markBitMap());
3098 verification_mark_bm()->iterate(&vcl);
3099 if (vcl.failed()) {
3100 gclog_or_tty->print("Verification failed");
3101 Universe::heap()->print_on(gclog_or_tty);
3102 fatal("CMS: failed marking verification after remark");
3103 }
3104 }
3106 class VerifyKlassOopsKlassClosure : public KlassClosure {
3107 class VerifyKlassOopsClosure : public OopClosure {
3108 CMSBitMap* _bitmap;
3109 public:
3110 VerifyKlassOopsClosure(CMSBitMap* bitmap) : _bitmap(bitmap) { }
3111 void do_oop(oop* p) { guarantee(*p == NULL || _bitmap->isMarked((HeapWord*) *p), "Should be marked"); }
3112 void do_oop(narrowOop* p) { ShouldNotReachHere(); }
3113 } _oop_closure;
3114 public:
3115 VerifyKlassOopsKlassClosure(CMSBitMap* bitmap) : _oop_closure(bitmap) {}
3116 void do_klass(Klass* k) {
3117 k->oops_do(&_oop_closure);
3118 }
3119 };
3121 void CMSCollector::verify_after_remark_work_2() {
3122 ResourceMark rm;
3123 HandleMark hm;
3124 GenCollectedHeap* gch = GenCollectedHeap::heap();
3126 // Get a clear set of claim bits for the roots processing to work with.
3127 ClassLoaderDataGraph::clear_claimed_marks();
3129 // Mark from roots one level into CMS
3130 MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
3131 markBitMap());
3132   CLDToOopClosure cld_closure(&notOlder, true);
3134 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3136 gch->gen_process_roots(_cmsGen->level(),
3137 true, // younger gens are roots
3138 true, // activate StrongRootsScope
3139 GenCollectedHeap::ScanningOption(roots_scanning_options()),
3140 should_unload_classes(),
3141                          &notOlder,
3142 NULL,
3143 &cld_closure);
3145 // Now mark from the roots
3146 MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
3147 verification_mark_bm(), markBitMap(), verification_mark_stack());
3148 assert(_restart_addr == NULL, "Expected pre-condition");
3149 verification_mark_bm()->iterate(&markFromRootsClosure);
3150 while (_restart_addr != NULL) {
3151     // Deal with stack overflow by restarting at the indicated
3152 // address.
3153 HeapWord* ra = _restart_addr;
3154 markFromRootsClosure.reset(ra);
3155 _restart_addr = NULL;
3156 verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
3157 }
3158 assert(verification_mark_stack()->isEmpty(), "Should have been drained");
3159 verify_work_stacks_empty();
3161 VerifyKlassOopsKlassClosure verify_klass_oops(verification_mark_bm());
3162 ClassLoaderDataGraph::classes_do(&verify_klass_oops);
3164 // Marking completed -- now verify that each bit marked in
3165 // verification_mark_bm() is also marked in markBitMap(); flag all
3166 // errors by printing corresponding objects.
3167 VerifyMarkedClosure vcl(markBitMap());
3168 verification_mark_bm()->iterate(&vcl);
3169 assert(!vcl.failed(), "Else verification above should not have succeeded");
3170 }
3172 void ConcurrentMarkSweepGeneration::save_marks() {
3173 // delegate to CMS space
3174 cmsSpace()->save_marks();
3175 for (uint i = 0; i < ParallelGCThreads; i++) {
3176 _par_gc_thread_states[i]->promo.startTrackingPromotions();
3177 }
3178 }
3180 bool ConcurrentMarkSweepGeneration::no_allocs_since_save_marks() {
3181 return cmsSpace()->no_allocs_since_save_marks();
3182 }
3184 #define CMS_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
3185 \
3186 void ConcurrentMarkSweepGeneration:: \
3187 oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
3188 cl->set_generation(this); \
3189 cmsSpace()->oop_since_save_marks_iterate##nv_suffix(cl); \
3190 cl->reset_generation(); \
3191 save_marks(); \
3192 }
3194 ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DEFN)
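// For illustration only (an editorial assumption, not necessarily one of the
// instantiations generated here): given a closure type ScanClosure whose
// non-virtual suffix is _nv, the macro above would expand to roughly
//
//   void ConcurrentMarkSweepGeneration::
//   oop_since_save_marks_iterate_nv(ScanClosure* cl) {
//     cl->set_generation(this);
//     cmsSpace()->oop_since_save_marks_iterate_nv(cl);
//     cl->reset_generation();
//     save_marks();
//   }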
3196 void
3197 ConcurrentMarkSweepGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
3198 cl->set_generation(this);
3199 younger_refs_in_space_iterate(_cmsSpace, cl);
3200 cl->reset_generation();
3201 }
3203 void
3204 ConcurrentMarkSweepGeneration::oop_iterate(ExtendedOopClosure* cl) {
3205 if (freelistLock()->owned_by_self()) {
3206 Generation::oop_iterate(cl);
3207 } else {
3208 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3209 Generation::oop_iterate(cl);
3210 }
3211 }
3213 void
3214 ConcurrentMarkSweepGeneration::object_iterate(ObjectClosure* cl) {
3215 if (freelistLock()->owned_by_self()) {
3216 Generation::object_iterate(cl);
3217 } else {
3218 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3219 Generation::object_iterate(cl);
3220 }
3221 }
3223 void
3224 ConcurrentMarkSweepGeneration::safe_object_iterate(ObjectClosure* cl) {
3225 if (freelistLock()->owned_by_self()) {
3226 Generation::safe_object_iterate(cl);
3227 } else {
3228 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3229 Generation::safe_object_iterate(cl);
3230 }
3231 }
3233 void
3234 ConcurrentMarkSweepGeneration::post_compact() {
3235 }
3237 void
3238 ConcurrentMarkSweepGeneration::prepare_for_verify() {
3239 // Fix the linear allocation blocks to look like free blocks.
3241 // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
3242 // are not called when the heap is verified during universe initialization and
3243 // at vm shutdown.
3244 if (freelistLock()->owned_by_self()) {
3245 cmsSpace()->prepare_for_verify();
3246 } else {
3247 MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
3248 cmsSpace()->prepare_for_verify();
3249 }
3250 }
3252 void
3253 ConcurrentMarkSweepGeneration::verify() {
3254 // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
3255 // are not called when the heap is verified during universe initialization and
3256 // at vm shutdown.
3257 if (freelistLock()->owned_by_self()) {
3258 cmsSpace()->verify();
3259 } else {
3260 MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
3261 cmsSpace()->verify();
3262 }
3263 }
3265 void CMSCollector::verify() {
3266 _cmsGen->verify();
3267 }
3269 #ifndef PRODUCT
3270 bool CMSCollector::overflow_list_is_empty() const {
3271 assert(_num_par_pushes >= 0, "Inconsistency");
3272 if (_overflow_list == NULL) {
3273 assert(_num_par_pushes == 0, "Inconsistency");
3274 }
3275 return _overflow_list == NULL;
3276 }
3278 // The methods verify_work_stacks_empty() and verify_overflow_empty()
3279 // merely consolidate assertion checks that appear to occur together frequently.
3280 void CMSCollector::verify_work_stacks_empty() const {
3281 assert(_markStack.isEmpty(), "Marking stack should be empty");
3282 assert(overflow_list_is_empty(), "Overflow list should be empty");
3283 }
3285 void CMSCollector::verify_overflow_empty() const {
3286 assert(overflow_list_is_empty(), "Overflow list should be empty");
3287 assert(no_preserved_marks(), "No preserved marks");
3288 }
3289 #endif // PRODUCT
3291 // Decide if we want to enable class unloading as part of the
3292 // ensuing concurrent GC cycle. We will collect and
3293 // unload classes if it's the case that:
3294 // (1) an explicit gc request has been made and the flag
3295 // ExplicitGCInvokesConcurrentAndUnloadsClasses is set, OR
3296 // (2) (a) class unloading is enabled at the command line, and
3297 // (b) old gen is getting really full
3298 // NOTE: Provided there is no change in the state of the heap between
3299 // calls to this method, it should have idempotent results. Moreover,
3300 // its results should be monotonically increasing (i.e. going from 0 to 1,
3301 // but not 1 to 0) between successive calls between which the heap was
3302 // not collected. For the implementation below, this relies on
3303 // the property that concurrent_cycles_since_last_unload()
3304 // will not decrease unless a collection cycle happened, and that
3305 // _cmsGen->is_too_full() is
3306 // itself also monotonic in that sense. See check_monotonicity()
3307 // below.
3308 void CMSCollector::update_should_unload_classes() {
3309 _should_unload_classes = false;
3310 // Condition 1 above
3311 if (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) {
3312 _should_unload_classes = true;
3313 } else if (CMSClassUnloadingEnabled) { // Condition 2.a above
3314     // Disjuncts of condition 2.b above
3315 _should_unload_classes = (concurrent_cycles_since_last_unload() >=
3316 CMSClassUnloadingMaxInterval)
3317 || _cmsGen->is_too_full();
3318 }
3319 }
3321 bool ConcurrentMarkSweepGeneration::is_too_full() const {
3322 bool res = should_concurrent_collect();
3323 res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0);
3324 return res;
3325 }
3327 void CMSCollector::setup_cms_unloading_and_verification_state() {
3328 const bool should_verify = VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
3329 || VerifyBeforeExit;
3330 const int rso = GenCollectedHeap::SO_AllCodeCache;
3332   // We set the proper root scanning options for this CMS cycle here.
3333 if (should_unload_classes()) { // Should unload classes this cycle
3334 remove_root_scanning_option(rso); // Shrink the root set appropriately
3335 set_verifying(should_verify); // Set verification state for this cycle
3336 return; // Nothing else needs to be done at this time
3337 }
3339 // Not unloading classes this cycle
3340   assert(!should_unload_classes(), "Inconsistency!");
3342 if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
3343 // Include symbols, strings and code cache elements to prevent their resurrection.
3344 add_root_scanning_option(rso);
3345 set_verifying(true);
3346 } else if (verifying() && !should_verify) {
3347 // We were verifying, but some verification flags got disabled.
3348 set_verifying(false);
3349 // Exclude symbols, strings and code cache elements from root scanning to
3350     // reduce initial mark (IM) and remark (RM) pauses.
3351 remove_root_scanning_option(rso);
3352 }
3353 }
3356 #ifndef PRODUCT
3357 HeapWord* CMSCollector::block_start(const void* p) const {
3358 const HeapWord* addr = (HeapWord*)p;
3359 if (_span.contains(p)) {
3360 if (_cmsGen->cmsSpace()->is_in_reserved(addr)) {
3361 return _cmsGen->cmsSpace()->block_start(p);
3362 }
3363 }
3364 return NULL;
3365 }
3366 #endif
3368 HeapWord*
3369 ConcurrentMarkSweepGeneration::expand_and_allocate(size_t word_size,
3370 bool tlab,
3371 bool parallel) {
3372 CMSSynchronousYieldRequest yr;
3373 assert(!tlab, "Can't deal with TLAB allocation");
3374 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3375 expand(word_size*HeapWordSize, MinHeapDeltaBytes,
3376 CMSExpansionCause::_satisfy_allocation);
3377 if (GCExpandToAllocateDelayMillis > 0) {
3378 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3379 }
3380 return have_lock_and_allocate(word_size, tlab);
3381 }
3383 // YSR: All of this generation expansion/shrinking stuff is an exact copy of
3384 // OneContigSpaceCardGeneration, which makes me wonder if we should move this
3385 // to CardGeneration and share it...
3386 bool ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes) {
3387 return CardGeneration::expand(bytes, expand_bytes);
3388 }
3390 void ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes,
3391 CMSExpansionCause::Cause cause)
3392 {
3394 bool success = expand(bytes, expand_bytes);
3396 // remember why we expanded; this information is used
3397 // by shouldConcurrentCollect() when making decisions on whether to start
3398 // a new CMS cycle.
3399 if (success) {
3400 set_expansion_cause(cause);
3401 if (PrintGCDetails && Verbose) {
3402 gclog_or_tty->print_cr("Expanded CMS gen for %s",
3403 CMSExpansionCause::to_string(cause));
3404 }
3405 }
3406 }
3408 HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) {
3409 HeapWord* res = NULL;
3410 MutexLocker x(ParGCRareEvent_lock);
3411 while (true) {
3412 // Expansion by some other thread might make alloc OK now:
3413 res = ps->lab.alloc(word_sz);
3414 if (res != NULL) return res;
3415 // If there's not enough expansion space available, give up.
3416 if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) {
3417 return NULL;
3418 }
3419 // Otherwise, we try expansion.
3420 expand(word_sz*HeapWordSize, MinHeapDeltaBytes,
3421 CMSExpansionCause::_allocate_par_lab);
3422 // Now go around the loop and try alloc again;
3423 // A competing par_promote might beat us to the expansion space,
3424     // so we may go around the loop again if promotion fails again.
3425 if (GCExpandToAllocateDelayMillis > 0) {
3426 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3427 }
3428 }
3429 }
3432 bool ConcurrentMarkSweepGeneration::expand_and_ensure_spooling_space(
3433 PromotionInfo* promo) {
3434 MutexLocker x(ParGCRareEvent_lock);
3435 size_t refill_size_bytes = promo->refillSize() * HeapWordSize;
3436 while (true) {
3437 // Expansion by some other thread might make alloc OK now:
3438 if (promo->ensure_spooling_space()) {
3439 assert(promo->has_spooling_space(),
3440 "Post-condition of successful ensure_spooling_space()");
3441 return true;
3442 }
3443 // If there's not enough expansion space available, give up.
3444 if (_virtual_space.uncommitted_size() < refill_size_bytes) {
3445 return false;
3446 }
3447 // Otherwise, we try expansion.
3448 expand(refill_size_bytes, MinHeapDeltaBytes,
3449 CMSExpansionCause::_allocate_par_spooling_space);
3450 // Now go around the loop and try alloc again;
3451 // A competing allocation might beat us to the expansion space,
3452 // so we may go around the loop again if allocation fails again.
3453 if (GCExpandToAllocateDelayMillis > 0) {
3454 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3455 }
3456 }
3457 }
3460 void ConcurrentMarkSweepGeneration::shrink_by(size_t bytes) {
3461 assert_locked_or_safepoint(ExpandHeap_lock);
3462 // Shrink committed space
3463 _virtual_space.shrink_by(bytes);
3464 // Shrink space; this also shrinks the space's BOT
3465 _cmsSpace->set_end((HeapWord*) _virtual_space.high());
3466 size_t new_word_size = heap_word_size(_cmsSpace->capacity());
3467 // Shrink the shared block offset array
3468 _bts->resize(new_word_size);
3469 MemRegion mr(_cmsSpace->bottom(), new_word_size);
3470 // Shrink the card table
3471 Universe::heap()->barrier_set()->resize_covered_region(mr);
3473 if (Verbose && PrintGC) {
3474 size_t new_mem_size = _virtual_space.committed_size();
3475 size_t old_mem_size = new_mem_size + bytes;
3476 gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
3477 name(), old_mem_size/K, new_mem_size/K);
3478 }
3479 }
3481 void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
3482 assert_locked_or_safepoint(Heap_lock);
3483 size_t size = ReservedSpace::page_align_size_down(bytes);
3484 // Only shrink if a compaction was done so that all the free space
3485 // in the generation is in a contiguous block at the end.
3486 if (size > 0 && did_compact()) {
3487 shrink_by(size);
3488 }
3489 }
3491 bool ConcurrentMarkSweepGeneration::grow_by(size_t bytes) {
3492 assert_locked_or_safepoint(Heap_lock);
3493 bool result = _virtual_space.expand_by(bytes);
3494 if (result) {
3495 size_t new_word_size =
3496 heap_word_size(_virtual_space.committed_size());
3497 MemRegion mr(_cmsSpace->bottom(), new_word_size);
3498 _bts->resize(new_word_size); // resize the block offset shared array
3499 Universe::heap()->barrier_set()->resize_covered_region(mr);
3500 // Hmmmm... why doesn't CFLS::set_end verify locking?
3501 // This is quite ugly; FIX ME XXX
3502 _cmsSpace->assert_locked(freelistLock());
3503 _cmsSpace->set_end((HeapWord*)_virtual_space.high());
3505 // update the space and generation capacity counters
3506 if (UsePerfData) {
3507 _space_counters->update_capacity();
3508 _gen_counters->update_all();
3509 }
3511 if (Verbose && PrintGC) {
3512 size_t new_mem_size = _virtual_space.committed_size();
3513 size_t old_mem_size = new_mem_size - bytes;
3514 gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
3515 name(), old_mem_size/K, bytes/K, new_mem_size/K);
3516 }
3517 }
3518 return result;
3519 }
3521 bool ConcurrentMarkSweepGeneration::grow_to_reserved() {
3522 assert_locked_or_safepoint(Heap_lock);
3523 bool success = true;
3524 const size_t remaining_bytes = _virtual_space.uncommitted_size();
3525 if (remaining_bytes > 0) {
3526 success = grow_by(remaining_bytes);
3527 DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
3528 }
3529 return success;
3530 }
3532 void ConcurrentMarkSweepGeneration::shrink_free_list_by(size_t bytes) {
3533 assert_locked_or_safepoint(Heap_lock);
3534 assert_lock_strong(freelistLock());
3535 if (PrintGCDetails && Verbose) {
3536 warning("Shrinking of CMS not yet implemented");
3537 }
3538 return;
3539 }
3542 // Simple ctor/dtor wrapper for accounting & timer chores around concurrent
3543 // phases.
3544 class CMSPhaseAccounting: public StackObj {
3545 public:
3546 CMSPhaseAccounting(CMSCollector *collector,
3547 const char *phase,
3548 const GCId gc_id,
3549 bool print_cr = true);
3550 ~CMSPhaseAccounting();
3552 private:
3553 CMSCollector *_collector;
3554 const char *_phase;
3555 elapsedTimer _wallclock;
3556 bool _print_cr;
3557 const GCId _gc_id;
3559 public:
3560 // Not MT-safe; so do not pass around these StackObj's
3561 // where they may be accessed by other threads.
3562 jlong wallclock_millis() {
3563 assert(_wallclock.is_active(), "Wall clock should not stop");
3564 _wallclock.stop(); // to record time
3565 jlong ret = _wallclock.milliseconds();
3566 _wallclock.start(); // restart
3567 return ret;
3568 }
3569 };
3571 CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
3572 const char *phase,
3573 const GCId gc_id,
3574 bool print_cr) :
3575 _collector(collector), _phase(phase), _print_cr(print_cr), _gc_id(gc_id) {
3577 if (PrintCMSStatistics != 0) {
3578 _collector->resetYields();
3579 }
3580 if (PrintGCDetails) {
3581 gclog_or_tty->gclog_stamp(_gc_id);
3582 gclog_or_tty->print_cr("[%s-concurrent-%s-start]",
3583 _collector->cmsGen()->short_name(), _phase);
3584 }
3585 _collector->resetTimer();
3586 _wallclock.start();
3587 _collector->startTimer();
3588 }
3590 CMSPhaseAccounting::~CMSPhaseAccounting() {
3591 assert(_wallclock.is_active(), "Wall clock should not have stopped");
3592 _collector->stopTimer();
3593 _wallclock.stop();
3594 if (PrintGCDetails) {
3595 gclog_or_tty->gclog_stamp(_gc_id);
3596 gclog_or_tty->print("[%s-concurrent-%s: %3.3f/%3.3f secs]",
3597 _collector->cmsGen()->short_name(),
3598 _phase, _collector->timerValue(), _wallclock.seconds());
3599 if (_print_cr) {
3600 gclog_or_tty->cr();
3601 }
3602 if (PrintCMSStatistics != 0) {
3603 gclog_or_tty->print_cr(" (CMS-concurrent-%s yielded %d times)", _phase,
3604 _collector->yields());
3605 }
3606 }
3607 }
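// A typical use appears later in markFromRoots():
//   CMSPhaseAccounting pa(this, "mark", _gc_tracer_cm->gc_id(), !PrintGCDetails);
// Constructing one of these on the stack brackets a concurrent phase: the
// constructor logs the "[...-concurrent-mark-start]" line and the destructor
// logs the CPU and wall clock times for the phase when the scope exits.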
3609 // CMS work
3611 // The common parts of CMSParInitialMarkTask and CMSParRemarkTask.
3612 class CMSParMarkTask : public AbstractGangTask {
3613 protected:
3614 CMSCollector* _collector;
3615 int _n_workers;
3616 CMSParMarkTask(const char* name, CMSCollector* collector, int n_workers) :
3617 AbstractGangTask(name),
3618 _collector(collector),
3619 _n_workers(n_workers) {}
3620 // Work method in support of parallel rescan ... of young gen spaces
3621 void do_young_space_rescan(uint worker_id, OopsInGenClosure* cl,
3622 ContiguousSpace* space,
3623 HeapWord** chunk_array, size_t chunk_top);
3624 void work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl);
3625 };
3627 // Parallel initial mark task
3628 class CMSParInitialMarkTask: public CMSParMarkTask {
3629 public:
3630 CMSParInitialMarkTask(CMSCollector* collector, int n_workers) :
3631 CMSParMarkTask("Scan roots and young gen for initial mark in parallel",
3632 collector, n_workers) {}
3633 void work(uint worker_id);
3634 };
3636 // Checkpoint the roots into this generation from outside
3637 // this generation. [Note this initial checkpoint need only
3638 // be approximate -- we'll do a catch-up phase subsequently.]
3639 void CMSCollector::checkpointRootsInitial(bool asynch) {
3640 assert(_collectorState == InitialMarking, "Wrong collector state");
3641 check_correct_thread_executing();
3642   TraceCMSMemoryManagerStats tms(_collectorState, GenCollectedHeap::heap()->gc_cause());
3644 save_heap_summary();
3645 report_heap_summary(GCWhen::BeforeGC);
3647 ReferenceProcessor* rp = ref_processor();
3648 SpecializationStats::clear();
3649 assert(_restart_addr == NULL, "Control point invariant");
3650 if (asynch) {
3651 // acquire locks for subsequent manipulations
3652 MutexLockerEx x(bitMapLock(),
3653 Mutex::_no_safepoint_check_flag);
3654 checkpointRootsInitialWork(asynch);
3655 // enable ("weak") refs discovery
3656 rp->enable_discovery(true /*verify_disabled*/, true /*check_no_refs*/);
3657 _collectorState = Marking;
3658 } else {
3659     // (Weak) Refs discovery: this is controlled from GenCollectedHeap::do_collection
3660 // which recognizes if we are a CMS generation, and doesn't try to turn on
3661 // discovery; verify that they aren't meddling.
3662 assert(!rp->discovery_is_atomic(),
3663 "incorrect setting of discovery predicate");
3664     assert(!rp->discovery_enabled(), "GenCollectedHeap shouldn't control "
3665 "ref discovery for this generation kind");
3666 // already have locks
3667 checkpointRootsInitialWork(asynch);
3668 // now enable ("weak") refs discovery
3669     rp->enable_discovery(true /*verify_disabled*/, false /*check_no_refs*/);
3670 _collectorState = Marking;
3671 }
3672 SpecializationStats::print();
3673 }
3675 void CMSCollector::checkpointRootsInitialWork(bool asynch) {
3676 assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
3677 assert(_collectorState == InitialMarking, "just checking");
3679 // If there has not been a GC[n-1] since last GC[n] cycle completed,
3680 // precede our marking with a collection of all
3681 // younger generations to keep floating garbage to a minimum.
3682 // XXX: we won't do this for now -- it's an optimization to be done later.
3684 // already have locks
3685 assert_lock_strong(bitMapLock());
3686 assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");
3688 // Setup the verification and class unloading state for this
3689 // CMS collection cycle.
3690 setup_cms_unloading_and_verification_state();
3692 NOT_PRODUCT(GCTraceTime t("\ncheckpointRootsInitialWork",
3693 PrintGCDetails && Verbose, true, _gc_timer_cm, _gc_tracer_cm->gc_id());)
3694 if (UseAdaptiveSizePolicy) {
3695 size_policy()->checkpoint_roots_initial_begin();
3696 }
3698 // Reset all the PLAB chunk arrays if necessary.
3699 if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
3700 reset_survivor_plab_arrays();
3701 }
3703 ResourceMark rm;
3704 HandleMark hm;
3706 MarkRefsIntoClosure notOlder(_span, &_markBitMap);
3707 GenCollectedHeap* gch = GenCollectedHeap::heap();
3709 verify_work_stacks_empty();
3710 verify_overflow_empty();
3712 gch->ensure_parsability(false); // fill TLABs, but no need to retire them
3713 // Update the saved marks which may affect the root scans.
3714 gch->save_marks();
3716 // weak reference processing has not started yet.
3717 ref_processor()->set_enqueuing_is_done(false);
3719 // Need to remember all newly created CLDs,
3720 // so that we can guarantee that the remark finds them.
3721 ClassLoaderDataGraph::remember_new_clds(true);
3723 // Whenever a CLD is found, it will be claimed before proceeding to mark
3724 // the klasses. The claimed marks need to be cleared before marking starts.
3725 ClassLoaderDataGraph::clear_claimed_marks();
3727 if (CMSPrintEdenSurvivorChunks) {
3728 print_eden_and_survivor_chunk_arrays();
3729 }
3731 {
3732 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
3733 if (CMSParallelInitialMarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
3734 // The parallel version.
3735 FlexibleWorkGang* workers = gch->workers();
3736 assert(workers != NULL, "Need parallel worker threads.");
3737 int n_workers = workers->active_workers();
3738 CMSParInitialMarkTask tsk(this, n_workers);
3739 gch->set_par_threads(n_workers);
3740 initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
3741 if (n_workers > 1) {
3742 GenCollectedHeap::StrongRootsScope srs(gch);
3743 workers->run_task(&tsk);
3744 } else {
3745 GenCollectedHeap::StrongRootsScope srs(gch);
3746 tsk.work(0);
3747 }
3748 gch->set_par_threads(0);
3749 } else {
3750 // The serial version.
3751       CLDToOopClosure cld_closure(&notOlder, true);
3752 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3753 gch->gen_process_roots(_cmsGen->level(),
3754 true, // younger gens are roots
3755 true, // activate StrongRootsScope
3756 GenCollectedHeap::ScanningOption(roots_scanning_options()),
3757 should_unload_classes(),
3758                              &notOlder,
3759 NULL,
3760 &cld_closure);
3761 }
3762 }
3764   // The mod-union table should be clear here; it will be dirtied in the
3765   // prologue of the CMS generation on each younger generation collection.
3767 assert(_modUnionTable.isAllClear(),
3768 "Was cleared in most recent final checkpoint phase"
3769 " or no bits are set in the gc_prologue before the start of the next "
3770 "subsequent marking phase.");
3772 assert(_ct->klass_rem_set()->mod_union_is_clear(), "Must be");
3774 // Save the end of the used_region of the constituent generations
3775 // to be used to limit the extent of sweep in each generation.
3776 save_sweep_limits();
3777 if (UseAdaptiveSizePolicy) {
3778 size_policy()->checkpoint_roots_initial_end(gch->gc_cause());
3779 }
3780 verify_overflow_empty();
3781 }
3783 bool CMSCollector::markFromRoots(bool asynch) {
3784 // we might be tempted to assert that:
3785 // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
3786 // "inconsistent argument?");
3787   // However, that wouldn't be right, because it's possible that
3788   // a safepoint is indeed in progress: a younger generation
3789   // stop-the-world GC can happen even as we mark in this generation.
3790 assert(_collectorState == Marking, "inconsistent state?");
3791 check_correct_thread_executing();
3792 verify_overflow_empty();
3794 bool res;
3795 if (asynch) {
3797 // Start the timers for adaptive size policy for the concurrent phases
3798 // Do it here so that the foreground MS can use the concurrent
3799     // timer since a foreground MS might have the sweep done concurrently
3800 // or STW.
3801 if (UseAdaptiveSizePolicy) {
3802 size_policy()->concurrent_marking_begin();
3803 }
3805 // Weak ref discovery note: We may be discovering weak
3806     // refs in this generation concurrently (but interleaved) with
3807 // weak ref discovery by a younger generation collector.
3809 CMSTokenSyncWithLocks ts(true, bitMapLock());
3810 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
3811 CMSPhaseAccounting pa(this, "mark", _gc_tracer_cm->gc_id(), !PrintGCDetails);
3812 res = markFromRootsWork(asynch);
3813 if (res) {
3814 _collectorState = Precleaning;
3815 } else { // We failed and a foreground collection wants to take over
3816 assert(_foregroundGCIsActive, "internal state inconsistency");
3817 assert(_restart_addr == NULL, "foreground will restart from scratch");
3818 if (PrintGCDetails) {
3819 gclog_or_tty->print_cr("bailing out to foreground collection");
3820 }
3821 }
3822 if (UseAdaptiveSizePolicy) {
3823 size_policy()->concurrent_marking_end();
3824 }
3825 } else {
3826 assert(SafepointSynchronize::is_at_safepoint(),
3827 "inconsistent with asynch == false");
3828 if (UseAdaptiveSizePolicy) {
3829 size_policy()->ms_collection_marking_begin();
3830 }
3831 // already have locks
3832 res = markFromRootsWork(asynch);
3833 _collectorState = FinalMarking;
3834 if (UseAdaptiveSizePolicy) {
3835 GenCollectedHeap* gch = GenCollectedHeap::heap();
3836 size_policy()->ms_collection_marking_end(gch->gc_cause());
3837 }
3838 }
3839 verify_overflow_empty();
3840 return res;
3841 }
3843 bool CMSCollector::markFromRootsWork(bool asynch) {
3844 // iterate over marked bits in bit map, doing a full scan and mark
3845 // from these roots using the following algorithm:
3846 // . if oop is to the right of the current scan pointer,
3847 // mark corresponding bit (we'll process it later)
3848 // . else (oop is to left of current scan pointer)
3849 // push oop on marking stack
3850 // . drain the marking stack
3852 // Note that when we do a marking step we need to hold the
3853 // bit map lock -- recall that direct allocation (by mutators)
3854 // and promotion (by younger generation collectors) is also
3855 // marking the bit map. [the so-called allocate live policy.]
3856 // Because the implementation of bit map marking is not
3857 // robust wrt simultaneous marking of bits in the same word,
3858 // we need to make sure that there is no such interference
3859   // between such concurrent updates.
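// Illustrative only: a minimal, self-contained model (an editorial sketch,
// not HotSpot code) of the single-threaded finger algorithm described above.
// Objects are plain indices, the mark bit map is a std::vector<bool>, and
// references discovered to the left of the scan finger go on an explicit
// marking stack that is drained before the finger advances.
#if 0  // compiles standalone; excluded from this file's build
#include <cassert>
#include <cstddef>
#include <vector>

typedef size_t ObjIndex;

static void mark_from_roots_model(const std::vector<std::vector<ObjIndex> >& refs,
                                  const std::vector<ObjIndex>& roots,
                                  std::vector<bool>& marked) {
  for (size_t i = 0; i < roots.size(); i++) {
    marked[roots[i]] = true;               // roots were greyed at initial mark
  }
  std::vector<ObjIndex> stack;
  for (ObjIndex finger = 0; finger < marked.size(); finger++) {
    if (!marked[finger]) continue;         // only marked objects are scanned
    const std::vector<ObjIndex>& out = refs[finger];
    for (size_t j = 0; j < out.size(); j++) {
      ObjIndex t = out[j];
      if (marked[t]) continue;
      marked[t] = true;
      if (t < finger) stack.push_back(t);  // left of finger: process via stack
      // else: right of finger -- the sweep will reach its mark bit later
    }
    while (!stack.empty()) {               // drain the marking stack
      ObjIndex o = stack.back(); stack.pop_back();
      const std::vector<ObjIndex>& oo = refs[o];
      for (size_t j = 0; j < oo.size(); j++) {
        ObjIndex t = oo[j];
        if (!marked[t]) {
          marked[t] = true;
          if (t < finger) stack.push_back(t);
        }
      }
    }
  }
}

int main() {
  // Five objects: 0 -> 2, 2 -> 1 (a reference behind the finger), 3 -> 4.
  std::vector<std::vector<ObjIndex> > refs(5);
  refs[0].push_back(2);
  refs[2].push_back(1);
  refs[3].push_back(4);
  std::vector<ObjIndex> roots(1, 0);       // only object 0 is a root
  std::vector<bool> marked(5, false);
  mark_from_roots_model(refs, roots, marked);
  assert(marked[0] && marked[1] && marked[2] && !marked[3] && !marked[4]);
  return 0;
}
#endif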
3861 // already have locks
3862 assert_lock_strong(bitMapLock());
3864 verify_work_stacks_empty();
3865 verify_overflow_empty();
3866 bool result = false;
3867 if (CMSConcurrentMTEnabled && ConcGCThreads > 0) {
3868 result = do_marking_mt(asynch);
3869 } else {
3870 result = do_marking_st(asynch);
3871 }
3872 return result;
3873 }
3875 // Forward decl
3876 class CMSConcMarkingTask;
3878 class CMSConcMarkingTerminator: public ParallelTaskTerminator {
3879 CMSCollector* _collector;
3880 CMSConcMarkingTask* _task;
3881 public:
3882 virtual void yield();
3884 // "n_threads" is the number of threads to be terminated.
3885 // "queue_set" is a set of work queues of other threads.
3886 // "collector" is the CMS collector associated with this task terminator.
3887 // "yield" indicates whether we need the gang as a whole to yield.
3888 CMSConcMarkingTerminator(int n_threads, TaskQueueSetSuper* queue_set, CMSCollector* collector) :
3889 ParallelTaskTerminator(n_threads, queue_set),
3890 _collector(collector) { }
3892 void set_task(CMSConcMarkingTask* task) {
3893 _task = task;
3894 }
3895 };
3897 class CMSConcMarkingTerminatorTerminator: public TerminatorTerminator {
3898 CMSConcMarkingTask* _task;
3899 public:
3900 bool should_exit_termination();
3901 void set_task(CMSConcMarkingTask* task) {
3902 _task = task;
3903 }
3904 };
3906 // MT Concurrent Marking Task
3907 class CMSConcMarkingTask: public YieldingFlexibleGangTask {
3908 CMSCollector* _collector;
3909 int _n_workers; // requested/desired # workers
3910 bool _asynch;
3911 bool _result;
3912 CompactibleFreeListSpace* _cms_space;
3913 char _pad_front[64]; // padding to ...
3914 HeapWord* _global_finger; // ... avoid sharing cache line
3915 char _pad_back[64];
3916 HeapWord* _restart_addr;
3918 // Exposed here for yielding support
3919 Mutex* const _bit_map_lock;
3921 // The per thread work queues, available here for stealing
3922 OopTaskQueueSet* _task_queues;
3924 // Termination (and yielding) support
3925 CMSConcMarkingTerminator _term;
3926 CMSConcMarkingTerminatorTerminator _term_term;
3928 public:
3929 CMSConcMarkingTask(CMSCollector* collector,
3930 CompactibleFreeListSpace* cms_space,
3931 bool asynch,
3932 YieldingFlexibleWorkGang* workers,
3933 OopTaskQueueSet* task_queues):
3934 YieldingFlexibleGangTask("Concurrent marking done multi-threaded"),
3935 _collector(collector),
3936 _cms_space(cms_space),
3937 _asynch(asynch), _n_workers(0), _result(true),
3938 _task_queues(task_queues),
3939 _term(_n_workers, task_queues, _collector),
3940 _bit_map_lock(collector->bitMapLock())
3941 {
3942 _requested_size = _n_workers;
3943 _term.set_task(this);
3944 _term_term.set_task(this);
3945 _restart_addr = _global_finger = _cms_space->bottom();
3946 }
3949 OopTaskQueueSet* task_queues() { return _task_queues; }
3951 OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
3953 HeapWord** global_finger_addr() { return &_global_finger; }
3955 CMSConcMarkingTerminator* terminator() { return &_term; }
3957 virtual void set_for_termination(int active_workers) {
3958 terminator()->reset_for_reuse(active_workers);
3959 }
3961 void work(uint worker_id);
3962 bool should_yield() {
3963 return ConcurrentMarkSweepThread::should_yield()
3964 && !_collector->foregroundGCIsActive()
3965 && _asynch;
3966 }
3968 virtual void coordinator_yield(); // stuff done by coordinator
3969 bool result() { return _result; }
3971 void reset(HeapWord* ra) {
3972 assert(_global_finger >= _cms_space->end(), "Postcondition of ::work(i)");
3973 _restart_addr = _global_finger = ra;
3974 _term.reset_for_reuse();
3975 }
3977 static bool get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3978 OopTaskQueue* work_q);
3980 private:
3981 void do_scan_and_mark(int i, CompactibleFreeListSpace* sp);
3982 void do_work_steal(int i);
3983 void bump_global_finger(HeapWord* f);
3984 };
3986 bool CMSConcMarkingTerminatorTerminator::should_exit_termination() {
3987 assert(_task != NULL, "Error");
3988 return _task->yielding();
3989 // Note that we do not need the disjunct || _task->should_yield() above
3990 // because we want terminating threads to yield only if the task
3991 // is already in the midst of yielding, which happens only after at least one
3992 // thread has yielded.
3993 }
3995 void CMSConcMarkingTerminator::yield() {
3996 if (_task->should_yield()) {
3997 _task->yield();
3998 } else {
3999 ParallelTaskTerminator::yield();
4000 }
4001 }
4003 ////////////////////////////////////////////////////////////////
4004 // Concurrent Marking Algorithm Sketch
4005 ////////////////////////////////////////////////////////////////
4006 // Until all tasks exhausted (both spaces):
4007 // -- claim next available chunk
4008 // -- bump global finger via CAS
4009 // -- find first object that starts in this chunk
4010 // and start scanning bitmap from that position
4011 // -- scan marked objects for oops
4012 // -- CAS-mark target, and if successful:
4013 // . if target oop is above global finger (volatile read)
4014 // nothing to do
4015 // . if target oop is in chunk and above local finger
4016 // then nothing to do
4017 // . else push on work-queue
4018 // -- Deal with possible overflow issues:
4019 // . local work-queue overflow causes stuff to be pushed on
4020 // global (common) overflow queue
4021 // . always first empty local work queue
4022 // . then get a batch of oops from global work queue if any
4023 // . then do work stealing
4024 // -- When all tasks claimed (both spaces)
4025 // and local work queue empty,
4026 // then in a loop do:
4027 // . check global overflow stack; steal a batch of oops and trace
4028 //      . try to steal from other threads if GOS is empty
4029 // . if neither is available, offer termination
4030 // -- Terminate and return result
4031 //
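// Illustrative only: a self-contained C++11 model (an editorial sketch, not
// HotSpot code) of the chunk-claiming and global-finger protocol above.
// std::atomic stands in for Atomic::cmpxchg_ptr and for the sequential
// sub-task claim counter; all names here are hypothetical.
#if 0  // compiles standalone; excluded from this file's build
#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

static const size_t N = 1024;              // toy "space" of N words
static const size_t CHUNK = 64;            // fixed marking task size

static std::atomic<size_t> next_task(0);   // claim counter for chunks
static std::atomic<size_t> global_finger(0);

// Monotonically advance the global finger to f via a CAS loop,
// mirroring CMSConcMarkingTask::bump_global_finger() below.
static void bump_global_finger_model(size_t f) {
  size_t read = global_finger.load();
  while (f > read &&
         !global_finger.compare_exchange_weak(read, f)) {
    // compare_exchange_weak refreshed 'read'; retry while still behind f.
  }
}

static void worker() {
  for (;;) {
    size_t start = next_task.fetch_add(1) * CHUNK;  // claim the next chunk
    if (start >= N) return;                         // all tasks claimed
    bump_global_finger_model(start + CHUNK);        // bump before scanning
    // ... scan the bitmap over [start, start + CHUNK) here ...
  }
}

int main() {
  std::vector<std::thread> workers;
  for (int i = 0; i < 4; i++) workers.push_back(std::thread(worker));
  for (size_t i = 0; i < workers.size(); i++) workers[i].join();
  std::printf("final finger: %zu (expected %zu)\n", global_finger.load(), N);
  return global_finger.load() == N ? 0 : 1;
}
#endif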
4032 void CMSConcMarkingTask::work(uint worker_id) {
4033 elapsedTimer _timer;
4034 ResourceMark rm;
4035 HandleMark hm;
4037 DEBUG_ONLY(_collector->verify_overflow_empty();)
4039 // Before we begin work, our work queue should be empty
4040 assert(work_queue(worker_id)->size() == 0, "Expected to be empty");
4041 // Scan the bitmap covering _cms_space, tracing through grey objects.
4042 _timer.start();
4043 do_scan_and_mark(worker_id, _cms_space);
4044 _timer.stop();
4045 if (PrintCMSStatistics != 0) {
4046 gclog_or_tty->print_cr("Finished cms space scanning in %dth thread: %3.3f sec",
4047 worker_id, _timer.seconds());
4048 // XXX: need xxx/xxx type of notation, two timers
4049 }
4051 // ... do work stealing
4052 _timer.reset();
4053 _timer.start();
4054 do_work_steal(worker_id);
4055 _timer.stop();
4056 if (PrintCMSStatistics != 0) {
4057 gclog_or_tty->print_cr("Finished work stealing in %dth thread: %3.3f sec",
4058 worker_id, _timer.seconds());
4059 // XXX: need xxx/xxx type of notation, two timers
4060 }
4061 assert(_collector->_markStack.isEmpty(), "Should have been emptied");
4062 assert(work_queue(worker_id)->size() == 0, "Should have been emptied");
4063 // Note that under the current task protocol, the
4064   // following assertion is true even if the spaces
4065   // expanded since the completion of the concurrent
4066 // marking. XXX This will likely change under a strict
4067 // ABORT semantics.
4068 // After perm removal the comparison was changed to
4069 // greater than or equal to from strictly greater than.
4070 // Before perm removal the highest address sweep would
4071 // have been at the end of perm gen but now is at the
4072 // end of the tenured gen.
4073 assert(_global_finger >= _cms_space->end(),
4074 "All tasks have been completed");
4075 DEBUG_ONLY(_collector->verify_overflow_empty();)
4076 }
4078 void CMSConcMarkingTask::bump_global_finger(HeapWord* f) {
4079 HeapWord* read = _global_finger;
4080 HeapWord* cur = read;
4081 while (f > read) {
4082 cur = read;
4083 read = (HeapWord*) Atomic::cmpxchg_ptr(f, &_global_finger, cur);
4084 if (cur == read) {
4085 // our cas succeeded
4086 assert(_global_finger >= f, "protocol consistency");
4087 break;
4088 }
4089 }
4090 }
4092 // This is really inefficient, and should be redone by
4093 // using (not yet available) block-read and -write interfaces to the
4094 // stack and the work_queue. XXX FIX ME !!!
4095 bool CMSConcMarkingTask::get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
4096 OopTaskQueue* work_q) {
4097 // Fast lock-free check
4098 if (ovflw_stk->length() == 0) {
4099 return false;
4100 }
4101 assert(work_q->size() == 0, "Shouldn't steal");
4102 MutexLockerEx ml(ovflw_stk->par_lock(),
4103 Mutex::_no_safepoint_check_flag);
4104   // Grab up to 1/4 of the remaining capacity of the work queue
4105 size_t num = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
4106 (size_t)ParGCDesiredObjsFromOverflowList);
4107 num = MIN2(num, ovflw_stk->length());
4108 for (int i = (int) num; i > 0; i--) {
4109 oop cur = ovflw_stk->pop();
4110 assert(cur != NULL, "Counted wrong?");
4111 work_q->push(cur);
4112 }
4113 return num > 0;
4114 }
4116 void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
4117 SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
4118 int n_tasks = pst->n_tasks();
4119 // We allow that there may be no tasks to do here because
4120 // we are restarting after a stack overflow.
4121 assert(pst->valid() || n_tasks == 0, "Uninitialized use?");
4122 uint nth_task = 0;
4124 HeapWord* aligned_start = sp->bottom();
4125 if (sp->used_region().contains(_restart_addr)) {
4126 // Align down to a card boundary for the start of 0th task
4127 // for this space.
4128 aligned_start =
4129 (HeapWord*)align_size_down((uintptr_t)_restart_addr,
4130 CardTableModRefBS::card_size);
4131 }
4133 size_t chunk_size = sp->marking_task_size();
4134 while (!pst->is_task_claimed(/* reference */ nth_task)) {
4135 // Having claimed the nth task in this space,
4136 // compute the chunk that it corresponds to:
4137 MemRegion span = MemRegion(aligned_start + nth_task*chunk_size,
4138 aligned_start + (nth_task+1)*chunk_size);
4139 // Try and bump the global finger via a CAS;
4140 // note that we need to do the global finger bump
4141 // _before_ taking the intersection below, because
4142 // the task corresponding to that region will be
4143 // deemed done even if the used_region() expands
4144 // because of allocation -- as it almost certainly will
4145 // during start-up while the threads yield in the
4146 // closure below.
4147 HeapWord* finger = span.end();
4148 bump_global_finger(finger); // atomically
4149 // There are null tasks here corresponding to chunks
4150 // beyond the "top" address of the space.
4151 span = span.intersection(sp->used_region());
4152 if (!span.is_empty()) { // Non-null task
4153 HeapWord* prev_obj;
4154 assert(!span.contains(_restart_addr) || nth_task == 0,
4155 "Inconsistency");
4156 if (nth_task == 0) {
4157 // For the 0th task, we'll not need to compute a block_start.
4158 if (span.contains(_restart_addr)) {
4159 // In the case of a restart because of stack overflow,
4160 // we might additionally skip a chunk prefix.
4161 prev_obj = _restart_addr;
4162 } else {
4163 prev_obj = span.start();
4164 }
4165 } else {
4166 // We want to skip the first object because
4167 // the protocol is to scan any object in its entirety
4168 // that _starts_ in this span; a fortiori, any
4169 // object starting in an earlier span is scanned
4170 // as part of an earlier claimed task.
4171 // Below we use the "careful" version of block_start
4172 // so we do not try to navigate uninitialized objects.
4173 prev_obj = sp->block_start_careful(span.start());
4174 // Below we use a variant of block_size that uses the
4175 // Printezis bits to avoid waiting for allocated
4176 // objects to become initialized/parsable.
4177 while (prev_obj < span.start()) {
4178 size_t sz = sp->block_size_no_stall(prev_obj, _collector);
4179 if (sz > 0) {
4180 prev_obj += sz;
4181 } else {
4182 // In this case we may end up doing a bit of redundant
4183 // scanning, but that appears unavoidable, short of
4184 // locking the free list locks; see bug 6324141.
4185 break;
4186 }
4187 }
4188 }
4189 if (prev_obj < span.end()) {
4190 MemRegion my_span = MemRegion(prev_obj, span.end());
4191 // Do the marking work within a non-empty span --
4192 // the last argument to the constructor indicates whether the
4193 // iteration should be incremental with periodic yields.
4194 Par_MarkFromRootsClosure cl(this, _collector, my_span,
4195 &_collector->_markBitMap,
4196 work_queue(i),
4197 &_collector->_markStack,
4198 _asynch);
4199 _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end());
4200 } // else nothing to do for this task
4201 } // else nothing to do for this task
4202 }
4203 // We'd be tempted to assert here that since there are no
4204 // more tasks left to claim in this space, the global_finger
4205 // must exceed space->top() and a fortiori space->end(). However,
4206 // that would not quite be correct because the bumping of
4207 // global_finger occurs strictly after the claiming of a task,
4208 // so by the time we reach here the global finger may not yet
4209 // have been bumped up by the thread that claimed the last
4210 // task.
4211 pst->all_tasks_completed();
4212 }
4214 class Par_ConcMarkingClosure: public MetadataAwareOopClosure {
4215 private:
4216 CMSCollector* _collector;
4217 CMSConcMarkingTask* _task;
4218 MemRegion _span;
4219 CMSBitMap* _bit_map;
4220 CMSMarkStack* _overflow_stack;
4221 OopTaskQueue* _work_queue;
4222 protected:
4223 DO_OOP_WORK_DEFN
4224 public:
4225 Par_ConcMarkingClosure(CMSCollector* collector, CMSConcMarkingTask* task, OopTaskQueue* work_queue,
4226 CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
4227 MetadataAwareOopClosure(collector->ref_processor()),
4228 _collector(collector),
4229 _task(task),
4230 _span(collector->_span),
4231 _work_queue(work_queue),
4232 _bit_map(bit_map),
4233 _overflow_stack(overflow_stack)
4234 { }
4235 virtual void do_oop(oop* p);
4236 virtual void do_oop(narrowOop* p);
4238 void trim_queue(size_t max);
4239 void handle_stack_overflow(HeapWord* lost);
4240 void do_yield_check() {
4241 if (_task->should_yield()) {
4242 _task->yield();
4243 }
4244 }
4245 };
4247 // Grey object scanning during work stealing phase --
4248 // the salient assumption here is that any references
4249 // that are in these stolen objects being scanned must
4250 // already have been initialized (else they would not have
4251 // been published), so we do not need to check for
4252 // uninitialized objects before pushing here.
4253 void Par_ConcMarkingClosure::do_oop(oop obj) {
4254 assert(obj->is_oop_or_null(true), "expected an oop or NULL");
4255 HeapWord* addr = (HeapWord*)obj;
4256 // Check if oop points into the CMS generation
4257 // and is not marked
4258 if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
4259 // a white object ...
4260 // If we manage to "claim" the object, by being the
4261 // first thread to mark it, then we push it on our
4262 // marking stack
4263 if (_bit_map->par_mark(addr)) { // ... now grey
4264 // push on work queue (grey set)
4265 bool simulate_overflow = false;
4266 NOT_PRODUCT(
4267 if (CMSMarkStackOverflowALot &&
4268 _collector->simulate_overflow()) {
4269 // simulate a stack overflow
4270 simulate_overflow = true;
4271 }
4272 )
4273 if (simulate_overflow ||
4274 !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
4275 // stack overflow
4276 if (PrintCMSStatistics != 0) {
4277 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
4278 SIZE_FORMAT, _overflow_stack->capacity());
4279 }
4280 // We cannot assert that the overflow stack is full because
4281 // it may have been emptied since.
4282 assert(simulate_overflow ||
4283 _work_queue->size() == _work_queue->max_elems(),
4284 "Else push should have succeeded");
4285 handle_stack_overflow(addr);
4286 }
4287 } // Else, some other thread got there first
4288 do_yield_check();
4289 }
4290 }
4292 void Par_ConcMarkingClosure::do_oop(oop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
4293 void Par_ConcMarkingClosure::do_oop(narrowOop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
4295 void Par_ConcMarkingClosure::trim_queue(size_t max) {
4296 while (_work_queue->size() > max) {
4297 oop new_oop;
4298 if (_work_queue->pop_local(new_oop)) {
4299 assert(new_oop->is_oop(), "Should be an oop");
4300 assert(_bit_map->isMarked((HeapWord*)new_oop), "Grey object");
4301 assert(_span.contains((HeapWord*)new_oop), "Not in span");
4302 new_oop->oop_iterate(this); // do_oop() above
4303 do_yield_check();
4304 }
4305 }
4306 }
4308 // Upon stack overflow, we discard (part of) the stack,
4309 // remembering the least address amongst those discarded
4310 // in CMSCollector's _restart_addr.
4311 void Par_ConcMarkingClosure::handle_stack_overflow(HeapWord* lost) {
4312 // We need to do this under a mutex to prevent other
4313 // workers from interfering with the work done below.
4314 MutexLockerEx ml(_overflow_stack->par_lock(),
4315 Mutex::_no_safepoint_check_flag);
4316 // Remember the least grey address discarded
4317 HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
4318 _collector->lower_restart_addr(ra);
4319 _overflow_stack->reset(); // discard stack contents
4320 _overflow_stack->expand(); // expand the stack if possible
4321 }
4324 void CMSConcMarkingTask::do_work_steal(int i) {
4325 OopTaskQueue* work_q = work_queue(i);
4326 oop obj_to_scan;
4327 CMSBitMap* bm = &(_collector->_markBitMap);
4328 CMSMarkStack* ovflw = &(_collector->_markStack);
4329 int* seed = _collector->hash_seed(i);
4330 Par_ConcMarkingClosure cl(_collector, this, work_q, bm, ovflw);
4331 while (true) {
4332 cl.trim_queue(0);
4333 assert(work_q->size() == 0, "Should have been emptied above");
4334 if (get_work_from_overflow_stack(ovflw, work_q)) {
4335 // Can't assert below because the work obtained from the
4336 // overflow stack may already have been stolen from us.
4337 // assert(work_q->size() > 0, "Work from overflow stack");
4338 continue;
4339 } else if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
4340 assert(obj_to_scan->is_oop(), "Should be an oop");
4341 assert(bm->isMarked((HeapWord*)obj_to_scan), "Grey object");
4342 obj_to_scan->oop_iterate(&cl);
4343 } else if (terminator()->offer_termination(&_term_term)) {
4344 assert(work_q->size() == 0, "Impossible!");
4345 break;
4346 } else if (yielding() || should_yield()) {
4347 yield();
4348 }
4349 }
4350 }
4352 // This is run by the CMS (coordinator) thread.
4353 void CMSConcMarkingTask::coordinator_yield() {
4354 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
4355 "CMS thread should hold CMS token");
4356 // First give up the locks, then yield, then re-lock
4357 // We should probably use a constructor/destructor idiom to
4358 // do this unlock/lock or modify the MutexUnlocker class to
4359 // serve our purpose. XXX
4360 assert_lock_strong(_bit_map_lock);
4361 _bit_map_lock->unlock();
4362 ConcurrentMarkSweepThread::desynchronize(true);
4363 ConcurrentMarkSweepThread::acknowledge_yield_request();
4364 _collector->stopTimer();
4365 if (PrintCMSStatistics != 0) {
4366 _collector->incrementYields();
4367 }
4368 _collector->icms_wait();
4370 // It is possible for whichever thread initiated the yield request
4371 // not to get a chance to wake up and take the bitmap lock between
4372 // this thread releasing it and reacquiring it. So, while the
4373 // should_yield() flag is on, let's sleep for a bit to give the
4374 // other thread a chance to wake up. The limit imposed on the number
4375 // of iterations is defensive, to avoid any unforeseen circumstances
4376 // putting us into an infinite loop. Since it's always been this
4377 // (coordinator_yield()) method that was observed to cause the
4378 // problem, we are using a parameter (CMSCoordinatorYieldSleepCount)
4379 // which is by default non-zero. For the other seven methods that
4380 // also perform the yield operation, we are using a different
4381 // parameter (CMSYieldSleepCount) which is by default zero. This way we
4382 // can enable the sleeping for those methods too, if necessary.
4383 // See 6442774.
4384 //
4385 // We really need to reconsider the synchronization between the GC
4386 // thread and the yield-requesting threads in the future and we
4387 // should really use wait/notify, which is the recommended
4388 // way of doing this type of interaction. Additionally, we should
4389 // consolidate the eight methods that do the yield operation, which
4390 // are almost identical, into one for better maintainability and
4391 // readability. See 6445193.
4392 //
4393 // Tony 2006.06.29
4394 for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
4395 ConcurrentMarkSweepThread::should_yield() &&
4396 !CMSCollector::foregroundGCIsActive(); ++i) {
4397 os::sleep(Thread::current(), 1, false);
4398 ConcurrentMarkSweepThread::acknowledge_yield_request();
4399 }
4401 ConcurrentMarkSweepThread::synchronize(true);
4402 _bit_map_lock->lock_without_safepoint_check();
4403 _collector->startTimer();
4404 }
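//
// The unlock-yield-relock dance above, in skeletal form (illustrative;
// peer_wants_lock() and sleep_rounds are hypothetical stand-ins for
// ConcurrentMarkSweepThread::should_yield() and
// CMSCoordinatorYieldSleepCount):
//
//   #include <chrono>
//   #include <mutex>
//   #include <thread>
//
//   void coordinator_yield_sketch(std::mutex& bit_map_lock,
//                                 bool (*peer_wants_lock)(),
//                                 unsigned sleep_rounds) {
//     bit_map_lock.unlock();                    // 1. give up the contended lock
//     for (unsigned i = 0; i < sleep_rounds && peer_wants_lock(); ++i) {
//       std::this_thread::sleep_for(            // 2. back off briefly so the
//           std::chrono::milliseconds(1));      //    requester can wake and run
//     }
//     bit_map_lock.lock();                      // 3. reacquire and resume
//   }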
4406 bool CMSCollector::do_marking_mt(bool asynch) {
4407 assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition");
4408 int num_workers = AdaptiveSizePolicy::calc_active_conc_workers(
4409 conc_workers()->total_workers(),
4410 conc_workers()->active_workers(),
4411 Threads::number_of_non_daemon_threads());
4412 conc_workers()->set_active_workers(num_workers);
4414 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
4416 CMSConcMarkingTask tsk(this,
4417 cms_space,
4418 asynch,
4419 conc_workers(),
4420 task_queues());
4422 // Since the actual number of workers we get may be different
4423 // from the number we requested above, do we need to do anything different
4424 // below? In particular, maybe we need to subclass the SequentialSubTasksDone
4425 // class? XXX
4426 cms_space->initialize_sequential_subtasks_for_marking(num_workers);
4428 // Refs discovery is already non-atomic.
4429 assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic");
4430 assert(ref_processor()->discovery_is_mt(), "Discovery should be MT");
4431 conc_workers()->start_task(&tsk);
4432 while (tsk.yielded()) {
4433 tsk.coordinator_yield();
4434 conc_workers()->continue_task(&tsk);
4435 }
4436 // If the task was aborted, _restart_addr will be non-NULL
4437 assert(tsk.completed() || _restart_addr != NULL, "Inconsistency");
4438 while (_restart_addr != NULL) {
4439 // XXX For now we do not make use of ABORTED state and have not
4440 // yet implemented the right abort semantics (even in the original
4441 // single-threaded CMS case). That needs some more investigation
4442 // and is deferred for now; see CR# TBF. 07252005YSR. XXX
4443 assert(!CMSAbortSemantics || tsk.aborted(), "Inconsistency");
4444 // If _restart_addr is non-NULL, a marking stack overflow
4445 // occurred; we need to do a fresh marking iteration from the
4446 // indicated restart address.
4447 if (_foregroundGCIsActive && asynch) {
4448 // We may be running into repeated stack overflows, having
4449 // reached the limit of the stack size, while making very
4450 // slow forward progress. It may be best to bail out and
4451 // let the foreground collector do its job.
4452 // Clear _restart_addr, so that foreground GC
4453 // works from scratch. This avoids the headache of
4454 // a "rescan" which would otherwise be needed because
4455 // of the dirty mod union table & card table.
4456 _restart_addr = NULL;
4457 return false;
4458 }
4459 // Adjust the task to restart from _restart_addr
4460 tsk.reset(_restart_addr);
4461 cms_space->initialize_sequential_subtasks_for_marking(num_workers,
4462 _restart_addr);
4463 _restart_addr = NULL;
4464 // Get the workers going again
4465 conc_workers()->start_task(&tsk);
4466 while (tsk.yielded()) {
4467 tsk.coordinator_yield();
4468 conc_workers()->continue_task(&tsk);
4469 }
4470 }
4471 assert(tsk.completed(), "Inconsistency");
4472 assert(tsk.result() == true, "Inconsistency");
4473 return true;
4474 }
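//
// Control skeleton of do_marking_mt() (illustrative; run_marking_round()
// stands in for the start_task/coordinator_yield/continue_task machinery,
// and g_restart for _restart_addr):
//
//   typedef char HeapWord;
//   extern HeapWord* g_restart;                 // set on marking stack overflow
//   void run_marking_round(HeapWord* from);     // marks; may set g_restart
//
//   void mark_until_complete(HeapWord* span_start) {
//     HeapWord* from = span_start;
//     do {
//       run_marking_round(from);                // may overflow part-way through
//       from = g_restart;                       // non-null => another pass needed
//       g_restart = 0;
//     } while (from != 0);                      // fresh iteration from restart
//   }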
4476 bool CMSCollector::do_marking_st(bool asynch) {
4477 ResourceMark rm;
4478 HandleMark hm;
4480 // Temporarily make refs discovery single threaded (non-MT)
4481 ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
4482 MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap,
4483 &_markStack, CMSYield && asynch);
4484 // the last argument to iterate indicates whether the iteration
4485 // should be incremental with periodic yields.
4486 _markBitMap.iterate(&markFromRootsClosure);
4487 // If _restart_addr is non-NULL, a marking stack overflow
4488 // occurred; we need to do a fresh iteration from the
4489 // indicated restart address.
4490 while (_restart_addr != NULL) {
4491 if (_foregroundGCIsActive && asynch) {
4492 // We may be running into repeated stack overflows, having
4493 // reached the limit of the stack size, while making very
4494 // slow forward progress. It may be best to bail out and
4495 // let the foreground collector do its job.
4496 // Clear _restart_addr, so that foreground GC
4497 // works from scratch. This avoids the headache of
4498 // a "rescan" which would otherwise be needed because
4499 // of the dirty mod union table & card table.
4500 _restart_addr = NULL;
4501 return false; // indicating failure to complete marking
4502 }
4503 // Deal with stack overflow:
4504 // we restart marking from _restart_addr
4505 HeapWord* ra = _restart_addr;
4506 markFromRootsClosure.reset(ra);
4507 _restart_addr = NULL;
4508 _markBitMap.iterate(&markFromRootsClosure, ra, _span.end());
4509 }
4510 return true;
4511 }
4513 void CMSCollector::preclean() {
4514 check_correct_thread_executing();
4515 assert(Thread::current()->is_ConcurrentGC_thread(), "Wrong thread");
4516 verify_work_stacks_empty();
4517 verify_overflow_empty();
4518 _abort_preclean = false;
4519 if (CMSPrecleaningEnabled) {
4520 if (!CMSEdenChunksRecordAlways) {
4521 _eden_chunk_index = 0;
4522 }
4523 size_t used = get_eden_used();
4524 size_t capacity = get_eden_capacity();
4525 // Don't start sampling unless we will get sufficiently
4526 // many samples.
4527 if (used < (capacity/(CMSScheduleRemarkSamplingRatio * 100)
4528 * CMSScheduleRemarkEdenPenetration)) {
4529 _start_sampling = true;
4530 } else {
4531 _start_sampling = false;
4532 }
4533 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
4534 CMSPhaseAccounting pa(this, "preclean", _gc_tracer_cm->gc_id(), !PrintGCDetails);
4535 preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
4536 }
4537 CMSTokenSync x(true); // is cms thread
4538 if (CMSPrecleaningEnabled) {
4539 sample_eden();
4540 _collectorState = AbortablePreclean;
4541 } else {
4542 _collectorState = FinalMarking;
4543 }
4544 verify_work_stacks_empty();
4545 verify_overflow_empty();
4546 }
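//
// On the sampling threshold above: the remark pause is targeted at an eden
// occupancy of
//   target = capacity * CMSScheduleRemarkEdenPenetration / 100
// and the test (used < capacity / (CMSScheduleRemarkSamplingRatio * 100)
//                        * CMSScheduleRemarkEdenPenetration)
// is simply used < target / CMSScheduleRemarkSamplingRatio. That is, we
// bother to sample only while eden is no more than 1/SamplingRatio of the
// way to the target occupancy, which (for roughly steady allocation) leaves
// room for about SamplingRatio samples before the target is reached.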
4548 // Try and schedule the remark such that young gen
4549 // occupancy is CMSScheduleRemarkEdenPenetration %.
4550 void CMSCollector::abortable_preclean() {
4551 check_correct_thread_executing();
4552 assert(CMSPrecleaningEnabled, "Inconsistent control state");
4553 assert(_collectorState == AbortablePreclean, "Inconsistent control state");
4555 // If Eden's current occupancy is below this threshold,
4556 // immediately schedule the remark; else preclean
4557 // past the next scavenge in an effort to
4558 // schedule the pause as described above. By choosing
4559 // CMSScheduleRemarkEdenSizeThreshold >= max eden size
4560 // we will never do an actual abortable preclean cycle.
4561 if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
4562 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
4563 CMSPhaseAccounting pa(this, "abortable-preclean", _gc_tracer_cm->gc_id(), !PrintGCDetails);
4564 // We need more smarts in the abortable preclean
4565 // loop below to deal with cases where allocation
4566 // in young gen is very very slow, and our precleaning
4567 // is running a losing race against a horde of
4568 // mutators intent on flooding us with CMS updates
4569 // (dirty cards).
4570 // One, admittedly dumb, strategy is to give up
4571 // after a certain number of abortable precleaning loops
4572 // or after a certain maximum time. We want to make
4573 // this smarter in the next iteration.
4574 // XXX FIX ME!!! YSR
4575 size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0;
4576 while (!(should_abort_preclean() ||
4577 ConcurrentMarkSweepThread::should_terminate())) {
4578 workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2);
4579 cumworkdone += workdone;
4580 loops++;
4581 // Voluntarily terminate abortable preclean phase if we have
4582 // been at it for too long.
4583 if ((CMSMaxAbortablePrecleanLoops != 0) &&
4584 loops >= CMSMaxAbortablePrecleanLoops) {
4585 if (PrintGCDetails) {
4586 gclog_or_tty->print(" CMS: abort preclean due to loops ");
4587 }
4588 break;
4589 }
4590 if (pa.wallclock_millis() > CMSMaxAbortablePrecleanTime) {
4591 if (PrintGCDetails) {
4592 gclog_or_tty->print(" CMS: abort preclean due to time ");
4593 }
4594 break;
4595 }
4596 // If we are doing little work each iteration, we should
4597 // take a short break.
4598 if (workdone < CMSAbortablePrecleanMinWorkPerIteration) {
4599 // Sleep for some time, waiting for work to accumulate
4600 stopTimer();
4601 cmsThread()->wait_on_cms_lock(CMSAbortablePrecleanWaitMillis);
4602 startTimer();
4603 waited++;
4604 }
4605 }
4606 if (PrintCMSStatistics > 0) {
4607 gclog_or_tty->print(" [" SIZE_FORMAT " iterations, " SIZE_FORMAT " waits, "
4608 SIZE_FORMAT " cards] ", loops, waited, cumworkdone);
4609 }
4610 }
4611 CMSTokenSync x(true); // is cms thread
4612 if (_collectorState != Idling) {
4613 assert(_collectorState == AbortablePreclean,
4614 "Spontaneous state transition?");
4615 _collectorState = FinalMarking;
4616 } // Else, a foreground collection completed this CMS cycle.
4617 return;
4618 }
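//
// The bail-out policy above, distilled (illustrative sketch; the function
// pointers and constants are hypothetical stand-ins for preclean_work(),
// pa.wallclock_millis(), wait_on_cms_lock() and the corresponding
// CMSMaxAbortablePreclean* / CMSAbortablePreclean* flags):
//
//   void abortable_preclean_sketch(bool (*should_abort)(),
//                                  size_t (*preclean_once)(),
//                                  double (*elapsed_ms)(),
//                                  void (*sleep_ms)(long)) {
//     const size_t max_loops = 100;       // 0 would mean "unlimited"
//     const double max_time_ms = 5000.0;  // illustrative values only
//     const size_t min_work = 100;
//     const long   wait_ms = 100;
//     size_t loops = 0;
//     while (!should_abort()) {
//       size_t workdone = preclean_once();
//       ++loops;
//       if (max_loops != 0 && loops >= max_loops) break;  // too many passes
//       if (elapsed_ms() > max_time_ms) break;            // out of time
//       if (workdone < min_work) sleep_ms(wait_ms);       // let work accumulate
//     }
//   }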
4620 // Respond to an Eden sampling opportunity
4621 void CMSCollector::sample_eden() {
4622 // Make sure a young gc cannot sneak in between our
4623 // reading and recording of a sample.
4624 assert(Thread::current()->is_ConcurrentGC_thread(),
4625 "Only the cms thread may collect Eden samples");
4626 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
4627 "Should collect samples while holding CMS token");
4628 if (!_start_sampling) {
4629 return;
4630 }
4631 // When CMSEdenChunksRecordAlways is true, the eden chunk array
4632 // is populated by the young generation.
4633 if (_eden_chunk_array != NULL && !CMSEdenChunksRecordAlways) {
4634 if (_eden_chunk_index < _eden_chunk_capacity) {
4635 _eden_chunk_array[_eden_chunk_index] = *_top_addr; // take sample
4636 assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
4637 "Unexpected state of Eden");
4638 // We'd like to check that what we just sampled is an oop-start address;
4639 // however, we cannot do that here since the object may not yet have been
4640 // initialized. So we'll instead do the check when we _use_ this sample
4641 // later.
4642 if (_eden_chunk_index == 0 ||
4643 (pointer_delta(_eden_chunk_array[_eden_chunk_index],
4644 _eden_chunk_array[_eden_chunk_index-1])
4645 >= CMSSamplingGrain)) {
4646 _eden_chunk_index++; // commit sample
4647 }
4648 }
4649 }
4650 if ((_collectorState == AbortablePreclean) && !_abort_preclean) {
4651 size_t used = get_eden_used();
4652 size_t capacity = get_eden_capacity();
4653 assert(used <= capacity, "Unexpected state of Eden");
4654 if (used > (capacity/100 * CMSScheduleRemarkEdenPenetration)) {
4655 _abort_preclean = true;
4656 }
4657 }
4658 }
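//
// The grain filter above, in isolation: a new sample of eden's top is
// committed only if it lies at least `grain` units beyond the previously
// committed sample (self-contained sketch; HeapWord is reduced to char, so
// the pointer arithmetic here is in bytes rather than heap words):
//
//   #include <cstddef>
//   typedef char HeapWord;
//
//   size_t maybe_commit_sample(HeapWord** arr, size_t idx,
//                              HeapWord* top, size_t grain) {
//     arr[idx] = top;                           // take sample
//     if (idx == 0 || (size_t)(arr[idx] - arr[idx - 1]) >= grain) {
//       return idx + 1;                         // commit sample
//     }
//     return idx;                               // too close: overwrite next time
//   }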
4661 size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
4662 assert(_collectorState == Precleaning ||
4663 _collectorState == AbortablePreclean, "incorrect state");
4664 ResourceMark rm;
4665 HandleMark hm;
4667 // Precleaning is currently not MT but the reference processor
4668 // may be set for MT. Disable it temporarily here.
4669 ReferenceProcessor* rp = ref_processor();
4670 ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false);
4672 // Do one pass of scrubbing the discovered reference lists
4673 // to remove any reference objects with strongly-reachable
4674 // referents.
4675 if (clean_refs) {
4676 CMSPrecleanRefsYieldClosure yield_cl(this);
4677 assert(rp->span().equals(_span), "Spans should be equal");
4678 CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
4679 &_markStack, true /* preclean */);
4680 CMSDrainMarkingStackClosure complete_trace(this,
4681 _span, &_markBitMap, &_markStack,
4682 &keep_alive, true /* preclean */);
4684 // We don't want this step to interfere with a young
4685 // collection because we don't want to take CPU
4686 // or memory bandwidth away from the young GC threads
4687 // (which may be as many as there are CPUs).
4688 // Note that we don't need to protect ourselves from
4689 // interference with mutators because they can't
4690 // manipulate the discovered reference lists nor affect
4691 // the computed reachability of the referents, the
4692 // only properties manipulated by the precleaning
4693 // of these reference lists.
4694 stopTimer();
4695 CMSTokenSyncWithLocks x(true /* is cms thread */,
4696 bitMapLock());
4697 startTimer();
4698 sample_eden();
4700 // The following will yield to allow foreground
4701 // collection to proceed promptly. XXX YSR:
4702 // The code in this method may need further
4703 // tweaking for better performance and some restructuring
4704 // for cleaner interfaces.
4705 GCTimer *gc_timer = NULL; // Currently not tracing concurrent phases
4706 rp->preclean_discovered_references(
4707 rp->is_alive_non_header(), &keep_alive, &complete_trace, &yield_cl,
4708 gc_timer, _gc_tracer_cm->gc_id());
4709 }
4711 if (clean_survivor) { // preclean the active survivor space(s)
4712 assert(_young_gen->kind() == Generation::DefNew ||
4713 _young_gen->kind() == Generation::ParNew ||
4714 _young_gen->kind() == Generation::ASParNew,
4715 "incorrect type for cast");
4716 DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
4717 PushAndMarkClosure pam_cl(this, _span, ref_processor(),
4718 &_markBitMap, &_modUnionTable,
4719 &_markStack, true /* precleaning phase */);
4720 stopTimer();
4721 CMSTokenSyncWithLocks ts(true /* is cms thread */,
4722 bitMapLock());
4723 startTimer();
4724 unsigned int before_count =
4725 GenCollectedHeap::heap()->total_collections();
4726 SurvivorSpacePrecleanClosure
4727 sss_cl(this, _span, &_markBitMap, &_markStack,
4728 &pam_cl, before_count, CMSYield);
4729 dng->from()->object_iterate_careful(&sss_cl);
4730 dng->to()->object_iterate_careful(&sss_cl);
4731 }
4732 MarkRefsIntoAndScanClosure
4733 mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
4734 &_markStack, this, CMSYield,
4735 true /* precleaning phase */);
4736 // CAUTION: The following closure has persistent state that may need to
4737 // be reset upon a decrease in the sequence of addresses it
4738 // processes.
4739 ScanMarkedObjectsAgainCarefullyClosure
4740 smoac_cl(this, _span,
4741 &_markBitMap, &_markStack, &mrias_cl, CMSYield);
4743 // Preclean dirty cards in ModUnionTable and CardTable using
4744 // appropriate convergence criterion;
4745 // repeat CMSPrecleanIter times unless we find that
4746 // we are losing.
4747 assert(CMSPrecleanIter < 10, "CMSPrecleanIter is too large");
4748 assert(CMSPrecleanNumerator < CMSPrecleanDenominator,
4749 "Bad convergence multiplier");
4750 assert(CMSPrecleanThreshold >= 100,
4751 "Unreasonably low CMSPrecleanThreshold");
4753 size_t numIter, cumNumCards, lastNumCards, curNumCards;
4754 for (numIter = 0, cumNumCards = lastNumCards = curNumCards = 0;
4755 numIter < CMSPrecleanIter;
4756 numIter++, lastNumCards = curNumCards, cumNumCards += curNumCards) {
4757 curNumCards = preclean_mod_union_table(_cmsGen, &smoac_cl);
4758 if (Verbose && PrintGCDetails) {
4759 gclog_or_tty->print(" (modUnionTable: " SIZE_FORMAT " cards)", curNumCards);
4760 }
4761 // Either there are very few dirty cards, so re-mark
4762 // pause will be small anyway, or our pre-cleaning isn't
4763 // that much faster than the rate at which cards are being
4764 // dirtied, so we might as well stop and re-mark since
4765 // precleaning won't improve our re-mark time by much.
4766 if (curNumCards <= CMSPrecleanThreshold ||
4767 (numIter > 0 &&
4768 (curNumCards * CMSPrecleanDenominator >
4769 lastNumCards * CMSPrecleanNumerator))) {
4770 numIter++;
4771 cumNumCards += curNumCards;
4772 break;
4773 }
4774 }
4776 preclean_klasses(&mrias_cl, _cmsGen->freelistLock());
4778 curNumCards = preclean_card_table(_cmsGen, &smoac_cl);
4779 cumNumCards += curNumCards;
4780 if (PrintGCDetails && PrintCMSStatistics != 0) {
4781 gclog_or_tty->print_cr(" (cardTable: " SIZE_FORMAT " cards, re-scanned " SIZE_FORMAT " cards, " SIZE_FORMAT " iterations)",
4782 curNumCards, cumNumCards, numIter);
4783 }
4784 return cumNumCards; // as a measure of useful work done
4785 }
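//
// The convergence test in the loop above, restated: from the second pass on,
// keep iterating only while each pass leaves at most Numerator/Denominator
// of the previous pass's dirty cards (cur * D <= last * N, kept in integer
// arithmetic), and only while more than CMSPrecleanThreshold cards remain.
// For example, with a 2/3 ratio: last = 900 and cur = 700 gives
// 700 * 3 = 2100 > 900 * 2 = 1800, so the dirtying rate is not shrinking
// fast enough, precleaning is no longer paying for itself, and we stop and
// let the remark pause handle the rest. As a predicate:
//
//   bool keep_precleaning(size_t cur, size_t last, size_t threshold,
//                         size_t num, size_t den) {
//     return cur > threshold && cur * den <= last * num;
//   }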
4787 // PRECLEANING NOTES:
4788 // Precleaning involves:
4789 // . reading the bits of the modUnionTable and clearing the set bits.
4790 // . For the cards corresponding to the set bits, we scan the
4791 // objects on those cards. This means we need the free_list_lock
4792 // so that we can safely iterate over the CMS space when scanning
4793 // for oops.
4794 // . When we scan the objects, we'll be both reading and setting
4795 // marks in the marking bit map, so we'll need the marking bit map.
4796 // . For protecting _collector_state transitions, we take the CGC_lock.
4797 // Note that any races in the reading of card table entries by the
4798 // CMS thread on the one hand and the clearing of those entries by the
4799 // VM thread or the setting of those entries by the mutator threads on the
4800 // other are quite benign. However, for efficiency it makes sense to keep
4801 // the VM thread from racing with the CMS thread while the latter is
4802 // transferring dirty card info to the modUnionTable. We therefore also use
4803 // the CGC_lock to protect the reading of the card table and the mod union
4804 // table by the CMS thread.
4805 // . We run concurrently with mutator updates, so scanning
4806 // needs to be done carefully -- we should not try to scan
4807 // potentially uninitialized objects.
4808 //
4809 // Locking strategy: While holding the CGC_lock, we scan over and
4810 // reset a maximal dirty range of the mod union / card tables, then lock
4811 // the free_list_lock and bitmap lock to do a full marking, then
4812 // release these locks; and repeat the cycle. This allows for a
4813 // certain amount of fairness in the sharing of these locks between
4814 // the CMS collector on the one hand, and the VM thread and the
4815 // mutators on the other.
4817 // NOTE: preclean_mod_union_table() and preclean_card_table()
4818 // further below are largely identical; if you need to modify
4819 // one of these methods, please check the other method too.
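//
// The lock cycle described in the notes above, as a skeleton (illustrative
// pseudo-helpers get_and_clear_dirty_range() and scan_objects_carefully();
// compare preclean_mod_union_table() and preclean_card_table() below):
//
//   for (HeapWord* next = start; next < end; /* advanced below */) {
//     MemRegion dirty;
//     {   // CGC_lock scope: atomically read-and-reset one maximal
//         // dirty range of the mod union / card table
//       CMSTokenSync ts(true);
//       dirty = get_and_clear_dirty_range(next, end);
//     }
//     if (dirty.is_empty()) break;
//     {   // freelist + bitmap scope: scan the objects on those cards
//       CMSTokenSyncWithLocks tsl(true, freelist_lock, bitmap_lock);
//       scan_objects_carefully(dirty);
//     }
//     next = dirty.end();   // all locks are dropped between ranges, giving
//                           // the VM thread and mutators a fair chance
//   }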
4821 size_t CMSCollector::preclean_mod_union_table(
4822 ConcurrentMarkSweepGeneration* gen,
4823 ScanMarkedObjectsAgainCarefullyClosure* cl) {
4824 verify_work_stacks_empty();
4825 verify_overflow_empty();
4827 // strategy: starting with the first card, accumulate contiguous
4828 // ranges of dirty cards; clear these cards, then scan the region
4829 // covered by these cards.
4831 // Since all of the MUT is committed ahead, we can just use
4832 // that, in case the generations expand while we are precleaning.
4833 // It might also be fine to just use the committed part of the
4834 // generation, but we might potentially miss cards when the
4835 // generation is rapidly expanding while we are in the midst
4836 // of precleaning.
4837 HeapWord* startAddr = gen->reserved().start();
4838 HeapWord* endAddr = gen->reserved().end();
4840 cl->setFreelistLock(gen->freelistLock()); // needed for yielding
4842 size_t numDirtyCards, cumNumDirtyCards;
4843 HeapWord *nextAddr, *lastAddr;
4844 for (cumNumDirtyCards = numDirtyCards = 0,
4845 nextAddr = lastAddr = startAddr;
4846 nextAddr < endAddr;
4847 nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
4849 ResourceMark rm;
4850 HandleMark hm;
4852 MemRegion dirtyRegion;
4853 {
4854 stopTimer();
4855 // Potential yield point
4856 CMSTokenSync ts(true);
4857 startTimer();
4858 sample_eden();
4859 // Get dirty region starting at nextAddr (inclusive),
4860 // simultaneously clearing it.
4861 dirtyRegion =
4862 _modUnionTable.getAndClearMarkedRegion(nextAddr, endAddr);
4863 assert(dirtyRegion.start() >= nextAddr,
4864 "returned region inconsistent?");
4865 }
4866 // Remember where the next search should begin.
4867 // The returned region (if non-empty) is a right open interval,
4868 // so lastAddr is obtained from the right end of that
4869 // interval.
4870 lastAddr = dirtyRegion.end();
4871 // Should do something more transparent and less hacky XXX
4872 numDirtyCards =
4873 _modUnionTable.heapWordDiffToOffsetDiff(dirtyRegion.word_size());
4875 // We'll scan the cards in the dirty region (with periodic
4876 // yields for foreground GC as needed).
4877 if (!dirtyRegion.is_empty()) {
4878 assert(numDirtyCards > 0, "consistency check");
4879 HeapWord* stop_point = NULL;
4880 stopTimer();
4881 // Potential yield point
4882 CMSTokenSyncWithLocks ts(true, gen->freelistLock(),
4883 bitMapLock());
4884 startTimer();
4885 {
4886 verify_work_stacks_empty();
4887 verify_overflow_empty();
4888 sample_eden();
4889 stop_point =
4890 gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4891 }
4892 if (stop_point != NULL) {
4893 // The careful iteration stopped early either because it found an
4894 // uninitialized object, or because we were in the midst of an
4895 // "abortable preclean", which should now be aborted. Redirty
4896 // the bits corresponding to the partially-scanned or unscanned
4897 // cards. We'll either restart at the next block boundary or
4898 // abort the preclean.
4899 assert((_collectorState == AbortablePreclean && should_abort_preclean()),
4900 "Should only be AbortablePreclean.");
4901 _modUnionTable.mark_range(MemRegion(stop_point, dirtyRegion.end()));
4902 if (should_abort_preclean()) {
4903 break; // out of preclean loop
4904 } else {
4905 // Compute the next address at which preclean should pick up;
4906 // might need bitMapLock in order to read P-bits.
4907 lastAddr = next_card_start_after_block(stop_point);
4908 }
4909 }
4910 } else {
4911 assert(lastAddr == endAddr, "consistency check");
4912 assert(numDirtyCards == 0, "consistency check");
4913 break;
4914 }
4915 }
4916 verify_work_stacks_empty();
4917 verify_overflow_empty();
4918 return cumNumDirtyCards;
4919 }
4921 // NOTE: preclean_mod_union_table() above and preclean_card_table()
4922 // below are largely identical; if you need to modify
4923 // one of these methods, please check the other method too.
4925 size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* gen,
4926 ScanMarkedObjectsAgainCarefullyClosure* cl) {
4927 // strategy: it's similar to preclean_mod_union_table above, in that
4928 // we accumulate contiguous ranges of dirty cards, mark these cards
4929 // precleaned, then scan the region covered by these cards.
4930 HeapWord* endAddr = (HeapWord*)(gen->_virtual_space.high());
4931 HeapWord* startAddr = (HeapWord*)(gen->_virtual_space.low());
4933 cl->setFreelistLock(gen->freelistLock()); // needed for yielding
4935 size_t numDirtyCards, cumNumDirtyCards;
4936 HeapWord *lastAddr, *nextAddr;
4938 for (cumNumDirtyCards = numDirtyCards = 0,
4939 nextAddr = lastAddr = startAddr;
4940 nextAddr < endAddr;
4941 nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
4943 ResourceMark rm;
4944 HandleMark hm;
4946 MemRegion dirtyRegion;
4947 {
4948 // See comments in "Precleaning notes" above on why we
4949 // do this locking. XXX Could the locking overheads be
4950 // too high when dirty cards are sparse? [I don't think so.]
4951 stopTimer();
4952 CMSTokenSync x(true); // is cms thread
4953 startTimer();
4954 sample_eden();
4955 // Get and clear dirty region from card table
4956 dirtyRegion = _ct->ct_bs()->dirty_card_range_after_reset(
4957 MemRegion(nextAddr, endAddr),
4958 true,
4959 CardTableModRefBS::precleaned_card_val());
4961 assert(dirtyRegion.start() >= nextAddr,
4962 "returned region inconsistent?");
4963 }
4964 lastAddr = dirtyRegion.end();
4965 numDirtyCards =
4966 dirtyRegion.word_size()/CardTableModRefBS::card_size_in_words;
4968 if (!dirtyRegion.is_empty()) {
4969 stopTimer();
4970 CMSTokenSyncWithLocks ts(true, gen->freelistLock(), bitMapLock());
4971 startTimer();
4972 sample_eden();
4973 verify_work_stacks_empty();
4974 verify_overflow_empty();
4975 HeapWord* stop_point =
4976 gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4977 if (stop_point != NULL) {
4978 assert((_collectorState == AbortablePreclean && should_abort_preclean()),
4979 "Should only be AbortablePreclean.");
4980 _ct->ct_bs()->invalidate(MemRegion(stop_point, dirtyRegion.end()));
4981 if (should_abort_preclean()) {
4982 break; // out of preclean loop
4983 } else {
4984 // Compute the next address at which preclean should pick up.
4985 lastAddr = next_card_start_after_block(stop_point);
4986 }
4987 }
4988 } else {
4989 break;
4990 }
4991 }
4992 verify_work_stacks_empty();
4993 verify_overflow_empty();
4994 return cumNumDirtyCards;
4995 }
4997 class PrecleanKlassClosure : public KlassClosure {
4998 KlassToOopClosure _cm_klass_closure;
4999 public:
5000 PrecleanKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
5001 void do_klass(Klass* k) {
5002 if (k->has_accumulated_modified_oops()) {
5003 k->clear_accumulated_modified_oops();
5005 _cm_klass_closure.do_klass(k);
5006 }
5007 }
5008 };
5010 // The freelist lock is needed to prevent asserts; is it really needed?
5011 void CMSCollector::preclean_klasses(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock) {
5013 cl->set_freelistLock(freelistLock);
5015 CMSTokenSyncWithLocks ts(true, freelistLock, bitMapLock());
5017 // SSS: Add equivalent to ScanMarkedObjectsAgainCarefullyClosure::do_yield_check and should_abort_preclean?
5018 // SSS: We should probably check if precleaning should be aborted, at suitable intervals?
5019 PrecleanKlassClosure preclean_klass_closure(cl);
5020 ClassLoaderDataGraph::classes_do(&preclean_klass_closure);
5022 verify_work_stacks_empty();
5023 verify_overflow_empty();
5024 }
5026 void CMSCollector::checkpointRootsFinal(bool asynch,
5027 bool clear_all_soft_refs, bool init_mark_was_synchronous) {
5028 assert(_collectorState == FinalMarking, "incorrect state transition?");
5029 check_correct_thread_executing();
5030 // world is stopped at this checkpoint
5031 assert(SafepointSynchronize::is_at_safepoint(),
5032 "world should be stopped");
5033 TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
5035 verify_work_stacks_empty();
5036 verify_overflow_empty();
5038 SpecializationStats::clear();
5039 if (PrintGCDetails) {
5040 gclog_or_tty->print("[YG occupancy: "SIZE_FORMAT" K ("SIZE_FORMAT" K)]",
5041 _young_gen->used() / K,
5042 _young_gen->capacity() / K);
5043 }
5044 if (asynch) {
5045 if (CMSScavengeBeforeRemark) {
5046 GenCollectedHeap* gch = GenCollectedHeap::heap();
5047 // Temporarily set the flag to false; GCH->do_collection expects
5048 // it to be false and will set it back to true.
5049 FlagSetting fl(gch->_is_gc_active, false);
5050 NOT_PRODUCT(GCTraceTime t("Scavenge-Before-Remark",
5051 PrintGCDetails && Verbose, true, _gc_timer_cm, _gc_tracer_cm->gc_id());)
5052 int level = _cmsGen->level() - 1;
5053 if (level >= 0) {
5054 gch->do_collection(true, // full (i.e. force, see below)
5055 false, // !clear_all_soft_refs
5056 0, // size
5057 false, // is_tlab
5058 level // max_level
5059 );
5060 }
5061 }
5062 FreelistLocker x(this);
5063 MutexLockerEx y(bitMapLock(),
5064 Mutex::_no_safepoint_check_flag);
5065 assert(!init_mark_was_synchronous, "but that's impossible!");
5066 checkpointRootsFinalWork(asynch, clear_all_soft_refs, false);
5067 } else {
5068 // already have all the locks
5069 checkpointRootsFinalWork(asynch, clear_all_soft_refs,
5070 init_mark_was_synchronous);
5071 }
5072 verify_work_stacks_empty();
5073 verify_overflow_empty();
5074 SpecializationStats::print();
5075 }
5077 void CMSCollector::checkpointRootsFinalWork(bool asynch,
5078 bool clear_all_soft_refs, bool init_mark_was_synchronous) {
5080 NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());)
5082 assert(haveFreelistLocks(), "must have free list locks");
5083 assert_lock_strong(bitMapLock());
5085 if (UseAdaptiveSizePolicy) {
5086 size_policy()->checkpoint_roots_final_begin();
5087 }
5089 ResourceMark rm;
5090 HandleMark hm;
5092 GenCollectedHeap* gch = GenCollectedHeap::heap();
5094 if (should_unload_classes()) {
5095 CodeCache::gc_prologue();
5096 }
5097 assert(haveFreelistLocks(), "must have free list locks");
5098 assert_lock_strong(bitMapLock());
5100 if (!init_mark_was_synchronous) {
5101 // We might assume that we need not fill TLAB's when
5102 // CMSScavengeBeforeRemark is set, because we may have just done
5103 // a scavenge which would have filled all TLAB's -- and besides
5104 // Eden would be empty. This however may not always be the case --
5105 // for instance although we asked for a scavenge, it may not have
5106 // happened because of a JNI critical section. We probably need
5107 // a policy for deciding whether we can in that case wait until
5108 // the critical section releases and then do the remark following
5109 // the scavenge, and skip it here. In the absence of that policy,
5110 // or of an indication of whether the scavenge did indeed occur,
5111 // we cannot rely on TLAB's having been filled and must do
5112 // so here just in case a scavenge did not happen.
5113 gch->ensure_parsability(false); // fill TLAB's, but no need to retire them
5114 // Update the saved marks which may affect the root scans.
5115 gch->save_marks();
5117 if (CMSPrintEdenSurvivorChunks) {
5118 print_eden_and_survivor_chunk_arrays();
5119 }
5121 {
5122 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
5124 // Note on the role of the mod union table:
5125 // Since the marker in "markFromRoots" marks concurrently with
5126 // mutators, it is possible for some reachable objects not to have been
5127 // scanned. For instance, an only reference to an object A was
5128 // placed in object B after the marker scanned B. Unless B is rescanned,
5129 // A would be collected. Such updates to references in marked objects
5130 // are detected via the mod union table which is the set of all cards
5131 // dirtied since the first checkpoint in this GC cycle and prior to
5132 // the most recent young generation GC, minus those cleaned up by the
5133 // concurrent precleaning.
5134 if (CMSParallelRemarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
5135 GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5136 do_remark_parallel();
5137 } else {
5138 GCTraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
5139 _gc_timer_cm, _gc_tracer_cm->gc_id());
5140 do_remark_non_parallel();
5141 }
5142 }
5143 } else {
5144 assert(!asynch, "Can't have init_mark_was_synchronous in asynch mode");
5145 // The initial mark was stop-world, so there's no rescanning to
5146 // do; go straight on to the next step below.
5147 }
5148 verify_work_stacks_empty();
5149 verify_overflow_empty();
5151 {
5152 NOT_PRODUCT(GCTraceTime ts("refProcessingWork", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());)
5153 refProcessingWork(asynch, clear_all_soft_refs);
5154 }
5155 verify_work_stacks_empty();
5156 verify_overflow_empty();
5158 if (should_unload_classes()) {
5159 CodeCache::gc_epilogue();
5160 }
5161 JvmtiExport::gc_epilogue();
5163 // If we encountered any (marking stack / work queue) overflow
5164 // events during the current CMS cycle, take appropriate
5165 // remedial measures, where possible, so as to try and avoid
5166 // recurrence of that condition.
5167 assert(_markStack.isEmpty(), "No grey objects");
5168 size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
5169 _ser_kac_ovflw + _ser_kac_preclean_ovflw;
5170 if (ser_ovflw > 0) {
5171 if (PrintCMSStatistics != 0) {
5172 gclog_or_tty->print_cr("Marking stack overflow (benign) "
5173 "(pmc_pc="SIZE_FORMAT", pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT
5174 ", kac_preclean="SIZE_FORMAT")",
5175 _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw,
5176 _ser_kac_ovflw, _ser_kac_preclean_ovflw);
5177 }
5178 _markStack.expand();
5179 _ser_pmc_remark_ovflw = 0;
5180 _ser_pmc_preclean_ovflw = 0;
5181 _ser_kac_preclean_ovflw = 0;
5182 _ser_kac_ovflw = 0;
5183 }
5184 if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) {
5185 if (PrintCMSStatistics != 0) {
5186 gclog_or_tty->print_cr("Work queue overflow (benign) "
5187 "(pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT")",
5188 _par_pmc_remark_ovflw, _par_kac_ovflw);
5189 }
5190 _par_pmc_remark_ovflw = 0;
5191 _par_kac_ovflw = 0;
5192 }
5193 if (PrintCMSStatistics != 0) {
5194 if (_markStack._hit_limit > 0) {
5195 gclog_or_tty->print_cr(" (benign) Hit max stack size limit ("SIZE_FORMAT")",
5196 _markStack._hit_limit);
5197 }
5198 if (_markStack._failed_double > 0) {
5199 gclog_or_tty->print_cr(" (benign) Failed stack doubling ("SIZE_FORMAT"),"
5200 " current capacity "SIZE_FORMAT,
5201 _markStack._failed_double,
5202 _markStack.capacity());
5203 }
5204 }
5205 _markStack._hit_limit = 0;
5206 _markStack._failed_double = 0;
5208 if ((VerifyAfterGC || VerifyDuringGC) &&
5209 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5210 verify_after_remark();
5211 }
5213 _gc_tracer_cm->report_object_count_after_gc(&_is_alive_closure);
5215 // Change under the freelistLocks.
5216 _collectorState = Sweeping;
5217 // Call isAllClear() under bitMapLock
5218 assert(_modUnionTable.isAllClear(),
5219 "Should be clear by end of the final marking");
5220 assert(_ct->klass_rem_set()->mod_union_is_clear(),
5221 "Should be clear by end of the final marking");
5222 if (UseAdaptiveSizePolicy) {
5223 size_policy()->checkpoint_roots_final_end(gch->gc_cause());
5224 }
5225 }
5227 void CMSParInitialMarkTask::work(uint worker_id) {
5228 elapsedTimer _timer;
5229 ResourceMark rm;
5230 HandleMark hm;
5232 // ---------- scan from roots --------------
5233 _timer.start();
5234 GenCollectedHeap* gch = GenCollectedHeap::heap();
5235 Par_MarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
5237 // ---------- young gen roots --------------
5238 {
5239 work_on_young_gen_roots(worker_id, &par_mri_cl);
5240 _timer.stop();
5241 if (PrintCMSStatistics != 0) {
5242 gclog_or_tty->print_cr(
5243 "Finished young gen initial mark scan work in %dth thread: %3.3f sec",
5244 worker_id, _timer.seconds());
5245 }
5246 }
5248 // ---------- remaining roots --------------
5249 _timer.reset();
5250 _timer.start();
5252 CLDToOopClosure cld_closure(&par_mri_cl, true);
5254 gch->gen_process_roots(_collector->_cmsGen->level(),
5255 false, // yg was scanned above
5256 false, // this is parallel code
5257 GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
5258 _collector->should_unload_classes(),
5259 &par_mri_cl,
5260 NULL,
5261 &cld_closure);
5262 assert(_collector->should_unload_classes()
5263 || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
5264 "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5265 _timer.stop();
5266 if (PrintCMSStatistics != 0) {
5267 gclog_or_tty->print_cr(
5268 "Finished remaining root initial mark scan work in %dth thread: %3.3f sec",
5269 worker_id, _timer.seconds());
5270 }
5271 }
5273 // Parallel remark task
5274 class CMSParRemarkTask: public CMSParMarkTask {
5275 CompactibleFreeListSpace* _cms_space;
5277 // The per-thread work queues, available here for stealing.
5278 OopTaskQueueSet* _task_queues;
5279 ParallelTaskTerminator _term;
5281 public:
5282 // A value of 0 passed to n_workers will cause the number of
5283 // workers to be taken from the active workers in the work gang.
5284 CMSParRemarkTask(CMSCollector* collector,
5285 CompactibleFreeListSpace* cms_space,
5286 int n_workers, FlexibleWorkGang* workers,
5287 OopTaskQueueSet* task_queues):
5288 CMSParMarkTask("Rescan roots and grey objects in parallel",
5289 collector, n_workers),
5290 _cms_space(cms_space),
5291 _task_queues(task_queues),
5292 _term(n_workers, task_queues) { }
5294 OopTaskQueueSet* task_queues() { return _task_queues; }
5296 OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
5298 ParallelTaskTerminator* terminator() { return &_term; }
5299 int n_workers() { return _n_workers; }
5301 void work(uint worker_id);
5303 private:
5304 // ... of dirty cards in old space
5305 void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i,
5306 Par_MarkRefsIntoAndScanClosure* cl);
5308 // ... work stealing for the above
5309 void do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl, int* seed);
5310 };
5312 class RemarkKlassClosure : public KlassClosure {
5313 KlassToOopClosure _cm_klass_closure;
5314 public:
5315 RemarkKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
5316 void do_klass(Klass* k) {
5317 // Check if we have modified any oops in the Klass during the concurrent marking.
5318 if (k->has_accumulated_modified_oops()) {
5319 k->clear_accumulated_modified_oops();
5321 // We could have transferred the current modified marks to the accumulated marks,
5322 // like we do with the Card Table to Mod Union Table. But it's not really necessary.
5323 } else if (k->has_modified_oops()) {
5324 // Don't clear anything, this info is needed by the next young collection.
5325 } else {
5326 // No modified oops in the Klass.
5327 return;
5328 }
5330 // The klass has modified fields, need to scan the klass.
5331 _cm_klass_closure.do_klass(k);
5332 }
5333 };
5335 void CMSParMarkTask::work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl) {
5336 DefNewGeneration* dng = _collector->_young_gen->as_DefNewGeneration();
5337 EdenSpace* eden_space = dng->eden();
5338 ContiguousSpace* from_space = dng->from();
5339 ContiguousSpace* to_space = dng->to();
5341 HeapWord** eca = _collector->_eden_chunk_array;
5342 size_t ect = _collector->_eden_chunk_index;
5343 HeapWord** sca = _collector->_survivor_chunk_array;
5344 size_t sct = _collector->_survivor_chunk_index;
5346 assert(ect <= _collector->_eden_chunk_capacity, "out of bounds");
5347 assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds");
5349 do_young_space_rescan(worker_id, cl, to_space, NULL, 0);
5350 do_young_space_rescan(worker_id, cl, from_space, sca, sct);
5351 do_young_space_rescan(worker_id, cl, eden_space, eca, ect);
5352 }
5354 // work_queue(i) is passed to the closure
5355 // Par_MarkRefsIntoAndScanClosure. The "i" parameter
5356 // is also passed to do_dirty_card_rescan_tasks() and to
5357 // do_work_steal() to select the i-th task_queue.
5359 void CMSParRemarkTask::work(uint worker_id) {
5360 elapsedTimer _timer;
5361 ResourceMark rm;
5362 HandleMark hm;
5364 // ---------- rescan from roots --------------
5365 _timer.start();
5366 GenCollectedHeap* gch = GenCollectedHeap::heap();
5367 Par_MarkRefsIntoAndScanClosure par_mrias_cl(_collector,
5368 _collector->_span, _collector->ref_processor(),
5369 &(_collector->_markBitMap),
5370 work_queue(worker_id));
5372 // Rescan young gen roots first since these are likely
5373 // coarsely partitioned and may, on that account, constitute
5374 // the critical path; thus, it's best to start off that
5375 // work first.
5376 // ---------- young gen roots --------------
5377 {
5378 work_on_young_gen_roots(worker_id, &par_mrias_cl);
5379 _timer.stop();
5380 if (PrintCMSStatistics != 0) {
5381 gclog_or_tty->print_cr(
5382 "Finished young gen rescan work in %dth thread: %3.3f sec",
5383 worker_id, _timer.seconds());
5384 }
5385 }
5387 // ---------- remaining roots --------------
5388 _timer.reset();
5389 _timer.start();
5390 gch->gen_process_roots(_collector->_cmsGen->level(),
5391 false, // yg was scanned above
5392 false, // this is parallel code
5393 GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
5394 _collector->should_unload_classes(),
5395 &par_mrias_cl,
5396 NULL,
5397 NULL); // The dirty klasses will be handled below
5399 assert(_collector->should_unload_classes()
5400 || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
5401 "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5402 _timer.stop();
5403 if (PrintCMSStatistics != 0) {
5404 gclog_or_tty->print_cr(
5405 "Finished remaining root rescan work in %dth thread: %3.3f sec",
5406 worker_id, _timer.seconds());
5407 }
5409 // ---------- unhandled CLD scanning ----------
5410 if (worker_id == 0) { // Single threaded at the moment.
5411 _timer.reset();
5412 _timer.start();
5414 // Scan all new class loader data objects and new dependencies that were
5415 // introduced during concurrent marking.
5416 ResourceMark rm;
5417 GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
5418 for (int i = 0; i < array->length(); i++) {
5419 par_mrias_cl.do_class_loader_data(array->at(i));
5420 }
5422 // We don't need to keep track of new CLDs anymore.
5423 ClassLoaderDataGraph::remember_new_clds(false);
5425 _timer.stop();
5426 if (PrintCMSStatistics != 0) {
5427 gclog_or_tty->print_cr(
5428 "Finished unhandled CLD scanning work in %dth thread: %3.3f sec",
5429 worker_id, _timer.seconds());
5430 }
5431 }
5433 // ---------- dirty klass scanning ----------
5434 if (worker_id == 0) { // Single threaded at the moment.
5435 _timer.reset();
5436 _timer.start();
5438 // Scan all classes that were dirtied during the concurrent marking phase.
5439 RemarkKlassClosure remark_klass_closure(&par_mrias_cl);
5440 ClassLoaderDataGraph::classes_do(&remark_klass_closure);
5442 _timer.stop();
5443 if (PrintCMSStatistics != 0) {
5444 gclog_or_tty->print_cr(
5445 "Finished dirty klass scanning work in %dth thread: %3.3f sec",
5446 worker_id, _timer.seconds());
5447 }
5448 }
5450 // We might have added oops to ClassLoaderData::_handles during the
5451 // concurrent marking phase. These oops point to newly allocated objects
5452 // that are guaranteed to be kept alive, either by the direct allocation
5453 // code or by the young collector when it processes the roots. Hence,
5454 // we don't have to revisit the _handles block during the remark phase.
5456 // ---------- rescan dirty cards ------------
5457 _timer.reset();
5458 _timer.start();
5460 // Do the rescan tasks for the CMS space (cms_space).
5462 // "worker_id" is passed to select the task_queue for "worker_id"
5463 do_dirty_card_rescan_tasks(_cms_space, worker_id, &par_mrias_cl);
5464 _timer.stop();
5465 if (PrintCMSStatistics != 0) {
5466 gclog_or_tty->print_cr(
5467 "Finished dirty card rescan work in %dth thread: %3.3f sec",
5468 worker_id, _timer.seconds());
5469 }
5471 // ---------- steal work from other threads ...
5472 // ---------- ... and drain overflow list.
5473 _timer.reset();
5474 _timer.start();
5475 do_work_steal(worker_id, &par_mrias_cl, _collector->hash_seed(worker_id));
5476 _timer.stop();
5477 if (PrintCMSStatistics != 0) {
5478 gclog_or_tty->print_cr(
5479 "Finished work stealing in %dth thread: %3.3f sec",
5480 worker_id, _timer.seconds());
5481 }
5482 }
5484 // Note that parameter "i" is not used.
5485 void
5486 CMSParMarkTask::do_young_space_rescan(uint worker_id,
5487 OopsInGenClosure* cl, ContiguousSpace* space,
5488 HeapWord** chunk_array, size_t chunk_top) {
5489 // Until all tasks completed:
5490 // . claim an unclaimed task
5491 // . compute region boundaries corresponding to task claimed
5492 // using chunk_array
5493 // . par_oop_iterate(cl) over that region
5495 ResourceMark rm;
5496 HandleMark hm;
5498 SequentialSubTasksDone* pst = space->par_seq_tasks();
5500 uint nth_task = 0;
5501 uint n_tasks = pst->n_tasks();
5503 if (n_tasks > 0) {
5504 assert(pst->valid(), "Uninitialized use?");
5505 HeapWord *start, *end;
5506 while (!pst->is_task_claimed(/* reference */ nth_task)) {
5507 // We claimed task # nth_task; compute its boundaries.
5508 if (chunk_top == 0) { // no samples were taken
5509 assert(nth_task == 0 && n_tasks == 1, "Can have only 1 EdenSpace task");
5510 start = space->bottom();
5511 end = space->top();
5512 } else if (nth_task == 0) {
5513 start = space->bottom();
5514 end = chunk_array[nth_task];
5515 } else if (nth_task < (uint)chunk_top) {
5516 assert(nth_task >= 1, "Control point invariant");
5517 start = chunk_array[nth_task - 1];
5518 end = chunk_array[nth_task];
5519 } else {
5520 assert(nth_task == (uint)chunk_top, "Control point invariant");
5521 start = chunk_array[chunk_top - 1];
5522 end = space->top();
5523 }
5524 MemRegion mr(start, end);
5525 // Verify that mr is in space
5526 assert(mr.is_empty() || space->used_region().contains(mr),
5527 "Should be in space");
5528 // Verify that "start" is an object boundary
5529 assert(mr.is_empty() || oop(mr.start())->is_oop(),
5530 "Should be an oop");
5531 space->par_oop_iterate(mr, cl);
5532 }
5533 pst->all_tasks_completed();
5534 }
5535 }
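//
// The boundary selection above, in isolation: task n over a space sampled at
// chunk_array[0 .. chunk_top) covers the region computed below (illustrative,
// self-contained sketch; HeapWord reduced to char):
//
//   #include <cstddef>
//   typedef char HeapWord;
//   struct Region { HeapWord* start; HeapWord* end; };
//
//   Region task_region(size_t n, HeapWord** chunks, size_t chunk_top,
//                      HeapWord* bottom, HeapWord* top) {
//     if (chunk_top == 0) {                  // no samples: one whole-space task
//       Region r = { bottom, top }; return r;
//     }
//     if (n == 0) {                          // first task starts at bottom
//       Region r = { bottom, chunks[0] }; return r;
//     }
//     if (n < chunk_top) {                   // interior task between samples
//       Region r = { chunks[n - 1], chunks[n] }; return r;
//     }
//     Region r = { chunks[chunk_top - 1], top }; return r;  // last task
//   }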
5537 void
5538 CMSParRemarkTask::do_dirty_card_rescan_tasks(
5539 CompactibleFreeListSpace* sp, int i,
5540 Par_MarkRefsIntoAndScanClosure* cl) {
5541 // Until all tasks completed:
5542 // . claim an unclaimed task
5543 // . compute region boundaries corresponding to task claimed
5544 // . transfer dirty bits ct->mut for that region
5545 // . apply rescanclosure to dirty mut bits for that region
5547 ResourceMark rm;
5548 HandleMark hm;
5550 OopTaskQueue* work_q = work_queue(i);
5551 ModUnionClosure modUnionClosure(&(_collector->_modUnionTable));
5552 // CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION!
5553 // CAUTION: This closure has state that persists across calls to
5554 // the work method dirty_range_iterate_clear() in that it has
5555 // embedded in it a (subtype of) UpwardsObjectClosure. The
5556 // use of that state in the embedded UpwardsObjectClosure instance
5557 // assumes that the cards are always iterated (even if in parallel
5558 // by several threads) in monotonically increasing order per each
5559 // thread. This is true of the implementation below which picks
5560 // card ranges (chunks) in monotonically increasing order globally
5561 // and, a-fortiori, in monotonically increasing order per thread
5562 // (the latter order being a subsequence of the former).
5563 // If the work code below is ever reorganized into a more chaotic
5564 // work-partitioning form than the current "sequential tasks"
5565 // paradigm, the use of that persistent state will have to be
5566 // revisited and modified appropriately. See also related
5567 // bug 4756801 work on which should examine this code to make
5568 // sure that the changes there do not run counter to the
5569 // assumptions made here and necessary for correctness and
5570 // efficiency. Note also that this code might yield inefficient
5571 // behaviour in the case of very large objects that span one or
5572 // more work chunks. Such objects would potentially be scanned
5573 // several times redundantly. Work on 4756801 should try and
5574 // address that performance anomaly if at all possible. XXX
5575 MemRegion full_span = _collector->_span;
5576 CMSBitMap* bm = &(_collector->_markBitMap); // shared
5577 MarkFromDirtyCardsClosure
5578 greyRescanClosure(_collector, full_span, // entire span of interest
5579 sp, bm, work_q, cl);
5581 SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
5582 assert(pst->valid(), "Uninitialized use?");
5583 uint nth_task = 0;
5584 const int alignment = CardTableModRefBS::card_size * BitsPerWord;
5585 MemRegion span = sp->used_region();
5586 HeapWord* start_addr = span.start();
5587 HeapWord* end_addr = (HeapWord*)round_to((intptr_t)span.end(),
5588 alignment);
5589 const size_t chunk_size = sp->rescan_task_size(); // in HeapWord units
5590 assert((HeapWord*)round_to((intptr_t)start_addr, alignment) ==
5591 start_addr, "Check alignment");
5592 assert((size_t)round_to((intptr_t)chunk_size, alignment) ==
5593 chunk_size, "Check alignment");
5595 while (!pst->is_task_claimed(/* reference */ nth_task)) {
5596 // Having claimed the nth_task, compute corresponding mem-region,
5597 // which is a-fortiori aligned correctly (i.e. at a MUT boundary).
5598 // The alignment restriction ensures that we do not need any
5599 // synchronization with other gang-workers while setting or
5600 // clearing bits in this chunk of the MUT.
5601 MemRegion this_span = MemRegion(start_addr + nth_task*chunk_size,
5602 start_addr + (nth_task+1)*chunk_size);
5603 // The last chunk's end might be way beyond end of the
5604 // used region. In that case pull back appropriately.
5605 if (this_span.end() > end_addr) {
5606 this_span.set_end(end_addr);
5607 assert(!this_span.is_empty(), "Program logic (calculation of n_tasks)");
5608 }
5609 // Iterate over the dirty cards covering this chunk, marking them
5610 // precleaned, and setting the corresponding bits in the mod union
5611 // table. Since we have been careful to partition at Card and MUT-word
5612 // boundaries no synchronization is needed between parallel threads.
5613 _collector->_ct->ct_bs()->dirty_card_iterate(this_span,
5614 &modUnionClosure);
5616 // Having transferred these marks into the modUnionTable,
5617 // rescan the marked objects on the dirty cards in the modUnionTable.
5618 // Even if this is at a synchronous collection, the initial marking
5619 // may have been done during an asynchronous collection so there
5620 // may be dirty bits in the mod-union table.
5621 _collector->_modUnionTable.dirty_range_iterate_clear(
5622 this_span, &greyRescanClosure);
5623 _collector->_modUnionTable.verifyNoOneBitsInRange(
5624 this_span.start(),
5625 this_span.end());
5626 }
5627 pst->all_tasks_completed(); // declare that i am done
5628 }
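//
// A note on the `alignment` used above: one mod-union-table (MUT) word holds
// BitsPerWord bits, one bit per card, so a chunk boundary placed at a
// multiple of CardTableModRefBS::card_size * BitsPerWord bytes can never
// split a MUT word between two workers; each worker owns whole MUT words and
// needs no synchronization when setting or clearing them. For example, with
// 512-byte cards and 64-bit words, chunks are aligned at 512 * 64 = 32768
// bytes, i.e. at the 64-card granule covered by exactly one MUT word.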
5630 // . see if we can share work_queues with ParNew? XXX
5631 void
5632 CMSParRemarkTask::do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl,
5633 int* seed) {
5634 OopTaskQueue* work_q = work_queue(i);
5635 NOT_PRODUCT(int num_steals = 0;)
5636 oop obj_to_scan;
5637 CMSBitMap* bm = &(_collector->_markBitMap);
5639 while (true) {
5640 // Completely finish any left over work from (an) earlier round(s)
5641 cl->trim_queue(0);
5642 size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
5643 (size_t)ParGCDesiredObjsFromOverflowList);
5644 // Now check if there's any work in the overflow list
5645 // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
5646 // only affects the number of attempts made to get work from the
5647 // overflow list and does not affect the number of workers. Just
5648 // pass ParallelGCThreads so this behavior is unchanged.
5649 if (_collector->par_take_from_overflow_list(num_from_overflow_list,
5650 work_q,
5651 ParallelGCThreads)) {
5652 // found something in global overflow list;
5653 // not yet ready to go stealing work from others.
5654 // We'd like to assert(work_q->size() != 0, ...)
5655 // because we just took work from the overflow list,
5656 // but of course we can't since all of that could have
5657 // been already stolen from us.
5658 // "He giveth and He taketh away."
5659 continue;
5660 }
5661 // Verify that we have no work before we resort to stealing
5662 assert(work_q->size() == 0, "Have work, shouldn't steal");
5663 // Try to steal from other queues that have work
5664 if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
5665 NOT_PRODUCT(num_steals++;)
5666 assert(obj_to_scan->is_oop(), "Oops, not an oop!");
5667 assert(bm->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
5668 // Do scanning work
5669 obj_to_scan->oop_iterate(cl);
5670 // Loop around, finish this work, and try to steal some more
5671 } else if (terminator()->offer_termination()) {
5672 break; // nirvana from the infinite cycle
5673 }
5674 }
5675 NOT_PRODUCT(
5676 if (PrintCMSStatistics != 0) {
5677 gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
5678 }
5679 )
5680 assert(work_q->size() == 0 && _collector->overflow_list_is_empty(),
5681 "Else our work is not yet done");
5682 }
5684 // If CMSEdenChunksRecordAlways is true, record object boundaries in
5685 // _eden_chunk_array by sampling the eden top in the slow-path eden
5686 // object allocation code path. If CMSEdenChunksRecordAlways is false,
5687 // we instead rely on the asynchronous sampling in sample_eden(),
5688 // which is active during the preclean phase.
5690 void CMSCollector::sample_eden_chunk() {
5691 if (CMSEdenChunksRecordAlways && _eden_chunk_array != NULL) {
5692 if (_eden_chunk_lock->try_lock()) {
5693 // Record a sample. This is the critical section. The contents
5694 // of the _eden_chunk_array have to be non-decreasing in
5695 // address order.
5696 _eden_chunk_array[_eden_chunk_index] = *_top_addr;
5697 assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
5698 "Unexpected state of Eden");
5699 if (_eden_chunk_index == 0 ||
5700 ((_eden_chunk_array[_eden_chunk_index] > _eden_chunk_array[_eden_chunk_index-1]) &&
5701 (pointer_delta(_eden_chunk_array[_eden_chunk_index],
5702 _eden_chunk_array[_eden_chunk_index-1]) >= CMSSamplingGrain))) {
5703 _eden_chunk_index++; // commit sample
5704 }
5705 _eden_chunk_lock->unlock();
5706 }
5707 }
5708 }
5710 // Return a thread-local PLAB recording array, as appropriate.
5711 void* CMSCollector::get_data_recorder(int thr_num) {
5712 if (_survivor_plab_array != NULL &&
5713 (CMSPLABRecordAlways ||
5714 (_collectorState > Marking && _collectorState < FinalMarking))) {
5715 assert(thr_num < (int)ParallelGCThreads, "thr_num is out of bounds");
5716 ChunkArray* ca = &_survivor_plab_array[thr_num];
5717 ca->reset(); // clear it so that fresh data is recorded
5718 return (void*) ca;
5719 } else {
5720 return NULL;
5721 }
5722 }
5724 // Reset all the thread-local PLAB recording arrays
5725 void CMSCollector::reset_survivor_plab_arrays() {
5726 for (uint i = 0; i < ParallelGCThreads; i++) {
5727 _survivor_plab_array[i].reset();
5728 }
5729 }
5731 // Merge the per-thread plab arrays into the global survivor chunk
5732 // array which will provide the partitioning of the survivor space
5733 // for CMS initial scan and rescan.
5734 void CMSCollector::merge_survivor_plab_arrays(ContiguousSpace* surv,
5735 int no_of_gc_threads) {
5736 assert(_survivor_plab_array != NULL, "Error");
5737 assert(_survivor_chunk_array != NULL, "Error");
5738 assert(_collectorState == FinalMarking ||
5739 (CMSParallelInitialMarkEnabled && _collectorState == InitialMarking), "Error");
5740 for (int j = 0; j < no_of_gc_threads; j++) {
5741 _cursor[j] = 0;
5742 }
5743 HeapWord* top = surv->top();
5744 size_t i;
5745 for (i = 0; i < _survivor_chunk_capacity; i++) { // all sca entries
5746 HeapWord* min_val = top; // Higher than any PLAB address
5747 uint min_tid = 0; // position of min_val this round
5748 for (int j = 0; j < no_of_gc_threads; j++) {
5749 ChunkArray* cur_sca = &_survivor_plab_array[j];
5750 if (_cursor[j] == cur_sca->end()) {
5751 continue;
5752 }
5753 assert(_cursor[j] < cur_sca->end(), "ctl pt invariant");
5754 HeapWord* cur_val = cur_sca->nth(_cursor[j]);
5755 assert(surv->used_region().contains(cur_val), "Out of bounds value");
5756 if (cur_val < min_val) {
5757 min_tid = j;
5758 min_val = cur_val;
5759 } else {
5760 assert(cur_val < top, "All recorded addresses should be less");
5761 }
5762 }
5763     // At this point min_val is the least address among the candidates
5764     // _survivor_plab_array[j].nth(_cursor[j]) over all threads j, and
5765     // min_tid is the thread (j) that contributed that address.
5766     // We record this address in _survivor_chunk_array[i]
5767     // and increment _cursor[min_tid] prior to the next round i.
5768 if (min_val == top) {
5769 break;
5770 }
5771 _survivor_chunk_array[i] = min_val;
5772 _cursor[min_tid]++;
5773 }
5774 // We are all done; record the size of the _survivor_chunk_array
5775 _survivor_chunk_index = i; // exclusive: [0, i)
5776 if (PrintCMSStatistics > 0) {
5777     gclog_or_tty->print(" (Survivor:" SIZE_FORMAT " chunks) ", i);
5778 }
5779 // Verify that we used up all the recorded entries
5780 #ifdef ASSERT
5781 size_t total = 0;
5782 for (int j = 0; j < no_of_gc_threads; j++) {
5783 assert(_cursor[j] == _survivor_plab_array[j].end(), "Ctl pt invariant");
5784 total += _cursor[j];
5785 }
5786 assert(total == _survivor_chunk_index, "Ctl Pt Invariant");
5787 // Check that the merged array is in sorted order
5788 if (total > 0) {
5789 for (size_t i = 0; i < total - 1; i++) {
5790 if (PrintCMSStatistics > 0) {
5791 gclog_or_tty->print(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ",
5792 i, _survivor_chunk_array[i]);
5793 }
5794 assert(_survivor_chunk_array[i] < _survivor_chunk_array[i+1],
5795 "Not sorted");
5796 }
5797 }
5798 #endif // ASSERT
5799 }
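// A small worked example of the merge above (addresses hypothetical):
// given per-thread PLAB arrays [a, d], [b] and [c, e] with
// a < b < c < d < e, all below top, each round picks the least unconsumed
// address across the three cursors, so _survivor_chunk_array becomes
// [a, b, c, d, e] and _survivor_chunk_index ends up as 5. This is a
// classic n-way merge of already sorted per-thread arrays, linear in the
// total number of recorded boundaries times no_of_gc_threads.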
5801 // Set up the space's par_seq_tasks structure for work claiming
5802 // for parallel initial scan and rescan of young gen.
5803 // See ParRescanTask where this is currently used.
5804 void
5805 CMSCollector::
5806 initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
5807 assert(n_threads > 0, "Unexpected n_threads argument");
5808 DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
5810 // Eden space
5811 if (!dng->eden()->is_empty()) {
5812 SequentialSubTasksDone* pst = dng->eden()->par_seq_tasks();
5813 assert(!pst->valid(), "Clobbering existing data?");
5814     // Each valid entry in [0, _eden_chunk_index) is a chunk boundary; together they induce _eden_chunk_index + 1 tasks.
5815 size_t n_tasks = _eden_chunk_index + 1;
5816 assert(n_tasks == 1 || _eden_chunk_array != NULL, "Error");
5817 // Sets the condition for completion of the subtask (how many threads
5818 // need to finish in order to be done).
5819 pst->set_n_threads(n_threads);
5820 pst->set_n_tasks((int)n_tasks);
5821 }
5823 // Merge the survivor plab arrays into _survivor_chunk_array
5824 if (_survivor_plab_array != NULL) {
5825 merge_survivor_plab_arrays(dng->from(), n_threads);
5826 } else {
5827 assert(_survivor_chunk_index == 0, "Error");
5828 }
5830 // To space
5831 {
5832 SequentialSubTasksDone* pst = dng->to()->par_seq_tasks();
5833 assert(!pst->valid(), "Clobbering existing data?");
5834 // Sets the condition for completion of the subtask (how many threads
5835 // need to finish in order to be done).
5836 pst->set_n_threads(n_threads);
5837 pst->set_n_tasks(1);
5838 assert(pst->valid(), "Error");
5839 }
5841 // From space
5842 {
5843 SequentialSubTasksDone* pst = dng->from()->par_seq_tasks();
5844 assert(!pst->valid(), "Clobbering existing data?");
5845 size_t n_tasks = _survivor_chunk_index + 1;
5846 assert(n_tasks == 1 || _survivor_chunk_array != NULL, "Error");
5847 // Sets the condition for completion of the subtask (how many threads
5848 // need to finish in order to be done).
5849 pst->set_n_threads(n_threads);
5850 pst->set_n_tasks((int)n_tasks);
5851 assert(pst->valid(), "Error");
5852 }
5853 }
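// For reference, a sketch (an assumption based on how CMSParRemarkTask
// consumes these elsewhere in this file, not a quote of that code) of how
// a worker claims the tasks set up above:
//
//   uint nth_task = 0;
//   SequentialSubTasksDone* pst = space->par_seq_tasks();
//   while (!pst->is_task_claimed(/* reference */ nth_task)) {
//     // Claimed task nth_task: rescan the chunk of the space that the
//     // nth pair of recorded boundaries delimits.
//   }
//   pst->all_tasks_completed();  // the last thread out resets pst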
5855 // Parallel version of remark
5856 void CMSCollector::do_remark_parallel() {
5857 GenCollectedHeap* gch = GenCollectedHeap::heap();
5858 FlexibleWorkGang* workers = gch->workers();
5859 assert(workers != NULL, "Need parallel worker threads.");
5860 // Choose to use the number of GC workers most recently set
5861 // into "active_workers". If active_workers is not set, set it
5862 // to ParallelGCThreads.
5863 int n_workers = workers->active_workers();
5864 if (n_workers == 0) {
5865 assert(n_workers > 0, "Should have been set during scavenge");
5866 n_workers = ParallelGCThreads;
5867 workers->set_active_workers(n_workers);
5868 }
5869 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
5871 CMSParRemarkTask tsk(this,
5872 cms_space,
5873 n_workers, workers, task_queues());
5875 // Set up for parallel process_roots work.
5876 gch->set_par_threads(n_workers);
5877   // We won't be iterating over the cards in the card table updating
5878   // the younger_gen cards, so we shouldn't call the following; else
5879   // the verification code, as well as subsequent younger_refs_iterate
5880   // code, would get confused. XXX
5881 // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
5883   // The young gen rescan work will not be done as part of
5884   // process_roots (which currently doesn't know how to
5885   // parallelize such a scan), but rather will be broken up into
5886   // a set of parallel tasks (via the sampling of EdenSpace done by
5887   // the [abortable] preclean phase, plus the [two] tasks of
5888   // scanning the [two] survivor spaces). Further fine-grained
5889   // parallelization of the scanning of the survivor spaces
5890   // themselves, and of precleaning of the younger gen itself,
5891   // is deferred to the future.
5892 initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
5894 // The dirty card rescan work is broken up into a "sequence"
5895 // of parallel tasks (per constituent space) that are dynamically
5896 // claimed by the parallel threads.
5897 cms_space->initialize_sequential_subtasks_for_rescan(n_workers);
5899 // It turns out that even when we're using 1 thread, doing the work in a
5900 // separate thread causes wide variance in run times. We can't help this
5901 // in the multi-threaded case, but we special-case n=1 here to get
5902 // repeatable measurements of the 1-thread overhead of the parallel code.
5903 if (n_workers > 1) {
5904 // Make refs discovery MT-safe, if it isn't already: it may not
5905 // necessarily be so, since it's possible that we are doing
5906 // ST marking.
5907 ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), true);
5908 GenCollectedHeap::StrongRootsScope srs(gch);
5909 workers->run_task(&tsk);
5910 } else {
5911 ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
5912 GenCollectedHeap::StrongRootsScope srs(gch);
5913 tsk.work(0);
5914 }
5916 gch->set_par_threads(0); // 0 ==> non-parallel.
5917 // restore, single-threaded for now, any preserved marks
5918 // as a result of work_q overflow
5919 restore_preserved_marks_if_any();
5920 }
5922 // Non-parallel version of remark
5923 void CMSCollector::do_remark_non_parallel() {
5924 ResourceMark rm;
5925 HandleMark hm;
5926 GenCollectedHeap* gch = GenCollectedHeap::heap();
5927 ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
5929 MarkRefsIntoAndScanClosure
5930 mrias_cl(_span, ref_processor(), &_markBitMap, NULL /* not precleaning */,
5931 &_markStack, this,
5932 false /* should_yield */, false /* not precleaning */);
5933 MarkFromDirtyCardsClosure
5934 markFromDirtyCardsClosure(this, _span,
5935 NULL, // space is set further below
5936 &_markBitMap, &_markStack, &mrias_cl);
5937 {
5938 GCTraceTime t("grey object rescan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5939 // Iterate over the dirty cards, setting the corresponding bits in the
5940 // mod union table.
5941 {
5942 ModUnionClosure modUnionClosure(&_modUnionTable);
5943 _ct->ct_bs()->dirty_card_iterate(
5944 _cmsGen->used_region(),
5945 &modUnionClosure);
5946 }
5947 // Having transferred these marks into the modUnionTable, we just need
5948 // to rescan the marked objects on the dirty cards in the modUnionTable.
5949 // The initial marking may have been done during an asynchronous
5950 // collection so there may be dirty bits in the mod-union table.
5951 const int alignment =
5952 CardTableModRefBS::card_size * BitsPerWord;
5953 {
5954 // ... First handle dirty cards in CMS gen
5955 markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace());
5956 MemRegion ur = _cmsGen->used_region();
5957 HeapWord* lb = ur.start();
5958 HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
5959 MemRegion cms_span(lb, ub);
5960 _modUnionTable.dirty_range_iterate_clear(cms_span,
5961 &markFromDirtyCardsClosure);
5962 verify_work_stacks_empty();
5963 if (PrintCMSStatistics != 0) {
5964 gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in cms gen) ",
5965 markFromDirtyCardsClosure.num_dirty_cards());
5966 }
5967 }
5968 }
5969 if (VerifyDuringGC &&
5970 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5971 HandleMark hm; // Discard invalid handles created during verification
5972 Universe::verify();
5973 }
5974 {
5975 GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5977 verify_work_stacks_empty();
5979 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
5980 GenCollectedHeap::StrongRootsScope srs(gch);
5982 gch->gen_process_roots(_cmsGen->level(),
5983 true, // younger gens as roots
5984 false, // use the local StrongRootsScope
5985 GenCollectedHeap::ScanningOption(roots_scanning_options()),
5986 should_unload_classes(),
5987 &mrias_cl,
5988 NULL,
5989 NULL); // The dirty klasses will be handled below
5991 assert(should_unload_classes()
5992 || (roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
5993 "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5994 }
5996 {
5997 GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5999 verify_work_stacks_empty();
6001 // Scan all class loader data objects that might have been introduced
6002 // during concurrent marking.
6003 ResourceMark rm;
6004 GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
6005 for (int i = 0; i < array->length(); i++) {
6006 mrias_cl.do_class_loader_data(array->at(i));
6007 }
6009 // We don't need to keep track of new CLDs anymore.
6010 ClassLoaderDataGraph::remember_new_clds(false);
6012 verify_work_stacks_empty();
6013 }
6015 {
6016 GCTraceTime t("dirty klass scan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
6018 verify_work_stacks_empty();
6020 RemarkKlassClosure remark_klass_closure(&mrias_cl);
6021 ClassLoaderDataGraph::classes_do(&remark_klass_closure);
6023 verify_work_stacks_empty();
6024 }
6026   // We might have added oops to ClassLoaderData::_handles during the
6027   // concurrent marking phase. These oops point to newly allocated objects
6028   // that are guaranteed to be kept alive, either by the direct allocation
6029   // code or by the young collector when it processes the roots. Hence,
6030   // we don't have to revisit the _handles block during the remark phase.
6032 verify_work_stacks_empty();
6033 // Restore evacuated mark words, if any, used for overflow list links
6034 if (!CMSOverflowEarlyRestoration) {
6035 restore_preserved_marks_if_any();
6036 }
6037 verify_overflow_empty();
6038 }
6040 ////////////////////////////////////////////////////////
6041 // Parallel Reference Processing Task Proxy Class
6042 ////////////////////////////////////////////////////////
6043 class CMSRefProcTaskProxy: public AbstractGangTaskWOopQueues {
6044 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
6045 CMSCollector* _collector;
6046 CMSBitMap* _mark_bit_map;
6047 const MemRegion _span;
6048 ProcessTask& _task;
6050 public:
6051 CMSRefProcTaskProxy(ProcessTask& task,
6052 CMSCollector* collector,
6053 const MemRegion& span,
6054 CMSBitMap* mark_bit_map,
6055 AbstractWorkGang* workers,
6056 OopTaskQueueSet* task_queues):
6057 // XXX Should superclass AGTWOQ also know about AWG since it knows
6058 // about the task_queues used by the AWG? Then it could initialize
6059 // the terminator() object. See 6984287. The set_for_termination()
6060 // below is a temporary band-aid for the regression in 6984287.
6061 AbstractGangTaskWOopQueues("Process referents by policy in parallel",
6062 task_queues),
6063 _task(task),
6064 _collector(collector), _span(span), _mark_bit_map(mark_bit_map)
6065 {
6066 assert(_collector->_span.equals(_span) && !_span.is_empty(),
6067 "Inconsistency in _span");
6068 set_for_termination(workers->active_workers());
6069 }
6071 OopTaskQueueSet* task_queues() { return queues(); }
6073 OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
6075 void do_work_steal(int i,
6076 CMSParDrainMarkingStackClosure* drain,
6077 CMSParKeepAliveClosure* keep_alive,
6078 int* seed);
6080 virtual void work(uint worker_id);
6081 };
6083 void CMSRefProcTaskProxy::work(uint worker_id) {
6084 ResourceMark rm;
6085 HandleMark hm;
6086 assert(_collector->_span.equals(_span), "Inconsistency in _span");
6087 CMSParKeepAliveClosure par_keep_alive(_collector, _span,
6088 _mark_bit_map,
6089 work_queue(worker_id));
6090 CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span,
6091 _mark_bit_map,
6092 work_queue(worker_id));
6093 CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map);
6094 _task.work(worker_id, is_alive_closure, par_keep_alive, par_drain_stack);
6095 if (_task.marks_oops_alive()) {
6096 do_work_steal(worker_id, &par_drain_stack, &par_keep_alive,
6097 _collector->hash_seed(worker_id));
6098 }
6099 assert(work_queue(worker_id)->size() == 0, "work_queue should be empty");
6100 assert(_collector->_overflow_list == NULL, "non-empty _overflow_list");
6101 }
6103 class CMSRefEnqueueTaskProxy: public AbstractGangTask {
6104 typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
6105 EnqueueTask& _task;
6107 public:
6108 CMSRefEnqueueTaskProxy(EnqueueTask& task)
6109 : AbstractGangTask("Enqueue reference objects in parallel"),
6110 _task(task)
6111 { }
6113 virtual void work(uint worker_id)
6114 {
6115 _task.work(worker_id);
6116 }
6117 };
6119 CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector,
6120 MemRegion span, CMSBitMap* bit_map, OopTaskQueue* work_queue):
6121 _span(span),
6122 _bit_map(bit_map),
6123 _work_queue(work_queue),
6124 _mark_and_push(collector, span, bit_map, work_queue),
6125 _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
6126 (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads)))
6127 { }
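// The low water mark above bounds how far trim_queue() drains the local
// work queue. A hedged arithmetic example (all values hypothetical): with
// work_queue->max_elems() == 16K, ParallelGCThreads == 8 and
// CMSWorkQueueDrainThreshold == 10, the mark is MIN2(16K/4, 10*8) == 80,
// i.e. draining stops once at most 80 oops remain, deliberately leaving
// some entries behind for other threads to steal.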
6129 // . see if we can share work_queues with ParNew? XXX
6130 void CMSRefProcTaskProxy::do_work_steal(int i,
6131 CMSParDrainMarkingStackClosure* drain,
6132 CMSParKeepAliveClosure* keep_alive,
6133 int* seed) {
6134 OopTaskQueue* work_q = work_queue(i);
6135 NOT_PRODUCT(int num_steals = 0;)
6136 oop obj_to_scan;
6138 while (true) {
6139 // Completely finish any left over work from (an) earlier round(s)
6140 drain->trim_queue(0);
6141 size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
6142 (size_t)ParGCDesiredObjsFromOverflowList);
6143 // Now check if there's any work in the overflow list
6144 // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
6145 // only affects the number of attempts made to get work from the
6146 // overflow list and does not affect the number of workers. Just
6147 // pass ParallelGCThreads so this behavior is unchanged.
6148 if (_collector->par_take_from_overflow_list(num_from_overflow_list,
6149 work_q,
6150 ParallelGCThreads)) {
6151 // Found something in global overflow list;
6152 // not yet ready to go stealing work from others.
6153 // We'd like to assert(work_q->size() != 0, ...)
6154 // because we just took work from the overflow list,
6155 // but of course we can't, since all of that might have
6156 // been already stolen from us.
6157 continue;
6158 }
6159 // Verify that we have no work before we resort to stealing
6160 assert(work_q->size() == 0, "Have work, shouldn't steal");
6161 // Try to steal from other queues that have work
6162 if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
6163 NOT_PRODUCT(num_steals++;)
6164 assert(obj_to_scan->is_oop(), "Oops, not an oop!");
6165 assert(_mark_bit_map->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
6166 // Do scanning work
6167 obj_to_scan->oop_iterate(keep_alive);
6168 // Loop around, finish this work, and try to steal some more
6169 } else if (terminator()->offer_termination()) {
6170 break; // nirvana from the infinite cycle
6171 }
6172 }
6173 NOT_PRODUCT(
6174 if (PrintCMSStatistics != 0) {
6175 gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
6176 }
6177 )
6178 }
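// Taken together, the loop above is the usual CMS work-stealing protocol:
// (1) drain local work, (2) refill from the shared overflow list,
// (3) steal from a sibling queue, and only when all three come up empty
// (4) offer termination. offer_termination() succeeds only once every
// worker has made the same offer, which is what guarantees that no queued
// or overflowed reference is left unprocessed.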
6180 void CMSRefProcTaskExecutor::execute(ProcessTask& task)
6181 {
6182 GenCollectedHeap* gch = GenCollectedHeap::heap();
6183 FlexibleWorkGang* workers = gch->workers();
6184 assert(workers != NULL, "Need parallel worker threads.");
6185 CMSRefProcTaskProxy rp_task(task, &_collector,
6186 _collector.ref_processor()->span(),
6187 _collector.markBitMap(),
6188 workers, _collector.task_queues());
6189 workers->run_task(&rp_task);
6190 }
6192 void CMSRefProcTaskExecutor::execute(EnqueueTask& task)
6193 {
6195 GenCollectedHeap* gch = GenCollectedHeap::heap();
6196 FlexibleWorkGang* workers = gch->workers();
6197 assert(workers != NULL, "Need parallel worker threads.");
6198 CMSRefEnqueueTaskProxy enq_task(task);
6199 workers->run_task(&enq_task);
6200 }
6202 void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
6204 ResourceMark rm;
6205 HandleMark hm;
6207 ReferenceProcessor* rp = ref_processor();
6208 assert(rp->span().equals(_span), "Spans should be equal");
6209 assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
6210 // Process weak references.
6211 rp->setup_policy(clear_all_soft_refs);
6212 verify_work_stacks_empty();
6214 CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
6215 &_markStack, false /* !preclean */);
6216 CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
6217 _span, &_markBitMap, &_markStack,
6218 &cmsKeepAliveClosure, false /* !preclean */);
6219 {
6220 GCTraceTime t("weak refs processing", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
6222 ReferenceProcessorStats stats;
6223 if (rp->processing_is_mt()) {
6224 // Set the degree of MT here. If the discovery is done MT, there
6225 // may have been a different number of threads doing the discovery
6226 // and a different number of discovered lists may have Ref objects.
6227 // That is OK as long as the Reference lists are balanced (see
6228 // balance_all_queues() and balance_queues()).
6229 GenCollectedHeap* gch = GenCollectedHeap::heap();
6230 int active_workers = ParallelGCThreads;
6231 FlexibleWorkGang* workers = gch->workers();
6232 if (workers != NULL) {
6233 active_workers = workers->active_workers();
6234 // The expectation is that active_workers will have already
6235 // been set to a reasonable value. If it has not been set,
6236 // investigate.
6237 assert(active_workers > 0, "Should have been set during scavenge");
6238 }
6239 rp->set_active_mt_degree(active_workers);
6240 CMSRefProcTaskExecutor task_executor(*this);
6241 stats = rp->process_discovered_references(&_is_alive_closure,
6242 &cmsKeepAliveClosure,
6243 &cmsDrainMarkingStackClosure,
6244 &task_executor,
6245 _gc_timer_cm,
6246 _gc_tracer_cm->gc_id());
6247 } else {
6248 stats = rp->process_discovered_references(&_is_alive_closure,
6249 &cmsKeepAliveClosure,
6250 &cmsDrainMarkingStackClosure,
6251 NULL,
6252 _gc_timer_cm,
6253 _gc_tracer_cm->gc_id());
6254 }
6255 _gc_tracer_cm->report_gc_reference_stats(stats);
6257 }
6259 // This is the point where the entire marking should have completed.
6260 verify_work_stacks_empty();
6262 if (should_unload_classes()) {
6263 {
6264 GCTraceTime t("class unloading", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
6266 // Unload classes and purge the SystemDictionary.
6267 bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
6269 // Unload nmethods.
6270 CodeCache::do_unloading(&_is_alive_closure, purged_class);
6272 // Prune dead klasses from subklass/sibling/implementor lists.
6273 Klass::clean_weak_klass_links(&_is_alive_closure);
6274 }
6276 {
6277 GCTraceTime t("scrub symbol table", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
6278 // Clean up unreferenced symbols in symbol table.
6279 SymbolTable::unlink();
6280 }
6282 {
6283 GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
6284 // Delete entries for dead interned strings.
6285 StringTable::unlink(&_is_alive_closure);
6286 }
6287 }
6290 // Restore any preserved marks as a result of mark stack or
6291 // work queue overflow
6292 restore_preserved_marks_if_any(); // done single-threaded for now
6294 rp->set_enqueuing_is_done(true);
6295 if (rp->processing_is_mt()) {
6296 rp->balance_all_queues();
6297 CMSRefProcTaskExecutor task_executor(*this);
6298 rp->enqueue_discovered_references(&task_executor);
6299 } else {
6300 rp->enqueue_discovered_references(NULL);
6301 }
6302 rp->verify_no_references_recorded();
6303 assert(!rp->discovery_enabled(), "should have been disabled");
6304 }
6306 #ifndef PRODUCT
6307 void CMSCollector::check_correct_thread_executing() {
6308 Thread* t = Thread::current();
6309 // Only the VM thread or the CMS thread should be here.
6310 assert(t->is_ConcurrentGC_thread() || t->is_VM_thread(),
6311 "Unexpected thread type");
6312   // If this is the VM thread, the foreground collector
6313   // should not be waiting. Note that _foregroundGCIsActive is
6314   // true while the foreground collector is waiting.
6315 if (_foregroundGCShouldWait) {
6316 // We cannot be the VM thread
6317 assert(t->is_ConcurrentGC_thread(),
6318 "Should be CMS thread");
6319 } else {
6320 // We can be the CMS thread only if we are in a stop-world
6321 // phase of CMS collection.
6322 if (t->is_ConcurrentGC_thread()) {
6323 assert(_collectorState == InitialMarking ||
6324 _collectorState == FinalMarking,
6325 "Should be a stop-world phase");
6326 // The CMS thread should be holding the CMS_token.
6327 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6328 "Potential interference with concurrently "
6329 "executing VM thread");
6330 }
6331 }
6332 }
6333 #endif
6335 void CMSCollector::sweep(bool asynch) {
6336 assert(_collectorState == Sweeping, "just checking");
6337 check_correct_thread_executing();
6338 verify_work_stacks_empty();
6339 verify_overflow_empty();
6340 increment_sweep_count();
6341 TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
6343 _inter_sweep_timer.stop();
6344 _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
6345 size_policy()->avg_cms_free_at_sweep()->sample(_cmsGen->free());
6347 assert(!_intra_sweep_timer.is_active(), "Should not be active");
6348 _intra_sweep_timer.reset();
6349 _intra_sweep_timer.start();
6350 if (asynch) {
6351 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
6352 CMSPhaseAccounting pa(this, "sweep", _gc_tracer_cm->gc_id(), !PrintGCDetails);
6353 // First sweep the old gen
6354 {
6355 CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
6356 bitMapLock());
6357 sweepWork(_cmsGen, asynch);
6358 }
6360 // Update Universe::_heap_*_at_gc figures.
6361 // We need all the free list locks to make the abstract state
6362 // transition from Sweeping to Resetting. See detailed note
6363 // further below.
6364 {
6365 CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock());
6366 // Update heap occupancy information which is used as
6367 // input to soft ref clearing policy at the next gc.
6368 Universe::update_heap_info_at_gc();
6369 _collectorState = Resizing;
6370 }
6371 } else {
6372 // already have needed locks
6373 sweepWork(_cmsGen, asynch);
6374 // Update heap occupancy information which is used as
6375 // input to soft ref clearing policy at the next gc.
6376 Universe::update_heap_info_at_gc();
6377 _collectorState = Resizing;
6378 }
6379 verify_work_stacks_empty();
6380 verify_overflow_empty();
6382 if (should_unload_classes()) {
6383 // Delay purge to the beginning of the next safepoint. Metaspace::contains
6384 // requires that the virtual spaces are stable and not deleted.
6385 ClassLoaderDataGraph::set_should_purge(true);
6386 }
6388 _intra_sweep_timer.stop();
6389 _intra_sweep_estimate.sample(_intra_sweep_timer.seconds());
6391 _inter_sweep_timer.reset();
6392 _inter_sweep_timer.start();
6394   // We need to use a monotonically non-decreasing time in ms
6395 // or we will see time-warp warnings and os::javaTimeMillis()
6396 // does not guarantee monotonicity.
6397 jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
6398 update_time_of_last_gc(now);
6400 // NOTE on abstract state transitions:
6401 // Mutators allocate-live and/or mark the mod-union table dirty
6402 // based on the state of the collection. The former is done in
6403 // the interval [Marking, Sweeping] and the latter in the interval
6404 // [Marking, Sweeping). Thus the transitions into the Marking state
6405 // and out of the Sweeping state must be synchronously visible
6406 // globally to the mutators.
6407 // The transition into the Marking state happens with the world
6408 // stopped so the mutators will globally see it. Sweeping is
6409 // done asynchronously by the background collector so the transition
6410 // from the Sweeping state to the Resizing state must be done
6411 // under the freelistLock (as is the check for whether to
6412 // allocate-live and whether to dirty the mod-union table).
6413 assert(_collectorState == Resizing, "Change of collector state to"
6414 " Resizing must be done under the freelistLocks (plural)");
6416 // Now that sweeping has been completed, we clear
6417 // the incremental_collection_failed flag,
6418 // thus inviting a younger gen collection to promote into
6419 // this generation. If such a promotion may still fail,
6420 // the flag will be set again when a young collection is
6421 // attempted.
6422 GenCollectedHeap* gch = GenCollectedHeap::heap();
6423 gch->clear_incremental_collection_failed(); // Worth retrying as fresh space may have been freed up
6424 gch->update_full_collections_completed(_collection_count_start);
6425 }
6427 // FIX ME!!! Looks like this belongs in CFLSpace, with
6428 // CMSGen merely delegating to it.
6429 void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
6430 double nearLargestPercent = FLSLargestBlockCoalesceProximity;
6431 HeapWord* minAddr = _cmsSpace->bottom();
6432 HeapWord* largestAddr =
6433 (HeapWord*) _cmsSpace->dictionary()->find_largest_dict();
6434 if (largestAddr == NULL) {
6435 // The dictionary appears to be empty. In this case
6436 // try to coalesce at the end of the heap.
6437 largestAddr = _cmsSpace->end();
6438 }
6439 size_t largestOffset = pointer_delta(largestAddr, minAddr);
6440 size_t nearLargestOffset =
6441 (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
6442 if (PrintFLSStatistics != 0) {
6443 gclog_or_tty->print_cr(
6444 "CMS: Large Block: " PTR_FORMAT ";"
6445 " Proximity: " PTR_FORMAT " -> " PTR_FORMAT,
6446 largestAddr,
6447 _cmsSpace->nearLargestChunk(), minAddr + nearLargestOffset);
6448 }
6449 _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
6450 }
6452 bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) {
6453 return addr >= _cmsSpace->nearLargestChunk();
6454 }
6456 FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
6457 return _cmsSpace->find_chunk_at_end();
6458 }
6460 void ConcurrentMarkSweepGeneration::update_gc_stats(int current_level,
6461 bool full) {
6462 // The next lower level has been collected. Gather any statistics
6463 // that are of interest at this point.
6464 if (!full && (current_level + 1) == level()) {
6465 // Gather statistics on the young generation collection.
6466 collector()->stats().record_gc0_end(used());
6467 }
6468 }
6470 CMSAdaptiveSizePolicy* ConcurrentMarkSweepGeneration::size_policy() {
6471 GenCollectedHeap* gch = GenCollectedHeap::heap();
6472 assert(gch->kind() == CollectedHeap::GenCollectedHeap,
6473 "Wrong type of heap");
6474 CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
6475 gch->gen_policy()->size_policy();
6476 assert(sp->is_gc_cms_adaptive_size_policy(),
6477 "Wrong type of size policy");
6478 return sp;
6479 }
6481 void ConcurrentMarkSweepGeneration::rotate_debug_collection_type() {
6482 if (PrintGCDetails && Verbose) {
6483 gclog_or_tty->print("Rotate from %d ", _debug_collection_type);
6484 }
6485 _debug_collection_type = (CollectionTypes) (_debug_collection_type + 1);
6486 _debug_collection_type =
6487 (CollectionTypes) (_debug_collection_type % Unknown_collection_type);
6488 if (PrintGCDetails && Verbose) {
6489 gclog_or_tty->print_cr("to %d ", _debug_collection_type);
6490 }
6491 }
6493 void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen,
6494 bool asynch) {
6495 // We iterate over the space(s) underlying this generation,
6496 // checking the mark bit map to see if the bits corresponding
6497 // to specific blocks are marked or not. Blocks that are
6498 // marked are live and are not swept up. All remaining blocks
6499 // are swept up, with coalescing on-the-fly as we sweep up
6500 // contiguous free and/or garbage blocks:
6501 // We need to ensure that the sweeper synchronizes with allocators
6502 // and stop-the-world collectors. In particular, the following
6503 // locks are used:
6504 // . CMS token: if this is held, a stop the world collection cannot occur
6505 // . freelistLock: if this is held no allocation can occur from this
6506 // generation by another thread
6507 //  . bitMapLock: if this is held, no other thread can access or update
6508 //                the marking bit map
6510 // Note that we need to hold the freelistLock if we use
6511 // block iterate below; else the iterator might go awry if
6512 // a mutator (or promotion) causes block contents to change
6513 // (for instance if the allocator divvies up a block).
6514 // If we hold the free list lock, for all practical purposes
6515 // young generation GC's can't occur (they'll usually need to
6516 // promote), so we might as well prevent all young generation
6517 // GC's while we do a sweeping step. For the same reason, we might
6518 // as well take the bit map lock for the entire duration
6520 // check that we hold the requisite locks
6521 assert(have_cms_token(), "Should hold cms token");
6522 assert( (asynch && ConcurrentMarkSweepThread::cms_thread_has_cms_token())
6523 || (!asynch && ConcurrentMarkSweepThread::vm_thread_has_cms_token()),
6524 "Should possess CMS token to sweep");
6525 assert_lock_strong(gen->freelistLock());
6526 assert_lock_strong(bitMapLock());
6528 assert(!_inter_sweep_timer.is_active(), "Was switched off in an outer context");
6529 assert(_intra_sweep_timer.is_active(), "Was switched on in an outer context");
6530 gen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
6531 _inter_sweep_estimate.padded_average(),
6532 _intra_sweep_estimate.padded_average());
6533 gen->setNearLargestChunk();
6535 {
6536 SweepClosure sweepClosure(this, gen, &_markBitMap,
6537 CMSYield && asynch);
6538 gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
6539 // We need to free-up/coalesce garbage/blocks from a
6540 // co-terminal free run. This is done in the SweepClosure
6541 // destructor; so, do not remove this scope, else the
6542 // end-of-sweep-census below will be off by a little bit.
6543 }
6544 gen->cmsSpace()->sweep_completed();
6545 gen->cmsSpace()->endSweepFLCensus(sweep_count());
6546 if (should_unload_classes()) { // unloaded classes this cycle,
6547 _concurrent_cycles_since_last_unload = 0; // ... reset count
6548 } else { // did not unload classes,
6549 _concurrent_cycles_since_last_unload++; // ... increment count
6550 }
6551 }
6553 // Reset CMS data structures (for now just the marking bit map)
6554 // preparatory for the next cycle.
6555 void CMSCollector::reset(bool asynch) {
6556 GenCollectedHeap* gch = GenCollectedHeap::heap();
6557 CMSAdaptiveSizePolicy* sp = size_policy();
6558 AdaptiveSizePolicyOutput(sp, gch->total_collections());
6559 if (asynch) {
6560 CMSTokenSyncWithLocks ts(true, bitMapLock());
6562 // If the state is not "Resetting", the foreground thread
6563 // has done a collection and the resetting.
6564 if (_collectorState != Resetting) {
6565 assert(_collectorState == Idling, "The state should only change"
6566 " because the foreground collector has finished the collection");
6567 return;
6568 }
6570 // Clear the mark bitmap (no grey objects to start with)
6571 // for the next cycle.
6572 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
6573 CMSPhaseAccounting cmspa(this, "reset", _gc_tracer_cm->gc_id(), !PrintGCDetails);
6575 HeapWord* curAddr = _markBitMap.startWord();
6576 while (curAddr < _markBitMap.endWord()) {
6577 size_t remaining = pointer_delta(_markBitMap.endWord(), curAddr);
6578 MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
6579 _markBitMap.clear_large_range(chunk);
6580 if (ConcurrentMarkSweepThread::should_yield() &&
6581 !foregroundGCIsActive() &&
6582 CMSYield) {
6583 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6584 "CMS thread should hold CMS token");
6585 assert_lock_strong(bitMapLock());
6586 bitMapLock()->unlock();
6587 ConcurrentMarkSweepThread::desynchronize(true);
6588 ConcurrentMarkSweepThread::acknowledge_yield_request();
6589 stopTimer();
6590 if (PrintCMSStatistics != 0) {
6591 incrementYields();
6592 }
6593 icms_wait();
6595 // See the comment in coordinator_yield()
6596 for (unsigned i = 0; i < CMSYieldSleepCount &&
6597 ConcurrentMarkSweepThread::should_yield() &&
6598 !CMSCollector::foregroundGCIsActive(); ++i) {
6599 os::sleep(Thread::current(), 1, false);
6600 ConcurrentMarkSweepThread::acknowledge_yield_request();
6601 }
6603 ConcurrentMarkSweepThread::synchronize(true);
6604 bitMapLock()->lock_without_safepoint_check();
6605 startTimer();
6606 }
6607 curAddr = chunk.end();
6608 }
6609 // A successful mostly concurrent collection has been done.
6610 // Because only the full (i.e., concurrent mode failure) collections
6611 // are being measured for gc overhead limits, clean the "near" flag
6612 // and count.
6613 sp->reset_gc_overhead_limit_count();
6614 _collectorState = Idling;
6615 } else {
6616 // already have the lock
6617 assert(_collectorState == Resetting, "just checking");
6618 assert_lock_strong(bitMapLock());
6619 _markBitMap.clear_all();
6620 _collectorState = Idling;
6621 }
6623 // Stop incremental mode after a cycle completes, so that any future cycles
6624 // are triggered by allocation.
6625 stop_icms();
6627 NOT_PRODUCT(
6628 if (RotateCMSCollectionTypes) {
6629 _cmsGen->rotate_debug_collection_type();
6630 }
6631 )
6633 register_gc_end();
6634 }
6636 void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
6637 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
6638 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
6639 GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer_cm->gc_id());
6640 TraceCollectorStats tcs(counters());
6642 switch (op) {
6643 case CMS_op_checkpointRootsInitial: {
6644 SvcGCMarker sgcm(SvcGCMarker::OTHER);
6645 checkpointRootsInitial(true); // asynch
6646 if (PrintGC) {
6647 _cmsGen->printOccupancy("initial-mark");
6648 }
6649 break;
6650 }
6651 case CMS_op_checkpointRootsFinal: {
6652 SvcGCMarker sgcm(SvcGCMarker::OTHER);
6653 checkpointRootsFinal(true, // asynch
6654 false, // !clear_all_soft_refs
6655 false); // !init_mark_was_synchronous
6656 if (PrintGC) {
6657 _cmsGen->printOccupancy("remark");
6658 }
6659 break;
6660 }
6661 default:
6662 fatal("No such CMS_op");
6663 }
6664 }
6666 #ifndef PRODUCT
6667 size_t const CMSCollector::skip_header_HeapWords() {
6668 return FreeChunk::header_size();
6669 }
6671 // Try to collect here the conditions that should hold when the
6672 // CMS thread is exiting. The idea is that the foreground GC
6673 // thread should not be blocked if it wants to terminate
6674 // the CMS thread and yet continue to run the VM for a while
6675 // after that.
6676 void CMSCollector::verify_ok_to_terminate() const {
6677 assert(Thread::current()->is_ConcurrentGC_thread(),
6678 "should be called by CMS thread");
6679 assert(!_foregroundGCShouldWait, "should be false");
6680 // We could check here that all the various low-level locks
6681 // are not held by the CMS thread, but that is overkill; see
6682 // also CMSThread::verify_ok_to_terminate() where the CGC_lock
6683 // is checked.
6684 }
6685 #endif
6687 size_t CMSCollector::block_size_using_printezis_bits(HeapWord* addr) const {
6688 assert(_markBitMap.isMarked(addr) && _markBitMap.isMarked(addr + 1),
6689 "missing Printezis mark?");
6690 HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
6691 size_t size = pointer_delta(nextOneAddr + 1, addr);
6692 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6693 "alignment problem");
6694 assert(size >= 3, "Necessary for Printezis marks to work");
6695 return size;
6696 }
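// A worked example of the decoding above (addresses hypothetical): for a
// not-yet-initialized block of 5 heap words starting at word offset 100,
// CMS sets mark bits at 100 and 101 (the Printezis "P-bits") and at 104
// (the block's last word). getNextMarkedWordAddress(102) then finds 104,
// and size = (104 + 1) - 100 = 5. The "size >= 3" assertion holds because
// a smaller block could not keep the end bit distinct from the two
// header bits.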
6698 // A variant of the above (block_size_using_printezis_bits()) except
6699 // that we return 0 if the P-bits are not yet set.
6700 size_t CMSCollector::block_size_if_printezis_bits(HeapWord* addr) const {
6701 if (_markBitMap.isMarked(addr + 1)) {
6702 assert(_markBitMap.isMarked(addr), "P-bit can be set only for marked objects");
6703 HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
6704 size_t size = pointer_delta(nextOneAddr + 1, addr);
6705 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6706 "alignment problem");
6707 assert(size >= 3, "Necessary for Printezis marks to work");
6708 return size;
6709 }
6710 return 0;
6711 }
6713 HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const {
6714 size_t sz = 0;
6715 oop p = (oop)addr;
6716 if (p->klass_or_null() != NULL) {
6717 sz = CompactibleFreeListSpace::adjustObjectSize(p->size());
6718 } else {
6719 sz = block_size_using_printezis_bits(addr);
6720 }
6721 assert(sz > 0, "size must be nonzero");
6722 HeapWord* next_block = addr + sz;
6723 HeapWord* next_card = (HeapWord*)round_to((uintptr_t)next_block,
6724 CardTableModRefBS::card_size);
6725 assert(round_down((uintptr_t)addr, CardTableModRefBS::card_size) <
6726 round_down((uintptr_t)next_card, CardTableModRefBS::card_size),
6727 "must be different cards");
6728 return next_card;
6729 }
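// A hedged arithmetic example for the rounding above (the 512-byte card
// size is typical; the addresses are made up): if the block starts at
// byte address 0x1220 and addr + sz corresponds to byte address 0x1250,
// round_to(0x1250, 512) yields 0x1400, the start of the next card. The
// assert holds because 0x1220 rounds down to card 0x1200 while 0x1400
// rounds down to itself, two different cards.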
6732 // CMS Bit Map Wrapper /////////////////////////////////////////
6734 // Construct a CMS bit map infrastructure, but don't create the
6735 // bit vector itself. That is done by a separate call to
6736 // CMSBitMap::allocate() further below.
6737 CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name):
6738 _bm(),
6739 _shifter(shifter),
6740 _lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true) : NULL)
6741 {
6742 _bmStartWord = 0;
6743 _bmWordSize = 0;
6744 }
6746 bool CMSBitMap::allocate(MemRegion mr) {
6747 _bmStartWord = mr.start();
6748 _bmWordSize = mr.word_size();
6749 ReservedSpace brs(ReservedSpace::allocation_align_size_up(
6750 (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
6751 if (!brs.is_reserved()) {
6752 warning("CMS bit map allocation failure");
6753 return false;
6754 }
6755   // For now we'll just commit all of the bit map up front.
6756 // Later on we'll try to be more parsimonious with swap.
6757 if (!_virtual_space.initialize(brs, brs.size())) {
6758 warning("CMS bit map backing store failure");
6759 return false;
6760 }
6761 assert(_virtual_space.committed_size() == brs.size(),
6762 "didn't reserve backing store for all of CMS bit map?");
6763 _bm.set_map((BitMap::bm_word_t*)_virtual_space.low());
6764 assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
6765 _bmWordSize, "inconsistency in bit map sizing");
6766 _bm.set_size(_bmWordSize >> _shifter);
6768 // bm.clear(); // can we rely on getting zero'd memory? verify below
6769 assert(isAllClear(),
6770 "Expected zero'd memory from ReservedSpace constructor");
6771 assert(_bm.size() == heapWordDiffToOffsetDiff(sizeInWords()),
6772 "consistency check");
6773 return true;
6774 }
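// A hedged sizing example for the reservation above: covering a 1G region
// (2^27 heap words on a 64-bit VM) with _shifter == 0, i.e. one bit per
// heap word as the marking bit map uses, the expression
// (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1 asks for
// (2^27 >> 3) + 1 bytes, just over 16M, or roughly 1/64 of the covered
// space.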
6776 void CMSBitMap::dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl) {
6777 HeapWord *next_addr, *end_addr, *last_addr;
6778 assert_locked();
6779 assert(covers(mr), "out-of-range error");
6780 // XXX assert that start and end are appropriately aligned
6781 for (next_addr = mr.start(), end_addr = mr.end();
6782 next_addr < end_addr; next_addr = last_addr) {
6783 MemRegion dirty_region = getAndClearMarkedRegion(next_addr, end_addr);
6784 last_addr = dirty_region.end();
6785 if (!dirty_region.is_empty()) {
6786 cl->do_MemRegion(dirty_region);
6787 } else {
6788 assert(last_addr == end_addr, "program logic");
6789 return;
6790 }
6791 }
6792 }
6794 void CMSBitMap::print_on_error(outputStream* st, const char* prefix) const {
6795 _bm.print_on_error(st, prefix);
6796 }
6798 #ifndef PRODUCT
6799 void CMSBitMap::assert_locked() const {
6800 CMSLockVerifier::assert_locked(lock());
6801 }
6803 bool CMSBitMap::covers(MemRegion mr) const {
6804 // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
6805 assert((size_t)_bm.size() == (_bmWordSize >> _shifter),
6806 "size inconsistency");
6807 return (mr.start() >= _bmStartWord) &&
6808 (mr.end() <= endWord());
6809 }
6811 bool CMSBitMap::covers(HeapWord* start, size_t size) const {
6812 return (start >= _bmStartWord && (start + size) <= endWord());
6813 }
6815 void CMSBitMap::verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) {
6816 // verify that there are no 1 bits in the interval [left, right)
6817 FalseBitMapClosure falseBitMapClosure;
6818 iterate(&falseBitMapClosure, left, right);
6819 }
6821 void CMSBitMap::region_invariant(MemRegion mr)
6822 {
6823 assert_locked();
6824 // mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
6825 assert(!mr.is_empty(), "unexpected empty region");
6826 assert(covers(mr), "mr should be covered by bit map");
6827 // convert address range into offset range
6828 size_t start_ofs = heapWordToOffset(mr.start());
6829 // Make sure that end() is appropriately aligned
6830 assert(mr.end() == (HeapWord*)round_to((intptr_t)mr.end(),
6831 (1 << (_shifter+LogHeapWordSize))),
6832 "Misaligned mr.end()");
6833 size_t end_ofs = heapWordToOffset(mr.end());
6834 assert(end_ofs > start_ofs, "Should mark at least one bit");
6835 }
6837 #endif
6839 bool CMSMarkStack::allocate(size_t size) {
6840 // allocate a stack of the requisite depth
6841 ReservedSpace rs(ReservedSpace::allocation_align_size_up(
6842 size * sizeof(oop)));
6843 if (!rs.is_reserved()) {
6844 warning("CMSMarkStack allocation failure");
6845 return false;
6846 }
6847 if (!_virtual_space.initialize(rs, rs.size())) {
6848 warning("CMSMarkStack backing store failure");
6849 return false;
6850 }
6851 assert(_virtual_space.committed_size() == rs.size(),
6852 "didn't reserve backing store for all of CMS stack?");
6853 _base = (oop*)(_virtual_space.low());
6854 _index = 0;
6855 _capacity = size;
6856 NOT_PRODUCT(_max_depth = 0);
6857 return true;
6858 }
6860 // XXX FIX ME !!! In the MT case we come in here holding a
6861 // leaf lock. For printing we need to take a further lock
6862 // which has lower rank. We need to recalibrate the two
6863 // lock-ranks involved in order to be able to print the
6864 // messages below. (Or defer the printing to the caller.
6865 // For now we take the expedient path of just disabling the
6866 // messages for the problematic case.)
6867 void CMSMarkStack::expand() {
6868 assert(_capacity <= MarkStackSizeMax, "stack bigger than permitted");
6869 if (_capacity == MarkStackSizeMax) {
6870 if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
6871 // We print a warning message only once per CMS cycle.
6872 gclog_or_tty->print_cr(" (benign) Hit CMSMarkStack max size limit");
6873 }
6874 return;
6875 }
6876 // Double capacity if possible
6877 size_t new_capacity = MIN2(_capacity*2, MarkStackSizeMax);
6878 // Do not give up existing stack until we have managed to
6879 // get the double capacity that we desired.
6880 ReservedSpace rs(ReservedSpace::allocation_align_size_up(
6881 new_capacity * sizeof(oop)));
6882 if (rs.is_reserved()) {
6883 // Release the backing store associated with old stack
6884 _virtual_space.release();
6885 // Reinitialize virtual space for new stack
6886 if (!_virtual_space.initialize(rs, rs.size())) {
6887 fatal("Not enough swap for expanded marking stack");
6888 }
6889 _base = (oop*)(_virtual_space.low());
6890 _index = 0;
6891 _capacity = new_capacity;
6892 } else if (_failed_double++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
6893     // Failed to double capacity; continue.
6894     // We print a detail message only once per CMS cycle.
6895 gclog_or_tty->print(" (benign) Failed to expand marking stack from "SIZE_FORMAT"K to "
6896 SIZE_FORMAT"K",
6897 _capacity / K, new_capacity / K);
6898 }
6899 }
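// For example (values hypothetical): a stack at _capacity == 32K oops with
// MarkStackSizeMax == 4M doubles to 64K on its first expansion, 128K on
// the next, and so on; once _capacity reaches MarkStackSizeMax the early
// return above merely counts the hit. Note that, as the code shows, the
// old backing store is released and _index is reset to 0, so the expanded
// stack starts out empty rather than preserving its contents.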
6902 // Closures
6903 // XXX: there seems to be a lot of code duplication here;
6904 // should refactor and consolidate common code.
6906 // This closure is used to mark refs into the CMS generation in
6907 // the CMS bit map. Called at the first checkpoint. This closure
6908 // assumes that we do not need to re-mark dirty cards; if the CMS
6909 // generation on which this is used is not the oldest
6910 // generation, then this will lose younger_gen cards!
6912 MarkRefsIntoClosure::MarkRefsIntoClosure(
6913 MemRegion span, CMSBitMap* bitMap):
6914 _span(span),
6915 _bitMap(bitMap)
6916 {
6917 assert(_ref_processor == NULL, "deliberately left NULL");
6918 assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
6919 }
6921 void MarkRefsIntoClosure::do_oop(oop obj) {
6922 // if p points into _span, then mark corresponding bit in _markBitMap
6923 assert(obj->is_oop(), "expected an oop");
6924 HeapWord* addr = (HeapWord*)obj;
6925 if (_span.contains(addr)) {
6926 // this should be made more efficient
6927 _bitMap->mark(addr);
6928 }
6929 }
6931 void MarkRefsIntoClosure::do_oop(oop* p) { MarkRefsIntoClosure::do_oop_work(p); }
6932 void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
6934 Par_MarkRefsIntoClosure::Par_MarkRefsIntoClosure(
6935 MemRegion span, CMSBitMap* bitMap):
6936 _span(span),
6937 _bitMap(bitMap)
6938 {
6939 assert(_ref_processor == NULL, "deliberately left NULL");
6940 assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
6941 }
6943 void Par_MarkRefsIntoClosure::do_oop(oop obj) {
6944 // if p points into _span, then mark corresponding bit in _markBitMap
6945 assert(obj->is_oop(), "expected an oop");
6946 HeapWord* addr = (HeapWord*)obj;
6947 if (_span.contains(addr)) {
6948 // this should be made more efficient
6949 _bitMap->par_mark(addr);
6950 }
6951 }
6953 void Par_MarkRefsIntoClosure::do_oop(oop* p) { Par_MarkRefsIntoClosure::do_oop_work(p); }
6954 void Par_MarkRefsIntoClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoClosure::do_oop_work(p); }
6956 // A variant of the above, used for CMS marking verification.
6957 MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
6958 MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm):
6959 _span(span),
6960 _verification_bm(verification_bm),
6961 _cms_bm(cms_bm)
6962 {
6963 assert(_ref_processor == NULL, "deliberately left NULL");
6964 assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
6965 }
6967 void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
6968 // if p points into _span, then mark corresponding bit in _markBitMap
6969 assert(obj->is_oop(), "expected an oop");
6970 HeapWord* addr = (HeapWord*)obj;
6971 if (_span.contains(addr)) {
6972 _verification_bm->mark(addr);
6973 if (!_cms_bm->isMarked(addr)) {
6974 oop(addr)->print();
6975 gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", addr);
6976 fatal("... aborting");
6977 }
6978 }
6979 }
6981 void MarkRefsIntoVerifyClosure::do_oop(oop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
6982 void MarkRefsIntoVerifyClosure::do_oop(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
6984 //////////////////////////////////////////////////
6985 // MarkRefsIntoAndScanClosure
6986 //////////////////////////////////////////////////
6988 MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
6989 ReferenceProcessor* rp,
6990 CMSBitMap* bit_map,
6991 CMSBitMap* mod_union_table,
6992 CMSMarkStack* mark_stack,
6993 CMSCollector* collector,
6994 bool should_yield,
6995 bool concurrent_precleaning):
6996 _collector(collector),
6997 _span(span),
6998 _bit_map(bit_map),
6999 _mark_stack(mark_stack),
7000 _pushAndMarkClosure(collector, span, rp, bit_map, mod_union_table,
7001 mark_stack, concurrent_precleaning),
7002 _yield(should_yield),
7003 _concurrent_precleaning(concurrent_precleaning),
7004 _freelistLock(NULL)
7005 {
7006 _ref_processor = rp;
7007 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7008 }
7010 // This closure is used to mark refs into the CMS generation at the
7011 // second (final) checkpoint, and to scan and transitively follow
7012 // the unmarked oops. It is also used during the concurrent precleaning
7013 // phase while scanning objects on dirty cards in the CMS generation.
7014 // The marks are made in the marking bit map and the marking stack is
7015 // used for keeping the (newly) grey objects during the scan.
7016 // The parallel version (Par_...) appears further below.
7017 void MarkRefsIntoAndScanClosure::do_oop(oop obj) {
7018 if (obj != NULL) {
7019 assert(obj->is_oop(), "expected an oop");
7020 HeapWord* addr = (HeapWord*)obj;
7021 assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
7022 assert(_collector->overflow_list_is_empty(),
7023 "overflow list should be empty");
7024 if (_span.contains(addr) &&
7025 !_bit_map->isMarked(addr)) {
7026 // mark bit map (object is now grey)
7027 _bit_map->mark(addr);
7028 // push on marking stack (stack should be empty), and drain the
7029 // stack by applying this closure to the oops in the oops popped
7030 // from the stack (i.e. blacken the grey objects)
7031 bool res = _mark_stack->push(obj);
7032 assert(res, "Should have space to push on empty stack");
7033 do {
7034 oop new_oop = _mark_stack->pop();
7035 assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
7036 assert(_bit_map->isMarked((HeapWord*)new_oop),
7037 "only grey objects on this stack");
7038 // iterate over the oops in this oop, marking and pushing
7039 // the ones in CMS heap (i.e. in _span).
7040 new_oop->oop_iterate(&_pushAndMarkClosure);
7041 // check if it's time to yield
7042 do_yield_check();
7043 } while (!_mark_stack->isEmpty() ||
7044 (!_concurrent_precleaning && take_from_overflow_list()));
7045 // if marking stack is empty, and we are not doing this
7046 // during precleaning, then check the overflow list
7047 }
7048 assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
7049 assert(_collector->overflow_list_is_empty(),
7050 "overflow list was drained above");
7051 // We could restore evacuated mark words, if any, used for
7052 // overflow list links here because the overflow list is
7053 // provably empty here. That would reduce the maximum
7054 // size requirements for preserved_{oop,mark}_stack.
7055 // But we'll just postpone it until we are all done
7056 // so we can just stream through.
7057 if (!_concurrent_precleaning && CMSOverflowEarlyRestoration) {
7058 _collector->restore_preserved_marks_if_any();
7059 assert(_collector->no_preserved_marks(), "No preserved marks");
7060 }
7061 assert(!CMSOverflowEarlyRestoration || _collector->no_preserved_marks(),
7062 "All preserved marks should have been restored above");
7063 }
7064 }
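// A tiny hedged trace of the drain loop above (the object graph is
// hypothetical): if object A references B and C, all three in _span and
// unmarked, then do_oop(A) marks and pushes A, pops it, and applies
// _pushAndMarkClosure to its fields, which marks and pushes B and C
// (A is now black); B and C are popped and scanned in turn, and the loop
// exits with the stack empty and all three objects marked.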
7066 void MarkRefsIntoAndScanClosure::do_oop(oop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
7067 void MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
7069 void MarkRefsIntoAndScanClosure::do_yield_work() {
7070 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7071 "CMS thread should hold CMS token");
7072 assert_lock_strong(_freelistLock);
7073 assert_lock_strong(_bit_map->lock());
7074 // relinquish the free_list_lock and bitMaplock()
7075 _bit_map->lock()->unlock();
7076 _freelistLock->unlock();
7077 ConcurrentMarkSweepThread::desynchronize(true);
7078 ConcurrentMarkSweepThread::acknowledge_yield_request();
7079 _collector->stopTimer();
7080 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
7081 if (PrintCMSStatistics != 0) {
7082 _collector->incrementYields();
7083 }
7084 _collector->icms_wait();
7086 // See the comment in coordinator_yield()
7087 for (unsigned i = 0;
7088 i < CMSYieldSleepCount &&
7089 ConcurrentMarkSweepThread::should_yield() &&
7090 !CMSCollector::foregroundGCIsActive();
7091 ++i) {
7092 os::sleep(Thread::current(), 1, false);
7093 ConcurrentMarkSweepThread::acknowledge_yield_request();
7094 }
7096 ConcurrentMarkSweepThread::synchronize(true);
7097 _freelistLock->lock_without_safepoint_check();
7098 _bit_map->lock()->lock_without_safepoint_check();
7099 _collector->startTimer();
7100 }
7102 ///////////////////////////////////////////////////////////
7103 // Par_MarkRefsIntoAndScanClosure: a parallel version of
7104 // MarkRefsIntoAndScanClosure
7105 ///////////////////////////////////////////////////////////
7106 Par_MarkRefsIntoAndScanClosure::Par_MarkRefsIntoAndScanClosure(
7107 CMSCollector* collector, MemRegion span, ReferenceProcessor* rp,
7108 CMSBitMap* bit_map, OopTaskQueue* work_queue):
7109 _span(span),
7110 _bit_map(bit_map),
7111 _work_queue(work_queue),
7112 _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
7113 (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads))),
7114 _par_pushAndMarkClosure(collector, span, rp, bit_map, work_queue)
7115 {
7116 _ref_processor = rp;
7117 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7118 }
7120 // This closure is used to mark refs into the CMS generation at the
7121 // second (final) checkpoint, and to scan and transitively follow
7122 // the unmarked oops. The marks are made in the marking bit map and
7123 // the work_queue is used for keeping the (newly) grey objects during
7124 // the scan phase whence they are also available for stealing by parallel
7125 // threads. Since the marking bit map is shared, updates are
7126 // synchronized (via CAS).
7127 void Par_MarkRefsIntoAndScanClosure::do_oop(oop obj) {
7128 if (obj != NULL) {
7129 // Ignore mark word because this could be an already marked oop
7130 // that may be chained at the end of the overflow list.
7131 assert(obj->is_oop(true), "expected an oop");
7132 HeapWord* addr = (HeapWord*)obj;
7133 if (_span.contains(addr) &&
7134 !_bit_map->isMarked(addr)) {
7135 // mark bit map (object will become grey):
7136 // It is possible for several threads to be
7137 // trying to "claim" this object concurrently;
7138 // the unique thread that succeeds in marking the
7139 // object first will do the subsequent push on
7140 // to the work queue (or overflow list).
7141 if (_bit_map->par_mark(addr)) {
7142 // push on work_queue (which may not be empty), and trim the
7143 // queue to an appropriate length by applying this closure to
7144 // the oops in the oops popped from the stack (i.e. blacken the
7145 // grey objects)
7146 bool res = _work_queue->push(obj);
7147 assert(res, "Low water mark should be less than capacity?");
7148 trim_queue(_low_water_mark);
7149 } // Else, another thread claimed the object
7150 }
7151 }
7152 }
7154 void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
7155 void Par_MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
7157 // This closure is used to rescan the marked objects on the dirty cards
7158 // in the mod union table and the card table proper.
7159 size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
7160 oop p, MemRegion mr) {
7162 size_t size = 0;
7163 HeapWord* addr = (HeapWord*)p;
7164 DEBUG_ONLY(_collector->verify_work_stacks_empty();)
7165 assert(_span.contains(addr), "we are scanning the CMS generation");
7166 // check if it's time to yield
7167 if (do_yield_check()) {
7168 // We yielded for some foreground stop-world work,
7169 // and we have been asked to abort this ongoing preclean cycle.
7170 return 0;
7171 }
7172 if (_bitMap->isMarked(addr)) {
7173 // it's marked; is it potentially uninitialized?
7174 if (p->klass_or_null() != NULL) {
7175 // an initialized object; ignore mark word in verification below
7176 // since we are running concurrent with mutators
7177 assert(p->is_oop(true), "should be an oop");
7178 if (p->is_objArray()) {
7179 // objArrays are precisely marked; restrict scanning
7180 // to dirty cards only.
7181 size = CompactibleFreeListSpace::adjustObjectSize(
7182 p->oop_iterate(_scanningClosure, mr));
7183 } else {
7184 // A non-array may have been imprecisely marked; we need
7185 // to scan the object in its entirety.
7186 size = CompactibleFreeListSpace::adjustObjectSize(
7187 p->oop_iterate(_scanningClosure));
7188 }
7189 #ifdef ASSERT
7190 size_t direct_size =
7191 CompactibleFreeListSpace::adjustObjectSize(p->size());
7192 assert(size == direct_size, "Inconsistency in size");
7193 assert(size >= 3, "Necessary for Printezis marks to work");
7194 if (!_bitMap->isMarked(addr+1)) {
7195 _bitMap->verifyNoOneBitsInRange(addr+2, addr+size);
7196 } else {
7197 _bitMap->verifyNoOneBitsInRange(addr+2, addr+size-1);
7198 assert(_bitMap->isMarked(addr+size-1),
7199 "inconsistent Printezis mark");
7200 }
7201 #endif // ASSERT
7202 } else {
7203 // an uninitialized object
7204 assert(_bitMap->isMarked(addr+1), "missing Printezis mark?");
7205 HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
7206 size = pointer_delta(nextOneAddr + 1, addr);
7207 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
7208 "alignment problem");
7209 // Note that pre-cleaning needn't redirty the card. OopDesc::set_klass()
7210 // will dirty the card when the klass pointer is installed in the
7211 // object (signalling the completion of initialization).
7212 }
7213 } else {
7214 // Either a not-yet-marked object or an uninitialized object
7215 if (p->klass_or_null() == NULL) {
7216 // An uninitialized object, skip to the next card, since
7217 // we may not be able to read its P-bits yet.
7218 assert(size == 0, "Initial value");
7219 } else {
7220 // An object not (yet) reached by marking: we merely need to
7221 // compute its size so as to go look at the next block.
7222 assert(p->is_oop(true), "should be an oop");
7223 size = CompactibleFreeListSpace::adjustObjectSize(p->size());
7224 }
7225 }
7226 DEBUG_ONLY(_collector->verify_work_stacks_empty();)
7227 return size;
7228 }
7230 void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
7231 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7232 "CMS thread should hold CMS token");
7233 assert_lock_strong(_freelistLock);
7234 assert_lock_strong(_bitMap->lock());
7235 // relinquish the _freelistLock and the bitMap lock
7236 _bitMap->lock()->unlock();
7237 _freelistLock->unlock();
7238 ConcurrentMarkSweepThread::desynchronize(true);
7239 ConcurrentMarkSweepThread::acknowledge_yield_request();
7240 _collector->stopTimer();
7241 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
7242 if (PrintCMSStatistics != 0) {
7243 _collector->incrementYields();
7244 }
7245 _collector->icms_wait();
7247 // See the comment in coordinator_yield()
7248 for (unsigned i = 0; i < CMSYieldSleepCount &&
7249 ConcurrentMarkSweepThread::should_yield() &&
7250 !CMSCollector::foregroundGCIsActive(); ++i) {
7251 os::sleep(Thread::current(), 1, false);
7252 ConcurrentMarkSweepThread::acknowledge_yield_request();
7253 }
7255 ConcurrentMarkSweepThread::synchronize(true);
7256 _freelistLock->lock_without_safepoint_check();
7257 _bitMap->lock()->lock_without_safepoint_check();
7258 _collector->startTimer();
7259 }
7262 //////////////////////////////////////////////////////////////////
7263 // SurvivorSpacePrecleanClosure
7264 //////////////////////////////////////////////////////////////////
7265 // This (single-threaded) closure is used to preclean the oops in
7266 // the survivor spaces.
7267 size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {
7269 HeapWord* addr = (HeapWord*)p;
7270 DEBUG_ONLY(_collector->verify_work_stacks_empty();)
7271 assert(!_span.contains(addr), "we are scanning the survivor spaces");
7272 assert(p->klass_or_null() != NULL, "object should be initialized");
7273 // an initialized object; ignore mark word in verification below
7274 // since we are running concurrent with mutators
7275 assert(p->is_oop(true), "should be an oop");
7276 // Note that we do not yield while we iterate over
7277 // the interior oops of p, pushing the relevant ones
7278 // on our marking stack.
7279 size_t size = p->oop_iterate(_scanning_closure);
7280 do_yield_check();
7281 // Observe that below, we do not abandon the preclean
7282 // phase as soon as we should; rather we empty the
7283 // marking stack before returning. This is to satisfy
7284 // some existing assertions. In general, it may be a
7285 // good idea to abort immediately and complete the marking
7286 // from the grey objects at a later time.
7287 while (!_mark_stack->isEmpty()) {
7288 oop new_oop = _mark_stack->pop();
7289 assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
7290 assert(_bit_map->isMarked((HeapWord*)new_oop),
7291 "only grey objects on this stack");
7292 // iterate over the oops in this oop, marking and pushing
7293 // the ones in CMS heap (i.e. in _span).
7294 new_oop->oop_iterate(_scanning_closure);
7295 // check if it's time to yield
7296 do_yield_check();
7297 }
7298 unsigned int after_count =
7299 GenCollectedHeap::heap()->total_collections();
7300 bool abort = (_before_count != after_count) ||
7301 _collector->should_abort_preclean();
7302 return abort ? 0 : size;
7303 }
7305 void SurvivorSpacePrecleanClosure::do_yield_work() {
7306 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7307 "CMS thread should hold CMS token");
7308 assert_lock_strong(_bit_map->lock());
7309 // Relinquish the bit map lock
7310 _bit_map->lock()->unlock();
7311 ConcurrentMarkSweepThread::desynchronize(true);
7312 ConcurrentMarkSweepThread::acknowledge_yield_request();
7313 _collector->stopTimer();
7314 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
7315 if (PrintCMSStatistics != 0) {
7316 _collector->incrementYields();
7317 }
7318 _collector->icms_wait();
7320 // See the comment in coordinator_yield()
7321 for (unsigned i = 0; i < CMSYieldSleepCount &&
7322 ConcurrentMarkSweepThread::should_yield() &&
7323 !CMSCollector::foregroundGCIsActive(); ++i) {
7324 os::sleep(Thread::current(), 1, false);
7325 ConcurrentMarkSweepThread::acknowledge_yield_request();
7326 }
7328 ConcurrentMarkSweepThread::synchronize(true);
7329 _bit_map->lock()->lock_without_safepoint_check();
7330 _collector->startTimer();
7331 }
7333 // This closure is used to rescan the marked objects on the dirty cards
7334 // in the mod union table and the card table proper. In the parallel
7335 // case, although the bitMap is shared, we do a single read so the
7336 // isMarked() query is "safe".
7337 bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
7338 // Ignore mark word because we are running concurrent with mutators
7339 assert(p->is_oop_or_null(true), "expected an oop or null");
7340 HeapWord* addr = (HeapWord*)p;
7341 assert(_span.contains(addr), "we are scanning the CMS generation");
7342 bool is_obj_array = false;
7343 #ifdef ASSERT
7344 if (!_parallel) {
7345 assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
7346 assert(_collector->overflow_list_is_empty(),
7347 "overflow list should be empty");
7349 }
7350 #endif // ASSERT
7351 if (_bit_map->isMarked(addr)) {
7352 // Obj arrays are precisely marked, non-arrays are not;
7353 // so we scan objArrays precisely and non-arrays in their
7354 // entirety.
7355 if (p->is_objArray()) {
7356 is_obj_array = true;
7357 if (_parallel) {
7358 p->oop_iterate(_par_scan_closure, mr);
7359 } else {
7360 p->oop_iterate(_scan_closure, mr);
7361 }
7362 } else {
7363 if (_parallel) {
7364 p->oop_iterate(_par_scan_closure);
7365 } else {
7366 p->oop_iterate(_scan_closure);
7367 }
7368 }
7369 }
7370 #ifdef ASSERT
7371 if (!_parallel) {
7372 assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
7373 assert(_collector->overflow_list_is_empty(),
7374 "overflow list should be empty");
7376 }
7377 #endif // ASSERT
7378 return is_obj_array;
7379 }
7381 MarkFromRootsClosure::MarkFromRootsClosure(CMSCollector* collector,
7382 MemRegion span,
7383 CMSBitMap* bitMap, CMSMarkStack* markStack,
7384 bool should_yield, bool verifying):
7385 _collector(collector),
7386 _span(span),
7387 _bitMap(bitMap),
7388 _mut(&collector->_modUnionTable),
7389 _markStack(markStack),
7390 _yield(should_yield),
7391 _skipBits(0)
7392 {
7393 assert(_markStack->isEmpty(), "stack should be empty");
7394 _finger = _bitMap->startWord();
7395 _threshold = _finger;
7396 assert(_collector->_restart_addr == NULL, "Sanity check");
7397 assert(_span.contains(_finger), "Out of bounds _finger?");
7398 DEBUG_ONLY(_verifying = verifying;)
7399 }
7401 void MarkFromRootsClosure::reset(HeapWord* addr) {
7402 assert(_markStack->isEmpty(), "would cause duplicates on stack");
7403 assert(_span.contains(addr), "Out of bounds _finger?");
7404 _finger = addr;
7405 _threshold = (HeapWord*)round_to(
7406 (intptr_t)_finger, CardTableModRefBS::card_size);
7407 }
7409 // Should revisit to see if this should be restructured for
7410 // greater efficiency.
7411 bool MarkFromRootsClosure::do_bit(size_t offset) {
7412 if (_skipBits > 0) {
7413 _skipBits--;
7414 return true;
7415 }
7416 // convert offset into a HeapWord*
7417 HeapWord* addr = _bitMap->startWord() + offset;
7418 assert(_bitMap->startWord() <= addr && addr < _bitMap->endWord(),
7419 "address out of range");
7420 assert(_bitMap->isMarked(addr), "tautology");
7421 if (_bitMap->isMarked(addr+1)) {
7422 // this is an allocated but not yet initialized object
7423 assert(_skipBits == 0, "tautology");
7424 _skipBits = 2; // skip next two marked bits ("Printezis-marks")
7425 oop p = oop(addr);
7426 if (p->klass_or_null() == NULL) {
7427 DEBUG_ONLY(if (!_verifying) {)
7428 // We re-dirty the cards on which this object lies and increase
7429 // the _threshold so that we'll come back to scan this object
7430 // during the preclean or remark phase. (CMSCleanOnEnter)
7431 if (CMSCleanOnEnter) {
7432 size_t sz = _collector->block_size_using_printezis_bits(addr);
7433 HeapWord* end_card_addr = (HeapWord*)round_to(
7434 (intptr_t)(addr+sz), CardTableModRefBS::card_size);
7435 MemRegion redirty_range = MemRegion(addr, end_card_addr);
7436 assert(!redirty_range.is_empty(), "Arithmetical tautology");
7437 // Bump _threshold to end_card_addr; note that
7438 // _threshold cannot possibly exceed end_card_addr, anyhow.
7439 // This prevents future clearing of the card as the scan proceeds
7440 // to the right.
7441 assert(_threshold <= end_card_addr,
7442 "Because we are just scanning into this object");
7443 if (_threshold < end_card_addr) {
7444 _threshold = end_card_addr;
7445 }
7446 if (p->klass_or_null() != NULL) {
7447 // Redirty the range of cards...
7448 _mut->mark_range(redirty_range);
7449 } // ...else the setting of klass will dirty the card anyway.
7450 }
7451 DEBUG_ONLY(})
7452 return true;
7453 }
7454 }
7455 scanOopsInOop(addr);
7456 return true;
7457 }
7459 // We take a break if we've been at this for a while,
7460 // so as to avoid monopolizing the locks involved.
7461 void MarkFromRootsClosure::do_yield_work() {
7462 // First give up the locks, then yield, then re-lock
7463 // We should probably use a constructor/destructor idiom to
7464 // do this unlock/lock or modify the MutexUnlocker class to
7465 // serve our purpose. XXX
7466 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7467 "CMS thread should hold CMS token");
7468 assert_lock_strong(_bitMap->lock());
7469 _bitMap->lock()->unlock();
7470 ConcurrentMarkSweepThread::desynchronize(true);
7471 ConcurrentMarkSweepThread::acknowledge_yield_request();
7472 _collector->stopTimer();
7473 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
7474 if (PrintCMSStatistics != 0) {
7475 _collector->incrementYields();
7476 }
7477 _collector->icms_wait();
7479 // See the comment in coordinator_yield()
7480 for (unsigned i = 0; i < CMSYieldSleepCount &&
7481 ConcurrentMarkSweepThread::should_yield() &&
7482 !CMSCollector::foregroundGCIsActive(); ++i) {
7483 os::sleep(Thread::current(), 1, false);
7484 ConcurrentMarkSweepThread::acknowledge_yield_request();
7485 }
7487 ConcurrentMarkSweepThread::synchronize(true);
7488 _bitMap->lock()->lock_without_safepoint_check();
7489 _collector->startTimer();
7490 }
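// [Editorial sketch, not in the original change] The XXX note above suggests
// a constructor/destructor idiom for the unlock/yield/relock bracketing. A
// minimal sketch of such a scoped helper, assuming only the Mutex operations
// already used in this file (this type is not defined by the change itself):
class ScopedYieldUnlocker {
  Mutex* _lock;
 public:
  explicit ScopedYieldUnlocker(Mutex* lock) : _lock(lock) {
    _lock->unlock();                        // open the yield window
  }
  ~ScopedYieldUnlocker() {
    _lock->lock_without_safepoint_check();  // reacquire on scope exit
  }
};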
7492 void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
7493 assert(_bitMap->isMarked(ptr), "expected bit to be set");
7494 assert(_markStack->isEmpty(),
7495 "should drain stack to limit stack usage");
7496 // convert ptr to an oop preparatory to scanning
7497 oop obj = oop(ptr);
7498 // Ignore mark word in verification below, since we
7499 // may be running concurrent with mutators.
7500 assert(obj->is_oop(true), "should be an oop");
7501 assert(_finger <= ptr, "_finger runneth ahead");
7502 // advance the finger to right end of this object
7503 _finger = ptr + obj->size();
7504 assert(_finger > ptr, "we just incremented it above");
7505 // On large heaps, it may take us some time to get through
7506 // the marking phase (especially if running iCMS). During
7507 // this time it's possible that a lot of mutations have
7508 // accumulated in the card table and the mod union table --
7509 // these mutation records are redundant until we have
7510 // actually traced into the corresponding card.
7511 // Here, we check whether advancing the finger would make
7512 // us cross into a new card, and if so clear corresponding
7513 // cards in the MUT (preclean them in the card-table in the
7514 // future).
7516 DEBUG_ONLY(if (!_verifying) {)
7517 // The clean-on-enter optimization is disabled by default,
7518 // until we fix 6178663.
7519 if (CMSCleanOnEnter && (_finger > _threshold)) {
7520 // [_threshold, _finger) represents the interval
7521 // of cards to be cleared in MUT (or precleaned in card table).
7522 // The set of cards to be cleared is all those that overlap
7523 // with the interval [_threshold, _finger); note that
7524 // _threshold is always kept card-aligned but _finger isn't
7525 // always card-aligned.
7526 HeapWord* old_threshold = _threshold;
7527 assert(old_threshold == (HeapWord*)round_to(
7528 (intptr_t)old_threshold, CardTableModRefBS::card_size),
7529 "_threshold should always be card-aligned");
7530 _threshold = (HeapWord*)round_to(
7531 (intptr_t)_finger, CardTableModRefBS::card_size);
7532 MemRegion mr(old_threshold, _threshold);
7533 assert(!mr.is_empty(), "Control point invariant");
7534 assert(_span.contains(mr), "Should clear within span");
7535 _mut->clear_range(mr);
7536 }
7537 DEBUG_ONLY(})
7538 // Note: the finger doesn't advance while we drain
7539 // the stack below.
7540 PushOrMarkClosure pushOrMarkClosure(_collector,
7541 _span, _bitMap, _markStack,
7542 _finger, this);
7543 bool res = _markStack->push(obj);
7544 assert(res, "Empty non-zero size stack should have space for single push");
7545 while (!_markStack->isEmpty()) {
7546 oop new_oop = _markStack->pop();
7547 // Skip verifying header mark word below because we are
7548 // running concurrent with mutators.
7549 assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
7550 // now scan this oop's oops
7551 new_oop->oop_iterate(&pushOrMarkClosure);
7552 do_yield_check();
7553 }
7554 assert(_markStack->isEmpty(), "tautology, emphasizing post-condition");
7555 }
7557 Par_MarkFromRootsClosure::Par_MarkFromRootsClosure(CMSConcMarkingTask* task,
7558 CMSCollector* collector, MemRegion span,
7559 CMSBitMap* bit_map,
7560 OopTaskQueue* work_queue,
7561 CMSMarkStack* overflow_stack,
7562 bool should_yield):
7563 _collector(collector),
7564 _whole_span(collector->_span),
7565 _span(span),
7566 _bit_map(bit_map),
7567 _mut(&collector->_modUnionTable),
7568 _work_queue(work_queue),
7569 _overflow_stack(overflow_stack),
7570 _yield(should_yield),
7571 _skip_bits(0),
7572 _task(task)
7573 {
7574 assert(_work_queue->size() == 0, "work_queue should be empty");
7575 _finger = span.start();
7576 _threshold = _finger; // XXX Defer clear-on-enter optimization for now
7577 assert(_span.contains(_finger), "Out of bounds _finger?");
7578 }
7580 // Should revisit to see if this should be restructured for
7581 // greater efficiency.
7582 bool Par_MarkFromRootsClosure::do_bit(size_t offset) {
7583 if (_skip_bits > 0) {
7584 _skip_bits--;
7585 return true;
7586 }
7587 // convert offset into a HeapWord*
7588 HeapWord* addr = _bit_map->startWord() + offset;
7589 assert(_bit_map->startWord() <= addr && addr < _bit_map->endWord(),
7590 "address out of range");
7591 assert(_bit_map->isMarked(addr), "tautology");
7592 if (_bit_map->isMarked(addr+1)) {
7593 // this is an allocated object that might not yet be initialized
7594 assert(_skip_bits == 0, "tautology");
7595 _skip_bits = 2; // skip next two marked bits ("Printezis-marks")
7596 oop p = oop(addr);
7597 if (p->klass_or_null() == NULL) {
7598 // in the case of Clean-on-Enter optimization, redirty card
7599 // and avoid clearing card by increasing the threshold.
7600 return true;
7601 }
7602 }
7603 scan_oops_in_oop(addr);
7604 return true;
7605 }
7607 void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
7608 assert(_bit_map->isMarked(ptr), "expected bit to be set");
7609 // Should we assert that our work queue is empty or
7610 // below some drain limit?
7611 assert(_work_queue->size() == 0,
7612 "should drain stack to limit stack usage");
7613 // convert ptr to an oop preparatory to scanning
7614 oop obj = oop(ptr);
7615 // Ignore mark word in verification below, since we
7616 // may be running concurrent with mutators.
7617 assert(obj->is_oop(true), "should be an oop");
7618 assert(_finger <= ptr, "_finger runneth ahead");
7619 // advance the finger to right end of this object
7620 _finger = ptr + obj->size();
7621 assert(_finger > ptr, "we just incremented it above");
7622 // On large heaps, it may take us some time to get through
7623 // the marking phase (especially if running iCMS). During
7624 // this time it's possible that a lot of mutations have
7625 // accumulated in the card table and the mod union table --
7626 // these mutation records are redundant until we have
7627 // actually traced into the corresponding card.
7628 // Here, we check whether advancing the finger would make
7629 // us cross into a new card, and if so clear corresponding
7630 // cards in the MUT (preclean them in the card-table in the
7631 // future).
7633 // The clean-on-enter optimization is disabled by default,
7634 // until we fix 6178663.
7635 if (CMSCleanOnEnter && (_finger > _threshold)) {
7636 // [_threshold, _finger) represents the interval
7637 // of cards to be cleared in MUT (or precleaned in card table).
7638 // The set of cards to be cleared is all those that overlap
7639 // with the interval [_threshold, _finger); note that
7640 // _threshold is always kept card-aligned but _finger isn't
7641 // always card-aligned.
7642 HeapWord* old_threshold = _threshold;
7643 assert(old_threshold == (HeapWord*)round_to(
7644 (intptr_t)old_threshold, CardTableModRefBS::card_size),
7645 "_threshold should always be card-aligned");
7646 _threshold = (HeapWord*)round_to(
7647 (intptr_t)_finger, CardTableModRefBS::card_size);
7648 MemRegion mr(old_threshold, _threshold);
7649 assert(!mr.is_empty(), "Control point invariant");
7650 assert(_span.contains(mr), "Should clear within span"); // _whole_span ??
7651 _mut->clear_range(mr);
7652 }
7654 // Note: the local finger doesn't advance while we drain
7655 // the stack below, but the global finger sure can and will.
7656 HeapWord** gfa = _task->global_finger_addr();
7657 Par_PushOrMarkClosure pushOrMarkClosure(_collector,
7658 _span, _bit_map,
7659 _work_queue,
7660 _overflow_stack,
7661 _finger,
7662 gfa, this);
7663 bool res = _work_queue->push(obj); // overflow could occur here
7664 assert(res, "Will hold once we use workqueues");
7665 while (true) {
7666 oop new_oop;
7667 if (!_work_queue->pop_local(new_oop)) {
7668 // We emptied our work_queue; check if there's stuff that can
7669 // be gotten from the overflow stack.
7670 if (CMSConcMarkingTask::get_work_from_overflow_stack(
7671 _overflow_stack, _work_queue)) {
7672 do_yield_check();
7673 continue;
7674 } else { // done
7675 break;
7676 }
7677 }
7678 // Skip verifying header mark word below because we are
7679 // running concurrent with mutators.
7680 assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
7681 // now scan this oop's oops
7682 new_oop->oop_iterate(&pushOrMarkClosure);
7683 do_yield_check();
7684 }
7685 assert(_work_queue->size() == 0, "tautology, emphasizing post-condition");
7686 }
7688 // Yield in response to a request from VM Thread or
7689 // from mutators.
7690 void Par_MarkFromRootsClosure::do_yield_work() {
7691 assert(_task != NULL, "sanity");
7692 _task->yield();
7693 }
7695 // A variant of the above used for verifying CMS marking work.
7696 MarkFromRootsVerifyClosure::MarkFromRootsVerifyClosure(CMSCollector* collector,
7697 MemRegion span,
7698 CMSBitMap* verification_bm, CMSBitMap* cms_bm,
7699 CMSMarkStack* mark_stack):
7700 _collector(collector),
7701 _span(span),
7702 _verification_bm(verification_bm),
7703 _cms_bm(cms_bm),
7704 _mark_stack(mark_stack),
7705 _pam_verify_closure(collector, span, verification_bm, cms_bm,
7706 mark_stack)
7707 {
7708 assert(_mark_stack->isEmpty(), "stack should be empty");
7709 _finger = _verification_bm->startWord();
7710 assert(_collector->_restart_addr == NULL, "Sanity check");
7711 assert(_span.contains(_finger), "Out of bounds _finger?");
7712 }
7714 void MarkFromRootsVerifyClosure::reset(HeapWord* addr) {
7715 assert(_mark_stack->isEmpty(), "would cause duplicates on stack");
7716 assert(_span.contains(addr), "Out of bounds _finger?");
7717 _finger = addr;
7718 }
7720 // Should revisit to see if this should be restructured for
7721 // greater efficiency.
7722 bool MarkFromRootsVerifyClosure::do_bit(size_t offset) {
7723 // convert offset into a HeapWord*
7724 HeapWord* addr = _verification_bm->startWord() + offset;
7725 assert(_verification_bm->startWord() <= addr && addr < _verification_bm->endWord(),
7726 "address out of range");
7727 assert(_verification_bm->isMarked(addr), "tautology");
7728 assert(_cms_bm->isMarked(addr), "tautology");
7730 assert(_mark_stack->isEmpty(),
7731 "should drain stack to limit stack usage");
7732 // convert addr to an oop preparatory to scanning
7733 oop obj = oop(addr);
7734 assert(obj->is_oop(), "should be an oop");
7735 assert(_finger <= addr, "_finger runneth ahead");
7736 // advance the finger to right end of this object
7737 _finger = addr + obj->size();
7738 assert(_finger > addr, "we just incremented it above");
7739 // Note: the finger doesn't advance while we drain
7740 // the stack below.
7741 bool res = _mark_stack->push(obj);
7742 assert(res, "Empty non-zero size stack should have space for single push");
7743 while (!_mark_stack->isEmpty()) {
7744 oop new_oop = _mark_stack->pop();
7745 assert(new_oop->is_oop(), "Oops! expected to pop an oop");
7746 // now scan this oop's oops
7747 new_oop->oop_iterate(&_pam_verify_closure);
7748 }
7749 assert(_mark_stack->isEmpty(), "tautology, emphasizing post-condition");
7750 return true;
7751 }
7753 PushAndMarkVerifyClosure::PushAndMarkVerifyClosure(
7754 CMSCollector* collector, MemRegion span,
7755 CMSBitMap* verification_bm, CMSBitMap* cms_bm,
7756 CMSMarkStack* mark_stack):
7757 MetadataAwareOopClosure(collector->ref_processor()),
7758 _collector(collector),
7759 _span(span),
7760 _verification_bm(verification_bm),
7761 _cms_bm(cms_bm),
7762 _mark_stack(mark_stack)
7763 { }
7765 void PushAndMarkVerifyClosure::do_oop(oop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
7766 void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
7768 // Upon stack overflow, we discard (part of) the stack,
7769 // remembering the least address amongst those discarded
7770 // in CMSCollector's _restart_addr.
7771 void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
7772 // Remember the least grey address discarded
7773 HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost);
7774 _collector->lower_restart_addr(ra);
7775 _mark_stack->reset(); // discard stack contents
7776 _mark_stack->expand(); // expand the stack if possible
7777 }
7779 void PushAndMarkVerifyClosure::do_oop(oop obj) {
7780 assert(obj->is_oop_or_null(), "expected an oop or NULL");
7781 HeapWord* addr = (HeapWord*)obj;
7782 if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
7783 // Oop lies in _span and isn't yet grey or black
7784 _verification_bm->mark(addr); // now grey
7785 if (!_cms_bm->isMarked(addr)) {
7786 oop(addr)->print();
7787 gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)",
7788 addr);
7789 fatal("... aborting");
7790 }
7792 if (!_mark_stack->push(obj)) { // stack overflow
7793 if (PrintCMSStatistics != 0) {
7794 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7795 SIZE_FORMAT, _mark_stack->capacity());
7796 }
7797 assert(_mark_stack->isFull(), "Else push should have succeeded");
7798 handle_stack_overflow(addr);
7799 }
7800 // anything including and to the right of _finger
7801 // will be scanned as we iterate over the remainder of the
7802 // bit map
7803 }
7804 }
7806 PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
7807 MemRegion span,
7808 CMSBitMap* bitMap, CMSMarkStack* markStack,
7809 HeapWord* finger, MarkFromRootsClosure* parent) :
7810 MetadataAwareOopClosure(collector->ref_processor()),
7811 _collector(collector),
7812 _span(span),
7813 _bitMap(bitMap),
7814 _markStack(markStack),
7815 _finger(finger),
7816 _parent(parent)
7817 { }
7819 Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
7820 MemRegion span,
7821 CMSBitMap* bit_map,
7822 OopTaskQueue* work_queue,
7823 CMSMarkStack* overflow_stack,
7824 HeapWord* finger,
7825 HeapWord** global_finger_addr,
7826 Par_MarkFromRootsClosure* parent) :
7827 MetadataAwareOopClosure(collector->ref_processor()),
7828 _collector(collector),
7829 _whole_span(collector->_span),
7830 _span(span),
7831 _bit_map(bit_map),
7832 _work_queue(work_queue),
7833 _overflow_stack(overflow_stack),
7834 _finger(finger),
7835 _global_finger_addr(global_finger_addr),
7836 _parent(parent)
7837 { }
7839 // Assumes thread-safe access by callers, who are
7840 // responsible for mutual exclusion.
7841 void CMSCollector::lower_restart_addr(HeapWord* low) {
7842 assert(_span.contains(low), "Out of bounds addr");
7843 if (_restart_addr == NULL) {
7844 _restart_addr = low;
7845 } else {
7846 _restart_addr = MIN2(_restart_addr, low);
7847 }
7848 }
7850 // Upon stack overflow, we discard (part of) the stack,
7851 // remembering the least address amongst those discarded
7852 // in CMSCollector's _restart_addr.
7853 void PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
7854 // Remember the least grey address discarded
7855 HeapWord* ra = (HeapWord*)_markStack->least_value(lost);
7856 _collector->lower_restart_addr(ra);
7857 _markStack->reset(); // discard stack contents
7858 _markStack->expand(); // expand the stack if possible
7859 }
7861 // Upon stack overflow, we discard (part of) the stack,
7862 // remembering the least address amongst those discarded
7863 // in CMSCollector's _restart_addr.
7864 void Par_PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
7865 // We need to do this under a mutex to prevent other
7866 // workers from interfering with the work done below.
7867 MutexLockerEx ml(_overflow_stack->par_lock(),
7868 Mutex::_no_safepoint_check_flag);
7869 // Remember the least grey address discarded
7870 HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
7871 _collector->lower_restart_addr(ra);
7872 _overflow_stack->reset(); // discard stack contents
7873 _overflow_stack->expand(); // expand the stack if possible
7874 }
7876 void PushOrMarkClosure::do_oop(oop obj) {
7877 // Ignore mark word because we are running concurrent with mutators.
7878 assert(obj->is_oop_or_null(true), "expected an oop or NULL");
7879 HeapWord* addr = (HeapWord*)obj;
7880 if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
7881 // Oop lies in _span and isn't yet grey or black
7882 _bitMap->mark(addr); // now grey
7883 if (addr < _finger) {
7884 // the bit map iteration has already either passed, or
7885 // sampled, this bit in the bit map; we'll need to
7886 // use the marking stack to scan this oop's oops.
7887 bool simulate_overflow = false;
7888 NOT_PRODUCT(
7889 if (CMSMarkStackOverflowALot &&
7890 _collector->simulate_overflow()) {
7891 // simulate a stack overflow
7892 simulate_overflow = true;
7893 }
7894 )
7895 if (simulate_overflow || !_markStack->push(obj)) { // stack overflow
7896 if (PrintCMSStatistics != 0) {
7897 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7898 SIZE_FORMAT, _markStack->capacity());
7899 }
7900 assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded");
7901 handle_stack_overflow(addr);
7902 }
7903 }
7904 // anything including and to the right of _finger
7905 // will be scanned as we iterate over the remainder of the
7906 // bit map
7907 do_yield_check();
7908 }
7909 }
7911 void PushOrMarkClosure::do_oop(oop* p) { PushOrMarkClosure::do_oop_work(p); }
7912 void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
7914 void Par_PushOrMarkClosure::do_oop(oop obj) {
7915 // Ignore mark word because we are running concurrent with mutators.
7916 assert(obj->is_oop_or_null(true), "expected an oop or NULL");
7917 HeapWord* addr = (HeapWord*)obj;
7918 if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
7919 // Oop lies in _span and isn't yet grey or black
7920 // We read the global_finger (volatile read) strictly after marking oop
7921 bool res = _bit_map->par_mark(addr); // now grey
7922 volatile HeapWord** gfa = (volatile HeapWord**)_global_finger_addr;
7923 // Should we push this marked oop on our stack?
7924 // -- if someone else marked it, nothing to do
7925 // -- if target oop is above global finger nothing to do
7926 // -- if target oop is in chunk and above local finger
7927 // then nothing to do
7928 // -- else push on work queue
7929 if ( !res // someone else marked it, they will deal with it
7930 || (addr >= *gfa) // will be scanned in a later task
7931 || (_span.contains(addr) && addr >= _finger)) { // later in this chunk
7932 return;
7933 }
7934 // the bit map iteration has already either passed, or
7935 // sampled, this bit in the bit map; we'll need to
7936 // use the marking stack to scan this oop's oops.
7937 bool simulate_overflow = false;
7938 NOT_PRODUCT(
7939 if (CMSMarkStackOverflowALot &&
7940 _collector->simulate_overflow()) {
7941 // simulate a stack overflow
7942 simulate_overflow = true;
7943 }
7944 )
7945 if (simulate_overflow ||
7946 !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
7947 // stack overflow
7948 if (PrintCMSStatistics != 0) {
7949 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7950 SIZE_FORMAT, _overflow_stack->capacity());
7951 }
7952 // We cannot assert that the overflow stack is full because
7953 // it may have been emptied since.
7954 assert(simulate_overflow ||
7955 _work_queue->size() == _work_queue->max_elems(),
7956 "Else push should have succeeded");
7957 handle_stack_overflow(addr);
7958 }
7959 do_yield_check();
7960 }
7961 }
7963 void Par_PushOrMarkClosure::do_oop(oop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
7964 void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
7966 PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
7967 MemRegion span,
7968 ReferenceProcessor* rp,
7969 CMSBitMap* bit_map,
7970 CMSBitMap* mod_union_table,
7971 CMSMarkStack* mark_stack,
7972 bool concurrent_precleaning):
7973 MetadataAwareOopClosure(rp),
7974 _collector(collector),
7975 _span(span),
7976 _bit_map(bit_map),
7977 _mod_union_table(mod_union_table),
7978 _mark_stack(mark_stack),
7979 _concurrent_precleaning(concurrent_precleaning)
7980 {
7981 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7982 }
7984 // Grey object rescan during pre-cleaning and second checkpoint phases --
7985 // the non-parallel version (the parallel version appears further below.)
7986 void PushAndMarkClosure::do_oop(oop obj) {
7987 // Ignore mark word verification. If during concurrent precleaning,
7988 // the object monitor may be locked. If during the checkpoint
7989 // phases, the object may already have been reached by a different
7990 // path and may be at the end of the global overflow list (so
7991 // the mark word may be NULL).
7992 assert(obj->is_oop_or_null(true /* ignore mark word */),
7993 "expected an oop or NULL");
7994 HeapWord* addr = (HeapWord*)obj;
7995 // Check if oop points into the CMS generation
7996 // and is not marked
7997 if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
7998 // a white object ...
7999 _bit_map->mark(addr); // ... now grey
8000 // push on the marking stack (grey set)
8001 bool simulate_overflow = false;
8002 NOT_PRODUCT(
8003 if (CMSMarkStackOverflowALot &&
8004 _collector->simulate_overflow()) {
8005 // simulate a stack overflow
8006 simulate_overflow = true;
8007 }
8008 )
8009 if (simulate_overflow || !_mark_stack->push(obj)) {
8010 if (_concurrent_precleaning) {
8011 // During precleaning we can just dirty the appropriate card(s)
8012 // in the mod union table, thus ensuring that the object remains
8013 // in the grey set and continue. In the case of object arrays
8014 // we need to dirty all of the cards that the object spans,
8015 // since the rescan of object arrays will be limited to the
8016 // dirty cards.
8017 // Note that no one can be interfering with us in this action
8018 // of dirtying the mod union table, so no locking or atomics
8019 // are required.
8020 if (obj->is_objArray()) {
8021 size_t sz = obj->size();
8022 HeapWord* end_card_addr = (HeapWord*)round_to(
8023 (intptr_t)(addr+sz), CardTableModRefBS::card_size);
8024 MemRegion redirty_range = MemRegion(addr, end_card_addr);
8025 assert(!redirty_range.is_empty(), "Arithmetical tautology");
8026 _mod_union_table->mark_range(redirty_range);
8027 } else {
8028 _mod_union_table->mark(addr);
8029 }
8030 _collector->_ser_pmc_preclean_ovflw++;
8031 } else {
8032 // During the remark phase, we need to remember this oop
8033 // in the overflow list.
8034 _collector->push_on_overflow_list(obj);
8035 _collector->_ser_pmc_remark_ovflw++;
8036 }
8037 }
8038 }
8039 }
8041 Par_PushAndMarkClosure::Par_PushAndMarkClosure(CMSCollector* collector,
8042 MemRegion span,
8043 ReferenceProcessor* rp,
8044 CMSBitMap* bit_map,
8045 OopTaskQueue* work_queue):
8046 MetadataAwareOopClosure(rp),
8047 _collector(collector),
8048 _span(span),
8049 _bit_map(bit_map),
8050 _work_queue(work_queue)
8051 {
8052 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
8053 }
8055 void PushAndMarkClosure::do_oop(oop* p) { PushAndMarkClosure::do_oop_work(p); }
8056 void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
8058 // Grey object rescan during second checkpoint phase --
8059 // the parallel version.
8060 void Par_PushAndMarkClosure::do_oop(oop obj) {
8061 // In the assert below, we ignore the mark word because
8062 // this oop may point to an already visited object that is
8063 // on the overflow stack (in which case the mark word has
8064 // been hijacked for chaining into the overflow stack --
8065 // if this is the last object in the overflow stack then
8066 // its mark word will be NULL). Because this object may
8067 // have been subsequently popped off the global overflow
8068 // stack, and the mark word possibly restored to the prototypical
8069 // value, by the time we get to examine this failing assert in
8070 // the debugger, is_oop_or_null(false) may subsequently start
8071 // to hold.
8072 assert(obj->is_oop_or_null(true),
8073 "expected an oop or NULL");
8074 HeapWord* addr = (HeapWord*)obj;
8075 // Check if oop points into the CMS generation
8076 // and is not marked
8077 if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
8078 // a white object ...
8079 // If we manage to "claim" the object, by being the
8080 // first thread to mark it, then we push it on our
8081 // marking stack
8082 if (_bit_map->par_mark(addr)) { // ... now grey
8083 // push on work queue (grey set)
8084 bool simulate_overflow = false;
8085 NOT_PRODUCT(
8086 if (CMSMarkStackOverflowALot &&
8087 _collector->par_simulate_overflow()) {
8088 // simulate a stack overflow
8089 simulate_overflow = true;
8090 }
8091 )
8092 if (simulate_overflow || !_work_queue->push(obj)) {
8093 _collector->par_push_on_overflow_list(obj);
8094 _collector->_par_pmc_remark_ovflw++; // imprecise OK: no need to CAS
8095 }
8096 } // Else, some other thread got there first
8097 }
8098 }
8100 void Par_PushAndMarkClosure::do_oop(oop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
8101 void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
8103 void CMSPrecleanRefsYieldClosure::do_yield_work() {
8104 Mutex* bml = _collector->bitMapLock();
8105 assert_lock_strong(bml);
8106 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
8107 "CMS thread should hold CMS token");
8109 bml->unlock();
8110 ConcurrentMarkSweepThread::desynchronize(true);
8112 ConcurrentMarkSweepThread::acknowledge_yield_request();
8114 _collector->stopTimer();
8115 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
8116 if (PrintCMSStatistics != 0) {
8117 _collector->incrementYields();
8118 }
8119 _collector->icms_wait();
8121 // See the comment in coordinator_yield()
8122 for (unsigned i = 0; i < CMSYieldSleepCount &&
8123 ConcurrentMarkSweepThread::should_yield() &&
8124 !CMSCollector::foregroundGCIsActive(); ++i) {
8125 os::sleep(Thread::current(), 1, false);
8126 ConcurrentMarkSweepThread::acknowledge_yield_request();
8127 }
8129 ConcurrentMarkSweepThread::synchronize(true);
8130 bml->lock();
8132 _collector->startTimer();
8133 }
8135 bool CMSPrecleanRefsYieldClosure::should_return() {
8136 if (ConcurrentMarkSweepThread::should_yield()) {
8137 do_yield_work();
8138 }
8139 return _collector->foregroundGCIsActive();
8140 }
8142 void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) {
8143 assert(((size_t)mr.start())%CardTableModRefBS::card_size_in_words == 0,
8144 "mr should be aligned to start at a card boundary");
8145 // We'd like to assert:
8146 // assert(mr.word_size()%CardTableModRefBS::card_size_in_words == 0,
8147 // "mr should be a range of cards");
8148 // However, that would be too strong in one case -- the last
8149 // partition ends at _unallocated_block which, in general, can be
8150 // an arbitrary boundary, not necessarily card aligned.
8151 if (PrintCMSStatistics != 0) {
8152 _num_dirty_cards +=
8153 mr.word_size()/CardTableModRefBS::card_size_in_words;
8154 }
8155 _space->object_iterate_mem(mr, &_scan_cl);
8156 }
8158 SweepClosure::SweepClosure(CMSCollector* collector,
8159 ConcurrentMarkSweepGeneration* g,
8160 CMSBitMap* bitMap, bool should_yield) :
8161 _collector(collector),
8162 _g(g),
8163 _sp(g->cmsSpace()),
8164 _limit(_sp->sweep_limit()),
8165 _freelistLock(_sp->freelistLock()),
8166 _bitMap(bitMap),
8167 _yield(should_yield),
8168 _inFreeRange(false), // No free range at beginning of sweep
8169 _freeRangeInFreeLists(false), // No free range at beginning of sweep
8170 _lastFreeRangeCoalesced(false),
8171 _freeFinger(g->used_region().start())
8172 {
8173 NOT_PRODUCT(
8174 _numObjectsFreed = 0;
8175 _numWordsFreed = 0;
8176 _numObjectsLive = 0;
8177 _numWordsLive = 0;
8178 _numObjectsAlreadyFree = 0;
8179 _numWordsAlreadyFree = 0;
8180 _last_fc = NULL;
8182 _sp->initializeIndexedFreeListArrayReturnedBytes();
8183 _sp->dictionary()->initialize_dict_returned_bytes();
8184 )
8185 assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
8186 "sweep _limit out of bounds");
8187 if (CMSTraceSweeper) {
8188 gclog_or_tty->print_cr("\n====================\nStarting new sweep with limit " PTR_FORMAT,
8189 _limit);
8190 }
8191 }
8193 void SweepClosure::print_on(outputStream* st) const {
8194 st->print_cr("_sp = [" PTR_FORMAT "," PTR_FORMAT ")",
8195 _sp->bottom(), _sp->end());
8196 st->print_cr("_limit = " PTR_FORMAT, _limit);
8197 st->print_cr("_freeFinger = " PTR_FORMAT, _freeFinger);
8198 NOT_PRODUCT(st->print_cr("_last_fc = " PTR_FORMAT, _last_fc);)
8199 st->print_cr("_inFreeRange = %d, _freeRangeInFreeLists = %d, _lastFreeRangeCoalesced = %d",
8200 _inFreeRange, _freeRangeInFreeLists, _lastFreeRangeCoalesced);
8201 }
8203 #ifndef PRODUCT
8204 // Assertion checking only: no useful work in product mode --
8205 // however, if any of the flags below become product flags,
8206 // you may need to review this code to see if it needs to be
8207 // enabled in product mode.
8208 SweepClosure::~SweepClosure() {
8209 assert_lock_strong(_freelistLock);
8210 assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
8211 "sweep _limit out of bounds");
8212 if (inFreeRange()) {
8213 warning("inFreeRange() should have been reset; dumping state of SweepClosure");
8214 print();
8215 ShouldNotReachHere();
8216 }
8217 if (Verbose && PrintGC) {
8218 gclog_or_tty->print("Collected "SIZE_FORMAT" objects, " SIZE_FORMAT " bytes",
8219 _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
8220 gclog_or_tty->print_cr("\nLive "SIZE_FORMAT" objects, "
8221 SIZE_FORMAT" bytes "
8222 "Already free "SIZE_FORMAT" objects, "SIZE_FORMAT" bytes",
8223 _numObjectsLive, _numWordsLive*sizeof(HeapWord),
8224 _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
8225 size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree)
8226 * sizeof(HeapWord);
8227 gclog_or_tty->print_cr("Total sweep: "SIZE_FORMAT" bytes", totalBytes);
8229 if (PrintCMSStatistics && CMSVerifyReturnedBytes) {
8230 size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
8231 size_t dict_returned_bytes = _sp->dictionary()->sum_dict_returned_bytes();
8232 size_t returned_bytes = indexListReturnedBytes + dict_returned_bytes;
8233 gclog_or_tty->print("Returned "SIZE_FORMAT" bytes", returned_bytes);
8234 gclog_or_tty->print(" Indexed List Returned "SIZE_FORMAT" bytes",
8235 indexListReturnedBytes);
8236 gclog_or_tty->print_cr(" Dictionary Returned "SIZE_FORMAT" bytes",
8237 dict_returned_bytes);
8238 }
8239 }
8240 if (CMSTraceSweeper) {
8241 gclog_or_tty->print_cr("end of sweep with _limit = " PTR_FORMAT "\n================",
8242 _limit);
8243 }
8244 }
8245 #endif // PRODUCT
8247 void SweepClosure::initialize_free_range(HeapWord* freeFinger,
8248 bool freeRangeInFreeLists) {
8249 if (CMSTraceSweeper) {
8250 gclog_or_tty->print("---- Start free range at 0x%x with free block (%d)\n",
8251 freeFinger, freeRangeInFreeLists);
8252 }
8253 assert(!inFreeRange(), "Trampling existing free range");
8254 set_inFreeRange(true);
8255 set_lastFreeRangeCoalesced(false);
8257 set_freeFinger(freeFinger);
8258 set_freeRangeInFreeLists(freeRangeInFreeLists);
8259 if (CMSTestInFreeList) {
8260 if (freeRangeInFreeLists) {
8261 FreeChunk* fc = (FreeChunk*) freeFinger;
8262 assert(fc->is_free(), "A chunk on the free list should be free.");
8263 assert(fc->size() > 0, "Free range should have a size");
8264 assert(_sp->verify_chunk_in_free_list(fc), "Chunk is not in free lists");
8265 }
8266 }
8267 }
8269 // Note that the sweeper runs concurrently with mutators. Thus,
8270 // it is possible for direct allocation in this generation to happen
8271 // in the middle of the sweep. Note that the sweeper also coalesces
8272 // contiguous free blocks. Thus, unless the sweeper and the allocator
8274 // synchronize appropriately, freshly allocated blocks may get swept up.
8274 // This is accomplished by the sweeper locking the free lists while
8275 // it is sweeping. Thus blocks that are determined to be free are
8276 // indeed free. There is however one additional complication:
8277 // blocks that have been allocated since the final checkpoint and
8278 // mark, will not have been marked and so would be treated as
8279 // unreachable and swept up. To prevent this, the allocator marks
8280 // the bit map when allocating during the sweep phase. This leads,
8281 // however, to a further complication -- objects may have been allocated
8282 // but not yet initialized -- in the sense that the header isn't yet
8283 // installed. The sweeper cannot then determine the size of the block
8284 // in order to skip over it. To deal with this case, we use a technique
8285 // (due to Printezis) to encode such uninitialized block sizes in the
8286 // bit map. Since the bit map uses one bit per HeapWord, while the
8287 // CMS generation has a minimum object size of 3 HeapWords, it follows
8288 // that "normal marks" won't be adjacent in the bit map (there will
8289 // always be at least two 0 bits between successive 1 bits). We make use
8290 // of these "unused" bits to represent uninitialized blocks -- the bit
8291 // corresponding to the start of the uninitialized object and the next
8292 // bit are both set. Finally, a 1 bit marks the end of the object that
8293 // started with the two consecutive 1 bits to indicate its potentially
8294 // uninitialized state.
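// [Editorial sketch, not in the original change] Decoding a Printezis-encoded
// block size back out of the bit map, in the style of
// block_size_using_printezis_bits() referenced elsewhere in this file;
// assumes the caller has checked that the bits at addr and addr+1 are set:
static size_t sketch_printezis_block_size(CMSBitMap* bm, HeapWord* addr) {
  // The next set bit at or after addr+2 marks the last word of the block.
  HeapWord* last_word = bm->getNextMarkedWordAddress(addr + 2);
  return pointer_delta(last_word + 1, addr);  // block size in HeapWords
}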
8296 size_t SweepClosure::do_blk_careful(HeapWord* addr) {
8297 FreeChunk* fc = (FreeChunk*)addr;
8298 size_t res;
8300 // Check if we are done sweeping. Below we check "addr >= _limit" rather
8301 // than "addr == _limit" because although _limit was a block boundary when
8302 // we started the sweep, it may no longer be one because heap expansion
8303 // may have caused us to coalesce the block ending at the address _limit
8304 // with a newly expanded chunk (this happens when _limit was set to the
8305 // previous _end of the space), so we may have stepped past _limit:
8306 // see the following Zeno-like trail of CRs 6977970, 7008136, 7042740.
8307 if (addr >= _limit) { // we have swept up to or past the limit: finish up
8308 assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
8309 "sweep _limit out of bounds");
8310 assert(addr < _sp->end(), "addr out of bounds");
8311 // Flush any free range we might be holding as a single
8312 // coalesced chunk to the appropriate free list.
8313 if (inFreeRange()) {
8314 assert(freeFinger() >= _sp->bottom() && freeFinger() < _limit,
8315 err_msg("freeFinger() " PTR_FORMAT" is out-of-bounds", freeFinger()));
8316 flush_cur_free_chunk(freeFinger(),
8317 pointer_delta(addr, freeFinger()));
8318 if (CMSTraceSweeper) {
8319 gclog_or_tty->print("Sweep: last chunk: ");
8320 gclog_or_tty->print("put_free_blk 0x%x ("SIZE_FORMAT") "
8321 "[coalesced:"SIZE_FORMAT"]\n",
8322 freeFinger(), pointer_delta(addr, freeFinger()),
8323 lastFreeRangeCoalesced());
8324 }
8325 }
8327 // help the iterator loop finish
8328 return pointer_delta(_sp->end(), addr);
8329 }
8331 assert(addr < _limit, "sweep invariant");
8332 // check if we should yield
8333 do_yield_check(addr);
8334 if (fc->is_free()) {
8335 // Chunk that is already free
8336 res = fc->size();
8337 do_already_free_chunk(fc);
8338 debug_only(_sp->verifyFreeLists());
8339 // If we flush the chunk at hand in lookahead_and_flush()
8340 // and it's coalesced with a preceding chunk, then the
8341 // process of "mangling" the payload of the coalesced block
8342 // will cause erasure of the size information from the
8343 // (erstwhile) header of all the coalesced blocks but the
8344 // first, so the first disjunct in the assert will not hold
8345 // in that specific case (in which case the second disjunct
8346 // will hold).
8347 assert(res == fc->size() || ((HeapWord*)fc) + res >= _limit,
8348 "Otherwise the size info doesn't change at this step");
8349 NOT_PRODUCT(
8350 _numObjectsAlreadyFree++;
8351 _numWordsAlreadyFree += res;
8352 )
8353 NOT_PRODUCT(_last_fc = fc;)
8354 } else if (!_bitMap->isMarked(addr)) {
8355 // Chunk is fresh garbage
8356 res = do_garbage_chunk(fc);
8357 debug_only(_sp->verifyFreeLists());
8358 NOT_PRODUCT(
8359 _numObjectsFreed++;
8360 _numWordsFreed += res;
8361 )
8362 } else {
8363 // Chunk that is alive.
8364 res = do_live_chunk(fc);
8365 debug_only(_sp->verifyFreeLists());
8366 NOT_PRODUCT(
8367 _numObjectsLive++;
8368 _numWordsLive += res;
8369 )
8370 }
8371 return res;
8372 }
8374 // For the smart allocation, record the following:
8375 // split deaths - a free chunk is removed from its free list because
8376 // it is being split into two or more chunks.
8377 // split birth - a free chunk is being added to its free list because
8378 // a larger free chunk has been split and resulted in this free chunk.
8379 // coal death - a free chunk is being removed from its free list because
8380 // it is being coalesced into a large free chunk.
8381 // coal birth - a free chunk is being added to its free list because
8382 // it was created when two or more free chunks were coalesced into
8383 // this free chunk.
8384 //
8385 // These statistics are used to determine the desired number of free
8386 // chunks of a given size. The desired number is chosen to be relative
8387 // to the end of a CMS sweep. The desired number at the end of a sweep
8388 // is the
8389 // count-at-end-of-previous-sweep (an amount that was enough)
8390 // - count-at-beginning-of-current-sweep (the excess)
8391 // + split-births (gains in this size during interval)
8392 // - split-deaths (demands on this size during interval)
8393 // where the interval is from the end of one sweep to the end of the
8394 // next.
8395 //
8396 // When sweeping the sweeper maintains an accumulated chunk which is
8397 // the chunk that is made up of chunks that have been coalesced. That
8398 // will be termed the left-hand chunk. A new chunk of garbage that
8399 // is being considered for coalescing will be referred to as the
8400 // right-hand chunk.
8401 //
8402 // When making a decision on whether to coalesce a right-hand chunk with
8403 // the current left-hand chunk, the current count vs. the desired count
8404 // of the left-hand chunk is considered. Also if the right-hand chunk
8405 // is near the large chunk at the end of the heap (see
8406 // ConcurrentMarkSweepGeneration::isNearLargestChunk()), then the
8407 // left-hand chunk is coalesced.
8408 //
8409 // When making a decision about whether to split a chunk, the desired count
8410 // vs. the current count of the candidate to be split is also considered.
8411 // If the candidate is underpopulated (currently fewer chunks than desired)
8412 // a chunk of an overpopulated (currently more chunks than desired) size may
8413 // be chosen. The "hint" associated with a free list, if non-null, points
8414 // to a free list which may be overpopulated.
8415 //
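// [Editorial sketch, not in the original change] The desired-count rule
// described above, written out with hypothetical names (the real bookkeeping
// lives in the per-size free list statistics):
static ssize_t sketch_desired_free_chunk_count(ssize_t count_at_prev_sweep_end,
                                               ssize_t count_at_sweep_begin,
                                               ssize_t split_births,
                                               ssize_t split_deaths) {
  // enough last time - current excess + gains during interval - demands on it
  return count_at_prev_sweep_end - count_at_sweep_begin
       + split_births - split_deaths;
}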
8417 void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
8418 const size_t size = fc->size();
8419 // Chunks that cannot be coalesced are not in the
8420 // free lists.
8421 if (CMSTestInFreeList && !fc->cantCoalesce()) {
8422 assert(_sp->verify_chunk_in_free_list(fc),
8423 "free chunk should be in free lists");
8424 }
8425 // a chunk that is already free, should not have been
8426 // marked in the bit map
8427 HeapWord* const addr = (HeapWord*) fc;
8428 assert(!_bitMap->isMarked(addr), "free chunk should be unmarked");
8429 // Verify that the bit map has no bits marked between
8430 // addr and purported end of this block.
8431 _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
8433 // Some chunks cannot be coalesced under any circumstances.
8434 // See the definition of cantCoalesce().
8435 if (!fc->cantCoalesce()) {
8436 // This chunk can potentially be coalesced.
8437 if (_sp->adaptive_freelists()) {
8438 // All the work is done in do_post_free_or_garbage_chunk() below.
8439 do_post_free_or_garbage_chunk(fc, size);
8440 } else { // Not adaptive free lists
8441 // this is a free chunk that can potentially be coalesced by the sweeper;
8442 if (!inFreeRange()) {
8443 // if the next chunk is a free block that can't be coalesced
8444 // it doesn't make sense to remove this chunk from the free lists
8445 FreeChunk* nextChunk = (FreeChunk*)(addr + size);
8446 assert((HeapWord*)nextChunk <= _sp->end(), "Chunk size out of bounds?");
8447 if ((HeapWord*)nextChunk < _sp->end() && // There is another free chunk to the right ...
8448 nextChunk->is_free() && // ... which is free...
8449 nextChunk->cantCoalesce()) { // ... but can't be coalesced
8450 // nothing to do
8451 } else {
8452 // Potentially the start of a new free range:
8453 // Don't eagerly remove it from the free lists.
8454 // No need to remove it if it will just be put
8455 // back again. (Also, from a pragmatic point of view,
8456 // if it is a free block in a region that is beyond
8457 // any allocated blocks, an assertion will fail)
8458 // Remember the start of a free run.
8459 initialize_free_range(addr, true);
8460 // end - can coalesce with next chunk
8461 }
8462 } else {
8463 // the midst of a free range, we are coalescing
8464 print_free_block_coalesced(fc);
8465 if (CMSTraceSweeper) {
8466 gclog_or_tty->print(" -- pick up free block 0x%x (%d)\n", fc, size);
8467 }
8468 // remove it from the free lists
8469 _sp->removeFreeChunkFromFreeLists(fc);
8470 set_lastFreeRangeCoalesced(true);
8471 // If the chunk is being coalesced and the current free range is
8472 // in the free lists, remove the current free range so that it
8473 // will be returned to the free lists in its entirety - all
8474 // the coalesced pieces included.
8475 if (freeRangeInFreeLists()) {
8476 FreeChunk* ffc = (FreeChunk*) freeFinger();
8477 assert(ffc->size() == pointer_delta(addr, freeFinger()),
8478 "Size of free range is inconsistent with chunk size.");
8479 if (CMSTestInFreeList) {
8480 assert(_sp->verify_chunk_in_free_list(ffc),
8481 "free range is not in free lists");
8482 }
8483 _sp->removeFreeChunkFromFreeLists(ffc);
8484 set_freeRangeInFreeLists(false);
8485 }
8486 }
8487 }
8488 // Note that if the chunk is not coalescable (the else arm
8489 // below), we unconditionally flush, without needing to do
8490 // a "lookahead," as we do below.
8491 if (inFreeRange()) lookahead_and_flush(fc, size);
8492 } else {
8493 // Code path common to both original and adaptive free lists.
8495 // can't coalesce with previous block; this should be treated
8496 // as the end of a free run, if any
8497 if (inFreeRange()) {
8498 // we kicked some butt; time to pick up the garbage
8499 assert(freeFinger() < addr, "freeFinger points too high");
8500 flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
8501 }
8502 // else, nothing to do, just continue
8503 }
8504 }
8506 size_t SweepClosure::do_garbage_chunk(FreeChunk* fc) {
8507 // This is a chunk of garbage. It is not in any free list.
8508 // Add it to a free list or let it possibly be coalesced into
8509 // a larger chunk.
8510 HeapWord* const addr = (HeapWord*) fc;
8511 const size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
8513 if (_sp->adaptive_freelists()) {
8514 // Verify that the bit map has no bits marked between
8515 // addr and purported end of just dead object.
8516 _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
8518 do_post_free_or_garbage_chunk(fc, size);
8519 } else {
8520 if (!inFreeRange()) {
8521 // start of a new free range
8522 assert(size > 0, "A free range should have a size");
8523 initialize_free_range(addr, false);
8524 } else {
8525 // this will be swept up when we hit the end of the
8526 // free range
8527 if (CMSTraceSweeper) {
8528 gclog_or_tty->print(" -- pick up garbage 0x%x (%d) \n", fc, size);
8529 }
8530 // If the chunk is being coalesced and the current free range is
8531 // in the free lists, remove the current free range so that it
8532 // will be returned to the free lists in its entirety - all
8533 // the coalesced pieces included.
8534 if (freeRangeInFreeLists()) {
8535 FreeChunk* ffc = (FreeChunk*)freeFinger();
8536 assert(ffc->size() == pointer_delta(addr, freeFinger()),
8537 "Size of free range is inconsistent with chunk size.");
8538 if (CMSTestInFreeList) {
8539 assert(_sp->verify_chunk_in_free_list(ffc),
8540 "free range is not in free lists");
8541 }
8542 _sp->removeFreeChunkFromFreeLists(ffc);
8543 set_freeRangeInFreeLists(false);
8544 }
8545 set_lastFreeRangeCoalesced(true);
8546 }
8547 // this will be swept up when we hit the end of the free range
8549 // Verify that the bit map has no bits marked between
8550 // addr and purported end of just dead object.
8551 _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
8552 }
8553 assert(_limit >= addr + size,
8554 "A freshly garbage chunk can't possibly straddle over _limit");
8555 if (inFreeRange()) lookahead_and_flush(fc, size);
8556 return size;
8557 }
8559 size_t SweepClosure::do_live_chunk(FreeChunk* fc) {
8560 HeapWord* addr = (HeapWord*) fc;
8561 // The sweeper has just found a live object. Return any accumulated
8562 // left hand chunk to the free lists.
8563 if (inFreeRange()) {
8564 assert(freeFinger() < addr, "freeFinger points too high");
8565 flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
8566 }
8568 // This object is live: we'd normally expect this to be
8569 // an oop, and would like to assert the following:
8570 // assert(oop(addr)->is_oop(), "live block should be an oop");
8571 // However, as we commented above, this may be an object whose
8572 // header hasn't yet been initialized.
8573 size_t size;
8574 assert(_bitMap->isMarked(addr), "Tautology for this control point");
8575 if (_bitMap->isMarked(addr + 1)) {
8576 // Determine the size from the bit map, rather than trying to
8577 // compute it from the object header.
8578 HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
8579 size = pointer_delta(nextOneAddr + 1, addr);
8580 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
8581 "alignment problem");
8583 #ifdef ASSERT
8584 if (oop(addr)->klass_or_null() != NULL) {
8585 // Ignore mark word because we are running concurrent with mutators
8586 assert(oop(addr)->is_oop(true), "live block should be an oop");
8587 assert(size ==
8588 CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
8589 "P-mark and computed size do not agree");
8590 }
8591 #endif
8593 } else {
8594 // This should be an initialized object that's alive.
8595 assert(oop(addr)->klass_or_null() != NULL,
8596 "Should be an initialized object");
8597 // Ignore mark word because we are running concurrent with mutators
8598 assert(oop(addr)->is_oop(true), "live block should be an oop");
8599 // Verify that the bit map has no bits marked between
8600 // addr and purported end of this block.
8601 size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
8602 assert(size >= 3, "Necessary for Printezis marks to work");
8603 assert(!_bitMap->isMarked(addr+1), "Tautology for this control point");
8604 DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);)
8605 }
8606 return size;
8607 }
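// --- Editor's illustration (not part of the original change) --------------
// A minimal standalone sketch of the Printezis-mark size computation used in
// do_live_chunk() above: when both bit(addr) and bit(addr + 1) are set, the
// next marked word at or after addr + 2 marks the block's last word, so
// size = (next_marked + 1) - addr. The flat bool array and the names below
// are hypothetical stand-ins for CMSBitMap and its accessors.
static size_t printezis_block_size_words(const bool* bits, size_t addr, size_t len) {
  size_t next = addr + 2;                     // getNextMarkedWordAddress(addr + 2)
  while (next < len && !bits[next]) next++;
  return (next + 1) - addr;                   // pointer_delta(nextOneAddr + 1, addr)
}
// Example: with marks at words 0, 1 and 9, the block spans words [0, 10),
// i.e. printezis_block_size_words(bits, 0, len) == 10.
// ---------------------------------------------------------------------------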
8609 void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
8610 size_t chunkSize) {
8611 // do_post_free_or_garbage_chunk() should only be called in the case
8612 // of the adaptive free list allocator.
8613 const bool fcInFreeLists = fc->is_free();
8614 assert(_sp->adaptive_freelists(), "Should only be used in this case.");
8615 assert((HeapWord*)fc <= _limit, "sweep invariant");
8616 if (CMSTestInFreeList && fcInFreeLists) {
8617 assert(_sp->verify_chunk_in_free_list(fc), "free chunk is not in free lists");
8618 }
8620 if (CMSTraceSweeper) {
8621 gclog_or_tty->print_cr(" -- pick up another chunk at " PTR_FORMAT " (" SIZE_FORMAT ")", fc, chunkSize);
8622 }
8624 HeapWord* const fc_addr = (HeapWord*) fc;
8626 bool coalesce;
8627 const size_t left = pointer_delta(fc_addr, freeFinger());
8628 const size_t right = chunkSize;
8629 switch (FLSCoalescePolicy) {
8630 // the numeric value forms a coalescing aggressiveness metric
8631 case 0: { // never coalesce
8632 coalesce = false;
8633 break;
8634 }
8635 case 1: { // coalesce if left & right chunks on overpopulated lists
8636 coalesce = _sp->coalOverPopulated(left) &&
8637 _sp->coalOverPopulated(right);
8638 break;
8639 }
8640 case 2: { // coalesce if left chunk on overpopulated list (default)
8641 coalesce = _sp->coalOverPopulated(left);
8642 break;
8643 }
8644 case 3: { // coalesce if left OR right chunk on overpopulated list
8645 coalesce = _sp->coalOverPopulated(left) ||
8646 _sp->coalOverPopulated(right);
8647 break;
8648 }
8649 case 4: { // always coalesce
8650 coalesce = true;
8651 break;
8652 }
8653 default:
8654 ShouldNotReachHere();
8655 }
8657 // Should the current free range be coalesced?
8658 // If the chunk is in a free range and either we decided to coalesce above
8659 // or the chunk is near the large block at the end of the heap
8660 // (isNearLargestChunk() returns true), then coalesce this chunk.
8661 const bool doCoalesce = inFreeRange()
8662 && (coalesce || _g->isNearLargestChunk(fc_addr));
8663 if (doCoalesce) {
8664 // Coalesce the current free range on the left with the new
8665 // chunk on the right. If either is on a free list,
8666 // it must be removed from the list and stashed in the closure.
8667 if (freeRangeInFreeLists()) {
8668 FreeChunk* const ffc = (FreeChunk*)freeFinger();
8669 assert(ffc->size() == pointer_delta(fc_addr, freeFinger()),
8670 "Size of free range is inconsistent with chunk size.");
8671 if (CMSTestInFreeList) {
8672 assert(_sp->verify_chunk_in_free_list(ffc),
8673 "Chunk is not in free lists");
8674 }
8675 _sp->coalDeath(ffc->size());
8676 _sp->removeFreeChunkFromFreeLists(ffc);
8677 set_freeRangeInFreeLists(false);
8678 }
8679 if (fcInFreeLists) {
8680 _sp->coalDeath(chunkSize);
8681 assert(fc->size() == chunkSize,
8682 "The chunk has the wrong size or is not in the free lists");
8683 _sp->removeFreeChunkFromFreeLists(fc);
8684 }
8685 set_lastFreeRangeCoalesced(true);
8686 print_free_block_coalesced(fc);
8687 } else { // not in a free range and/or should not coalesce
8688 // Return the current free range and start a new one.
8689 if (inFreeRange()) {
8690 // In a free range but cannot coalesce with the right hand chunk.
8691 // Put the current free range into the free lists.
8692 flush_cur_free_chunk(freeFinger(),
8693 pointer_delta(fc_addr, freeFinger()));
8694 }
8695 // Set up for new free range. Pass along whether the right hand
8696 // chunk is in the free lists.
8697 initialize_free_range((HeapWord*)fc, fcInFreeLists);
8698 }
8699 }
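// --- Editor's illustration (not part of the original change) --------------
// The FLSCoalescePolicy switch above reduces to a pure predicate over the
// "overpopulated" status of the lists for the left and right chunk sizes.
// A hedged standalone sketch with hypothetical names; the real decision
// additionally consults inFreeRange() and isNearLargestChunk(), as shown:
static bool fls_should_coalesce(int policy, bool left_overpop, bool right_overpop) {
  switch (policy) {
    case 0:  return false;                          // never coalesce
    case 1:  return left_overpop && right_overpop;  // both lists overpopulated
    case 2:  return left_overpop;                   // left overpopulated (default)
    case 3:  return left_overpop || right_overpop;  // either overpopulated
    case 4:  return true;                           // always coalesce
    default: return false;                          // unreachable in the code above
  }
}
// ---------------------------------------------------------------------------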
8701 // Lookahead flush:
8702 // If we are tracking a free range, and this is the last chunk that
8703 // we'll look at because its end crosses past _limit, we'll preemptively
8704 // flush it along with any free range we may be holding on to. Note that
8705 // this can be the case only for an already free or freshly garbage
8706 // chunk. If this block is an object, it can never straddle
8707 // over _limit. The "straddling" occurs when _limit is set at
8708 // the previous end of the space when this cycle started, and
8709 // a subsequent heap expansion caused the previously co-terminal
8710 // free block to be coalesced with the newly expanded portion,
8711 // thus rendering _limit a non-block-boundary making it dangerous
8712 // for the sweeper to step over and examine.
8713 void SweepClosure::lookahead_and_flush(FreeChunk* fc, size_t chunk_size) {
8714 assert(inFreeRange(), "Should only be called if currently in a free range.");
8715 HeapWord* const eob = ((HeapWord*)fc) + chunk_size;
8716 assert(_sp->used_region().contains(eob - 1),
8717 err_msg("eob = " PTR_FORMAT " eob-1 = " PTR_FORMAT " _limit = " PTR_FORMAT
8718 " out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")"
8719 " when examining fc = " PTR_FORMAT "(" SIZE_FORMAT ")",
8720 eob, eob-1, _limit, _sp->bottom(), _sp->end(), fc, chunk_size));
8721 if (eob >= _limit) {
8722 assert(eob == _limit || fc->is_free(), "Only a free chunk should allow us to cross over the limit");
8723 if (CMSTraceSweeper) {
8724 gclog_or_tty->print_cr("_limit " PTR_FORMAT " reached or crossed by block "
8725 "[" PTR_FORMAT "," PTR_FORMAT ") in space "
8726 "[" PTR_FORMAT "," PTR_FORMAT ")",
8727 _limit, fc, eob, _sp->bottom(), _sp->end());
8728 }
8729 // Return the storage we are tracking back into the free lists.
8730 if (CMSTraceSweeper) {
8731 gclog_or_tty->print_cr("Flushing ... ");
8732 }
8733 assert(freeFinger() < eob, "Error");
8734 flush_cur_free_chunk(freeFinger(), pointer_delta(eob, freeFinger()));
8735 }
8736 }
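// --- Editor's illustration (not part of the original change) --------------
// The flush trigger above is an end-of-block test against _limit. A minimal
// sketch over hypothetical word addresses:
static bool crosses_sweep_limit(size_t chunk_addr, size_t chunk_size, size_t limit) {
  size_t eob = chunk_addr + chunk_size;  // end of block, exclusive
  return eob >= limit;                   // reached or crossed: flush preemptively
}
// Example: a free chunk at word 90 of size 20, with _limit at word 100,
// straddles the limit (eob == 110 >= 100), so the tracked range is flushed.
// ---------------------------------------------------------------------------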
8738 void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
8739 assert(inFreeRange(), "Should only be called if currently in a free range.");
8740 assert(size > 0,
8741 "A zero sized chunk cannot be added to the free lists.");
8742 if (!freeRangeInFreeLists()) {
8743 if (CMSTestInFreeList) {
8744 FreeChunk* fc = (FreeChunk*) chunk;
8745 fc->set_size(size);
8746 assert(!_sp->verify_chunk_in_free_list(fc),
8747 "chunk should not be in free lists yet");
8748 }
8749 if (CMSTraceSweeper) {
8750 gclog_or_tty->print_cr(" -- add free block " PTR_FORMAT " (" SIZE_FORMAT ") to free lists",
8751 chunk, size);
8752 }
8753 // A new free range is going to be starting. The current
8754 // free range has not been added to the free lists yet or
8755 // was removed so add it back.
8756 // If the current free range was coalesced, then the death
8757 // of the free range was recorded. Record a birth now.
8758 if (lastFreeRangeCoalesced()) {
8759 _sp->coalBirth(size);
8760 }
8761 _sp->addChunkAndRepairOffsetTable(chunk, size,
8762 lastFreeRangeCoalesced());
8763 } else if (CMSTraceSweeper) {
8764 gclog_or_tty->print_cr("Already in free list: nothing to flush");
8765 }
8766 set_inFreeRange(false);
8767 set_freeRangeInFreeLists(false);
8768 }
8770 // We take a break if we've been at this for a while,
8771 // so as to avoid monopolizing the locks involved.
8772 void SweepClosure::do_yield_work(HeapWord* addr) {
8773 // Return current free chunk being used for coalescing (if any)
8774 // to the appropriate freelist. After yielding, the next
8775 // free block encountered will start a coalescing range of
8776 // free blocks. If the next free block is adjacent to the
8777 // chunk just flushed, they will need to wait for the next
8778 // sweep to be coalesced.
8779 if (inFreeRange()) {
8780 flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
8781 }
8783 // First give up the locks, then yield, then re-lock.
8784 // We should probably use a constructor/destructor idiom to
8785 // do this unlock/lock or modify the MutexUnlocker class to
8786 // serve our purpose. XXX
8787 assert_lock_strong(_bitMap->lock());
8788 assert_lock_strong(_freelistLock);
8789 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
8790 "CMS thread should hold CMS token");
8791 _bitMap->lock()->unlock();
8792 _freelistLock->unlock();
8793 ConcurrentMarkSweepThread::desynchronize(true);
8794 ConcurrentMarkSweepThread::acknowledge_yield_request();
8795 _collector->stopTimer();
8796 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
8797 if (PrintCMSStatistics != 0) {
8798 _collector->incrementYields();
8799 }
8800 _collector->icms_wait();
8802 // See the comment in coordinator_yield()
8803 for (unsigned i = 0; i < CMSYieldSleepCount &&
8804 ConcurrentMarkSweepThread::should_yield() &&
8805 !CMSCollector::foregroundGCIsActive(); ++i) {
8806 os::sleep(Thread::current(), 1, false);
8807 ConcurrentMarkSweepThread::acknowledge_yield_request();
8808 }
8810 ConcurrentMarkSweepThread::synchronize(true);
8811 _freelistLock->lock();
8812 _bitMap->lock()->lock_without_safepoint_check();
8813 _collector->startTimer();
8814 }
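// --- Editor's illustration (not part of the original change) --------------
// The XXX comment above asks for a constructor/destructor idiom around the
// unlock/yield/relock dance. A hedged sketch of such a scoped helper; LockT
// is any type exposing lock()/unlock(), so this is only the shape, not the
// real Mutex/Monitor API (which also distinguishes safepoint-checked locking):
template <typename LockT>
class ScopedUnlocker {
  LockT* _lock;
 public:
  explicit ScopedUnlocker(LockT* l) : _lock(l) { _lock->unlock(); }
  ~ScopedUnlocker()                            { _lock->lock(); }
};
// Usage sketch, with a hypothetical Monitor-like type:
//   { ScopedUnlocker<Monitor> u(bitmap_lock); /* yield here */ }
// The lock is reacquired automatically on scope exit, even on early return.
// ---------------------------------------------------------------------------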
8816 #ifndef PRODUCT
8817 // This is actually very useful in a product build if it can
8818 // be called from the debugger. Compile it into the product
8819 // as needed.
8820 bool debug_verify_chunk_in_free_list(FreeChunk* fc) {
8821 return debug_cms_space->verify_chunk_in_free_list(fc);
8822 }
8823 #endif
8825 void SweepClosure::print_free_block_coalesced(FreeChunk* fc) const {
8826 if (CMSTraceSweeper) {
8827 gclog_or_tty->print_cr("Sweep:coal_free_blk " PTR_FORMAT " (" SIZE_FORMAT ")",
8828 fc, fc->size());
8829 }
8830 }
8832 // CMSIsAliveClosure
8833 bool CMSIsAliveClosure::do_object_b(oop obj) {
8834 HeapWord* addr = (HeapWord*)obj;
8835 return addr != NULL &&
8836 (!_span.contains(addr) || _bit_map->isMarked(addr));
8837 }
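// --- Editor's illustration (not part of the original change) --------------
// The liveness test above in boolean form: anything outside the CMS span is
// treated as live; inside the span, liveness equals the mark bit. The names
// below are hypothetical:
static bool cms_is_alive(bool in_cms_span, bool marked_in_bitmap) {
  return !in_cms_span || marked_in_bitmap;
}
// ---------------------------------------------------------------------------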
8840 CMSKeepAliveClosure::CMSKeepAliveClosure( CMSCollector* collector,
8841 MemRegion span,
8842 CMSBitMap* bit_map, CMSMarkStack* mark_stack,
8843 bool cpc):
8844 _collector(collector),
8845 _span(span),
8846 _bit_map(bit_map),
8847 _mark_stack(mark_stack),
8848 _concurrent_precleaning(cpc) {
8849 assert(!_span.is_empty(), "Empty span could spell trouble");
8850 }
8853 // CMSKeepAliveClosure: the serial version
8854 void CMSKeepAliveClosure::do_oop(oop obj) {
8855 HeapWord* addr = (HeapWord*)obj;
8856 if (_span.contains(addr) &&
8857 !_bit_map->isMarked(addr)) {
8858 _bit_map->mark(addr);
8859 bool simulate_overflow = false;
8860 NOT_PRODUCT(
8861 if (CMSMarkStackOverflowALot &&
8862 _collector->simulate_overflow()) {
8863 // simulate a stack overflow
8864 simulate_overflow = true;
8865 }
8866 )
8867 if (simulate_overflow || !_mark_stack->push(obj)) {
8868 if (_concurrent_precleaning) {
8869 // We dirty the overflown object and let the remark
8870 // phase deal with it.
8871 assert(_collector->overflow_list_is_empty(), "Error");
8872 // In the case of object arrays, we need to dirty all of
8873 // the cards that the object spans. No locking or atomics
8874 // are needed since no one else can be mutating the mod union
8875 // table.
8876 if (obj->is_objArray()) {
8877 size_t sz = obj->size();
8878 HeapWord* end_card_addr =
8879 (HeapWord*)round_to((intptr_t)(addr+sz), CardTableModRefBS::card_size);
8880 MemRegion redirty_range = MemRegion(addr, end_card_addr);
8881 assert(!redirty_range.is_empty(), "Arithmetical tautology");
8882 _collector->_modUnionTable.mark_range(redirty_range);
8883 } else {
8884 _collector->_modUnionTable.mark(addr);
8885 }
8886 _collector->_ser_kac_preclean_ovflw++;
8887 } else {
8888 _collector->push_on_overflow_list(obj);
8889 _collector->_ser_kac_ovflw++;
8890 }
8891 }
8892 }
8893 }
8895 void CMSKeepAliveClosure::do_oop(oop* p) { CMSKeepAliveClosure::do_oop_work(p); }
8896 void CMSKeepAliveClosure::do_oop(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
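// --- Editor's illustration (not part of the original change) --------------
// The object-array branch above rounds the array's end address up to a card
// boundary so that every card the array spans is redirtied. A standalone
// sketch of that arithmetic, assuming the usual 512-byte card size (a power
// of two, which the mask form of round_to requires):
static size_t round_up_to_card(size_t end_addr, size_t card_size) {
  return (end_addr + card_size - 1) & ~(card_size - 1);  // round_to(end, card_size)
}
// Example: an array ending at byte 1300 gives round_up_to_card(1300, 512)
// == 1536, so the range [start, 1536) covers the last, partial card too.
// ---------------------------------------------------------------------------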
8898 // CMSParKeepAliveClosure: a parallel version of the above.
8899 // The work queues are private to each closure (thread),
8900 // but (may be) available for stealing by other threads.
8901 void CMSParKeepAliveClosure::do_oop(oop obj) {
8902 HeapWord* addr = (HeapWord*)obj;
8903 if (_span.contains(addr) &&
8904 !_bit_map->isMarked(addr)) {
8905 // In general, during recursive tracing, several threads
8906 // may be concurrently getting here; the first one to
8907 // "tag" it, claims it.
8908 if (_bit_map->par_mark(addr)) {
8909 bool res = _work_queue->push(obj);
8910 assert(res, "Low water mark should be much less than capacity");
8911 // Do a recursive trim in the hope that this will keep
8912 // stack usage lower, but leave some oops for potential stealers
8913 trim_queue(_low_water_mark);
8914 } // Else, another thread got there first
8915 }
8916 }
8918 void CMSParKeepAliveClosure::do_oop(oop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
8919 void CMSParKeepAliveClosure::do_oop(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
8921 void CMSParKeepAliveClosure::trim_queue(uint max) {
8922 while (_work_queue->size() > max) {
8923 oop new_oop;
8924 if (_work_queue->pop_local(new_oop)) {
8925 assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
8926 assert(_bit_map->isMarked((HeapWord*)new_oop),
8927 "no white objects on this stack!");
8928 assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
8929 // iterate over the oops in this oop, marking and pushing
8930 // the ones in CMS heap (i.e. in _span).
8931 new_oop->oop_iterate(&_mark_and_push);
8932 }
8933 }
8934 }
8936 CMSInnerParMarkAndPushClosure::CMSInnerParMarkAndPushClosure(
8937 CMSCollector* collector,
8938 MemRegion span, CMSBitMap* bit_map,
8939 OopTaskQueue* work_queue):
8940 _collector(collector),
8941 _span(span),
8942 _bit_map(bit_map),
8943 _work_queue(work_queue) { }
8945 void CMSInnerParMarkAndPushClosure::do_oop(oop obj) {
8946 HeapWord* addr = (HeapWord*)obj;
8947 if (_span.contains(addr) &&
8948 !_bit_map->isMarked(addr)) {
8949 if (_bit_map->par_mark(addr)) {
8950 bool simulate_overflow = false;
8951 NOT_PRODUCT(
8952 if (CMSMarkStackOverflowALot &&
8953 _collector->par_simulate_overflow()) {
8954 // simulate a stack overflow
8955 simulate_overflow = true;
8956 }
8957 )
8958 if (simulate_overflow || !_work_queue->push(obj)) {
8959 _collector->par_push_on_overflow_list(obj);
8960 _collector->_par_kac_ovflw++;
8961 }
8962 } // Else another thread got there already
8963 }
8964 }
8966 void CMSInnerParMarkAndPushClosure::do_oop(oop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
8967 void CMSInnerParMarkAndPushClosure::do_oop(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
8969 //////////////////////////////////////////////////////////////////
8970 // CMSExpansionCause /////////////////////////////
8971 //////////////////////////////////////////////////////////////////
8972 const char* CMSExpansionCause::to_string(CMSExpansionCause::Cause cause) {
8973 switch (cause) {
8974 case _no_expansion:
8975 return "No expansion";
8976 case _satisfy_free_ratio:
8977 return "Free ratio";
8978 case _satisfy_promotion:
8979 return "Satisfy promotion";
8980 case _satisfy_allocation:
8981 return "allocation";
8982 case _allocate_par_lab:
8983 return "Par LAB";
8984 case _allocate_par_spooling_space:
8985 return "Par Spooling Space";
8986 case _adaptive_size_policy:
8987 return "Ergonomics";
8988 default:
8989 return "unknown";
8990 }
8991 }
8993 void CMSDrainMarkingStackClosure::do_void() {
8994 // the max number to take from overflow list at a time
8995 const size_t num = _mark_stack->capacity()/4;
8996 assert(!_concurrent_precleaning || _collector->overflow_list_is_empty(),
8997 "Overflow list should be NULL during concurrent phases");
8998 while (!_mark_stack->isEmpty() ||
8999 // if stack is empty, check the overflow list
9000 _collector->take_from_overflow_list(num, _mark_stack)) {
9001 oop obj = _mark_stack->pop();
9002 HeapWord* addr = (HeapWord*)obj;
9003 assert(_span.contains(addr), "Should be within span");
9004 assert(_bit_map->isMarked(addr), "Should be marked");
9005 assert(obj->is_oop(), "Should be an oop");
9006 obj->oop_iterate(_keep_alive);
9007 }
9008 }
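// --- Editor's illustration (not part of the original change) --------------
// The loop above alternates between emptying the local mark stack and
// refilling it from the global overflow list. Its control shape, as a hedged
// generic sketch; StackT is assumed to expose isEmpty() and a value-returning
// pop(), which is an assumption for illustration, not the real CMSMarkStack:
template <typename StackT, typename Refill, typename Process>
static void drain_with_refill(StackT* stack, Refill refill, Process process) {
  // refill(stack) returns true iff it transferred any objects to the stack.
  while (!stack->isEmpty() || refill(stack)) {
    process(stack->pop());
  }
}
// ---------------------------------------------------------------------------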
9010 void CMSParDrainMarkingStackClosure::do_void() {
9011 // drain queue
9012 trim_queue(0);
9013 }
9015 // Trim our work_queue so its length is below max at return
9016 void CMSParDrainMarkingStackClosure::trim_queue(uint max) {
9017 while (_work_queue->size() > max) {
9018 oop new_oop;
9019 if (_work_queue->pop_local(new_oop)) {
9020 assert(new_oop->is_oop(), "Expected an oop");
9021 assert(_bit_map->isMarked((HeapWord*)new_oop),
9022 "no white objects on this stack!");
9023 assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
9024 // iterate over the oops in this oop, marking and pushing
9025 // the ones in CMS heap (i.e. in _span).
9026 new_oop->oop_iterate(&_mark_and_push);
9027 }
9028 }
9029 }
9031 ////////////////////////////////////////////////////////////////////
9032 // Support for Marking Stack Overflow list handling and related code
9033 ////////////////////////////////////////////////////////////////////
9034 // Much of the following code is similar in shape and spirit to the
9035 // code used in ParNewGC. We should try and share that code
9036 // as much as possible in the future.
9038 #ifndef PRODUCT
9039 // Debugging support for CMSStackOverflowALot
9041 // It's OK to call this multi-threaded; the worst thing
9042 // that can happen is that we'll get a bunch of closely
9043 // spaced simulated overflows, but that's OK; in fact it's
9044 // probably good, as it would exercise the overflow code
9045 // under contention.
9046 bool CMSCollector::simulate_overflow() {
9047 if (_overflow_counter-- <= 0) { // just being defensive
9048 _overflow_counter = CMSMarkStackOverflowInterval;
9049 return true;
9050 } else {
9051 return false;
9052 }
9053 }
9055 bool CMSCollector::par_simulate_overflow() {
9056 return simulate_overflow();
9057 }
9058 #endif
9060 // Single-threaded
9061 bool CMSCollector::take_from_overflow_list(size_t num, CMSMarkStack* stack) {
9062 assert(stack->isEmpty(), "Expected precondition");
9063 assert(stack->capacity() > num, "Shouldn't bite more than can chew");
9064 size_t i = num;
9065 oop cur = _overflow_list;
9066 const markOop proto = markOopDesc::prototype();
9067 NOT_PRODUCT(ssize_t n = 0;)
9068 for (oop next; i > 0 && cur != NULL; cur = next, i--) {
9069 next = oop(cur->mark());
9070 cur->set_mark(proto); // until proven otherwise
9071 assert(cur->is_oop(), "Should be an oop");
9072 bool res = stack->push(cur);
9073 assert(res, "Bit off more than can chew?");
9074 NOT_PRODUCT(n++;)
9075 }
9076 _overflow_list = cur;
9077 #ifndef PRODUCT
9078 assert(_num_par_pushes >= n, "Too many pops?");
9079 _num_par_pushes -= n;
9080 #endif
9081 return !stack->isEmpty();
9082 }
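// --- Editor's illustration (not part of the original change) --------------
// The overflow list is an intrusive stack threaded through each object's
// mark word. A hedged standalone sketch of the same "detach up to num" shape
// over a plain singly linked list (the real loop above also restores each
// popped object's prototype mark word as it goes); assumes num > 0:
struct OverflowNode { OverflowNode* next; };  // next stands in for the mark word
static OverflowNode* take_prefix(OverflowNode** head, size_t num) {
  OverflowNode* first = *head;
  OverflowNode* cur   = *head;
  OverflowNode* prev  = NULL;
  for (size_t i = num; i > 0 && cur != NULL; i--) {
    prev = cur;
    cur  = cur->next;
  }
  if (prev != NULL) prev->next = NULL;  // detach the prefix
  *head = cur;                          // the remainder stays on the global list
  return first;
}
// ---------------------------------------------------------------------------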
9084 #define BUSY (cast_to_oop<intptr_t>(0x1aff1aff))
9085 // (MT-safe) Get a prefix of at most "num" from the list.
9086 // The overflow list is chained through the mark word of
9087 // each object in the list. We fetch the entire list,
9088 // break off a prefix of the right size and return the
9089 // remainder. If other threads try to take objects from
9090 // the overflow list at that time, they will wait for
9091 // some time to see if data becomes available. If (and
9092 // only if) another thread places one or more object(s)
9093 // on the global list before we have returned the suffix
9094 // to the global list, we will walk down our local list
9095 // to find its end and append the global list to
9096 // our suffix before returning it. This suffix walk can
9097 // prove to be expensive (quadratic in the amount of traffic)
9098 // when there are many objects in the overflow list and
9099 // there is much producer-consumer contention on the list.
9100 // *NOTE*: The overflow list manipulation code here and
9101 // in ParNewGeneration:: are very similar in shape,
9102 // except that in the ParNew case we use the old (from/eden)
9103 // copy of the object to thread the list via its klass word.
9104 // Because of the common code, if you make any changes in
9105 // the code below, please check the ParNew version to see if
9106 // similar changes might be needed.
9107 // CR 6797058 has been filed to consolidate the common code.
9108 bool CMSCollector::par_take_from_overflow_list(size_t num,
9109 OopTaskQueue* work_q,
9110 int no_of_gc_threads) {
9111 assert(work_q->size() == 0, "First empty local work queue");
9112 assert(num < work_q->max_elems(), "Can't bite more than we can chew");
9113 if (_overflow_list == NULL) {
9114 return false;
9115 }
9116 // Grab the entire list; we'll put back a suffix
9117 oop prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
9118 Thread* tid = Thread::current();
9119 // Before "no_of_gc_threads" was introduced, CMSOverflowSpinCount was
9120 // set to ParallelGCThreads.
9121 size_t CMSOverflowSpinCount = (size_t) no_of_gc_threads; // was ParallelGCThreads;
9122 size_t sleep_time_millis = MAX2((size_t)1, num/100);
9123 // If the list is busy, we spin for a short while,
9124 // sleeping between attempts to get the list.
9125 for (size_t spin = 0; prefix == BUSY && spin < CMSOverflowSpinCount; spin++) {
9126 os::sleep(tid, sleep_time_millis, false);
9127 if (_overflow_list == NULL) {
9128 // Nothing left to take
9129 return false;
9130 } else if (_overflow_list != BUSY) {
9131 // Try and grab the prefix
9132 prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
9133 }
9134 }
9135 // If the list was found to be empty, or we spun long
9136 // enough, we give up and return empty-handed. If we leave
9137 // the list in the BUSY state below, it must be the case that
9138 // some other thread holds the overflow list and will set it
9139 // to a non-BUSY state in the future.
9140 if (prefix == NULL || prefix == BUSY) {
9141 // Nothing to take or waited long enough
9142 if (prefix == NULL) {
9143 // Write back the NULL in case we overwrote it with BUSY above
9144 // and it is still the same value.
9145 (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
9146 }
9147 return false;
9148 }
9149 assert(prefix != NULL && prefix != BUSY, "Error");
9150 size_t i = num;
9151 oop cur = prefix;
9152 // Walk down the first "num" objects, unless we reach the end.
9153 for (; i > 1 && cur->mark() != NULL; cur = oop(cur->mark()), i--);
9154 if (cur->mark() == NULL) {
9155 // We have "num" or fewer elements in the list, so there
9156 // is nothing to return to the global list.
9157 // Write back the NULL in lieu of the BUSY we wrote
9158 // above, if it is still the same value.
9159 if (_overflow_list == BUSY) {
9160 (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
9161 }
9162 } else {
9163 // Chop off the suffix and return it to the global list.
9164 assert(cur->mark() != BUSY, "Error");
9165 oop suffix_head = cur->mark(); // suffix will be put back on global list
9166 cur->set_mark(NULL); // break off suffix
9167 // It's possible that the list is still in the empty (BUSY) state
9168 // we left it in a short while ago; in that case we may be
9169 // able to place back the suffix without incurring the cost
9170 // of a walk down the list.
9171 oop observed_overflow_list = _overflow_list;
9172 oop cur_overflow_list = observed_overflow_list;
9173 bool attached = false;
9174 while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
9175 observed_overflow_list =
9176 (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
9177 if (cur_overflow_list == observed_overflow_list) {
9178 attached = true;
9179 break;
9180 } else cur_overflow_list = observed_overflow_list;
9181 }
9182 if (!attached) {
9183 // Too bad, someone else sneaked in (at least) an element; we'll need
9184 // to do a splice. Find tail of suffix so we can prepend suffix to global
9185 // list.
9186 for (cur = suffix_head; cur->mark() != NULL; cur = (oop)(cur->mark()));
9187 oop suffix_tail = cur;
9188 assert(suffix_tail != NULL && suffix_tail->mark() == NULL,
9189 "Tautology");
9190 observed_overflow_list = _overflow_list;
9191 do {
9192 cur_overflow_list = observed_overflow_list;
9193 if (cur_overflow_list != BUSY) {
9194 // Do the splice ...
9195 suffix_tail->set_mark(markOop(cur_overflow_list));
9196 } else { // cur_overflow_list == BUSY
9197 suffix_tail->set_mark(NULL);
9198 }
9199 // ... and try to place spliced list back on overflow_list ...
9200 observed_overflow_list =
9201 (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
9202 } while (cur_overflow_list != observed_overflow_list);
9203 // ... until we have succeeded in doing so.
9204 }
9205 }
9207 // Push the prefix elements on work_q
9208 assert(prefix != NULL, "control point invariant");
9209 const markOop proto = markOopDesc::prototype();
9210 oop next;
9211 NOT_PRODUCT(ssize_t n = 0;)
9212 for (cur = prefix; cur != NULL; cur = next) {
9213 next = oop(cur->mark());
9214 cur->set_mark(proto); // until proven otherwise
9215 assert(cur->is_oop(), "Should be an oop");
9216 bool res = work_q->push(cur);
9217 assert(res, "Bit off more than we can chew?");
9218 NOT_PRODUCT(n++;)
9219 }
9220 #ifndef PRODUCT
9221 assert(_num_par_pushes >= n, "Too many pops?");
9222 Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
9223 #endif
9224 return true;
9225 }
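// --- Editor's illustration (not part of the original change) --------------
// The heart of the protocol above: claim the whole list by swapping in the
// BUSY sentinel, then publish a suffix back with a CAS retry loop. A hedged
// standalone C++11 sketch using std::atomic as a stand-in for HotSpot's
// Atomic::xchg_ptr/cmpxchg_ptr; Node and BUSY_NODE are hypothetical, and the
// real code also skips the tail walk when the list is NULL or BUSY:
#include <atomic>
struct ListNode { ListNode* next; };
static ListNode* const BUSY_NODE = reinterpret_cast<ListNode*>(0x1);

static ListNode* claim_entire_list(std::atomic<ListNode*>* list) {
  return list->exchange(BUSY_NODE);  // Atomic::xchg_ptr(BUSY, &_overflow_list)
}
static void publish_suffix(std::atomic<ListNode*>* list,
                           ListNode* suffix_head, ListNode* suffix_tail) {
  ListNode* observed = list->load();
  do {  // splice the current global list (if any) after our tail, then CAS
    suffix_tail->next = (observed == BUSY_NODE) ? NULL : observed;
  } while (!list->compare_exchange_weak(observed, suffix_head));
}
// ---------------------------------------------------------------------------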
9227 // Single-threaded
9228 void CMSCollector::push_on_overflow_list(oop p) {
9229 NOT_PRODUCT(_num_par_pushes++;)
9230 assert(p->is_oop(), "Not an oop");
9231 preserve_mark_if_necessary(p);
9232 p->set_mark((markOop)_overflow_list);
9233 _overflow_list = p;
9234 }
9236 // Multi-threaded; use CAS to prepend to overflow list
9237 void CMSCollector::par_push_on_overflow_list(oop p) {
9238 NOT_PRODUCT(Atomic::inc_ptr(&_num_par_pushes);)
9239 assert(p->is_oop(), "Not an oop");
9240 par_preserve_mark_if_necessary(p);
9241 oop observed_overflow_list = _overflow_list;
9242 oop cur_overflow_list;
9243 do {
9244 cur_overflow_list = observed_overflow_list;
9245 if (cur_overflow_list != BUSY) {
9246 p->set_mark(markOop(cur_overflow_list));
9247 } else {
9248 p->set_mark(NULL);
9249 }
9250 observed_overflow_list =
9251 (oop) Atomic::cmpxchg_ptr(p, &_overflow_list, cur_overflow_list);
9252 } while (cur_overflow_list != observed_overflow_list);
9253 }
9254 #undef BUSY
9256 // Single threaded
9257 // General Note on GrowableArray: pushes may silently fail
9258 // because we are (temporarily) out of C-heap for expanding
9259 // the stack. The problem is quite ubiquitous and affects
9260 // a lot of code in the JVM. The prudent thing for GrowableArray
9261 // to do (for now) is to exit with an error. However, that may
9262 // be too draconian in some cases because the caller may be
9263 // able to recover without much harm. For such cases, we
9264 // should probably introduce a "soft_push" method which returns
9265 // an indication of success or failure with the assumption that
9266 // the caller may be able to recover from a failure; code in
9267 // the VM can then be changed, incrementally, to deal with such
9268 // failures where possible, thus, incrementally hardening the VM
9269 // in such low resource situations.
9270 void CMSCollector::preserve_mark_work(oop p, markOop m) {
9271 _preserved_oop_stack.push(p);
9272 _preserved_mark_stack.push(m);
9273 assert(m == p->mark(), "Mark word changed");
9274 assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
9275 "bijection");
9276 }
9278 // Single threaded
9279 void CMSCollector::preserve_mark_if_necessary(oop p) {
9280 markOop m = p->mark();
9281 if (m->must_be_preserved(p)) {
9282 preserve_mark_work(p, m);
9283 }
9284 }
9286 void CMSCollector::par_preserve_mark_if_necessary(oop p) {
9287 markOop m = p->mark();
9288 if (m->must_be_preserved(p)) {
9289 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
9290 // Even though we read the mark word without holding
9291 // the lock, we are assured that it will not change
9292 // because we "own" this oop, so no other thread can
9293 // be trying to push it on the overflow list; see
9294 // the assertion in preserve_mark_work() that checks
9295 // that m == p->mark().
9296 preserve_mark_work(p, m);
9297 }
9298 }
9300 // We should be able to do this multi-threaded,
9301 // with a chunk of stack being a task (this is
9302 // correct because each oop only ever appears
9303 // once in the overflow list). However, it's
9304 // not very easy to completely overlap this with
9305 // other operations, so will generally not be done
9306 // until all work's been completed. Because we
9307 // expect the preserved oop stack (set) to be small,
9308 // it's probably fine to do this single-threaded.
9309 // We can explore cleverer concurrent/overlapped/parallel
9310 // processing of preserved marks if we feel the
9311 // need for this in the future. Stack overflow should
9312 // be so rare in practice and, when it happens, its
9313 // effect on performance so great that this will
9314 // likely just be in the noise anyway.
9315 void CMSCollector::restore_preserved_marks_if_any() {
9316 assert(SafepointSynchronize::is_at_safepoint(),
9317 "world should be stopped");
9318 assert(Thread::current()->is_ConcurrentGC_thread() ||
9319 Thread::current()->is_VM_thread(),
9320 "should be single-threaded");
9321 assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
9322 "bijection");
9324 while (!_preserved_oop_stack.is_empty()) {
9325 oop p = _preserved_oop_stack.pop();
9326 assert(p->is_oop(), "Should be an oop");
9327 assert(_span.contains(p), "oop should be in _span");
9328 assert(p->mark() == markOopDesc::prototype(),
9329 "Set when taken from overflow list");
9330 markOop m = _preserved_mark_stack.pop();
9331 p->set_mark(m);
9332 }
9333 assert(_preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty(),
9334 "stacks were cleared above");
9335 }
9337 #ifndef PRODUCT
9338 bool CMSCollector::no_preserved_marks() const {
9339 return _preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty();
9340 }
9341 #endif
9343 CMSAdaptiveSizePolicy* ASConcurrentMarkSweepGeneration::cms_size_policy() const
9344 {
9345 GenCollectedHeap* gch = (GenCollectedHeap*) GenCollectedHeap::heap();
9346 CMSAdaptiveSizePolicy* size_policy =
9347 (CMSAdaptiveSizePolicy*) gch->gen_policy()->size_policy();
9348 assert(size_policy->is_gc_cms_adaptive_size_policy(),
9349 "Wrong type for size policy");
9350 return size_policy;
9351 }
9353 void ASConcurrentMarkSweepGeneration::resize(size_t cur_promo_size,
9354 size_t desired_promo_size) {
9355 if (cur_promo_size < desired_promo_size) {
9356 size_t expand_bytes = desired_promo_size - cur_promo_size;
9357 if (PrintAdaptiveSizePolicy && Verbose) {
9358 gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
9359 "Expanding tenured generation by " SIZE_FORMAT " (bytes)",
9360 expand_bytes);
9361 }
9362 expand(expand_bytes,
9363 MinHeapDeltaBytes,
9364 CMSExpansionCause::_adaptive_size_policy);
9365 } else if (desired_promo_size < cur_promo_size) {
9366 size_t shrink_bytes = cur_promo_size - desired_promo_size;
9367 if (PrintAdaptiveSizePolicy && Verbose) {
9368 gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
9369 "Shrinking tenured generation by " SIZE_FORMAT " (bytes)",
9370 shrink_bytes);
9371 }
9372 shrink(shrink_bytes);
9373 }
9374 }
9376 CMSGCAdaptivePolicyCounters* ASConcurrentMarkSweepGeneration::gc_adaptive_policy_counters() {
9377 GenCollectedHeap* gch = GenCollectedHeap::heap();
9378 CMSGCAdaptivePolicyCounters* counters =
9379 (CMSGCAdaptivePolicyCounters*) gch->collector_policy()->counters();
9380 assert(counters->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
9381 "Wrong kind of counters");
9382 return counters;
9383 }
9386 void ASConcurrentMarkSweepGeneration::update_counters() {
9387 if (UsePerfData) {
9388 _space_counters->update_all();
9389 _gen_counters->update_all();
9390 CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
9391 GenCollectedHeap* gch = GenCollectedHeap::heap();
9392 CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
9393 assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
9394 "Wrong gc statistics type");
9395 counters->update_counters(gc_stats_l);
9396 }
9397 }
9399 void ASConcurrentMarkSweepGeneration::update_counters(size_t used) {
9400 if (UsePerfData) {
9401 _space_counters->update_used(used);
9402 _space_counters->update_capacity();
9403 _gen_counters->update_all();
9405 CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
9406 GenCollectedHeap* gch = GenCollectedHeap::heap();
9407 CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
9408 assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
9409 "Wrong gc statistics type");
9410 counters->update_counters(gc_stats_l);
9411 }
9412 }
9414 void ASConcurrentMarkSweepGeneration::shrink_by(size_t desired_bytes) {
9415 assert_locked_or_safepoint(Heap_lock);
9416 assert_lock_strong(freelistLock());
9417 HeapWord* old_end = _cmsSpace->end();
9418 HeapWord* unallocated_start = _cmsSpace->unallocated_block();
9419 assert(old_end >= unallocated_start, "Miscalculation of unallocated_start");
9420 FreeChunk* chunk_at_end = find_chunk_at_end();
9421 if (chunk_at_end == NULL) {
9422 // No room to shrink
9423 if (PrintGCDetails && Verbose) {
9424 gclog_or_tty->print_cr("No room to shrink: old_end "
9425 PTR_FORMAT " unallocated_start " PTR_FORMAT
9426 " chunk_at_end " PTR_FORMAT,
9427 old_end, unallocated_start, chunk_at_end);
9428 }
9429 return;
9430 } else {
9432 // Find the chunk at the end of the space and determine
9433 // how much it can be shrunk.
9434 size_t shrinkable_size_in_bytes = chunk_at_end->size();
9435 size_t aligned_shrinkable_size_in_bytes =
9436 align_size_down(shrinkable_size_in_bytes, os::vm_page_size());
9437 assert(unallocated_start <= (HeapWord*) chunk_at_end->end(),
9438 "Inconsistent chunk at end of space");
9439 size_t bytes = MIN2(desired_bytes, aligned_shrinkable_size_in_bytes);
9440 size_t word_size_before = heap_word_size(_virtual_space.committed_size());
9442 // Shrink the underlying space
9443 _virtual_space.shrink_by(bytes);
9444 if (PrintGCDetails && Verbose) {
9445 gclog_or_tty->print_cr("ConcurrentMarkSweepGeneration::shrink_by:"
9446 " desired_bytes " SIZE_FORMAT
9447 " shrinkable_size_in_bytes " SIZE_FORMAT
9448 " aligned_shrinkable_size_in_bytes " SIZE_FORMAT
9449 " bytes " SIZE_FORMAT,
9450 desired_bytes, shrinkable_size_in_bytes,
9451 aligned_shrinkable_size_in_bytes, bytes);
9452 gclog_or_tty->print_cr(" old_end " SIZE_FORMAT
9453 " unallocated_start " SIZE_FORMAT,
9454 old_end, unallocated_start);
9455 }
9457 // If the space did shrink (shrinking is not guaranteed),
9458 // shrink the chunk at the end by the appropriate amount.
9459 if (((HeapWord*)_virtual_space.high()) < old_end) {
9460 size_t new_word_size =
9461 heap_word_size(_virtual_space.committed_size());
9463 // Have to remove the chunk from the dictionary because it is changing
9464 // size and might be someplace elsewhere in the dictionary.
9466 // Get the chunk at end, shrink it, and put it
9467 // back.
9468 _cmsSpace->removeChunkFromDictionary(chunk_at_end);
9469 size_t word_size_change = word_size_before - new_word_size;
9470 size_t chunk_at_end_old_size = chunk_at_end->size();
9471 assert(chunk_at_end_old_size >= word_size_change,
9472 "Shrink is too large");
9473 chunk_at_end->set_size(chunk_at_end_old_size -
9474 word_size_change);
9475 _cmsSpace->freed((HeapWord*) chunk_at_end->end(),
9476 word_size_change);
9478 _cmsSpace->returnChunkToDictionary(chunk_at_end);
9480 MemRegion mr(_cmsSpace->bottom(), new_word_size);
9481 _bts->resize(new_word_size); // resize the block offset shared array
9482 Universe::heap()->barrier_set()->resize_covered_region(mr);
9483 _cmsSpace->assert_locked();
9484 _cmsSpace->set_end((HeapWord*)_virtual_space.high());
9486 NOT_PRODUCT(_cmsSpace->dictionary()->verify());
9488 // update the space and generation capacity counters
9489 if (UsePerfData) {
9490 _space_counters->update_capacity();
9491 _gen_counters->update_all();
9492 }
9494 if (Verbose && PrintGCDetails) {
9495 size_t new_mem_size = _virtual_space.committed_size();
9496 size_t old_mem_size = new_mem_size + bytes;
9497 gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
9498 name(), old_mem_size/K, bytes/K, new_mem_size/K);
9499 }
9500 }
9502 assert(_cmsSpace->unallocated_block() <= _cmsSpace->end(),
9503 "Inconsistency at end of space");
9504 assert(chunk_at_end->end() == (uintptr_t*) _cmsSpace->end(),
9505 "Shrinking is inconsistent");
9506 return;
9507 }
9508 }
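// --- Editor's illustration (not part of the original change) --------------
// The size computation in shrink_by() above: the tail chunk can only be
// returned to the OS in whole pages, so its size is aligned down to the page
// size and then capped by the request. Standalone sketch (page_size must be
// a power of two for the mask form of align_size_down):
static size_t computed_shrink_bytes(size_t chunk_bytes, size_t desired_bytes,
                                    size_t page_size) {
  size_t aligned = chunk_bytes & ~(page_size - 1);           // align_size_down
  return desired_bytes < aligned ? desired_bytes : aligned;  // MIN2
}
// Example: a 10000-byte tail chunk with 4096-byte pages can yield at most
// 8192 bytes, so a request to shrink by 12000 shrinks by 8192.
// ---------------------------------------------------------------------------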
9509 // Transfer some number of overflown objects to usual marking
9510 // stack. Return true if some objects were transferred.
9511 bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
9512 size_t num = MIN2((size_t)(_mark_stack->capacity() - _mark_stack->length())/4,
9513 (size_t)ParGCDesiredObjsFromOverflowList);
9515 bool res = _collector->take_from_overflow_list(num, _mark_stack);
9516 assert(_collector->overflow_list_is_empty() || res,
9517 "If list is not empty, we should have taken something");
9518 assert(!res || !_mark_stack->isEmpty(),
9519 "If we took something, it should now be on our stack");
9520 return res;
9521 }
9523 size_t MarkDeadObjectsClosure::do_blk(HeapWord* addr) {
9524 size_t res = _sp->block_size_no_stall(addr, _collector);
9525 if (_sp->block_is_obj(addr)) {
9526 if (_live_bit_map->isMarked(addr)) {
9527 // It can't have been dead in a previous cycle
9528 guarantee(!_dead_bit_map->isMarked(addr), "No resurrection!");
9529 } else {
9530 _dead_bit_map->mark(addr); // mark the dead object
9531 }
9532 }
9533 // Could be 0, if the block size could not be computed without stalling.
9534 return res;
9535 }
9537 TraceCMSMemoryManagerStats::TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase, GCCause::Cause cause): TraceMemoryManagerStats() {
9539 switch (phase) {
9540 case CMSCollector::InitialMarking:
9541 initialize(true /* fullGC */ ,
9542 cause /* cause of the GC */,
9543 true /* recordGCBeginTime */,
9544 true /* recordPreGCUsage */,
9545 false /* recordPeakUsage */,
9546 false /* recordPostGCusage */,
9547 true /* recordAccumulatedGCTime */,
9548 false /* recordGCEndTime */,
9549 false /* countCollection */ );
9550 break;
9552 case CMSCollector::FinalMarking:
9553 initialize(true /* fullGC */ ,
9554 cause /* cause of the GC */,
9555 false /* recordGCBeginTime */,
9556 false /* recordPreGCUsage */,
9557 false /* recordPeakUsage */,
9558 false /* recordPostGCusage */,
9559 true /* recordAccumulatedGCTime */,
9560 false /* recordGCEndTime */,
9561 false /* countCollection */ );
9562 break;
9564 case CMSCollector::Sweeping:
9565 initialize(true /* fullGC */ ,
9566 cause /* cause of the GC */,
9567 false /* recordGCBeginTime */,
9568 false /* recordPreGCUsage */,
9569 true /* recordPeakUsage */,
9570 true /* recordPostGCusage */,
9571 false /* recordAccumulatedGCTime */,
9572 true /* recordGCEndTime */,
9573 true /* countCollection */ );
9574 break;
9576 default:
9577 ShouldNotReachHere();
9578 }
9579 }