Tue, 29 Apr 2014 15:17:27 +0200
8042195: Introduce umbrella header orderAccess.inline.hpp.
Reviewed-by: dholmes, kvn, stefank, twisti
1 /*
2 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "classfile/classLoaderData.hpp"
27 #include "classfile/symbolTable.hpp"
28 #include "classfile/systemDictionary.hpp"
29 #include "code/codeCache.hpp"
30 #include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
31 #include "gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp"
32 #include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
33 #include "gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp"
34 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
35 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
36 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
37 #include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp"
38 #include "gc_implementation/parNew/parNewGeneration.hpp"
39 #include "gc_implementation/shared/collectorCounters.hpp"
40 #include "gc_implementation/shared/gcTimer.hpp"
41 #include "gc_implementation/shared/gcTrace.hpp"
42 #include "gc_implementation/shared/gcTraceTime.hpp"
43 #include "gc_implementation/shared/isGCActiveMark.hpp"
44 #include "gc_interface/collectedHeap.inline.hpp"
45 #include "memory/allocation.hpp"
46 #include "memory/cardTableRS.hpp"
47 #include "memory/collectorPolicy.hpp"
48 #include "memory/gcLocker.inline.hpp"
49 #include "memory/genCollectedHeap.hpp"
50 #include "memory/genMarkSweep.hpp"
51 #include "memory/genOopClosures.inline.hpp"
52 #include "memory/iterator.hpp"
53 #include "memory/padded.hpp"
54 #include "memory/referencePolicy.hpp"
55 #include "memory/resourceArea.hpp"
56 #include "memory/tenuredGeneration.hpp"
57 #include "oops/oop.inline.hpp"
58 #include "prims/jvmtiExport.hpp"
59 #include "runtime/globals_extension.hpp"
60 #include "runtime/handles.inline.hpp"
61 #include "runtime/java.hpp"
62 #include "runtime/orderAccess.inline.hpp"
63 #include "runtime/vmThread.hpp"
64 #include "services/memoryService.hpp"
65 #include "services/runtimeService.hpp"
67 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
69 // statics
70 CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
71 bool CMSCollector::_full_gc_requested = false;
72 GCCause::Cause CMSCollector::_full_gc_cause = GCCause::_no_gc;
74 //////////////////////////////////////////////////////////////////
75 // In support of CMS/VM thread synchronization
76 //////////////////////////////////////////////////////////////////
77 // We split use of the CGC_lock into 2 "levels".
78 // The low-level locking is of the usual CGC_lock monitor. We introduce
79 // a higher level "token" (hereafter "CMS token") built on top of the
80 // low level monitor (hereafter "CGC lock").
81 // The token-passing protocol gives priority to the VM thread. The
82 // CMS-lock doesn't provide any fairness guarantees, but clients
83 // should ensure that it is only held for very short, bounded
84 // durations.
85 //
86 // When either the CMS thread or the VM thread is involved in
87 // collection operations during which it does not want the other
88 // thread to interfere, it obtains the CMS token.
89 //
90 // If either thread tries to get the token while the other has
91 // it, that thread waits. However, if the VM thread and CMS thread
92 // both want the token, then the VM thread gets priority while the
93 // CMS thread waits. This ensures, for instance, that the "concurrent"
94 // phases of the CMS thread's work do not block out the VM thread
95 // for long periods of time as the CMS thread continues to hog
96 // the token. (See bug 4616232).
97 //
98 // The baton-passing functions are, however, controlled by the
99 // flags _foregroundGCShouldWait and _foregroundGCIsActive,
100 // and here the low-level CMS lock, not the high level token,
101 // ensures mutual exclusion.
102 //
103 // Two important conditions that we have to satisfy:
104 // 1. if a thread does a low-level wait on the CMS lock, then it
105 //    relinquishes the CMS token if it was holding that token
106 // when it acquired the low-level CMS lock.
107 // 2. any low-level notifications on the low-level lock
108 // should only be sent when a thread has relinquished the token.
109 //
110 // In the absence of either property, we'd have potential deadlock.
111 //
112 // We protect each of the CMS (concurrent and sequential) phases
113 // with the CMS _token_, not the CMS _lock_.
114 //
115 // The only code protected by CMS lock is the token acquisition code
116 // itself, see ConcurrentMarkSweepThread::[de]synchronize(), and the
117 // baton-passing code.
118 //
119 // Unfortunately, I couldn't come up with a good abstraction to factor and
120 // hide the naked CGC_lock manipulation in the baton-passing code
121 // further below. That's something we should try to do. Also, the proof
122 // of correctness of this 2-level locking scheme is far from obvious,
123 // and potentially quite slippery. We have an uneasy suspicion, for instance,
124 // that there may be a theoretical possibility of delay/starvation in the
125 // low-level lock/wait/notify scheme used for the baton-passing because of
126 // potential interference with the priority scheme embodied in the
127 // CMS-token-passing protocol. See related comments at a CGC_lock->wait()
128 // invocation further below and marked with "XXX 20011219YSR".
129 // Indeed, as we note elsewhere, this may become yet more slippery
130 // in the presence of multiple CMS and/or multiple VM threads. XXX
132 class CMSTokenSync: public StackObj {
133 private:
134 bool _is_cms_thread;
135 public:
136 CMSTokenSync(bool is_cms_thread):
137 _is_cms_thread(is_cms_thread) {
138 assert(is_cms_thread == Thread::current()->is_ConcurrentGC_thread(),
139 "Incorrect argument to constructor");
140 ConcurrentMarkSweepThread::synchronize(_is_cms_thread);
141 }
143 ~CMSTokenSync() {
144 assert(_is_cms_thread ?
145 ConcurrentMarkSweepThread::cms_thread_has_cms_token() :
146 ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
147 "Incorrect state");
148 ConcurrentMarkSweepThread::desynchronize(_is_cms_thread);
149 }
150 };
152 // Convenience class that does a CMSTokenSync, and then acquires
153 // up to three locks.
154 class CMSTokenSyncWithLocks: public CMSTokenSync {
155 private:
156 // Note: locks are acquired in textual declaration order
157 // and released in the opposite order
158 MutexLockerEx _locker1, _locker2, _locker3;
159 public:
160 CMSTokenSyncWithLocks(bool is_cms_thread, Mutex* mutex1,
161 Mutex* mutex2 = NULL, Mutex* mutex3 = NULL):
162 CMSTokenSync(is_cms_thread),
163 _locker1(mutex1, Mutex::_no_safepoint_check_flag),
164 _locker2(mutex2, Mutex::_no_safepoint_check_flag),
165 _locker3(mutex3, Mutex::_no_safepoint_check_flag)
166 { }
167 };
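// Illustrative usage sketch (not part of the original file; the lock name is
// a placeholder): a CMS phase typically brackets its work in one of the
// helpers above so that the CMS token and any subsidiary locks are acquired
// and released together, e.g.:
//
//   {
//     // true => running on the CMS thread; false => on the VM thread.
//     CMSTokenSyncWithLocks ts(true /* is_cms_thread */, some_phase_lock);
//     ... work that must not interleave with the other thread ...
//   }  // locks released first, then the CMS token is relinquished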
170 // Wrapper class to temporarily disable icms during a foreground cms collection.
171 class ICMSDisabler: public StackObj {
172 public:
173 // The ctor disables icms and wakes up the thread so it notices the change;
174 // the dtor re-enables icms. Note that the CMSCollector methods will check
175 // CMSIncrementalMode.
176 ICMSDisabler() { CMSCollector::disable_icms(); CMSCollector::start_icms(); }
177 ~ICMSDisabler() { CMSCollector::enable_icms(); }
178 };
180 //////////////////////////////////////////////////////////////////
181 // Concurrent Mark-Sweep Generation /////////////////////////////
182 //////////////////////////////////////////////////////////////////
184 NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;)
186 // This struct contains per-thread things necessary to support parallel
187 // young-gen collection.
188 class CMSParGCThreadState: public CHeapObj<mtGC> {
189 public:
190 CFLS_LAB lab;
191 PromotionInfo promo;
193 // Constructor.
194 CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
195 promo.setSpace(cfls);
196 }
197 };
199 ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
200 ReservedSpace rs, size_t initial_byte_size, int level,
201 CardTableRS* ct, bool use_adaptive_freelists,
202 FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
203 CardGeneration(rs, initial_byte_size, level, ct),
204 _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
205 _debug_collection_type(Concurrent_collection_type),
206 _did_compact(false)
207 {
208 HeapWord* bottom = (HeapWord*) _virtual_space.low();
209 HeapWord* end = (HeapWord*) _virtual_space.high();
211 _direct_allocated_words = 0;
212 NOT_PRODUCT(
213 _numObjectsPromoted = 0;
214 _numWordsPromoted = 0;
215 _numObjectsAllocated = 0;
216 _numWordsAllocated = 0;
217 )
219 _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end),
220 use_adaptive_freelists,
221 dictionaryChoice);
222 NOT_PRODUCT(debug_cms_space = _cmsSpace;)
223 if (_cmsSpace == NULL) {
224 vm_exit_during_initialization(
225 "CompactibleFreeListSpace allocation failure");
226 }
227 _cmsSpace->_gen = this;
229 _gc_stats = new CMSGCStats();
231 // Verify the assumption that FreeChunk::_prev and OopDesc::_klass
232 // offsets match. The ability to tell free chunks from objects
233 // depends on this property.
234 debug_only(
235 FreeChunk* junk = NULL;
236 assert(UseCompressedClassPointers ||
237 junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
238 "Offset of FreeChunk::_prev within FreeChunk must match"
239 " that of OopDesc::_klass within OopDesc");
240 )
241 if (CollectedHeap::use_parallel_gc_threads()) {
242 typedef CMSParGCThreadState* CMSParGCThreadStatePtr;
243 _par_gc_thread_states =
244 NEW_C_HEAP_ARRAY(CMSParGCThreadStatePtr, ParallelGCThreads, mtGC);
245 if (_par_gc_thread_states == NULL) {
246 vm_exit_during_initialization("Could not allocate par gc structs");
247 }
248 for (uint i = 0; i < ParallelGCThreads; i++) {
249 _par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace());
250 if (_par_gc_thread_states[i] == NULL) {
251 vm_exit_during_initialization("Could not allocate par gc structs");
252 }
253 }
254 } else {
255 _par_gc_thread_states = NULL;
256 }
257 _incremental_collection_failed = false;
258 // The "dilatation_factor" is the expansion that can occur on
259 // account of the fact that the minimum object size in the CMS
260 // generation may be larger than that in, say, a contiguous young
261 // generation.
262 // Ideally, in the calculation below, we'd compute the dilatation
263 // factor as: MinChunkSize/(promoting_gen's min object size)
264 // Since we do not have such a general query interface for the
265 // promoting generation, we'll instead just use the minimum
266 // object size (which today is a header's worth of space);
267 // note that all arithmetic is in units of HeapWords.
268 assert(MinChunkSize >= CollectedHeap::min_fill_size(), "just checking");
269 assert(_dilatation_factor >= 1.0, "from previous assert");
270 }
273 // The field "_initiating_occupancy" represents the occupancy percentage
274 // at which we trigger a new collection cycle. Unless explicitly specified
275 // via CMSInitiatingOccupancyFraction (argument "io" below), it
276 // is calculated by:
277 //
278 // Let "f" be MinHeapFreeRatio in
279 //
280 //   _initiating_occupancy = 100-f +
281 // f * (CMSTriggerRatio/100)
282 // where CMSTriggerRatio is the argument "tr" below.
283 //
284 // That is, if we assume the heap is at its desired maximum occupancy at the
285 // end of a collection, we let CMSTriggerRatio of the (purported) free
286 // space be allocated before initiating a new collection cycle.
287 //
288 void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, uintx tr) {
289 assert(io <= 100 && tr <= 100, "Check the arguments");
290 if (io >= 0) {
291 _initiating_occupancy = (double)io / 100.0;
292 } else {
293 _initiating_occupancy = ((100 - MinHeapFreeRatio) +
294 (double)(tr * MinHeapFreeRatio) / 100.0)
295 / 100.0;
296 }
297 }
299 void ConcurrentMarkSweepGeneration::ref_processor_init() {
300 assert(collector() != NULL, "no collector");
301 collector()->ref_processor_init();
302 }
304 void CMSCollector::ref_processor_init() {
305 if (_ref_processor == NULL) {
306 // Allocate and initialize a reference processor
307 _ref_processor =
308 new ReferenceProcessor(_span, // span
309 (ParallelGCThreads > 1) && ParallelRefProcEnabled, // mt processing
310 (int) ParallelGCThreads, // mt processing degree
311 _cmsGen->refs_discovery_is_mt(), // mt discovery
312 (int) MAX2(ConcGCThreads, ParallelGCThreads), // mt discovery degree
313 _cmsGen->refs_discovery_is_atomic(), // discovery is not atomic
314 &_is_alive_closure); // closure for liveness info
315 // Initialize the _ref_processor field of CMSGen
316 _cmsGen->set_ref_processor(_ref_processor);
318 }
319 }
321 CMSAdaptiveSizePolicy* CMSCollector::size_policy() {
322 GenCollectedHeap* gch = GenCollectedHeap::heap();
323 assert(gch->kind() == CollectedHeap::GenCollectedHeap,
324 "Wrong type of heap");
325 CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
326 gch->gen_policy()->size_policy();
327 assert(sp->is_gc_cms_adaptive_size_policy(),
328 "Wrong type of size policy");
329 return sp;
330 }
332 CMSGCAdaptivePolicyCounters* CMSCollector::gc_adaptive_policy_counters() {
333 CMSGCAdaptivePolicyCounters* results =
334 (CMSGCAdaptivePolicyCounters*) collector_policy()->counters();
335 assert(
336 results->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
337 "Wrong gc policy counter kind");
338 return results;
339 }
342 void ConcurrentMarkSweepGeneration::initialize_performance_counters() {
344 const char* gen_name = "old";
346 // Generation Counters - generation 1, 1 subspace
347 _gen_counters = new GenerationCounters(gen_name, 1, 1, &_virtual_space);
349 _space_counters = new GSpaceCounters(gen_name, 0,
350 _virtual_space.reserved_size(),
351 this, _gen_counters);
352 }
354 CMSStats::CMSStats(ConcurrentMarkSweepGeneration* cms_gen, unsigned int alpha):
355 _cms_gen(cms_gen)
356 {
357 assert(alpha <= 100, "bad value");
358 _saved_alpha = alpha;
360 // Initialize the alphas to the bootstrap value of 100.
361 _gc0_alpha = _cms_alpha = 100;
363 _cms_begin_time.update();
364 _cms_end_time.update();
366 _gc0_duration = 0.0;
367 _gc0_period = 0.0;
368 _gc0_promoted = 0;
370 _cms_duration = 0.0;
371 _cms_period = 0.0;
372 _cms_allocated = 0;
374 _cms_used_at_gc0_begin = 0;
375 _cms_used_at_gc0_end = 0;
376 _allow_duty_cycle_reduction = false;
377 _valid_bits = 0;
378 _icms_duty_cycle = CMSIncrementalDutyCycle;
379 }
381 double CMSStats::cms_free_adjustment_factor(size_t free) const {
382 // TBD: CR 6909490
383 return 1.0;
384 }
386 void CMSStats::adjust_cms_free_adjustment_factor(bool fail, size_t free) {
387 }
389 // If promotion failure handling is on, use
390 // the padded average size of the promotion for each
391 // young generation collection.
392 double CMSStats::time_until_cms_gen_full() const {
393 size_t cms_free = _cms_gen->cmsSpace()->free();
394 GenCollectedHeap* gch = GenCollectedHeap::heap();
395 size_t expected_promotion = MIN2(gch->get_gen(0)->capacity(),
396 (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
397 if (cms_free > expected_promotion) {
398 // Start a cms collection if there isn't enough space to promote
399 // for the next minor collection. Use the padded average as
400 // a safety factor.
401 cms_free -= expected_promotion;
403 // Adjust by the safety factor.
404 double cms_free_dbl = (double)cms_free;
405 double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor)/100.0;
406 // Apply a further correction factor which tries to adjust
407 // for recent occurrence of concurrent mode failures.
408 cms_adjustment = cms_adjustment * cms_free_adjustment_factor(cms_free);
409 cms_free_dbl = cms_free_dbl * cms_adjustment;
411 if (PrintGCDetails && Verbose) {
412 gclog_or_tty->print_cr("CMSStats::time_until_cms_gen_full: cms_free "
413 SIZE_FORMAT " expected_promotion " SIZE_FORMAT,
414 cms_free, expected_promotion);
415 gclog_or_tty->print_cr(" cms_free_dbl %f cms_consumption_rate %f",
416 cms_free_dbl, cms_consumption_rate() + 1.0);
417 }
418 // Add 1 in case the consumption rate goes to zero.
419 return cms_free_dbl / (cms_consumption_rate() + 1.0);
420 }
421 return 0.0;
422 }
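// Illustrative arithmetic (made-up numbers): if cms_free is 300 units, the
// padded expected promotion is 50 units and CMSIncrementalSafetyFactor is 10,
// the adjusted free space is (300 - 50) * 0.9 = 225 units; dividing by
// (cms_consumption_rate() + 1.0) then yields the estimated time until the
// cms generation is full.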
424 // Compare the duration of the cms collection to the
425 // time remaining before the cms generation is empty.
426 // Note that the time from the start of the cms collection
427 // to the start of the cms sweep (less than the total
428 // duration of the cms collection) can be used. This
429 // has been tried and some applications experienced
430 // promotion failures early in execution. This was
431 // possibly because the averages were not accurate
432 // enough at the beginning.
433 double CMSStats::time_until_cms_start() const {
434 // We add "gc0_period" to the "work" calculation
435 // below because this query is done (mostly) at the
436 // end of a scavenge, so we need to conservatively
437 // account for that much possible delay
438 // in the query so as to avoid concurrent mode failures
439 // due to starting the collection just a wee bit too
440 // late.
441 double work = cms_duration() + gc0_period();
442 double deadline = time_until_cms_gen_full();
443 // If a concurrent mode failure occurred recently, we want to be
444 // more conservative and halve our expected time_until_cms_gen_full()
445 if (work > deadline) {
446 if (Verbose && PrintGCDetails) {
447 gclog_or_tty->print(
448 " CMSCollector: collect because of anticipated promotion "
449 "before full %3.7f + %3.7f > %3.7f ", cms_duration(),
450 gc0_period(), time_until_cms_gen_full());
451 }
452 return 0.0;
453 }
454 return work - deadline;
455 }
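// Illustrative example (made-up numbers): with cms_duration() = 4.0s and
// gc0_period() = 1.0s, work = 5.0s; if time_until_cms_gen_full() is 3.0s,
// then work > deadline and this method returns 0.0, signalling that a
// collection should start immediately to avoid a concurrent mode failure.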
457 // Return a duty cycle based on old_duty_cycle and new_duty_cycle, limiting the
458 // amount of change to prevent wild oscillation.
459 unsigned int CMSStats::icms_damped_duty_cycle(unsigned int old_duty_cycle,
460 unsigned int new_duty_cycle) {
461 assert(old_duty_cycle <= 100, "bad input value");
462 assert(new_duty_cycle <= 100, "bad input value");
464 // Note: use subtraction with caution since it may underflow (values are
465 // unsigned). Addition is safe since we're in the range 0-100.
466 unsigned int damped_duty_cycle = new_duty_cycle;
467 if (new_duty_cycle < old_duty_cycle) {
468 const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 5U);
469 if (new_duty_cycle + largest_delta < old_duty_cycle) {
470 damped_duty_cycle = old_duty_cycle - largest_delta;
471 }
472 } else if (new_duty_cycle > old_duty_cycle) {
473 const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 15U);
474 if (new_duty_cycle > old_duty_cycle + largest_delta) {
475 damped_duty_cycle = MIN2(old_duty_cycle + largest_delta, 100U);
476 }
477 }
478 assert(damped_duty_cycle <= 100, "invalid duty cycle computed");
480 if (CMSTraceIncrementalPacing) {
481 gclog_or_tty->print(" [icms_damped_duty_cycle(%d,%d) = %d] ",
482 old_duty_cycle, new_duty_cycle, damped_duty_cycle);
483 }
484 return damped_duty_cycle;
485 }
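// Worked example (illustrative values): dropping from old_duty_cycle = 80 to
// new_duty_cycle = 10, largest_delta = MAX2(80/4, 5U) = 20 and, since
// 10 + 20 < 80, the result is damped to 80 - 20 = 60. Rising from 20 to 90,
// largest_delta = MAX2(20/4, 15U) = 15 and the result is clamped to
// MIN2(20 + 15, 100U) = 35.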
487 unsigned int CMSStats::icms_update_duty_cycle_impl() {
488 assert(CMSIncrementalPacing && valid(),
489 "should be handled in icms_update_duty_cycle()");
491 double cms_time_so_far = cms_timer().seconds();
492 double scaled_duration = cms_duration_per_mb() * _cms_used_at_gc0_end / M;
493 double scaled_duration_remaining = fabsd(scaled_duration - cms_time_so_far);
495 // Avoid division by 0.
496 double time_until_full = MAX2(time_until_cms_gen_full(), 0.01);
497 double duty_cycle_dbl = 100.0 * scaled_duration_remaining / time_until_full;
499 unsigned int new_duty_cycle = MIN2((unsigned int)duty_cycle_dbl, 100U);
500 if (new_duty_cycle > _icms_duty_cycle) {
501 // Avoid very small duty cycles (1 or 2); 0 is allowed.
502 if (new_duty_cycle > 2) {
503 _icms_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle,
504 new_duty_cycle);
505 }
506 } else if (_allow_duty_cycle_reduction) {
507 // The duty cycle is reduced only once per cms cycle (see record_cms_end()).
508 new_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle, new_duty_cycle);
509 // Respect the minimum duty cycle.
510 unsigned int min_duty_cycle = (unsigned int)CMSIncrementalDutyCycleMin;
511 _icms_duty_cycle = MAX2(new_duty_cycle, min_duty_cycle);
512 }
514 if (PrintGCDetails || CMSTraceIncrementalPacing) {
515 gclog_or_tty->print(" icms_dc=%d ", _icms_duty_cycle);
516 }
518 _allow_duty_cycle_reduction = false;
519 return _icms_duty_cycle;
520 }
522 #ifndef PRODUCT
523 void CMSStats::print_on(outputStream *st) const {
524 st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
525 st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
526 gc0_duration(), gc0_period(), gc0_promoted());
527 st->print(",cms_dur=%g,cms_dur_per_mb=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
528 cms_duration(), cms_duration_per_mb(),
529 cms_period(), cms_allocated());
530 st->print(",cms_since_beg=%g,cms_since_end=%g",
531 cms_time_since_begin(), cms_time_since_end());
532 st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
533 _cms_used_at_gc0_begin, _cms_used_at_gc0_end);
534 if (CMSIncrementalMode) {
535 st->print(",dc=%d", icms_duty_cycle());
536 }
538 if (valid()) {
539 st->print(",promo_rate=%g,cms_alloc_rate=%g",
540 promotion_rate(), cms_allocation_rate());
541 st->print(",cms_consumption_rate=%g,time_until_full=%g",
542 cms_consumption_rate(), time_until_cms_gen_full());
543 }
544 st->print(" ");
545 }
546 #endif // #ifndef PRODUCT
548 CMSCollector::CollectorState CMSCollector::_collectorState =
549 CMSCollector::Idling;
550 bool CMSCollector::_foregroundGCIsActive = false;
551 bool CMSCollector::_foregroundGCShouldWait = false;
553 CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
554 CardTableRS* ct,
555 ConcurrentMarkSweepPolicy* cp):
556 _cmsGen(cmsGen),
557 _ct(ct),
558 _ref_processor(NULL), // will be set later
559 _conc_workers(NULL), // may be set later
560 _abort_preclean(false),
561 _start_sampling(false),
562 _between_prologue_and_epilogue(false),
563 _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
564 _modUnionTable((CardTableModRefBS::card_shift - LogHeapWordSize),
565 -1 /* lock-free */, "No_lock" /* dummy */),
566 _modUnionClosure(&_modUnionTable),
567 _modUnionClosurePar(&_modUnionTable),
568 // Adjust my span to cover old (cms) gen
569 _span(cmsGen->reserved()),
570 // Construct the is_alive_closure with _span & markBitMap
571 _is_alive_closure(_span, &_markBitMap),
572 _restart_addr(NULL),
573 _overflow_list(NULL),
574 _stats(cmsGen),
575 _eden_chunk_lock(new Mutex(Mutex::leaf + 1, "CMS_eden_chunk_lock", true)),
576 _eden_chunk_array(NULL), // may be set in ctor body
577 _eden_chunk_capacity(0), // -- ditto --
578 _eden_chunk_index(0), // -- ditto --
579 _survivor_plab_array(NULL), // -- ditto --
580 _survivor_chunk_array(NULL), // -- ditto --
581 _survivor_chunk_capacity(0), // -- ditto --
582 _survivor_chunk_index(0), // -- ditto --
583 _ser_pmc_preclean_ovflw(0),
584 _ser_kac_preclean_ovflw(0),
585 _ser_pmc_remark_ovflw(0),
586 _par_pmc_remark_ovflw(0),
587 _ser_kac_ovflw(0),
588 _par_kac_ovflw(0),
589 #ifndef PRODUCT
590 _num_par_pushes(0),
591 #endif
592 _collection_count_start(0),
593 _verifying(false),
594 _icms_start_limit(NULL),
595 _icms_stop_limit(NULL),
596 _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
597 _completed_initialization(false),
598 _collector_policy(cp),
599 _should_unload_classes(CMSClassUnloadingEnabled),
600 _concurrent_cycles_since_last_unload(0),
601 _roots_scanning_options(SharedHeap::SO_None),
602 _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
603 _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
604 _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) CMSTracer()),
605 _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
606 _cms_start_registered(false)
607 {
608 if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
609 ExplicitGCInvokesConcurrent = true;
610 }
611 // Now expand the span and allocate the collection support structures
612 // (MUT, marking bit map etc.) to cover both generations subject to
613 // collection.
615 // For use by dirty card to oop closures.
616 _cmsGen->cmsSpace()->set_collector(this);
618 // Allocate MUT and marking bit map
619 {
620 MutexLockerEx x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag);
621 if (!_markBitMap.allocate(_span)) {
622 warning("Failed to allocate CMS Bit Map");
623 return;
624 }
625 assert(_markBitMap.covers(_span), "_markBitMap inconsistency?");
626 }
627 {
628 _modUnionTable.allocate(_span);
629 assert(_modUnionTable.covers(_span), "_modUnionTable inconsistency?");
630 }
632 if (!_markStack.allocate(MarkStackSize)) {
633 warning("Failed to allocate CMS Marking Stack");
634 return;
635 }
637 // Support for multi-threaded concurrent phases
638 if (CMSConcurrentMTEnabled) {
639 if (FLAG_IS_DEFAULT(ConcGCThreads)) {
640 // just for now
641 FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3)/4);
642 }
643 if (ConcGCThreads > 1) {
644 _conc_workers = new YieldingFlexibleWorkGang("Parallel CMS Threads",
645 ConcGCThreads, true);
646 if (_conc_workers == NULL) {
647 warning("GC/CMS: _conc_workers allocation failure: "
648 "forcing -CMSConcurrentMTEnabled");
649 CMSConcurrentMTEnabled = false;
650 } else {
651 _conc_workers->initialize_workers();
652 }
653 } else {
654 CMSConcurrentMTEnabled = false;
655 }
656 }
657 if (!CMSConcurrentMTEnabled) {
658 ConcGCThreads = 0;
659 } else {
660 // Turn off CMSCleanOnEnter optimization temporarily for
661 // the MT case where it's not fixed yet; see 6178663.
662 CMSCleanOnEnter = false;
663 }
664 assert((_conc_workers != NULL) == (ConcGCThreads > 1),
665 "Inconsistency");
667 // Parallel task queues; these are shared for the
668 // concurrent and stop-world phases of CMS, but
669 // are not shared with parallel scavenge (ParNew).
670 {
671 uint i;
672 uint num_queues = (uint) MAX2(ParallelGCThreads, ConcGCThreads);
674 if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled
675 || ParallelRefProcEnabled)
676 && num_queues > 0) {
677 _task_queues = new OopTaskQueueSet(num_queues);
678 if (_task_queues == NULL) {
679 warning("task_queues allocation failure.");
680 return;
681 }
682 _hash_seed = NEW_C_HEAP_ARRAY(int, num_queues, mtGC);
683 if (_hash_seed == NULL) {
684 warning("_hash_seed array allocation failure");
685 return;
686 }
688 typedef Padded<OopTaskQueue> PaddedOopTaskQueue;
689 for (i = 0; i < num_queues; i++) {
690 PaddedOopTaskQueue *q = new PaddedOopTaskQueue();
691 if (q == NULL) {
692 warning("work_queue allocation failure.");
693 return;
694 }
695 _task_queues->register_queue(i, q);
696 }
697 for (i = 0; i < num_queues; i++) {
698 _task_queues->queue(i)->initialize();
699 _hash_seed[i] = 17; // copied from ParNew
700 }
701 }
702 }
704 _cmsGen ->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);
706 // Clip CMSBootstrapOccupancy between 0 and 100.
707 _bootstrap_occupancy = ((double)CMSBootstrapOccupancy)/(double)100;
709 _full_gcs_since_conc_gc = 0;
711 // Now tell CMS generations the identity of their collector
712 ConcurrentMarkSweepGeneration::set_collector(this);
714 // Create & start a CMS thread for this CMS collector
715 _cmsThread = ConcurrentMarkSweepThread::start(this);
716 assert(cmsThread() != NULL, "CMS Thread should have been created");
717 assert(cmsThread()->collector() == this,
718 "CMS Thread should refer to this gen");
719 assert(CGC_lock != NULL, "Where's the CGC_lock?");
721 // Support for parallelizing young gen rescan
722 GenCollectedHeap* gch = GenCollectedHeap::heap();
723 _young_gen = gch->prev_gen(_cmsGen);
724 if (gch->supports_inline_contig_alloc()) {
725 _top_addr = gch->top_addr();
726 _end_addr = gch->end_addr();
727 assert(_young_gen != NULL, "no _young_gen");
728 _eden_chunk_index = 0;
729 _eden_chunk_capacity = (_young_gen->max_capacity()+CMSSamplingGrain)/CMSSamplingGrain;
730 _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity, mtGC);
731 if (_eden_chunk_array == NULL) {
732 _eden_chunk_capacity = 0;
733 warning("GC/CMS: _eden_chunk_array allocation failure");
734 }
735 }
736 assert(_eden_chunk_array != NULL || _eden_chunk_capacity == 0, "Error");
738 // Support for parallelizing survivor space rescan
739 if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) {
740 const size_t max_plab_samples =
741 ((DefNewGeneration*)_young_gen)->max_survivor_size()/MinTLABSize;
743 _survivor_plab_array = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);
744 _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, 2*max_plab_samples, mtGC);
745 _cursor = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads, mtGC);
746 if (_survivor_plab_array == NULL || _survivor_chunk_array == NULL
747 || _cursor == NULL) {
748 warning("Failed to allocate survivor plab/chunk array");
749 if (_survivor_plab_array != NULL) {
750 FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array, mtGC);
751 _survivor_plab_array = NULL;
752 }
753 if (_survivor_chunk_array != NULL) {
754 FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array, mtGC);
755 _survivor_chunk_array = NULL;
756 }
757 if (_cursor != NULL) {
758 FREE_C_HEAP_ARRAY(size_t, _cursor, mtGC);
759 _cursor = NULL;
760 }
761 } else {
762 _survivor_chunk_capacity = 2*max_plab_samples;
763 for (uint i = 0; i < ParallelGCThreads; i++) {
764 HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
765 if (vec == NULL) {
766 warning("Failed to allocate survivor plab array");
767 for (int j = i; j > 0; j--) {
768 FREE_C_HEAP_ARRAY(HeapWord*, _survivor_plab_array[j-1].array(), mtGC);
769 }
770 FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array, mtGC);
771 FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array, mtGC);
772 _survivor_plab_array = NULL;
773 _survivor_chunk_array = NULL;
774 _survivor_chunk_capacity = 0;
775 break;
776 } else {
777 ChunkArray* cur =
778 ::new (&_survivor_plab_array[i]) ChunkArray(vec,
779 max_plab_samples);
780 assert(cur->end() == 0, "Should be 0");
781 assert(cur->array() == vec, "Should be vec");
782 assert(cur->capacity() == max_plab_samples, "Error");
783 }
784 }
785 }
786 }
787 assert( ( _survivor_plab_array != NULL
788 && _survivor_chunk_array != NULL)
789 || ( _survivor_chunk_capacity == 0
790 && _survivor_chunk_index == 0),
791 "Error");
793 NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
794 _gc_counters = new CollectorCounters("CMS", 1);
795 _completed_initialization = true;
796 _inter_sweep_timer.start(); // start of time
797 }
799 const char* ConcurrentMarkSweepGeneration::name() const {
800 return "concurrent mark-sweep generation";
801 }
802 void ConcurrentMarkSweepGeneration::update_counters() {
803 if (UsePerfData) {
804 _space_counters->update_all();
805 _gen_counters->update_all();
806 }
807 }
809 // This is an optimized version of update_counters(). It takes the
810 // used value as a parameter rather than computing it.
811 //
812 void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
813 if (UsePerfData) {
814 _space_counters->update_used(used);
815 _space_counters->update_capacity();
816 _gen_counters->update_all();
817 }
818 }
820 void ConcurrentMarkSweepGeneration::print() const {
821 Generation::print();
822 cmsSpace()->print();
823 }
825 #ifndef PRODUCT
826 void ConcurrentMarkSweepGeneration::print_statistics() {
827 cmsSpace()->printFLCensus(0);
828 }
829 #endif
831 void ConcurrentMarkSweepGeneration::printOccupancy(const char *s) {
832 GenCollectedHeap* gch = GenCollectedHeap::heap();
833 if (PrintGCDetails) {
834 if (Verbose) {
835 gclog_or_tty->print("[%d %s-%s: "SIZE_FORMAT"("SIZE_FORMAT")]",
836 level(), short_name(), s, used(), capacity());
837 } else {
838 gclog_or_tty->print("[%d %s-%s: "SIZE_FORMAT"K("SIZE_FORMAT"K)]",
839 level(), short_name(), s, used() / K, capacity() / K);
840 }
841 }
842 if (Verbose) {
843 gclog_or_tty->print(" "SIZE_FORMAT"("SIZE_FORMAT")",
844 gch->used(), gch->capacity());
845 } else {
846 gclog_or_tty->print(" "SIZE_FORMAT"K("SIZE_FORMAT"K)",
847 gch->used() / K, gch->capacity() / K);
848 }
849 }
851 size_t
852 ConcurrentMarkSweepGeneration::contiguous_available() const {
853 // dld proposes an improvement in precision here. If the committed
854 // part of the space ends in a free block we should add that to
855 // uncommitted size in the calculation below. Will make this
856 // change later, staying with the approximation below for the
857 // time being. -- ysr.
858 return MAX2(_virtual_space.uncommitted_size(), unsafe_max_alloc_nogc());
859 }
861 size_t
862 ConcurrentMarkSweepGeneration::unsafe_max_alloc_nogc() const {
863 return _cmsSpace->max_alloc_in_words() * HeapWordSize;
864 }
866 size_t ConcurrentMarkSweepGeneration::max_available() const {
867 return free() + _virtual_space.uncommitted_size();
868 }
870 bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
871 size_t available = max_available();
872 size_t av_promo = (size_t)gc_stats()->avg_promoted()->padded_average();
873 bool res = (available >= av_promo) || (available >= max_promotion_in_bytes);
874 if (Verbose && PrintGCDetails) {
875 gclog_or_tty->print_cr(
876 "CMS: promo attempt is%s safe: available("SIZE_FORMAT") %s av_promo("SIZE_FORMAT"),"
877 "max_promo("SIZE_FORMAT")",
878 res? "":" not", available, res? ">=":"<",
879 av_promo, max_promotion_in_bytes);
880 }
881 return res;
882 }
884 // At a promotion failure, dump information on the block layout in the heap
885 // (CMS old generation).
886 void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
887 if (CMSDumpAtPromotionFailure) {
888 cmsSpace()->dump_at_safepoint_with_locks(collector(), gclog_or_tty);
889 }
890 }
892 CompactibleSpace*
893 ConcurrentMarkSweepGeneration::first_compaction_space() const {
894 return _cmsSpace;
895 }
897 void ConcurrentMarkSweepGeneration::reset_after_compaction() {
898 // Clear the promotion information. These pointers can be adjusted
899 // along with all the other pointers into the heap but
900 // compaction is expected to be a rare event with
901 // a heap using cms so don't do it without seeing the need.
902 if (CollectedHeap::use_parallel_gc_threads()) {
903 for (uint i = 0; i < ParallelGCThreads; i++) {
904 _par_gc_thread_states[i]->promo.reset();
905 }
906 }
907 }
909 void ConcurrentMarkSweepGeneration::space_iterate(SpaceClosure* blk, bool usedOnly) {
910 blk->do_space(_cmsSpace);
911 }
913 void ConcurrentMarkSweepGeneration::compute_new_size() {
914 assert_locked_or_safepoint(Heap_lock);
916 // If incremental collection failed, we just want to expand
917 // to the limit.
918 if (incremental_collection_failed()) {
919 clear_incremental_collection_failed();
920 grow_to_reserved();
921 return;
922 }
924 // The heap has been compacted but not reset yet.
925 // Any metric such as free() or used() will be incorrect.
927 CardGeneration::compute_new_size();
929 // Reset again after a possible resizing
930 if (did_compact()) {
931 cmsSpace()->reset_after_compaction();
932 }
933 }
935 void ConcurrentMarkSweepGeneration::compute_new_size_free_list() {
936 assert_locked_or_safepoint(Heap_lock);
938 // If incremental collection failed, we just want to expand
939 // to the limit.
940 if (incremental_collection_failed()) {
941 clear_incremental_collection_failed();
942 grow_to_reserved();
943 return;
944 }
946 double free_percentage = ((double) free()) / capacity();
947 double desired_free_percentage = (double) MinHeapFreeRatio / 100;
948 double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;
950 // compute expansion delta needed for reaching desired free percentage
951 if (free_percentage < desired_free_percentage) {
952 size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
953 assert(desired_capacity >= capacity(), "invalid expansion size");
954 size_t expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
955 if (PrintGCDetails && Verbose) {
956 size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
957 gclog_or_tty->print_cr("\nFrom compute_new_size: ");
958 gclog_or_tty->print_cr(" Free fraction %f", free_percentage);
959 gclog_or_tty->print_cr(" Desired free fraction %f",
960 desired_free_percentage);
961 gclog_or_tty->print_cr(" Maximum free fraction %f",
962 maximum_free_percentage);
963 gclog_or_tty->print_cr("  Capacity "SIZE_FORMAT, capacity()/1000);
964 gclog_or_tty->print_cr(" Desired capacity "SIZE_FORMAT,
965 desired_capacity/1000);
966 int prev_level = level() - 1;
967 if (prev_level >= 0) {
968 size_t prev_size = 0;
969 GenCollectedHeap* gch = GenCollectedHeap::heap();
970 Generation* prev_gen = gch->_gens[prev_level];
971 prev_size = prev_gen->capacity();
972 gclog_or_tty->print_cr(" Younger gen size "SIZE_FORMAT,
973 prev_size/1000);
974 }
975 gclog_or_tty->print_cr(" unsafe_max_alloc_nogc "SIZE_FORMAT,
976 unsafe_max_alloc_nogc()/1000);
977 gclog_or_tty->print_cr(" contiguous available "SIZE_FORMAT,
978 contiguous_available()/1000);
979 gclog_or_tty->print_cr(" Expand by "SIZE_FORMAT" (bytes)",
980 expand_bytes);
981 }
982 // safe if expansion fails
983 expand(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
984 if (PrintGCDetails && Verbose) {
985 gclog_or_tty->print_cr(" Expanded free fraction %f",
986 ((double) free()) / capacity());
987 }
988 } else {
989 size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
990 assert(desired_capacity <= capacity(), "invalid expansion size");
991 size_t shrink_bytes = capacity() - desired_capacity;
992 // Don't shrink unless the delta is greater than the minimum shrink we want
993 if (shrink_bytes >= MinHeapDeltaBytes) {
994 shrink_free_list_by(shrink_bytes);
995 }
996 }
997 }
999 Mutex* ConcurrentMarkSweepGeneration::freelistLock() const {
1000 return cmsSpace()->freelistLock();
1001 }
1003 HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size,
1004 bool tlab) {
1005 CMSSynchronousYieldRequest yr;
1006 MutexLockerEx x(freelistLock(),
1007 Mutex::_no_safepoint_check_flag);
1008 return have_lock_and_allocate(size, tlab);
1009 }
1011 HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
1012 bool tlab /* ignored */) {
1013 assert_lock_strong(freelistLock());
1014 size_t adjustedSize = CompactibleFreeListSpace::adjustObjectSize(size);
1015 HeapWord* res = cmsSpace()->allocate(adjustedSize);
1016 // Allocate the object live (grey) if the background collector has
1017 // started marking. This is necessary because the marker may
1018 // have passed this address and consequently this object will
1019 // not otherwise be greyed and would be incorrectly swept up.
1020 // Note that if this object contains references, the writing
1021 // of those references will dirty the card containing this object
1022 // allowing the object to be blackened (and its references scanned)
1023 // either during a preclean phase or at the final checkpoint.
1024 if (res != NULL) {
1025 // We may block here with an uninitialized object with
1026 // its mark-bit or P-bits not yet set. Such objects need
1027 // to be safely navigable by block_start().
1028 assert(oop(res)->klass_or_null() == NULL, "Object should be uninitialized here.");
1029 assert(!((FreeChunk*)res)->is_free(), "Error, block will look free but show wrong size");
1030 collector()->direct_allocated(res, adjustedSize);
1031 _direct_allocated_words += adjustedSize;
1032 // allocation counters
1033 NOT_PRODUCT(
1034 _numObjectsAllocated++;
1035 _numWordsAllocated += (int)adjustedSize;
1036 )
1037 }
1038 return res;
1039 }
1041 // In the case of direct allocation by mutators in a generation that
1042 // is being concurrently collected, the object must be allocated
1043 // live (grey) if the background collector has started marking.
1044 // This is necessary because the marker may
1045 // have passed this address and consequently this object will
1046 // not otherwise be greyed and would be incorrectly swept up.
1047 // Note that if this object contains references, the writing
1048 // of those references will dirty the card containing this object
1049 // allowing the object to be blackened (and its references scanned)
1050 // either during a preclean phase or at the final checkpoint.
1051 void CMSCollector::direct_allocated(HeapWord* start, size_t size) {
1052 assert(_markBitMap.covers(start, size), "Out of bounds");
1053 if (_collectorState >= Marking) {
1054 MutexLockerEx y(_markBitMap.lock(),
1055 Mutex::_no_safepoint_check_flag);
1056 // [see comments preceding SweepClosure::do_blk() below for details]
1057 //
1058 // Can the P-bits be deleted now? JJJ
1059 //
1060 // 1. need to mark the object as live so it isn't collected
1061 // 2. need to mark the 2nd bit to indicate the object may be uninitialized
1062 // 3. need to mark the end of the object so marking, precleaning or sweeping
1063 // can skip over uninitialized or unparsable objects. An allocated
1064 // object is considered uninitialized for our purposes as long as
1065 // its klass word is NULL. All old gen objects are parsable
1066 //    as soon as they are initialized.
1067 _markBitMap.mark(start); // object is live
1068 _markBitMap.mark(start + 1); // object is potentially uninitialized?
1069 _markBitMap.mark(start + size - 1);
1070 // mark end of object
1071 }
1072 // check that oop looks uninitialized
1073 assert(oop(start)->klass_or_null() == NULL, "_klass should be NULL");
1074 }
1076 void CMSCollector::promoted(bool par, HeapWord* start,
1077 bool is_obj_array, size_t obj_size) {
1078 assert(_markBitMap.covers(start), "Out of bounds");
1079 // See comment in direct_allocated() about when objects should
1080 // be allocated live.
1081 if (_collectorState >= Marking) {
1082 // we already hold the marking bit map lock, taken in
1083 // the prologue
1084 if (par) {
1085 _markBitMap.par_mark(start);
1086 } else {
1087 _markBitMap.mark(start);
1088 }
1089 // We don't need to mark the object as uninitialized (as
1090 // in direct_allocated above) because this is being done with the
1091 // world stopped and the object will be initialized by the
1092 // time the marking, precleaning or sweeping get to look at it.
1093 // But see the code for copying objects into the CMS generation,
1094 // where we need to ensure that concurrent readers of the
1095 // block offset table are able to safely navigate a block that
1096 // is in flux from being free to being allocated (and in
1097 // transition while being copied into) and subsequently
1098 // becoming a bona-fide object when the copy/promotion is complete.
1099 assert(SafepointSynchronize::is_at_safepoint(),
1100 "expect promotion only at safepoints");
1102 if (_collectorState < Sweeping) {
1103 // Mark the appropriate cards in the modUnionTable, so that
1104 // this object gets scanned before the sweep. If this is
1105 // not done, CMS generation references in the object might
1106 // not get marked.
1107 // For the case of arrays, which are otherwise precisely
1108 // marked, we need to dirty the entire array, not just its head.
1109 if (is_obj_array) {
1110 // The [par_]mark_range() method expects mr.end() below to
1111 // be aligned to the granularity of a bit's representation
1112 // in the heap. In the case of the MUT below, that's a
1113 // card size.
1114 MemRegion mr(start,
1115 (HeapWord*)round_to((intptr_t)(start + obj_size),
1116 CardTableModRefBS::card_size /* bytes */));
1117 if (par) {
1118 _modUnionTable.par_mark_range(mr);
1119 } else {
1120 _modUnionTable.mark_range(mr);
1121 }
1122 } else { // not an obj array; we can just mark the head
1123 if (par) {
1124 _modUnionTable.par_mark(start);
1125 } else {
1126 _modUnionTable.mark(start);
1127 }
1128 }
1129 }
1130 }
1131 }
1133 static inline size_t percent_of_space(Space* space, HeapWord* addr)
1134 {
1135 size_t delta = pointer_delta(addr, space->bottom());
1136 return (size_t)(delta * 100.0 / (space->capacity() / HeapWordSize));
1137 }
1139 void CMSCollector::icms_update_allocation_limits()
1140 {
1141 Generation* gen0 = GenCollectedHeap::heap()->get_gen(0);
1142 EdenSpace* eden = gen0->as_DefNewGeneration()->eden();
1144 const unsigned int duty_cycle = stats().icms_update_duty_cycle();
1145 if (CMSTraceIncrementalPacing) {
1146 stats().print();
1147 }
1149 assert(duty_cycle <= 100, "invalid duty cycle");
1150 if (duty_cycle != 0) {
1151 // The duty_cycle is a percentage between 0 and 100; convert to words and
1152 // then compute the offset from the endpoints of the space.
1153 size_t free_words = eden->free() / HeapWordSize;
1154 double free_words_dbl = (double)free_words;
1155 size_t duty_cycle_words = (size_t)(free_words_dbl * duty_cycle / 100.0);
1156 size_t offset_words = (free_words - duty_cycle_words) / 2;
1158 _icms_start_limit = eden->top() + offset_words;
1159 _icms_stop_limit = eden->end() - offset_words;
1161 // The limits may be adjusted (shifted to the right) by
1162 // CMSIncrementalOffset, to allow the application more mutator time after a
1163 // young gen gc (when all mutators were stopped) and before CMS starts and
1164 // takes away one or more cpus.
1165 if (CMSIncrementalOffset != 0) {
1166 double adjustment_dbl = free_words_dbl * CMSIncrementalOffset / 100.0;
1167 size_t adjustment = (size_t)adjustment_dbl;
1168 HeapWord* tmp_stop = _icms_stop_limit + adjustment;
1169 if (tmp_stop > _icms_stop_limit && tmp_stop < eden->end()) {
1170 _icms_start_limit += adjustment;
1171 _icms_stop_limit = tmp_stop;
1172 }
1173 }
1174 }
1175 if (duty_cycle == 0 || (_icms_start_limit == _icms_stop_limit)) {
1176 _icms_start_limit = _icms_stop_limit = eden->end();
1177 }
1179 // Install the new start limit.
1180 eden->set_soft_end(_icms_start_limit);
1182 if (CMSTraceIncrementalMode) {
1183 gclog_or_tty->print(" icms alloc limits: "
1184 PTR_FORMAT "," PTR_FORMAT
1185 " (" SIZE_FORMAT "%%," SIZE_FORMAT "%%) ",
1186 p2i(_icms_start_limit), p2i(_icms_stop_limit),
1187 percent_of_space(eden, _icms_start_limit),
1188 percent_of_space(eden, _icms_stop_limit));
1189 if (Verbose) {
1190 gclog_or_tty->print("eden: ");
1191 eden->print_on(gclog_or_tty);
1192 }
1193 }
1194 }
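// Illustrative arithmetic (made-up numbers): if eden has 1000 free words and
// the duty cycle is 20%, then duty_cycle_words = 200 and
// offset_words = (1000 - 200) / 2 = 400, so the incremental-mode window is
// [top() + 400, end() - 400), i.e. the middle 20% of the remaining free
// space (before any CMSIncrementalOffset adjustment).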
1196 // Any changes here should try to maintain the invariant
1197 // that if this method is called with _icms_start_limit
1198 // and _icms_stop_limit both NULL, then it should return NULL
1199 // and not notify the icms thread.
1200 HeapWord*
1201 CMSCollector::allocation_limit_reached(Space* space, HeapWord* top,
1202 size_t word_size)
1203 {
1204 // A start_limit equal to end() means the duty cycle is 0, so treat that as a
1205 // nop.
1206 if (CMSIncrementalMode && _icms_start_limit != space->end()) {
1207 if (top <= _icms_start_limit) {
1208 if (CMSTraceIncrementalMode) {
1209 space->print_on(gclog_or_tty);
1210 gclog_or_tty->stamp();
1211 gclog_or_tty->print_cr(" start limit top=" PTR_FORMAT
1212 ", new limit=" PTR_FORMAT
1213 " (" SIZE_FORMAT "%%)",
1214 p2i(top), p2i(_icms_stop_limit),
1215 percent_of_space(space, _icms_stop_limit));
1216 }
1217 ConcurrentMarkSweepThread::start_icms();
1218 assert(top < _icms_stop_limit, "Tautology");
1219 if (word_size < pointer_delta(_icms_stop_limit, top)) {
1220 return _icms_stop_limit;
1221 }
1223 // The allocation will cross both the _start and _stop limits, so do the
1224 // stop notification also and return end().
1225 if (CMSTraceIncrementalMode) {
1226 space->print_on(gclog_or_tty);
1227 gclog_or_tty->stamp();
1228 gclog_or_tty->print_cr(" +stop limit top=" PTR_FORMAT
1229 ", new limit=" PTR_FORMAT
1230 " (" SIZE_FORMAT "%%)",
1231 p2i(top), p2i(space->end()),
1232 percent_of_space(space, space->end()));
1233 }
1234 ConcurrentMarkSweepThread::stop_icms();
1235 return space->end();
1236 }
1238 if (top <= _icms_stop_limit) {
1239 if (CMSTraceIncrementalMode) {
1240 space->print_on(gclog_or_tty);
1241 gclog_or_tty->stamp();
1242 gclog_or_tty->print_cr(" stop limit top=" PTR_FORMAT
1243 ", new limit=" PTR_FORMAT
1244 " (" SIZE_FORMAT "%%)",
1245 top, space->end(),
1246 percent_of_space(space, space->end()));
1247 }
1248 ConcurrentMarkSweepThread::stop_icms();
1249 return space->end();
1250 }
1252 if (CMSTraceIncrementalMode) {
1253 space->print_on(gclog_or_tty);
1254 gclog_or_tty->stamp();
1255 gclog_or_tty->print_cr(" end limit top=" PTR_FORMAT
1256 ", new limit=" PTR_FORMAT,
1257 top, NULL);
1258 }
1259 }
1261 return NULL;
1262 }
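// Illustrative walk-through (made-up positions): with _icms_start_limit at
// 40% of eden and _icms_stop_limit at 60%, an allocation whose top is still
// below the 40% mark starts icms and, if the request fits before the stop
// limit, returns the stop limit as the new soft end; an allocation with top
// between the two limits stops icms and returns end(); once top is past the
// stop limit the method returns NULL and imposes no further limit.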
1264 oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
1265 assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1266 // allocate, copy and if necessary update promoinfo --
1267 // delegate to underlying space.
1268 assert_lock_strong(freelistLock());
1270 #ifndef PRODUCT
1271 if (Universe::heap()->promotion_should_fail()) {
1272 return NULL;
1273 }
1274 #endif // #ifndef PRODUCT
1276 oop res = _cmsSpace->promote(obj, obj_size);
1277 if (res == NULL) {
1278 // expand and retry
1279 size_t s = _cmsSpace->expansionSpaceRequired(obj_size); // HeapWords
1280 expand(s*HeapWordSize, MinHeapDeltaBytes,
1281 CMSExpansionCause::_satisfy_promotion);
1282 // Since there's currently no next generation, we don't try to promote
1283 // into a more senior generation.
1284 assert(next_gen() == NULL, "assumption, based upon which no attempt "
1285 "is made to pass on a possibly failing "
1286 "promotion to next generation");
1287 res = _cmsSpace->promote(obj, obj_size);
1288 }
1289 if (res != NULL) {
1290 // See comment in allocate() about when objects should
1291 // be allocated live.
1292 assert(obj->is_oop(), "Will dereference klass pointer below");
1293 collector()->promoted(false, // Not parallel
1294 (HeapWord*)res, obj->is_objArray(), obj_size);
1295 // promotion counters
1296 NOT_PRODUCT(
1297 _numObjectsPromoted++;
1298 _numWordsPromoted +=
1299 (int)(CompactibleFreeListSpace::adjustObjectSize(obj->size()));
1300 )
1301 }
1302 return res;
1303 }
1306 HeapWord*
1307 ConcurrentMarkSweepGeneration::allocation_limit_reached(Space* space,
1308 HeapWord* top,
1309 size_t word_sz)
1310 {
1311 return collector()->allocation_limit_reached(space, top, word_sz);
1312 }
1314 // IMPORTANT: Notes on object size recognition in CMS.
1315 // ---------------------------------------------------
1316 // A block of storage in the CMS generation is always in
1317 // one of three states. A free block (FREE), an allocated
1318 // object (OBJECT) whose size() method reports the correct size,
1319 // and an intermediate state (TRANSIENT) in which its size cannot
1320 // be accurately determined.
1321 // STATE IDENTIFICATION: (32 bit and 64 bit w/o COOPS)
1322 // -----------------------------------------------------
1323 // FREE: klass_word & 1 == 1; mark_word holds block size
1324 //
1325 // OBJECT: klass_word installed; klass_word != 0 && klass_word & 1 == 0;
1326 // obj->size() computes correct size
1327 //
1328 // TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
1329 //
1330 // STATE IDENTIFICATION: (64 bit+COOPS)
1331 // ------------------------------------
1332 // FREE: mark_word & CMS_FREE_BIT == 1; mark_word & ~CMS_FREE_BIT gives block_size
1333 //
1334 // OBJECT: klass_word installed; klass_word != 0;
1335 // obj->size() computes correct size
1336 //
1337 // TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
1338 //
1339 //
1340 // STATE TRANSITION DIAGRAM
1341 //
1342 // mut / parnew mut / parnew
1343 // FREE --------------------> TRANSIENT ---------------------> OBJECT --|
1344 // ^ |
1345 // |------------------------ DEAD <------------------------------------|
1346 // sweep mut
1347 //
1348 // While a block is in TRANSIENT state its size cannot be determined
1349 // so readers will either need to come back later or stall until
1350 // the size can be determined. Note that for the case of direct
1351 // allocation, P-bits, when available, may be used to determine the
1352 // size of an object that may not yet have been initialized.
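// A minimal sketch (illustration only; the helper below is hypothetical and
// not used anywhere in CMS) of how a reader would apply the 32-bit /
// non-COOPS rules above to classify a block:
//
//   // enum BlockState { FREE, OBJECT, TRANSIENT };
//   // BlockState classify(HeapWord* addr) {
//   //   intptr_t k = klass_word_of(addr);    // hypothetical raw accessor
//   //   if (k & 1)  return FREE;       // free chunk; mark_word holds size
//   //   if (k == 0) return TRANSIENT;  // klass not yet installed; size unknown
//   //   return OBJECT;                 // obj->size() computes the correct size
//   // }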
1354 // Things to support parallel young-gen collection.
1355 oop
1356 ConcurrentMarkSweepGeneration::par_promote(int thread_num,
1357 oop old, markOop m,
1358 size_t word_sz) {
1359 #ifndef PRODUCT
1360 if (Universe::heap()->promotion_should_fail()) {
1361 return NULL;
1362 }
1363 #endif // #ifndef PRODUCT
1365 CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1366 PromotionInfo* promoInfo = &ps->promo;
1367 // if we are tracking promotions, then first ensure space for
1368 // promotion (including spooling space for saving header if necessary).
1369 // then allocate and copy, then track promoted info if needed.
1370 // When tracking (see PromotionInfo::track()), the mark word may
1371 // be displaced and in this case restoration of the mark word
1372 // occurs in the (oop_since_save_marks_)iterate phase.
1373 if (promoInfo->tracking() && !promoInfo->ensure_spooling_space()) {
1374 // Out of space for allocating spooling buffers;
1375 // try expanding and allocating spooling buffers.
1376 if (!expand_and_ensure_spooling_space(promoInfo)) {
1377 return NULL;
1378 }
1379 }
1380 assert(promoInfo->has_spooling_space(), "Control point invariant");
1381 const size_t alloc_sz = CompactibleFreeListSpace::adjustObjectSize(word_sz);
1382 HeapWord* obj_ptr = ps->lab.alloc(alloc_sz);
1383 if (obj_ptr == NULL) {
1384 obj_ptr = expand_and_par_lab_allocate(ps, alloc_sz);
1385 if (obj_ptr == NULL) {
1386 return NULL;
1387 }
1388 }
1389 oop obj = oop(obj_ptr);
1390 OrderAccess::storestore();
1391 assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1392 assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1393 // IMPORTANT: See note on object initialization for CMS above.
1394 // Otherwise, copy the object. Here we must be careful to insert the
1395 // klass pointer last, since this marks the block as an allocated object.
1396 // Except with compressed oops it's the mark word.
1397 HeapWord* old_ptr = (HeapWord*)old;
1398 // Restore the mark word copied above.
1399 obj->set_mark(m);
1400 assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1401 assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1402 OrderAccess::storestore();
1404 if (UseCompressedClassPointers) {
1405 // Copy gap missed by (aligned) header size calculation below
1406 obj->set_klass_gap(old->klass_gap());
1407 }
1408 if (word_sz > (size_t)oopDesc::header_size()) {
1409 Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(),
1410 obj_ptr + oopDesc::header_size(),
1411 word_sz - oopDesc::header_size());
1412 }
1414 // Now we can track the promoted object, if necessary. We take care
1415 // to delay the transition from uninitialized to full object
1416 // (i.e., insertion of klass pointer) until after, so that it
1417 // atomically becomes a promoted object.
1418 if (promoInfo->tracking()) {
1419 promoInfo->track((PromotedObject*)obj, old->klass());
1420 }
1421 assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1422 assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1423 assert(old->is_oop(), "Will use and dereference old klass ptr below");
1425 // Finally, install the klass pointer (this should be volatile).
1426 OrderAccess::storestore();
1427 obj->set_klass(old->klass());
1428 // We should now be able to calculate the right size for this object
1429 assert(obj->is_oop() && obj->size() == (int)word_sz, "Error, incorrect size computed for promoted object");
1431 collector()->promoted(true, // parallel
1432 obj_ptr, old->is_objArray(), word_sz);
1434 NOT_PRODUCT(
1435 Atomic::inc_ptr(&_numObjectsPromoted);
1436 Atomic::add_ptr(alloc_sz, &_numWordsPromoted);
1437 )
1439 return obj;
1440 }
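// In outline, the tail of par_promote() above follows the usual
// publish-after-initialize pattern (this is a summary of the code above,
// not additional logic):
//
//   // copy mark word, klass gap and object body into obj_ptr ...
//   OrderAccess::storestore();      // body must be visible before the klass
//   obj->set_klass(old->klass());   // installing the klass publishes the object
//
// A concurrent reader that observes a non-NULL klass word therefore also
// sees a fully copied object body.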
1442 void
1443 ConcurrentMarkSweepGeneration::
1444 par_promote_alloc_undo(int thread_num,
1445 HeapWord* obj, size_t word_sz) {
1446 // CMS does not support promotion undo.
1447 ShouldNotReachHere();
1448 }
1450 void
1451 ConcurrentMarkSweepGeneration::
1452 par_promote_alloc_done(int thread_num) {
1453 CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1454 ps->lab.retire(thread_num);
1455 }
1457 void
1458 ConcurrentMarkSweepGeneration::
1459 par_oop_since_save_marks_iterate_done(int thread_num) {
1460 CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1461 ParScanWithoutBarrierClosure* dummy_cl = NULL;
1462 ps->promo.promoted_oops_iterate_nv(dummy_cl);
1463 }
1465 bool ConcurrentMarkSweepGeneration::should_collect(bool full,
1466 size_t size,
1467 bool tlab)
1468 {
1469 // We allow a STW collection only if a full
1470 // collection was requested.
1471 return full || should_allocate(size, tlab); // FIX ME !!!
1472 // This and promotion failure handling are connected at the
1473 // hip and should be fixed by untying them.
1474 }
1476 bool CMSCollector::shouldConcurrentCollect() {
1477 if (_full_gc_requested) {
1478 if (Verbose && PrintGCDetails) {
1479 gclog_or_tty->print_cr("CMSCollector: collect because of explicit "
1480 " gc request (or gc_locker)");
1481 }
1482 return true;
1483 }
1485 // For debugging purposes, change the type of collection.
1486 // If the rotation is not on the concurrent collection
1487 // type, don't start a concurrent collection.
1488 NOT_PRODUCT(
1489 if (RotateCMSCollectionTypes &&
1490 (_cmsGen->debug_collection_type() !=
1491 ConcurrentMarkSweepGeneration::Concurrent_collection_type)) {
1492 assert(_cmsGen->debug_collection_type() !=
1493 ConcurrentMarkSweepGeneration::Unknown_collection_type,
1494 "Bad cms collection type");
1495 return false;
1496 }
1497 )
1499 FreelistLocker x(this);
1500 // ------------------------------------------------------------------
1501 // Print out lots of information which affects the initiation of
1502 // a collection.
1503 if (PrintCMSInitiationStatistics && stats().valid()) {
1504 gclog_or_tty->print("CMSCollector shouldConcurrentCollect: ");
1505 gclog_or_tty->stamp();
1506 gclog_or_tty->cr();
1507 stats().print_on(gclog_or_tty);
1508 gclog_or_tty->print_cr("time_until_cms_gen_full %3.7f",
1509 stats().time_until_cms_gen_full());
1510 gclog_or_tty->print_cr("free="SIZE_FORMAT, _cmsGen->free());
1511 gclog_or_tty->print_cr("contiguous_available="SIZE_FORMAT,
1512 _cmsGen->contiguous_available());
1513 gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate());
1514 gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate());
1515 gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy());
1516 gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
1517 gclog_or_tty->print_cr("metadata initialized %d",
1518 MetaspaceGC::should_concurrent_collect());
1519 }
1520 // ------------------------------------------------------------------
1522 // If the estimated time to complete a cms collection (cms_duration())
1523 // is less than the estimated time remaining until the cms generation
1524 // is full, start a collection.
1525 if (!UseCMSInitiatingOccupancyOnly) {
1526 if (stats().valid()) {
1527 if (stats().time_until_cms_start() == 0.0) {
1528 return true;
1529 }
1530 } else {
1531 // We want to conservatively collect somewhat early in order
1532 // to try and "bootstrap" our CMS/promotion statistics;
1533 // this branch will not fire after the first successful CMS
1534 // collection because the stats should then be valid.
1535 if (_cmsGen->occupancy() >= _bootstrap_occupancy) {
1536 if (Verbose && PrintGCDetails) {
1537 gclog_or_tty->print_cr(
1538 " CMSCollector: collect for bootstrapping statistics:"
1539 " occupancy = %f, boot occupancy = %f", _cmsGen->occupancy(),
1540 _bootstrap_occupancy);
1541 }
1542 return true;
1543 }
1544 }
1545 }
1547 // Otherwise, we start a collection cycle if
1548 // the old gen wants a collection cycle started. Each may use
1549 // an appropriate criterion for making this decision.
1550 // XXX We need to make sure that the gen expansion
1551 // criterion dovetails well with this. XXX NEED TO FIX THIS
1552 if (_cmsGen->should_concurrent_collect()) {
1553 if (Verbose && PrintGCDetails) {
1554 gclog_or_tty->print_cr("CMS old gen initiated");
1555 }
1556 return true;
1557 }
1559 // We start a collection if we believe an incremental collection may fail;
1560 // this is not likely to be productive in practice because it's probably too
1561 // late anyway.
1562 GenCollectedHeap* gch = GenCollectedHeap::heap();
1563 assert(gch->collector_policy()->is_two_generation_policy(),
1564 "You may want to check the correctness of the following");
1565 if (gch->incremental_collection_will_fail(true /* consult_young */)) {
1566 if (Verbose && PrintGCDetails) {
1567 gclog_or_tty->print("CMSCollector: collect because incremental collection will fail ");
1568 }
1569 return true;
1570 }
1572 if (MetaspaceGC::should_concurrent_collect()) {
1573 if (Verbose && PrintGCDetails) {
1574 gclog_or_tty->print("CMSCollector: collect for metadata allocation ");
1575 }
1576 return true;
1577 }
1579 return false;
1580 }
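// In rough pseudo-code, the decision made by shouldConcurrentCollect() above
// boils down to the following (a paraphrase of the code, not an extra rule):
//
//   start_cycle =
//        _full_gc_requested
//     || (!UseCMSInitiatingOccupancyOnly &&
//          (stats().valid() ? stats().time_until_cms_start() == 0.0
//                           : _cmsGen->occupancy() >= _bootstrap_occupancy))
//     || _cmsGen->should_concurrent_collect()
//     || gch->incremental_collection_will_fail(true /* consult_young */)
//     || MetaspaceGC::should_concurrent_collect();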
1582 void CMSCollector::set_did_compact(bool v) { _cmsGen->set_did_compact(v); }
1584 // Clear _expansion_cause fields of constituent generations
1585 void CMSCollector::clear_expansion_cause() {
1586 _cmsGen->clear_expansion_cause();
1587 }
1589 // We should be conservative in starting a collection cycle. To
1590 // start too eagerly runs the risk of collecting too often in the
1591 // extreme. To collect too rarely falls back on full collections,
1592 // which works, even if not optimum in terms of concurrent work.
1593 // As a workaround for collecting too eagerly, use the flag
1594 // UseCMSInitiatingOccupancyOnly. This also has the advantage of
1595 // giving the user an easily understandable way of controlling the
1596 // collections.
1597 // We want to start a new collection cycle if any of the following
1598 // conditions hold:
1599 // . our current occupancy exceeds the configured initiating occupancy
1600 // for this generation, or
1601 // . we recently needed to expand this space and have not, since that
1602 // expansion, done a collection of this generation, or
1603 // . the underlying space believes that it may be a good idea to initiate
1604 // a concurrent collection (this may be based on criteria such as the
1605 // following: the space uses linear allocation and linear allocation is
1606 // going to fail, or there is believed to be excessive fragmentation in
1607 // the generation, etc... or ...
1608 // [.(currently done by CMSCollector::shouldConcurrentCollect() only for
1609 // the case of the old generation; see CR 6543076):
1610 // we may be approaching a point at which allocation requests may fail because
1611 // we will be out of sufficient free space given allocation rate estimates.]
1612 bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const {
1614 assert_lock_strong(freelistLock());
1615 if (occupancy() > initiating_occupancy()) {
1616 if (PrintGCDetails && Verbose) {
1617 gclog_or_tty->print(" %s: collect because of occupancy %f / %f ",
1618 short_name(), occupancy(), initiating_occupancy());
1619 }
1620 return true;
1621 }
1622 if (UseCMSInitiatingOccupancyOnly) {
1623 return false;
1624 }
1625 if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
1626 if (PrintGCDetails && Verbose) {
1627 gclog_or_tty->print(" %s: collect because expanded for allocation ",
1628 short_name());
1629 }
1630 return true;
1631 }
1632 if (_cmsSpace->should_concurrent_collect()) {
1633 if (PrintGCDetails && Verbose) {
1634 gclog_or_tty->print(" %s: collect because cmsSpace says so ",
1635 short_name());
1636 }
1637 return true;
1638 }
1639 return false;
1640 }
1642 void ConcurrentMarkSweepGeneration::collect(bool full,
1643 bool clear_all_soft_refs,
1644 size_t size,
1645 bool tlab)
1646 {
1647 collector()->collect(full, clear_all_soft_refs, size, tlab);
1648 }
1650 void CMSCollector::collect(bool full,
1651 bool clear_all_soft_refs,
1652 size_t size,
1653 bool tlab)
1654 {
1655 if (!UseCMSCollectionPassing && _collectorState > Idling) {
1656 // For debugging purposes skip the collection if the state
1657 // is not currently idle
1658 if (TraceCMSState) {
1659 gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " skipped full:%d CMS state %d",
1660 Thread::current(), full, _collectorState);
1661 }
1662 return;
1663 }
1665 // The following "if" branch is present for defensive reasons.
1666 // In the current uses of this interface, it can be replaced with:
1667 // assert(!GC_locker::is_active(), "Can't be called otherwise");
1668 // But I am not placing that assert here to allow future
1669 // generality in invoking this interface.
1670 if (GC_locker::is_active()) {
1671 // A consistency test for GC_locker
1672 assert(GC_locker::needs_gc(), "Should have been set already");
1673 // Skip this foreground collection, instead
1674 // expanding the heap if necessary.
1675 // Need the free list locks for the call to free() in compute_new_size()
1676 compute_new_size();
1677 return;
1678 }
1679 acquire_control_and_collect(full, clear_all_soft_refs);
1680 _full_gcs_since_conc_gc++;
1681 }
1683 void CMSCollector::request_full_gc(unsigned int full_gc_count, GCCause::Cause cause) {
1684 GenCollectedHeap* gch = GenCollectedHeap::heap();
1685 unsigned int gc_count = gch->total_full_collections();
1686 if (gc_count == full_gc_count) {
1687 MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
1688 _full_gc_requested = true;
1689 _full_gc_cause = cause;
1690 CGC_lock->notify(); // nudge CMS thread
1691 } else {
1692 assert(gc_count > full_gc_count, "Error: causal loop");
1693 }
1694 }
1696 bool CMSCollector::is_external_interruption() {
1697 GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause();
1698 return GCCause::is_user_requested_gc(cause) ||
1699 GCCause::is_serviceability_requested_gc(cause);
1700 }
1702 void CMSCollector::report_concurrent_mode_interruption() {
1703 if (is_external_interruption()) {
1704 if (PrintGCDetails) {
1705 gclog_or_tty->print(" (concurrent mode interrupted)");
1706 }
1707 } else {
1708 if (PrintGCDetails) {
1709 gclog_or_tty->print(" (concurrent mode failure)");
1710 }
1711 _gc_tracer_cm->report_concurrent_mode_failure();
1712 }
1713 }
1716 // The foreground and background collectors need to coordinate in order
1717 // to make sure that they do not mutually interfere with CMS collections.
1718 // When a background collection is active,
1719 // the foreground collector may need to take over (preempt) and
1720 // synchronously complete an ongoing collection. Depending on the
1721 // frequency of the background collections and the heap usage
1722 // of the application, this preemption can be seldom or frequent.
1723 // There are only certain
1724 // points in the background collection at which the "collection-baton"
1725 // can be passed to the foreground collector.
1726 //
1727 // The foreground collector will wait for the baton before
1728 // starting any part of the collection. The foreground collector
1729 // will only wait at one location.
1730 //
1731 // The background collector will yield the baton before starting a new
1732 // phase of the collection (e.g., before initial marking, marking from roots,
1733 // precleaning, final re-mark, sweep etc.) This is normally done at the head
1734 // of the loop which switches the phases. The background collector does some
1735 // of the phases (initial mark, final re-mark) with the world stopped.
1736 // Because of locking involved in stopping the world,
1737 // the foreground collector should not block waiting for the background
1738 // collector when it is doing a stop-the-world phase. The background
1739 // collector will yield the baton at an additional point just before
1740 // it enters a stop-the-world phase. Once the world is stopped, the
1741 // background collector checks the phase of the collection. If the
1742 // phase has not changed, it proceeds with the collection. If the
1743 // phase has changed, it skips that phase of the collection. See
1744 // the comments on the use of the Heap_lock in collect_in_background().
1745 //
1746 // Variables used in baton passing.
1747 // _foregroundGCIsActive - Set to true by the foreground collector when
1748 // it wants the baton. The foreground clears it when it has finished
1749 // the collection.
1750 // _foregroundGCShouldWait - Set to true by the background collector
1751 // when it is running. The foreground collector waits while
1752 // _foregroundGCShouldWait is true.
1753 // CGC_lock - monitor used to protect access to the above variables
1754 // and to notify the foreground and background collectors.
1755 // _collectorState - current state of the CMS collection.
1756 //
1757 // The foreground collector
1758 // acquires the CGC_lock
1759 // sets _foregroundGCIsActive
1760 // waits on the CGC_lock for _foregroundGCShouldWait to be false
1761 // various locks acquired in preparation for the collection
1762 // are released so as not to block the background collector
1763 // that is in the midst of a collection
1764 // proceeds with the collection
1765 // clears _foregroundGCIsActive
1766 // returns
1767 //
1768 // The background collector in a loop iterating on the phases of the
1769 // collection
1770 // acquires the CGC_lock
1771 // sets _foregroundGCShouldWait
1772 // if _foregroundGCIsActive is set
1773 // clears _foregroundGCShouldWait, notifies CGC_lock
1774 // waits on CGC_lock for _foregroundGCIsActive to become false
1775 // and exits the loop.
1776 // otherwise
1777 // proceed with that phase of the collection
1778 // if the phase is a stop-the-world phase,
1779 // yield the baton once more just before enqueueing
1780 // the stop-world CMS operation (executed by the VM thread).
1781 // returns after all phases of the collection are done
1782 //
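// Below is a compact standalone model of the baton-passing protocol just
// described, written with std::mutex/std::condition_variable standing in for
// CGC_lock and reduced to the two flags involved. It is plain C++11, not
// HotSpot code; the names ExampleBaton, example_foreground_collect and
// example_background_phase are invented for this sketch, and the functions
// that follow are the authoritative implementation.

#include <condition_variable>
#include <mutex>

struct ExampleBaton {
  std::mutex              lock;                  // stands in for CGC_lock
  std::condition_variable cv;
  bool foreground_active      = false;           // _foregroundGCIsActive
  bool foreground_should_wait = false;           // _foregroundGCShouldWait
};

// Foreground (VM thread): announce the request, then wait for the background
// collector to drop the baton before doing the synchronous collection.
inline void example_foreground_collect(ExampleBaton& b) {
  std::unique_lock<std::mutex> x(b.lock);
  b.foreground_active = true;
  b.cv.notify_all();                             // wake a blocked CMS thread
  b.cv.wait(x, [&b] { return !b.foreground_should_wait; });
  // ... do the foreground collection ...
  b.foreground_active = false;
  b.cv.notify_all();
}

// Background (CMS thread), once per phase: yield the baton if the foreground
// collector wants it, otherwise keep it and run the phase.
inline bool example_background_phase(ExampleBaton& b) {
  std::unique_lock<std::mutex> x(b.lock);
  b.foreground_should_wait = true;
  if (b.foreground_active) {
    b.foreground_should_wait = false;            // yield the baton
    b.cv.notify_all();
    b.cv.wait(x, [&b] { return !b.foreground_active; });
    return false;                                // phase skipped this round
  }
  // ... run the concurrent phase ...
  b.foreground_should_wait = false;
  b.cv.notify_all();
  return true;
}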
1784 void CMSCollector::acquire_control_and_collect(bool full,
1785 bool clear_all_soft_refs) {
1786 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
1787 assert(!Thread::current()->is_ConcurrentGC_thread(),
1788 "shouldn't try to acquire control from self!");
1790 // Start the protocol for acquiring control of the
1791 // collection from the background collector (aka CMS thread).
1792 assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1793 "VM thread should have CMS token");
1794 // Remember the possibly interrupted state of an ongoing
1795 // concurrent collection
1796 CollectorState first_state = _collectorState;
1798 // Signal to a possibly ongoing concurrent collection that
1799 // we want to do a foreground collection.
1800 _foregroundGCIsActive = true;
1802 // Disable incremental mode during a foreground collection.
1803 ICMSDisabler icms_disabler;
1805 // release locks and wait for a notify from the background collector
1806 // releasing the locks is only necessary for phases which
1807 // do yields to improve the granularity of the collection.
1808 assert_lock_strong(bitMapLock());
1809 // We need to lock the Free list lock for the space that we are
1810 // currently collecting.
1811 assert(haveFreelistLocks(), "Must be holding free list locks");
1812 bitMapLock()->unlock();
1813 releaseFreelistLocks();
1814 {
1815 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1816 if (_foregroundGCShouldWait) {
1817 // We are going to be waiting for action for the CMS thread;
1818 // it had better not be gone (for instance at shutdown)!
1819 assert(ConcurrentMarkSweepThread::cmst() != NULL,
1820 "CMS thread must be running");
1821 // Wait here until the background collector gives us the go-ahead
1822 ConcurrentMarkSweepThread::clear_CMS_flag(
1823 ConcurrentMarkSweepThread::CMS_vm_has_token); // release token
1824 // Get a possibly blocked CMS thread going:
1825 // Note that we set _foregroundGCIsActive true above,
1826 // without protection of the CGC_lock.
1827 CGC_lock->notify();
1828 assert(!ConcurrentMarkSweepThread::vm_thread_wants_cms_token(),
1829 "Possible deadlock");
1830 while (_foregroundGCShouldWait) {
1831 // wait for notification
1832 CGC_lock->wait(Mutex::_no_safepoint_check_flag);
1833 // Possibility of delay/starvation here, since CMS token does
1834 // not know to give priority to VM thread? Actually, I think
1835 // there wouldn't be any delay/starvation, but the proof of
1836 // that "fact" (?) appears non-trivial. XXX 20011219YSR
1837 }
1838 ConcurrentMarkSweepThread::set_CMS_flag(
1839 ConcurrentMarkSweepThread::CMS_vm_has_token);
1840 }
1841 }
1842 // The CMS_token is already held. Get back the other locks.
1843 assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1844 "VM thread should have CMS token");
1845 getFreelistLocks();
1846 bitMapLock()->lock_without_safepoint_check();
1847 if (TraceCMSState) {
1848 gclog_or_tty->print_cr("CMS foreground collector has asked for control "
1849 INTPTR_FORMAT " with first state %d", Thread::current(), first_state);
1850 gclog_or_tty->print_cr(" gets control with state %d", _collectorState);
1851 }
1853 // Check if we need to do a compaction, or if not, whether
1854 // we need to start the mark-sweep from scratch.
1855 bool should_compact = false;
1856 bool should_start_over = false;
1857 decide_foreground_collection_type(clear_all_soft_refs,
1858 &should_compact, &should_start_over);
1860 NOT_PRODUCT(
1861 if (RotateCMSCollectionTypes) {
1862 if (_cmsGen->debug_collection_type() ==
1863 ConcurrentMarkSweepGeneration::MSC_foreground_collection_type) {
1864 should_compact = true;
1865 } else if (_cmsGen->debug_collection_type() ==
1866 ConcurrentMarkSweepGeneration::MS_foreground_collection_type) {
1867 should_compact = false;
1868 }
1869 }
1870 )
1872 if (first_state > Idling) {
1873 report_concurrent_mode_interruption();
1874 }
1876 set_did_compact(should_compact);
1877 if (should_compact) {
1878 // If the collection is being acquired from the background
1879 // collector, there may be references on the discovered
1880 // references lists that have NULL referents (being those
1881 // that were concurrently cleared by a mutator) or
1882 // that are no longer active (having been enqueued concurrently
1883 // by the mutator).
1884 // Scrub the list of those references because Mark-Sweep-Compact
1885 // code assumes referents are not NULL and that all discovered
1886 // Reference objects are active.
1887 ref_processor()->clean_up_discovered_references();
1889 if (first_state > Idling) {
1890 save_heap_summary();
1891 }
1893 do_compaction_work(clear_all_soft_refs);
1895 // Has the GC time limit been exceeded?
1896 DefNewGeneration* young_gen = _young_gen->as_DefNewGeneration();
1897 size_t max_eden_size = young_gen->max_capacity() -
1898 young_gen->to()->capacity() -
1899 young_gen->from()->capacity();
1900 GenCollectedHeap* gch = GenCollectedHeap::heap();
1901 GCCause::Cause gc_cause = gch->gc_cause();
1902 size_policy()->check_gc_overhead_limit(_young_gen->used(),
1903 young_gen->eden()->used(),
1904 _cmsGen->max_capacity(),
1905 max_eden_size,
1906 full,
1907 gc_cause,
1908 gch->collector_policy());
1909 } else {
1910 do_mark_sweep_work(clear_all_soft_refs, first_state,
1911 should_start_over);
1912 }
1913 // Reset the expansion cause, now that we just completed
1914 // a collection cycle.
1915 clear_expansion_cause();
1916 _foregroundGCIsActive = false;
1917 return;
1918 }
1920 // Resize the tenured generation
1921 // after obtaining the free list locks for the
1922 // two generations.
1923 void CMSCollector::compute_new_size() {
1924 assert_locked_or_safepoint(Heap_lock);
1925 FreelistLocker z(this);
1926 MetaspaceGC::compute_new_size();
1927 _cmsGen->compute_new_size_free_list();
1928 }
1930 // A work method used by foreground collection to determine
1931 // what type of collection (compacting or not, continuing or fresh)
1932 // it should do.
1933 // NOTE: the intent is to make UseCMSCompactAtFullCollection
1934 // and CMSCompactWhenClearAllSoftRefs the default in the future
1935 // and do away with the flags after a suitable period.
1936 void CMSCollector::decide_foreground_collection_type(
1937 bool clear_all_soft_refs, bool* should_compact,
1938 bool* should_start_over) {
1939 // Normally, we'll compact only if the UseCMSCompactAtFullCollection
1940 // flag is set, and we have either requested a System.gc() or
1941 // the number of full gc's since the last concurrent cycle
1942 // has exceeded the threshold set by CMSFullGCsBeforeCompaction,
1943 // or if an incremental collection has failed
1944 GenCollectedHeap* gch = GenCollectedHeap::heap();
1945 assert(gch->collector_policy()->is_two_generation_policy(),
1946 "You may want to check the correctness of the following");
1947 // Inform cms gen if this was due to partial collection failing.
1948 // The CMS gen may use this fact to determine its expansion policy.
1949 if (gch->incremental_collection_will_fail(false /* don't consult_young */)) {
1950 assert(!_cmsGen->incremental_collection_failed(),
1951 "Should have been noticed, reacted to and cleared");
1952 _cmsGen->set_incremental_collection_failed();
1953 }
1954 *should_compact =
1955 UseCMSCompactAtFullCollection &&
1956 ((_full_gcs_since_conc_gc >= CMSFullGCsBeforeCompaction) ||
1957 GCCause::is_user_requested_gc(gch->gc_cause()) ||
1958 gch->incremental_collection_will_fail(true /* consult_young */));
1959 *should_start_over = false;
1960 if (clear_all_soft_refs && !*should_compact) {
1961 // We are about to do a last ditch collection attempt
1962 // so it would normally make sense to do a compaction
1963 // to reclaim as much space as possible.
1964 if (CMSCompactWhenClearAllSoftRefs) {
1965 // Default: The rationale is that in this case either
1966 // we are past the final marking phase, in which case
1967 // we'd have to start over, or so little has been done
1968 // that there's little point in saving that work. Compaction
1969 // appears to be the sensible choice in either case.
1970 *should_compact = true;
1971 } else {
1972 // We have been asked to clear all soft refs, but not to
1973 // compact. Make sure that we aren't past the final checkpoint
1974 // phase, for that is where we process soft refs. If we are already
1975 // past that phase, we'll need to redo the refs discovery phase and
1976 // if necessary clear soft refs that weren't previously
1977 // cleared. We do so by remembering the phase in which
1978 // we came in, and if we are past the refs processing
1979 // phase, we'll choose to just redo the mark-sweep
1980 // collection from scratch.
1981 if (_collectorState > FinalMarking) {
1982 // We are past the refs processing phase;
1983 // start over and do a fresh synchronous CMS cycle
1984 _collectorState = Resetting; // skip to reset to start new cycle
1985 reset(false /* == !asynch */);
1986 *should_start_over = true;
1987 } // else we can continue a possibly ongoing current cycle
1988 }
1989 }
1990 }
1992 // A work method used by the foreground collector to do
1993 // a mark-sweep-compact.
1994 void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
1995 GenCollectedHeap* gch = GenCollectedHeap::heap();
1997 STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
1998 gc_timer->register_gc_start();
2000 SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
2001 gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
2003 GCTraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, NULL, gc_tracer->gc_id());
2004 if (PrintGC && Verbose && !(GCCause::is_user_requested_gc(gch->gc_cause()))) {
2005 gclog_or_tty->print_cr("Compact ConcurrentMarkSweepGeneration after %d "
2006 "collections passed to foreground collector", _full_gcs_since_conc_gc);
2007 }
2009 // Sample collection interval time and reset for collection pause.
2010 if (UseAdaptiveSizePolicy) {
2011 size_policy()->msc_collection_begin();
2012 }
2014 // Temporarily widen the span of the weak reference processing to
2015 // the entire heap.
2016 MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
2017 ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
2018 // Temporarily, clear the "is_alive_non_header" field of the
2019 // reference processor.
2020 ReferenceProcessorIsAliveMutator rp_mut_closure(ref_processor(), NULL);
2021 // Temporarily make reference _processing_ single threaded (non-MT).
2022 ReferenceProcessorMTProcMutator rp_mut_mt_processing(ref_processor(), false);
2023 // Temporarily make refs discovery atomic
2024 ReferenceProcessorAtomicMutator rp_mut_atomic(ref_processor(), true);
2025 // Temporarily make reference _discovery_ single threaded (non-MT)
2026 ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
2028 ref_processor()->set_enqueuing_is_done(false);
2029 ref_processor()->enable_discovery(false /*verify_disabled*/, false /*check_no_refs*/);
2030 ref_processor()->setup_policy(clear_all_soft_refs);
2031 // If an asynchronous collection finishes, the _modUnionTable is
2032 // all clear. If we are assuming the collection from an asynchronous
2033 // collection, clear the _modUnionTable.
2034 assert(_collectorState != Idling || _modUnionTable.isAllClear(),
2035 "_modUnionTable should be clear if the baton was not passed");
2036 _modUnionTable.clear_all();
2037 assert(_collectorState != Idling || _ct->klass_rem_set()->mod_union_is_clear(),
2038 "mod union for klasses should be clear if the baton was passed");
2039 _ct->klass_rem_set()->clear_mod_union();
2041 // We must adjust the allocation statistics being maintained
2042 // in the free list space. We do so by reading and clearing
2043 // the sweep timer and updating the block flux rate estimates below.
2044 assert(!_intra_sweep_timer.is_active(), "_intra_sweep_timer should be inactive");
2045 if (_inter_sweep_timer.is_active()) {
2046 _inter_sweep_timer.stop();
2047 // Note that we do not use this sample to update the _inter_sweep_estimate.
2048 _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
2049 _inter_sweep_estimate.padded_average(),
2050 _intra_sweep_estimate.padded_average());
2051 }
2053 GenMarkSweep::invoke_at_safepoint(_cmsGen->level(),
2054 ref_processor(), clear_all_soft_refs);
2055 #ifdef ASSERT
2056 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
2057 size_t free_size = cms_space->free();
2058 assert(free_size ==
2059 pointer_delta(cms_space->end(), cms_space->compaction_top())
2060 * HeapWordSize,
2061 "All the free space should be compacted into one chunk at top");
2062 assert(cms_space->dictionary()->total_chunk_size(
2063 debug_only(cms_space->freelistLock())) == 0 ||
2064 cms_space->totalSizeInIndexedFreeLists() == 0,
2065 "All the free space should be in a single chunk");
2066 size_t num = cms_space->totalCount();
2067 assert((free_size == 0 && num == 0) ||
2068 (free_size > 0 && (num == 1 || num == 2)),
2069 "There should be at most 2 free chunks after compaction");
2070 #endif // ASSERT
2071 _collectorState = Resetting;
2072 assert(_restart_addr == NULL,
2073 "Should have been NULL'd before baton was passed");
2074 reset(false /* == !asynch */);
2075 _cmsGen->reset_after_compaction();
2076 _concurrent_cycles_since_last_unload = 0;
2078 // Clear any data recorded in the PLAB chunk arrays.
2079 if (_survivor_plab_array != NULL) {
2080 reset_survivor_plab_arrays();
2081 }
2083 // Adjust the per-size allocation stats for the next epoch.
2084 _cmsGen->cmsSpace()->endSweepFLCensus(sweep_count() /* fake */);
2085 // Restart the "inter sweep timer" for the next epoch.
2086 _inter_sweep_timer.reset();
2087 _inter_sweep_timer.start();
2089 // Sample collection pause time and reset for collection interval.
2090 if (UseAdaptiveSizePolicy) {
2091 size_policy()->msc_collection_end(gch->gc_cause());
2092 }
2094 gc_timer->register_gc_end();
2096 gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
2098 // For a mark-sweep-compact, compute_new_size() will be called
2099 // in the heap's do_collection() method.
2100 }
2102 // A work method used by the foreground collector to do
2103 // a mark-sweep, after taking over from a possibly on-going
2104 // concurrent mark-sweep collection.
2105 void CMSCollector::do_mark_sweep_work(bool clear_all_soft_refs,
2106 CollectorState first_state, bool should_start_over) {
2107 if (PrintGC && Verbose) {
2108 gclog_or_tty->print_cr("Pass concurrent collection to foreground "
2109 "collector with count %d",
2110 _full_gcs_since_conc_gc);
2111 }
2112 switch (_collectorState) {
2113 case Idling:
2114 if (first_state == Idling || should_start_over) {
2115 // The background GC was not active, or should
2116 // be restarted from scratch; start the cycle.
2117 _collectorState = InitialMarking;
2118 }
2119 // If first_state was not Idling, then a background GC
2120 // was in progress and has now finished. No need to do it
2121 // again. Leave the state as Idling.
2122 break;
2123 case Precleaning:
2124 // In the foreground case don't do the precleaning since
2125 // it is not done concurrently and there is extra work
2126 // required.
2127 _collectorState = FinalMarking;
2128 }
2129 collect_in_foreground(clear_all_soft_refs, GenCollectedHeap::heap()->gc_cause());
2131 // For a mark-sweep, compute_new_size() will be called
2132 // in the heap's do_collection() method.
2133 }
2136 void CMSCollector::print_eden_and_survivor_chunk_arrays() {
2137 DefNewGeneration* dng = _young_gen->as_DefNewGeneration();
2138 EdenSpace* eden_space = dng->eden();
2139 ContiguousSpace* from_space = dng->from();
2140 ContiguousSpace* to_space = dng->to();
2141 // Eden
2142 if (_eden_chunk_array != NULL) {
2143 gclog_or_tty->print_cr("eden " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
2144 eden_space->bottom(), eden_space->top(),
2145 eden_space->end(), eden_space->capacity());
2146 gclog_or_tty->print_cr("_eden_chunk_index=" SIZE_FORMAT ", "
2147 "_eden_chunk_capacity=" SIZE_FORMAT,
2148 _eden_chunk_index, _eden_chunk_capacity);
2149 for (size_t i = 0; i < _eden_chunk_index; i++) {
2150 gclog_or_tty->print_cr("_eden_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT,
2151 i, _eden_chunk_array[i]);
2152 }
2153 }
2154 // Survivor
2155 if (_survivor_chunk_array != NULL) {
2156 gclog_or_tty->print_cr("survivor " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
2157 from_space->bottom(), from_space->top(),
2158 from_space->end(), from_space->capacity());
2159 gclog_or_tty->print_cr("_survivor_chunk_index=" SIZE_FORMAT ", "
2160 "_survivor_chunk_capacity=" SIZE_FORMAT,
2161 _survivor_chunk_index, _survivor_chunk_capacity);
2162 for (size_t i = 0; i < _survivor_chunk_index; i++) {
2163 gclog_or_tty->print_cr("_survivor_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT,
2164 i, _survivor_chunk_array[i]);
2165 }
2166 }
2167 }
2169 void CMSCollector::getFreelistLocks() const {
2170 // Get locks for all free lists in all generations that this
2171 // collector is responsible for
2172 _cmsGen->freelistLock()->lock_without_safepoint_check();
2173 }
2175 void CMSCollector::releaseFreelistLocks() const {
2176 // Release locks for all free lists in all generations that this
2177 // collector is responsible for
2178 _cmsGen->freelistLock()->unlock();
2179 }
2181 bool CMSCollector::haveFreelistLocks() const {
2182 // Check locks for all free lists in all generations that this
2183 // collector is responsible for
2184 assert_lock_strong(_cmsGen->freelistLock());
2185 PRODUCT_ONLY(ShouldNotReachHere());
2186 return true;
2187 }
2189 // A utility class that is used by the CMS collector to
2190 // temporarily "release" the foreground collector from its
2191 // usual obligation to wait for the background collector to
2192 // complete an ongoing phase before proceeding.
2193 class ReleaseForegroundGC: public StackObj {
2194 private:
2195 CMSCollector* _c;
2196 public:
2197 ReleaseForegroundGC(CMSCollector* c) : _c(c) {
2198 assert(_c->_foregroundGCShouldWait, "Else should not need to call");
2199 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2200 // allow a potentially blocked foreground collector to proceed
2201 _c->_foregroundGCShouldWait = false;
2202 if (_c->_foregroundGCIsActive) {
2203 CGC_lock->notify();
2204 }
2205 assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2206 "Possible deadlock");
2207 }
2209 ~ReleaseForegroundGC() {
2210 assert(!_c->_foregroundGCShouldWait, "Usage protocol violation?");
2211 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2212 _c->_foregroundGCShouldWait = true;
2213 }
2214 };
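// Typical use of ReleaseForegroundGC (see the InitialMarking and FinalMarking
// cases in collect_in_background() below): the release is scoped around the
// enqueued stop-the-world operation so that a blocked foreground collector
// can proceed while the VM operation is pending, and the destructor
// re-asserts _foregroundGCShouldWait on scope exit, e.g.
//
//   {
//     ReleaseForegroundGC x(this);          // clears _foregroundGCShouldWait
//     VM_CMS_Initial_Mark initial_mark_op(this);
//     VMThread::execute(&initial_mark_op);
//   }                                       // flag restored on scope exit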
2216 // There are separate collect_in_background and collect_in_foreground because of
2217 // the different locking requirements of the background collector and the
2218 // foreground collector. There was originally an attempt to share
2219 // one "collect" method between the background collector and the foreground
2220 // collector but the if-then-else required made it cleaner to have
2221 // separate methods.
2222 void CMSCollector::collect_in_background(bool clear_all_soft_refs, GCCause::Cause cause) {
2223 assert(Thread::current()->is_ConcurrentGC_thread(),
2224 "A CMS asynchronous collection is only allowed on a CMS thread.");
2226 GenCollectedHeap* gch = GenCollectedHeap::heap();
2227 {
2228 bool safepoint_check = Mutex::_no_safepoint_check_flag;
2229 MutexLockerEx hl(Heap_lock, safepoint_check);
2230 FreelistLocker fll(this);
2231 MutexLockerEx x(CGC_lock, safepoint_check);
2232 if (_foregroundGCIsActive || !UseAsyncConcMarkSweepGC) {
2233 // The foreground collector is active or we're
2234 // not using asynchronous collections. Skip this
2235 // background collection.
2236 assert(!_foregroundGCShouldWait, "Should be clear");
2237 return;
2238 } else {
2239 assert(_collectorState == Idling, "Should be idling before start.");
2240 _collectorState = InitialMarking;
2241 register_gc_start(cause);
2242 // Reset the expansion cause, now that we are about to begin
2243 // a new cycle.
2244 clear_expansion_cause();
2246 // Clear the MetaspaceGC flag since a concurrent collection
2247 // is starting but also clear it after the collection.
2248 MetaspaceGC::set_should_concurrent_collect(false);
2249 }
2250 // Decide if we want to enable class unloading as part of the
2251 // ensuing concurrent GC cycle.
2252 update_should_unload_classes();
2253 _full_gc_requested = false; // acks all outstanding full gc requests
2254 _full_gc_cause = GCCause::_no_gc;
2255 // Signal that we are about to start a collection
2256 gch->increment_total_full_collections(); // ... starting a collection cycle
2257 _collection_count_start = gch->total_full_collections();
2258 }
2260 // Used for PrintGC
2261 size_t prev_used;
2262 if (PrintGC && Verbose) {
2263 prev_used = _cmsGen->used(); // XXXPERM
2264 }
2266 // The change of the collection state is normally done at this level;
2267 // the exceptions are phases that are executed while the world is
2268 // stopped. For those phases the change of state is done while the
2269 // world is stopped. For baton passing purposes this allows the
2270 // background collector to finish the phase and change state atomically.
2271 // The foreground collector cannot wait on a phase that is done
2272 // while the world is stopped because the foreground collector already
2273 // has the world stopped and would deadlock.
2274 while (_collectorState != Idling) {
2275 if (TraceCMSState) {
2276 gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
2277 Thread::current(), _collectorState);
2278 }
2279 // The foreground collector
2280 // holds the Heap_lock throughout its collection.
2281 // holds the CMS token (but not the lock)
2282 // except while it is waiting for the background collector to yield.
2283 //
2284 // The foreground collector should be blocked (not for long)
2285 // if the background collector is about to start a phase
2286 // executed with world stopped. If the background
2287 // collector has already started such a phase, the
2288 // foreground collector is blocked waiting for the
2289 // Heap_lock. The stop-world phases (InitialMarking and FinalMarking)
2290 // are executed in the VM thread.
2291 //
2292 // The locking order is
2293 // PendingListLock (PLL) -- if applicable (FinalMarking)
2294 // Heap_lock (both this & PLL locked in VM_CMS_Operation::prologue())
2295 // CMS token (claimed in
2296 // stop_world_and_do() -->
2297 // safepoint_synchronize() -->
2298 // CMSThread::synchronize())
2300 {
2301 // Check if the FG collector wants us to yield.
2302 CMSTokenSync x(true); // is cms thread
2303 if (waitForForegroundGC()) {
2304 // We yielded to a foreground GC, nothing more to be
2305 // done this round.
2306 assert(_foregroundGCShouldWait == false, "We set it to false in "
2307 "waitForForegroundGC()");
2308 if (TraceCMSState) {
2309 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2310 " exiting collection CMS state %d",
2311 Thread::current(), _collectorState);
2312 }
2313 return;
2314 } else {
2315 // The background collector can run but check to see if the
2316 // foreground collector has done a collection while the
2317 // background collector was waiting to get the CGC_lock
2318 // above. If yes, break so that _foregroundGCShouldWait
2319 // is cleared before returning.
2320 if (_collectorState == Idling) {
2321 break;
2322 }
2323 }
2324 }
2326 assert(_foregroundGCShouldWait, "Foreground collector, if active, "
2327 "should be waiting");
2329 switch (_collectorState) {
2330 case InitialMarking:
2331 {
2332 ReleaseForegroundGC x(this);
2333 stats().record_cms_begin();
2334 VM_CMS_Initial_Mark initial_mark_op(this);
2335 VMThread::execute(&initial_mark_op);
2336 }
2337 // The collector state may be any legal state at this point
2338 // since the background collector may have yielded to the
2339 // foreground collector.
2340 break;
2341 case Marking:
2342 // initial marking in checkpointRootsInitialWork has been completed
2343 if (markFromRoots(true)) { // we were successful
2344 assert(_collectorState == Precleaning, "Collector state should "
2345 "have changed");
2346 } else {
2347 assert(_foregroundGCIsActive, "Internal state inconsistency");
2348 }
2349 break;
2350 case Precleaning:
2351 if (UseAdaptiveSizePolicy) {
2352 size_policy()->concurrent_precleaning_begin();
2353 }
2354 // marking from roots in markFromRoots has been completed
2355 preclean();
2356 if (UseAdaptiveSizePolicy) {
2357 size_policy()->concurrent_precleaning_end();
2358 }
2359 assert(_collectorState == AbortablePreclean ||
2360 _collectorState == FinalMarking,
2361 "Collector state should have changed");
2362 break;
2363 case AbortablePreclean:
2364 if (UseAdaptiveSizePolicy) {
2365 size_policy()->concurrent_phases_resume();
2366 }
2367 abortable_preclean();
2368 if (UseAdaptiveSizePolicy) {
2369 size_policy()->concurrent_precleaning_end();
2370 }
2371 assert(_collectorState == FinalMarking, "Collector state should "
2372 "have changed");
2373 break;
2374 case FinalMarking:
2375 {
2376 ReleaseForegroundGC x(this);
2378 VM_CMS_Final_Remark final_remark_op(this);
2379 VMThread::execute(&final_remark_op);
2380 }
2381 assert(_foregroundGCShouldWait, "block post-condition");
2382 break;
2383 case Sweeping:
2384 if (UseAdaptiveSizePolicy) {
2385 size_policy()->concurrent_sweeping_begin();
2386 }
2387 // final marking in checkpointRootsFinal has been completed
2388 sweep(true);
2389 assert(_collectorState == Resizing, "Collector state change "
2390 "to Resizing must be done under the free_list_lock");
2391 _full_gcs_since_conc_gc = 0;
2393 // Stop the timers for adaptive size policy for the concurrent phases
2394 if (UseAdaptiveSizePolicy) {
2395 size_policy()->concurrent_sweeping_end();
2396 size_policy()->concurrent_phases_end(gch->gc_cause(),
2397 gch->prev_gen(_cmsGen)->capacity(),
2398 _cmsGen->free());
2399 }
2401 case Resizing: {
2402 // Sweeping has been completed...
2403 // At this point the background collection has completed.
2404 // Don't move the call to compute_new_size() down
2405 // into code that might be executed if the background
2406 // collection was preempted.
2407 {
2408 ReleaseForegroundGC x(this); // unblock FG collection
2409 MutexLockerEx y(Heap_lock, Mutex::_no_safepoint_check_flag);
2410 CMSTokenSync z(true); // not strictly needed.
2411 if (_collectorState == Resizing) {
2412 compute_new_size();
2413 save_heap_summary();
2414 _collectorState = Resetting;
2415 } else {
2416 assert(_collectorState == Idling, "The state should only change"
2417 " because the foreground collector has finished the collection");
2418 }
2419 }
2420 break;
2421 }
2422 case Resetting:
2423 // CMS heap resizing has been completed
2424 reset(true);
2425 assert(_collectorState == Idling, "Collector state should "
2426 "have changed");
2428 MetaspaceGC::set_should_concurrent_collect(false);
2430 stats().record_cms_end();
2431 // Don't move the concurrent_phases_end() and compute_new_size()
2432 // calls to here because a preempted background collection
2433 // has its state set to "Resetting".
2434 break;
2435 case Idling:
2436 default:
2437 ShouldNotReachHere();
2438 break;
2439 }
2440 if (TraceCMSState) {
2441 gclog_or_tty->print_cr(" Thread " INTPTR_FORMAT " done - next CMS state %d",
2442 Thread::current(), _collectorState);
2443 }
2444 assert(_foregroundGCShouldWait, "block post-condition");
2445 }
2447 // Should this be in gc_epilogue?
2448 collector_policy()->counters()->update_counters();
2450 {
2451 // Clear _foregroundGCShouldWait and, in the event that the
2452 // foreground collector is waiting, notify it, before
2453 // returning.
2454 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2455 _foregroundGCShouldWait = false;
2456 if (_foregroundGCIsActive) {
2457 CGC_lock->notify();
2458 }
2459 assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2460 "Possible deadlock");
2461 }
2462 if (TraceCMSState) {
2463 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2464 " exiting collection CMS state %d",
2465 Thread::current(), _collectorState);
2466 }
2467 if (PrintGC && Verbose) {
2468 _cmsGen->print_heap_change(prev_used);
2469 }
2470 }
2472 void CMSCollector::register_foreground_gc_start(GCCause::Cause cause) {
2473 if (!_cms_start_registered) {
2474 register_gc_start(cause);
2475 }
2476 }
2478 void CMSCollector::register_gc_start(GCCause::Cause cause) {
2479 _cms_start_registered = true;
2480 _gc_timer_cm->register_gc_start();
2481 _gc_tracer_cm->report_gc_start(cause, _gc_timer_cm->gc_start());
2482 }
2484 void CMSCollector::register_gc_end() {
2485 if (_cms_start_registered) {
2486 report_heap_summary(GCWhen::AfterGC);
2488 _gc_timer_cm->register_gc_end();
2489 _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
2490 _cms_start_registered = false;
2491 }
2492 }
2494 void CMSCollector::save_heap_summary() {
2495 GenCollectedHeap* gch = GenCollectedHeap::heap();
2496 _last_heap_summary = gch->create_heap_summary();
2497 _last_metaspace_summary = gch->create_metaspace_summary();
2498 }
2500 void CMSCollector::report_heap_summary(GCWhen::Type when) {
2501 _gc_tracer_cm->report_gc_heap_summary(when, _last_heap_summary);
2502 _gc_tracer_cm->report_metaspace_summary(when, _last_metaspace_summary);
2503 }
2505 void CMSCollector::collect_in_foreground(bool clear_all_soft_refs, GCCause::Cause cause) {
2506 assert(_foregroundGCIsActive && !_foregroundGCShouldWait,
2507 "Foreground collector should be waiting, not executing");
2508 assert(Thread::current()->is_VM_thread(), "A foreground collection"
2509 "may only be done by the VM Thread with the world stopped");
2510 assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
2511 "VM thread should have CMS token");
2513 // The gc id is created in register_foreground_gc_start if this collection is synchronous
2514 const GCId gc_id = _collectorState == InitialMarking ? GCId::peek() : _gc_tracer_cm->gc_id();
2515 NOT_PRODUCT(GCTraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose,
2516 true, NULL, gc_id);)
2517 if (UseAdaptiveSizePolicy) {
2518 size_policy()->ms_collection_begin();
2519 }
2520 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
2522 HandleMark hm; // Discard invalid handles created during verification
2524 if (VerifyBeforeGC &&
2525 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2526 Universe::verify();
2527 }
2529 // Snapshot the soft reference policy to be used in this collection cycle.
2530 ref_processor()->setup_policy(clear_all_soft_refs);
2532 // Decide if class unloading should be done
2533 update_should_unload_classes();
2535 bool init_mark_was_synchronous = false; // until proven otherwise
2536 while (_collectorState != Idling) {
2537 if (TraceCMSState) {
2538 gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
2539 Thread::current(), _collectorState);
2540 }
2541 switch (_collectorState) {
2542 case InitialMarking:
2543 register_foreground_gc_start(cause);
2544 init_mark_was_synchronous = true; // fact to be exploited in re-mark
2545 checkpointRootsInitial(false);
2546 assert(_collectorState == Marking, "Collector state should have changed"
2547 " within checkpointRootsInitial()");
2548 break;
2549 case Marking:
2550 // initial marking in checkpointRootsInitialWork has been completed
2551 if (VerifyDuringGC &&
2552 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2553 Universe::verify("Verify before initial mark: ");
2554 }
2555 {
2556 bool res = markFromRoots(false);
2557 assert(res && _collectorState == FinalMarking, "Collector state should "
2558 "have changed");
2559 break;
2560 }
2561 case FinalMarking:
2562 if (VerifyDuringGC &&
2563 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2564 Universe::verify("Verify before re-mark: ");
2565 }
2566 checkpointRootsFinal(false, clear_all_soft_refs,
2567 init_mark_was_synchronous);
2568 assert(_collectorState == Sweeping, "Collector state should not "
2569 "have changed within checkpointRootsFinal()");
2570 break;
2571 case Sweeping:
2572 // final marking in checkpointRootsFinal has been completed
2573 if (VerifyDuringGC &&
2574 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2575 Universe::verify("Verify before sweep: ");
2576 }
2577 sweep(false);
2578 assert(_collectorState == Resizing, "Incorrect state");
2579 break;
2580 case Resizing: {
2581 // Sweeping has been completed; the actual resize in this case
2582 // is done separately; nothing to be done in this state.
2583 _collectorState = Resetting;
2584 break;
2585 }
2586 case Resetting:
2587 // The heap has been resized.
2588 if (VerifyDuringGC &&
2589 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2590 Universe::verify("Verify before reset: ");
2591 }
2592 save_heap_summary();
2593 reset(false);
2594 assert(_collectorState == Idling, "Collector state should "
2595 "have changed");
2596 break;
2597 case Precleaning:
2598 case AbortablePreclean:
2599 // Elide the preclean phase
2600 _collectorState = FinalMarking;
2601 break;
2602 default:
2603 ShouldNotReachHere();
2604 }
2605 if (TraceCMSState) {
2606 gclog_or_tty->print_cr(" Thread " INTPTR_FORMAT " done - next CMS state %d",
2607 Thread::current(), _collectorState);
2608 }
2609 }
2611 if (UseAdaptiveSizePolicy) {
2612 GenCollectedHeap* gch = GenCollectedHeap::heap();
2613 size_policy()->ms_collection_end(gch->gc_cause());
2614 }
2616 if (VerifyAfterGC &&
2617 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2618 Universe::verify();
2619 }
2620 if (TraceCMSState) {
2621 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2622 " exiting collection CMS state %d",
2623 Thread::current(), _collectorState);
2624 }
2625 }
2627 bool CMSCollector::waitForForegroundGC() {
2628 bool res = false;
2629 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2630 "CMS thread should have CMS token");
2631 // Block the foreground collector until the
2632 // background collector decides whether to
2633 // yield.
2634 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2635 _foregroundGCShouldWait = true;
2636 if (_foregroundGCIsActive) {
2637 // The background collector yields to the
2638 // foreground collector and returns a value
2639 // indicating that it has yielded. The foreground
2640 // collector can proceed.
2641 res = true;
2642 _foregroundGCShouldWait = false;
2643 ConcurrentMarkSweepThread::clear_CMS_flag(
2644 ConcurrentMarkSweepThread::CMS_cms_has_token);
2645 ConcurrentMarkSweepThread::set_CMS_flag(
2646 ConcurrentMarkSweepThread::CMS_cms_wants_token);
2647 // Get a possibly blocked foreground thread going
2648 CGC_lock->notify();
2649 if (TraceCMSState) {
2650 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " waiting at CMS state %d",
2651 Thread::current(), _collectorState);
2652 }
2653 while (_foregroundGCIsActive) {
2654 CGC_lock->wait(Mutex::_no_safepoint_check_flag);
2655 }
2656 ConcurrentMarkSweepThread::set_CMS_flag(
2657 ConcurrentMarkSweepThread::CMS_cms_has_token);
2658 ConcurrentMarkSweepThread::clear_CMS_flag(
2659 ConcurrentMarkSweepThread::CMS_cms_wants_token);
2660 }
2661 if (TraceCMSState) {
2662 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " continuing at CMS state %d",
2663 Thread::current(), _collectorState);
2664 }
2665 return res;
2666 }
2668 // Because of the need to lock the free lists and other structures in
2669 // the collector, common to all the generations that the collector is
2670 // collecting, we need the gc_prologues of individual CMS generations
2671 // to delegate to their collector. It may have been simpler had the
2672 // current infrastructure allowed one to call a prologue on a
2673 // collector. In the absence of that we have the generation's
2674 // prologue delegate to the collector, which delegates back
2675 // some "local" work to a worker method in the individual generations
2676 // that it's responsible for collecting, while itself doing any
2677 // work common to all generations it's responsible for. A similar
2678 // comment applies to the gc_epilogue()'s.
2679 // The role of the variable _between_prologue_and_epilogue is to
2680 // enforce the invocation protocol.
2681 void CMSCollector::gc_prologue(bool full) {
2682 // Call gc_prologue_work() for the CMSGen
2683 // we are responsible for.
2685 // The following locking discipline assumes that we are only called
2686 // when the world is stopped.
2687 assert(SafepointSynchronize::is_at_safepoint(), "world is stopped assumption");
2689 // The CMSCollector prologue must call the gc_prologues for the
2690 // "generations" that it's responsible
2691 // for.
2693 assert( Thread::current()->is_VM_thread()
2694 || ( CMSScavengeBeforeRemark
2695 && Thread::current()->is_ConcurrentGC_thread()),
2696 "Incorrect thread type for prologue execution");
2698 if (_between_prologue_and_epilogue) {
2699 // We have already been invoked; this is a gc_prologue delegation
2700 // from yet another CMS generation that we are responsible for, just
2701 // ignore it since all relevant work has already been done.
2702 return;
2703 }
2705 // set a bit saying prologue has been called; cleared in epilogue
2706 _between_prologue_and_epilogue = true;
2707 // Claim locks for common data structures, then call gc_prologue_work()
2708 // for each CMSGen.
2710 getFreelistLocks(); // gets free list locks on constituent spaces
2711 bitMapLock()->lock_without_safepoint_check();
2713 // Should call gc_prologue_work() for all cms gens we are responsible for
2714 bool duringMarking = _collectorState >= Marking
2715 && _collectorState < Sweeping;
2717 // The young collections clear the modified oops state, which tells if
2718 // there are any modified oops in the class. The remark phase also needs
2719 // that information. Tell the young collection to save the union of all
2720 // modified klasses.
2721 if (duringMarking) {
2722 _ct->klass_rem_set()->set_accumulate_modified_oops(true);
2723 }
2725 bool registerClosure = duringMarking;
2727 ModUnionClosure* muc = CollectedHeap::use_parallel_gc_threads() ?
2728 &_modUnionClosurePar
2729 : &_modUnionClosure;
2730 _cmsGen->gc_prologue_work(full, registerClosure, muc);
2732 if (!full) {
2733 stats().record_gc0_begin();
2734 }
2735 }
2737 void ConcurrentMarkSweepGeneration::gc_prologue(bool full) {
2739 _capacity_at_prologue = capacity();
2740 _used_at_prologue = used();
2742 // Delegate to CMScollector which knows how to coordinate between
2743 // this and any other CMS generations that it is responsible for
2744 // collecting.
2745 collector()->gc_prologue(full);
2746 }
2748 // This is a "private" interface for use by this generation's CMSCollector.
2749 // Not to be called directly by any other entity (for instance,
2750 // GenCollectedHeap, which calls the "public" gc_prologue method above).
2751 void ConcurrentMarkSweepGeneration::gc_prologue_work(bool full,
2752 bool registerClosure, ModUnionClosure* modUnionClosure) {
2753 assert(!incremental_collection_failed(), "Shouldn't be set yet");
2754 assert(cmsSpace()->preconsumptionDirtyCardClosure() == NULL,
2755 "Should be NULL");
2756 if (registerClosure) {
2757 cmsSpace()->setPreconsumptionDirtyCardClosure(modUnionClosure);
2758 }
2759 cmsSpace()->gc_prologue();
2760 // Clear stat counters
2761 NOT_PRODUCT(
2762 assert(_numObjectsPromoted == 0, "check");
2763 assert(_numWordsPromoted == 0, "check");
2764 if (Verbose && PrintGC) {
2765 gclog_or_tty->print("Allocated "SIZE_FORMAT" objects, "
2766 SIZE_FORMAT" bytes concurrently",
2767 _numObjectsAllocated, _numWordsAllocated*sizeof(HeapWord));
2768 }
2769 _numObjectsAllocated = 0;
2770 _numWordsAllocated = 0;
2771 )
2772 }
2774 void CMSCollector::gc_epilogue(bool full) {
2775 // The following locking discipline assumes that we are only called
2776 // when the world is stopped.
2777 assert(SafepointSynchronize::is_at_safepoint(),
2778 "world is stopped assumption");
2780 // Currently the CMS epilogue (see CompactibleFreeListSpace) merely checks
2781 // if linear allocation blocks need to be appropriately marked to allow
2782 // the blocks to be parsable. We also check here whether we need to nudge the
2783 // CMS collector thread to start a new cycle (if it's not already active).
2784 assert( Thread::current()->is_VM_thread()
2785 || ( CMSScavengeBeforeRemark
2786 && Thread::current()->is_ConcurrentGC_thread()),
2787 "Incorrect thread type for epilogue execution");
2789 if (!_between_prologue_and_epilogue) {
2790 // We have already been invoked; this is a gc_epilogue delegation
2791 // from yet another CMS generation that we are responsible for, just
2792 // ignore it since all relevant work has already been done.
2793 return;
2794 }
2795 assert(haveFreelistLocks(), "must have freelist locks");
2796 assert_lock_strong(bitMapLock());
2798 _ct->klass_rem_set()->set_accumulate_modified_oops(false);
2800 _cmsGen->gc_epilogue_work(full);
2802 if (_collectorState == AbortablePreclean || _collectorState == Precleaning) {
2803 // in case sampling was not already enabled, enable it
2804 _start_sampling = true;
2805 }
2806 // reset _eden_chunk_array so sampling starts afresh
2807 _eden_chunk_index = 0;
2809 size_t cms_used = _cmsGen->cmsSpace()->used();
2811 // update performance counters - this uses a special version of
2812 // update_counters() that allows the utilization to be passed as a
2813 // parameter, avoiding multiple calls to used().
2814 //
2815 _cmsGen->update_counters(cms_used);
2817 if (CMSIncrementalMode) {
2818 icms_update_allocation_limits();
2819 }
2821 bitMapLock()->unlock();
2822 releaseFreelistLocks();
2824 if (!CleanChunkPoolAsync) {
2825 Chunk::clean_chunk_pool();
2826 }
2828 set_did_compact(false);
2829 _between_prologue_and_epilogue = false; // ready for next cycle
2830 }
2832 void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) {
2833 collector()->gc_epilogue(full);
2835 // Also reset promotion tracking in par gc thread states.
2836 if (CollectedHeap::use_parallel_gc_threads()) {
2837 for (uint i = 0; i < ParallelGCThreads; i++) {
2838 _par_gc_thread_states[i]->promo.stopTrackingPromotions(i);
2839 }
2840 }
2841 }
2843 void ConcurrentMarkSweepGeneration::gc_epilogue_work(bool full) {
2844 assert(!incremental_collection_failed(), "Should have been cleared");
2845 cmsSpace()->setPreconsumptionDirtyCardClosure(NULL);
2846 cmsSpace()->gc_epilogue();
2847 // Print stat counters
2848 NOT_PRODUCT(
2849 assert(_numObjectsAllocated == 0, "check");
2850 assert(_numWordsAllocated == 0, "check");
2851 if (Verbose && PrintGC) {
2852 gclog_or_tty->print("Promoted "SIZE_FORMAT" objects, "
2853 SIZE_FORMAT" bytes",
2854 _numObjectsPromoted, _numWordsPromoted*sizeof(HeapWord));
2855 }
2856 _numObjectsPromoted = 0;
2857 _numWordsPromoted = 0;
2858 )
2860 if (PrintGC && Verbose) {
2862 // The call down the chain in contiguous_available needs the freelistLock
2862 // so print this out before releasing the freeListLock.
2863 gclog_or_tty->print(" Contiguous available "SIZE_FORMAT" bytes ",
2864 contiguous_available());
2865 }
2866 }
2868 #ifndef PRODUCT
2869 bool CMSCollector::have_cms_token() {
2870 Thread* thr = Thread::current();
2871 if (thr->is_VM_thread()) {
2872 return ConcurrentMarkSweepThread::vm_thread_has_cms_token();
2873 } else if (thr->is_ConcurrentGC_thread()) {
2874 return ConcurrentMarkSweepThread::cms_thread_has_cms_token();
2875 } else if (thr->is_GC_task_thread()) {
2876 return ConcurrentMarkSweepThread::vm_thread_has_cms_token() &&
2877 ParGCRareEvent_lock->owned_by_self();
2878 }
2879 return false;
2880 }
2881 #endif
2883 // Check reachability of the given heap address in CMS generation,
2884 // treating all other generations as roots.
2885 bool CMSCollector::is_cms_reachable(HeapWord* addr) {
2886 // We could "guarantee" below, rather than assert, but I'll
2887 // leave these as "asserts" so that an adventurous debugger
2888 // could try this in the product build provided some subset of
2889 // the conditions were met, provided they were interested in the
2890 // results and knew that the computation below wouldn't interfere
2891 // with other concurrent computations mutating the structures
2892 // being read or written.
2893 assert(SafepointSynchronize::is_at_safepoint(),
2894 "Else mutations in object graph will make answer suspect");
2895 assert(have_cms_token(), "Should hold cms token");
2896 assert(haveFreelistLocks(), "must hold free list locks");
2897 assert_lock_strong(bitMapLock());
2899 // Clear the marking bit map array before starting, but, just
2900 // for kicks, first report if the given address is already marked
2901 gclog_or_tty->print_cr("Start: Address 0x%x is%s marked", addr,
2902 _markBitMap.isMarked(addr) ? "" : " not");
2904 if (verify_after_remark()) {
2905 MutexLockerEx x(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2906 bool result = verification_mark_bm()->isMarked(addr);
2907 gclog_or_tty->print_cr("TransitiveMark: Address 0x%x %s marked", addr,
2908 result ? "IS" : "is NOT");
2909 return result;
2910 } else {
2911 gclog_or_tty->print_cr("Could not compute result");
2912 return false;
2913 }
2914 }
2917 void
2918 CMSCollector::print_on_error(outputStream* st) {
2919 CMSCollector* collector = ConcurrentMarkSweepGeneration::_collector;
2920 if (collector != NULL) {
2921 CMSBitMap* bitmap = &collector->_markBitMap;
2922 st->print_cr("Marking Bits: (CMSBitMap*) " PTR_FORMAT, bitmap);
2923 bitmap->print_on_error(st, " Bits: ");
2925 st->cr();
2927 CMSBitMap* mut_bitmap = &collector->_modUnionTable;
2928 st->print_cr("Mod Union Table: (CMSBitMap*) " PTR_FORMAT, mut_bitmap);
2929 mut_bitmap->print_on_error(st, " Bits: ");
2930 }
2931 }
2933 ////////////////////////////////////////////////////////
2934 // CMS Verification Support
2935 ////////////////////////////////////////////////////////
2936 // Following the remark phase, the following invariant
2937 // should hold -- each object in the CMS heap which is
2938 // marked in markBitMap() should be marked in the verification_mark_bm().
2940 class VerifyMarkedClosure: public BitMapClosure {
2941 CMSBitMap* _marks;
2942 bool _failed;
2944 public:
2945 VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {}
2947 bool do_bit(size_t offset) {
2948 HeapWord* addr = _marks->offsetToHeapWord(offset);
2949 if (!_marks->isMarked(addr)) {
2950 oop(addr)->print_on(gclog_or_tty);
2951 gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr);
2952 _failed = true;
2953 }
2954 return true;
2955 }
2957 bool failed() { return _failed; }
2958 };
2960 bool CMSCollector::verify_after_remark(bool silent) {
2961 if (!silent) gclog_or_tty->print(" [Verifying CMS Marking... ");
2962 MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2963 static bool init = false;
2965 assert(SafepointSynchronize::is_at_safepoint(),
2966 "Else mutations in object graph will make answer suspect");
2967 assert(have_cms_token(),
2968 "Else there may be mutual interference in use of "
2969 " verification data structures");
2970 assert(_collectorState > Marking && _collectorState <= Sweeping,
2971 "Else marking info checked here may be obsolete");
2972 assert(haveFreelistLocks(), "must hold free list locks");
2973 assert_lock_strong(bitMapLock());
2976 // Allocate marking bit map if not already allocated
2977 if (!init) { // first time
2978 if (!verification_mark_bm()->allocate(_span)) {
2979 return false;
2980 }
2981 init = true;
2982 }
2984 assert(verification_mark_stack()->isEmpty(), "Should be empty");
2986 // Turn off refs discovery -- so we will be tracing through refs.
2987 // This is as intended, because by this time
2988 // GC must already have cleared any refs that need to be cleared,
2989 // and traced those that need to be marked; moreover,
2990 // the marking done here is not going to interfere in any
2991 // way with the marking information used by GC.
2992 NoRefDiscovery no_discovery(ref_processor());
2994 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
2996 // Clear any marks from a previous round
2997 verification_mark_bm()->clear_all();
2998 assert(verification_mark_stack()->isEmpty(), "markStack should be empty");
2999 verify_work_stacks_empty();
3001 GenCollectedHeap* gch = GenCollectedHeap::heap();
3002 gch->ensure_parsability(false); // fill TLABs, but no need to retire them
3003 // Update the saved marks which may affect the root scans.
3004 gch->save_marks();
3006 if (CMSRemarkVerifyVariant == 1) {
3007 // In this first variant of verification, we complete
3008 // all marking, then check if the new marks-vector is
3009 // a subset of the CMS marks-vector.
3010 verify_after_remark_work_1();
3011 } else if (CMSRemarkVerifyVariant == 2) {
3012 // In this second variant of verification, we flag an error
3013 // (i.e. an object reachable in the new marks-vector not reachable
3014 // in the CMS marks-vector) immediately, also indicating the
3015 // identity of an object (A) that references the unmarked object (B) --
3016 // presumably, a mutation to A failed to be picked up by preclean/remark?
3017 verify_after_remark_work_2();
3018 } else {
3019 warning("Unrecognized value %d for CMSRemarkVerifyVariant",
3020 CMSRemarkVerifyVariant);
3021 }
3022 if (!silent) gclog_or_tty->print(" done] ");
3023 return true;
3024 }
3026 void CMSCollector::verify_after_remark_work_1() {
3027 ResourceMark rm;
3028 HandleMark hm;
3029 GenCollectedHeap* gch = GenCollectedHeap::heap();
3031 // Get a clear set of claim bits for the strong roots processing to work with.
3032 ClassLoaderDataGraph::clear_claimed_marks();
3034 // Mark from roots one level into CMS
3035 MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
3036 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3038 gch->gen_process_strong_roots(_cmsGen->level(),
3039 true, // younger gens are roots
3040 true, // activate StrongRootsScope
3041 false, // not scavenging
3042 SharedHeap::ScanningOption(roots_scanning_options()),
3043 &notOlder,
3044 true, // walk code active on stacks
3045 NULL,
3046 NULL); // SSS: Provide correct closure
3048 // Now mark from the roots
3049 MarkFromRootsClosure markFromRootsClosure(this, _span,
3050 verification_mark_bm(), verification_mark_stack(),
3051 false /* don't yield */, true /* verifying */);
3052 assert(_restart_addr == NULL, "Expected pre-condition");
3053 verification_mark_bm()->iterate(&markFromRootsClosure);
3054 while (_restart_addr != NULL) {
3055 // Deal with stack overflow: by restarting at the indicated
3056 // address.
3057 HeapWord* ra = _restart_addr;
3058 markFromRootsClosure.reset(ra);
3059 _restart_addr = NULL;
3060 verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
3061 }
3062 assert(verification_mark_stack()->isEmpty(), "Should have been drained");
3063 verify_work_stacks_empty();
3065 // Marking completed -- now verify that each bit marked in
3066 // verification_mark_bm() is also marked in markBitMap(); flag all
3067 // errors by printing corresponding objects.
3068 VerifyMarkedClosure vcl(markBitMap());
3069 verification_mark_bm()->iterate(&vcl);
3070 if (vcl.failed()) {
3071 gclog_or_tty->print("Verification failed");
3072 Universe::heap()->print_on(gclog_or_tty);
3073 fatal("CMS: failed marking verification after remark");
3074 }
3075 }
3077 class VerifyKlassOopsKlassClosure : public KlassClosure {
3078 class VerifyKlassOopsClosure : public OopClosure {
3079 CMSBitMap* _bitmap;
3080 public:
3081 VerifyKlassOopsClosure(CMSBitMap* bitmap) : _bitmap(bitmap) { }
3082 void do_oop(oop* p) { guarantee(*p == NULL || _bitmap->isMarked((HeapWord*) *p), "Should be marked"); }
3083 void do_oop(narrowOop* p) { ShouldNotReachHere(); }
3084 } _oop_closure;
3085 public:
3086 VerifyKlassOopsKlassClosure(CMSBitMap* bitmap) : _oop_closure(bitmap) {}
3087 void do_klass(Klass* k) {
3088 k->oops_do(&_oop_closure);
3089 }
3090 };
3092 void CMSCollector::verify_after_remark_work_2() {
3093 ResourceMark rm;
3094 HandleMark hm;
3095 GenCollectedHeap* gch = GenCollectedHeap::heap();
3097 // Get a clear set of claim bits for the strong roots processing to work with.
3098 ClassLoaderDataGraph::clear_claimed_marks();
3100 // Mark from roots one level into CMS
3101 MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
3102 markBitMap());
3103 CMKlassClosure klass_closure(&notOlder);
3105 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3106 gch->gen_process_strong_roots(_cmsGen->level(),
3107 true, // younger gens are roots
3108 true, // activate StrongRootsScope
3109 false, // not scavenging
3110 SharedHeap::ScanningOption(roots_scanning_options()),
3111 &notOlder,
3112 true, // walk code active on stacks
3113 NULL,
3114 &klass_closure);
3116 // Now mark from the roots
3117 MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
3118 verification_mark_bm(), markBitMap(), verification_mark_stack());
3119 assert(_restart_addr == NULL, "Expected pre-condition");
3120 verification_mark_bm()->iterate(&markFromRootsClosure);
3121 while (_restart_addr != NULL) {
3122 // Deal with stack overflow: by restarting at the indicated
3123 // address.
3124 HeapWord* ra = _restart_addr;
3125 markFromRootsClosure.reset(ra);
3126 _restart_addr = NULL;
3127 verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
3128 }
3129 assert(verification_mark_stack()->isEmpty(), "Should have been drained");
3130 verify_work_stacks_empty();
3132 VerifyKlassOopsKlassClosure verify_klass_oops(verification_mark_bm());
3133 ClassLoaderDataGraph::classes_do(&verify_klass_oops);
3135 // Marking completed -- now verify that each bit marked in
3136 // verification_mark_bm() is also marked in markBitMap(); flag all
3137 // errors by printing corresponding objects.
3138 VerifyMarkedClosure vcl(markBitMap());
3139 verification_mark_bm()->iterate(&vcl);
3140 assert(!vcl.failed(), "Else verification above should not have succeeded");
3141 }
3143 void ConcurrentMarkSweepGeneration::save_marks() {
3144 // delegate to CMS space
3145 cmsSpace()->save_marks();
3146 for (uint i = 0; i < ParallelGCThreads; i++) {
3147 _par_gc_thread_states[i]->promo.startTrackingPromotions();
3148 }
3149 }
3151 bool ConcurrentMarkSweepGeneration::no_allocs_since_save_marks() {
3152 return cmsSpace()->no_allocs_since_save_marks();
3153 }
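// The macro below expands, once per (closure type, nv_suffix) pair supplied by
// ALL_SINCE_SAVE_MARKS_CLOSURES, into a definition of
// oop_since_save_marks_iterate##nv_suffix: it points the closure at this
// generation, delegates the iteration to the CMS space, resets the closure,
// and finally re-records the save-marks boundary via save_marks().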
3155 #define CMS_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
3156 \
3157 void ConcurrentMarkSweepGeneration:: \
3158 oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
3159 cl->set_generation(this); \
3160 cmsSpace()->oop_since_save_marks_iterate##nv_suffix(cl); \
3161 cl->reset_generation(); \
3162 save_marks(); \
3163 }
3165 ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DEFN)
3167 void
3168 ConcurrentMarkSweepGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
3169 cl->set_generation(this);
3170 younger_refs_in_space_iterate(_cmsSpace, cl);
3171 cl->reset_generation();
3172 }
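// The iteration methods below may be called either with the freelistLock
// already held by the current thread or not; in the latter case the lock is
// taken here (without a safepoint check) before delegating to Generation.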
3174 void
3175 ConcurrentMarkSweepGeneration::oop_iterate(MemRegion mr, ExtendedOopClosure* cl) {
3176 if (freelistLock()->owned_by_self()) {
3177 Generation::oop_iterate(mr, cl);
3178 } else {
3179 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3180 Generation::oop_iterate(mr, cl);
3181 }
3182 }
3184 void
3185 ConcurrentMarkSweepGeneration::oop_iterate(ExtendedOopClosure* cl) {
3186 if (freelistLock()->owned_by_self()) {
3187 Generation::oop_iterate(cl);
3188 } else {
3189 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3190 Generation::oop_iterate(cl);
3191 }
3192 }
3194 void
3195 ConcurrentMarkSweepGeneration::object_iterate(ObjectClosure* cl) {
3196 if (freelistLock()->owned_by_self()) {
3197 Generation::object_iterate(cl);
3198 } else {
3199 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3200 Generation::object_iterate(cl);
3201 }
3202 }
3204 void
3205 ConcurrentMarkSweepGeneration::safe_object_iterate(ObjectClosure* cl) {
3206 if (freelistLock()->owned_by_self()) {
3207 Generation::safe_object_iterate(cl);
3208 } else {
3209 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3210 Generation::safe_object_iterate(cl);
3211 }
3212 }
3214 void
3215 ConcurrentMarkSweepGeneration::post_compact() {
3216 }
3218 void
3219 ConcurrentMarkSweepGeneration::prepare_for_verify() {
3220 // Fix the linear allocation blocks to look like free blocks.
3222 // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
3223 // are not called when the heap is verified during universe initialization and
3224 // at vm shutdown.
3225 if (freelistLock()->owned_by_self()) {
3226 cmsSpace()->prepare_for_verify();
3227 } else {
3228 MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
3229 cmsSpace()->prepare_for_verify();
3230 }
3231 }
3233 void
3234 ConcurrentMarkSweepGeneration::verify() {
3235 // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
3236 // are not called when the heap is verified during universe initialization and
3237 // at vm shutdown.
3238 if (freelistLock()->owned_by_self()) {
3239 cmsSpace()->verify();
3240 } else {
3241 MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
3242 cmsSpace()->verify();
3243 }
3244 }
3246 void CMSCollector::verify() {
3247 _cmsGen->verify();
3248 }
3250 #ifndef PRODUCT
3251 bool CMSCollector::overflow_list_is_empty() const {
3252 assert(_num_par_pushes >= 0, "Inconsistency");
3253 if (_overflow_list == NULL) {
3254 assert(_num_par_pushes == 0, "Inconsistency");
3255 }
3256 return _overflow_list == NULL;
3257 }
3259 // The methods verify_work_stacks_empty() and verify_overflow_empty()
3260 // merely consolidate assertion checks that appear to occur together frequently.
3261 void CMSCollector::verify_work_stacks_empty() const {
3262 assert(_markStack.isEmpty(), "Marking stack should be empty");
3263 assert(overflow_list_is_empty(), "Overflow list should be empty");
3264 }
3266 void CMSCollector::verify_overflow_empty() const {
3267 assert(overflow_list_is_empty(), "Overflow list should be empty");
3268 assert(no_preserved_marks(), "No preserved marks");
3269 }
3270 #endif // PRODUCT
3272 // Decide if we want to enable class unloading as part of the
3273 // ensuing concurrent GC cycle. We will collect and
3274 // unload classes if it's the case that:
3275 // (1) an explicit gc request has been made and the flag
3276 // ExplicitGCInvokesConcurrentAndUnloadsClasses is set, OR
3277 // (2) (a) class unloading is enabled at the command line, and
3278 // (b) old gen is getting really full
3279 // NOTE: Provided there is no change in the state of the heap between
3280 // calls to this method, it should have idempotent results. Moreover,
3281 // its results should be monotonically increasing (i.e. going from 0 to 1,
3282 // but not 1 to 0) between successive calls between which the heap was
3283 // not collected. For the implementation below, it must thus rely on
3284 // the property that concurrent_cycles_since_last_unload()
3285 // will not decrease unless a collection cycle happened and that
3286 // _cmsGen->is_too_full() is
3287 // itself also monotonic in that sense. See check_monotonicity()
3288 // below.
3289 void CMSCollector::update_should_unload_classes() {
3290 _should_unload_classes = false;
3291 // Condition 1 above
3292 if (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) {
3293 _should_unload_classes = true;
3294 } else if (CMSClassUnloadingEnabled) { // Condition 2.a above
3295 // Disjuncts 2.b.(i,ii,iii) above
3296 _should_unload_classes = (concurrent_cycles_since_last_unload() >=
3297 CMSClassUnloadingMaxInterval)
3298 || _cmsGen->is_too_full();
3299 }
3300 }
3302 bool ConcurrentMarkSweepGeneration::is_too_full() const {
3303 bool res = should_concurrent_collect();
3304 res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0);
3305 return res;
3306 }
3308 void CMSCollector::setup_cms_unloading_and_verification_state() {
3309 const bool should_verify = VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
3310 || VerifyBeforeExit;
3311 const int rso = SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;
3313 // We set the proper root for this CMS cycle here.
3314 if (should_unload_classes()) { // Should unload classes this cycle
3315 remove_root_scanning_option(SharedHeap::SO_AllClasses);
3316 add_root_scanning_option(SharedHeap::SO_SystemClasses);
3317 remove_root_scanning_option(rso); // Shrink the root set appropriately
3318 set_verifying(should_verify); // Set verification state for this cycle
3319 return; // Nothing else needs to be done at this time
3320 }
3322 // Not unloading classes this cycle
3323 assert(!should_unload_classes(), "Inconsistency!");
3324 remove_root_scanning_option(SharedHeap::SO_SystemClasses);
3325 add_root_scanning_option(SharedHeap::SO_AllClasses);
3327 if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
3328 // Include symbols, strings and code cache elements to prevent their resurrection.
3329 add_root_scanning_option(rso);
3330 set_verifying(true);
3331 } else if (verifying() && !should_verify) {
3332 // We were verifying, but some verification flags got disabled.
3333 set_verifying(false);
3334 // Exclude symbols, strings and code cache elements from root scanning to
3335 // reduce IM and RM pauses.
3336 remove_root_scanning_option(rso);
3337 }
3338 }
3341 #ifndef PRODUCT
3342 HeapWord* CMSCollector::block_start(const void* p) const {
3343 const HeapWord* addr = (HeapWord*)p;
3344 if (_span.contains(p)) {
3345 if (_cmsGen->cmsSpace()->is_in_reserved(addr)) {
3346 return _cmsGen->cmsSpace()->block_start(p);
3347 }
3348 }
3349 return NULL;
3350 }
3351 #endif
3353 HeapWord*
3354 ConcurrentMarkSweepGeneration::expand_and_allocate(size_t word_size,
3355 bool tlab,
3356 bool parallel) {
3357 CMSSynchronousYieldRequest yr;
3358 assert(!tlab, "Can't deal with TLAB allocation");
3359 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3360 expand(word_size*HeapWordSize, MinHeapDeltaBytes,
3361 CMSExpansionCause::_satisfy_allocation);
3362 if (GCExpandToAllocateDelayMillis > 0) {
3363 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3364 }
3365 return have_lock_and_allocate(word_size, tlab);
3366 }
3368 // YSR: All of this generation expansion/shrinking stuff is an exact copy of
3369 // OneContigSpaceCardGeneration, which makes me wonder if we should move this
3370 // to CardGeneration and share it...
3371 bool ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes) {
3372 return CardGeneration::expand(bytes, expand_bytes);
3373 }
3375 void ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes,
3376 CMSExpansionCause::Cause cause)
3377 {
3379 bool success = expand(bytes, expand_bytes);
3381 // remember why we expanded; this information is used
3382 // by shouldConcurrentCollect() when making decisions on whether to start
3383 // a new CMS cycle.
3384 if (success) {
3385 set_expansion_cause(cause);
3386 if (PrintGCDetails && Verbose) {
3387 gclog_or_tty->print_cr("Expanded CMS gen for %s",
3388 CMSExpansionCause::to_string(cause));
3389 }
3390 }
3391 }
3393 HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) {
3394 HeapWord* res = NULL;
3395 MutexLocker x(ParGCRareEvent_lock);
3396 while (true) {
3397 // Expansion by some other thread might make alloc OK now:
3398 res = ps->lab.alloc(word_sz);
3399 if (res != NULL) return res;
3400 // If there's not enough expansion space available, give up.
3401 if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) {
3402 return NULL;
3403 }
3404 // Otherwise, we try expansion.
3405 expand(word_sz*HeapWordSize, MinHeapDeltaBytes,
3406 CMSExpansionCause::_allocate_par_lab);
3407 // Now go around the loop and try alloc again;
3408 // A competing par_promote might beat us to the expansion space,
3409 // so we may go around the loop again if promotion fails again.
3410 if (GCExpandToAllocateDelayMillis > 0) {
3411 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3412 }
3413 }
3414 }
3417 bool ConcurrentMarkSweepGeneration::expand_and_ensure_spooling_space(
3418 PromotionInfo* promo) {
3419 MutexLocker x(ParGCRareEvent_lock);
3420 size_t refill_size_bytes = promo->refillSize() * HeapWordSize;
3421 while (true) {
3422 // Expansion by some other thread might make alloc OK now:
3423 if (promo->ensure_spooling_space()) {
3424 assert(promo->has_spooling_space(),
3425 "Post-condition of successful ensure_spooling_space()");
3426 return true;
3427 }
3428 // If there's not enough expansion space available, give up.
3429 if (_virtual_space.uncommitted_size() < refill_size_bytes) {
3430 return false;
3431 }
3432 // Otherwise, we try expansion.
3433 expand(refill_size_bytes, MinHeapDeltaBytes,
3434 CMSExpansionCause::_allocate_par_spooling_space);
3435 // Now go around the loop and try alloc again;
3436 // A competing allocation might beat us to the expansion space,
3437 // so we may go around the loop again if allocation fails again.
3438 if (GCExpandToAllocateDelayMillis > 0) {
3439 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3440 }
3441 }
3442 }
3445 void ConcurrentMarkSweepGeneration::shrink_by(size_t bytes) {
3446 assert_locked_or_safepoint(ExpandHeap_lock);
3447 // Shrink committed space
3448 _virtual_space.shrink_by(bytes);
3449 // Shrink space; this also shrinks the space's BOT
3450 _cmsSpace->set_end((HeapWord*) _virtual_space.high());
3451 size_t new_word_size = heap_word_size(_cmsSpace->capacity());
3452 // Shrink the shared block offset array
3453 _bts->resize(new_word_size);
3454 MemRegion mr(_cmsSpace->bottom(), new_word_size);
3455 // Shrink the card table
3456 Universe::heap()->barrier_set()->resize_covered_region(mr);
3458 if (Verbose && PrintGC) {
3459 size_t new_mem_size = _virtual_space.committed_size();
3460 size_t old_mem_size = new_mem_size + bytes;
3461 gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
3462 name(), old_mem_size/K, new_mem_size/K);
3463 }
3464 }
3466 void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
3467 assert_locked_or_safepoint(Heap_lock);
3468 size_t size = ReservedSpace::page_align_size_down(bytes);
3469 // Only shrink if a compaction was done so that all the free space
3470 // in the generation is in a contiguous block at the end.
3471 if (size > 0 && did_compact()) {
3472 shrink_by(size);
3473 }
3474 }
3476 bool ConcurrentMarkSweepGeneration::grow_by(size_t bytes) {
3477 assert_locked_or_safepoint(Heap_lock);
3478 bool result = _virtual_space.expand_by(bytes);
3479 if (result) {
3480 size_t new_word_size =
3481 heap_word_size(_virtual_space.committed_size());
3482 MemRegion mr(_cmsSpace->bottom(), new_word_size);
3483 _bts->resize(new_word_size); // resize the block offset shared array
3484 Universe::heap()->barrier_set()->resize_covered_region(mr);
3485 // Hmmmm... why doesn't CFLS::set_end verify locking?
3486 // This is quite ugly; FIX ME XXX
3487 _cmsSpace->assert_locked(freelistLock());
3488 _cmsSpace->set_end((HeapWord*)_virtual_space.high());
3490 // update the space and generation capacity counters
3491 if (UsePerfData) {
3492 _space_counters->update_capacity();
3493 _gen_counters->update_all();
3494 }
3496 if (Verbose && PrintGC) {
3497 size_t new_mem_size = _virtual_space.committed_size();
3498 size_t old_mem_size = new_mem_size - bytes;
3499 gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
3500 name(), old_mem_size/K, bytes/K, new_mem_size/K);
3501 }
3502 }
3503 return result;
3504 }
3506 bool ConcurrentMarkSweepGeneration::grow_to_reserved() {
3507 assert_locked_or_safepoint(Heap_lock);
3508 bool success = true;
3509 const size_t remaining_bytes = _virtual_space.uncommitted_size();
3510 if (remaining_bytes > 0) {
3511 success = grow_by(remaining_bytes);
3512 DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
3513 }
3514 return success;
3515 }
3517 void ConcurrentMarkSweepGeneration::shrink_free_list_by(size_t bytes) {
3518 assert_locked_or_safepoint(Heap_lock);
3519 assert_lock_strong(freelistLock());
3520 if (PrintGCDetails && Verbose) {
3521 warning("Shrinking of CMS not yet implemented");
3522 }
3523 return;
3524 }
3527 // Simple ctor/dtor wrapper for accounting & timer chores around concurrent
3528 // phases.
3529 class CMSPhaseAccounting: public StackObj {
3530 public:
3531 CMSPhaseAccounting(CMSCollector *collector,
3532 const char *phase,
3533 const GCId gc_id,
3534 bool print_cr = true);
3535 ~CMSPhaseAccounting();
3537 private:
3538 CMSCollector *_collector;
3539 const char *_phase;
3540 elapsedTimer _wallclock;
3541 bool _print_cr;
3542 const GCId _gc_id;
3544 public:
3545 // Not MT-safe; so do not pass around these StackObj's
3546 // where they may be accessed by other threads.
3547 jlong wallclock_millis() {
3548 assert(_wallclock.is_active(), "Wall clock should not stop");
3549 _wallclock.stop(); // to record time
3550 jlong ret = _wallclock.milliseconds();
3551 _wallclock.start(); // restart
3552 return ret;
3553 }
3554 };
3556 CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
3557 const char *phase,
3558 const GCId gc_id,
3559 bool print_cr) :
3560 _collector(collector), _phase(phase), _print_cr(print_cr), _gc_id(gc_id) {
3562 if (PrintCMSStatistics != 0) {
3563 _collector->resetYields();
3564 }
3565 if (PrintGCDetails) {
3566 gclog_or_tty->gclog_stamp(_gc_id);
3567 gclog_or_tty->print_cr("[%s-concurrent-%s-start]",
3568 _collector->cmsGen()->short_name(), _phase);
3569 }
3570 _collector->resetTimer();
3571 _wallclock.start();
3572 _collector->startTimer();
3573 }
3575 CMSPhaseAccounting::~CMSPhaseAccounting() {
3576 assert(_wallclock.is_active(), "Wall clock should not have stopped");
3577 _collector->stopTimer();
3578 _wallclock.stop();
3579 if (PrintGCDetails) {
3580 gclog_or_tty->gclog_stamp(_gc_id);
3581 gclog_or_tty->print("[%s-concurrent-%s: %3.3f/%3.3f secs]",
3582 _collector->cmsGen()->short_name(),
3583 _phase, _collector->timerValue(), _wallclock.seconds());
3584 if (_print_cr) {
3585 gclog_or_tty->cr();
3586 }
3587 if (PrintCMSStatistics != 0) {
3588 gclog_or_tty->print_cr(" (CMS-concurrent-%s yielded %d times)", _phase,
3589 _collector->yields());
3590 }
3591 }
3592 }
3594 // CMS work
3596 // The common parts of CMSParInitialMarkTask and CMSParRemarkTask.
3597 class CMSParMarkTask : public AbstractGangTask {
3598 protected:
3599 CMSCollector* _collector;
3600 int _n_workers;
3601 CMSParMarkTask(const char* name, CMSCollector* collector, int n_workers) :
3602 AbstractGangTask(name),
3603 _collector(collector),
3604 _n_workers(n_workers) {}
3605 // Work method in support of parallel rescan ... of young gen spaces
3606 void do_young_space_rescan(uint worker_id, OopsInGenClosure* cl,
3607 ContiguousSpace* space,
3608 HeapWord** chunk_array, size_t chunk_top);
3609 void work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl);
3610 };
3612 // Parallel initial mark task
3613 class CMSParInitialMarkTask: public CMSParMarkTask {
3614 public:
3615 CMSParInitialMarkTask(CMSCollector* collector, int n_workers) :
3616 CMSParMarkTask("Scan roots and young gen for initial mark in parallel",
3617 collector, n_workers) {}
3618 void work(uint worker_id);
3619 };
3621 // Checkpoint the roots into this generation from outside
3622 // this generation. [Note this initial checkpoint need only
3623 // be approximate -- we'll do a catch up phase subsequently.]
3624 void CMSCollector::checkpointRootsInitial(bool asynch) {
3625 assert(_collectorState == InitialMarking, "Wrong collector state");
3626 check_correct_thread_executing();
3627 TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
3629 save_heap_summary();
3630 report_heap_summary(GCWhen::BeforeGC);
3632 ReferenceProcessor* rp = ref_processor();
3633 SpecializationStats::clear();
3634 assert(_restart_addr == NULL, "Control point invariant");
3635 if (asynch) {
3636 // acquire locks for subsequent manipulations
3637 MutexLockerEx x(bitMapLock(),
3638 Mutex::_no_safepoint_check_flag);
3639 checkpointRootsInitialWork(asynch);
3640 // enable ("weak") refs discovery
3641 rp->enable_discovery(true /*verify_disabled*/, true /*check_no_refs*/);
3642 _collectorState = Marking;
3643 } else {
3644 // (Weak) Refs discovery: this is controlled from genCollectedHeap::do_collection
3645 // which recognizes if we are a CMS generation, and doesn't try to turn on
3646 // discovery; verify that they aren't meddling.
3647 assert(!rp->discovery_is_atomic(),
3648 "incorrect setting of discovery predicate");
3649 assert(!rp->discovery_enabled(), "genCollectedHeap shouldn't control "
3650 "ref discovery for this generation kind");
3651 // already have locks
3652 checkpointRootsInitialWork(asynch);
3653 // now enable ("weak") refs discovery
3654 rp->enable_discovery(true /*verify_disabled*/, false /*verify_no_refs*/);
3655 _collectorState = Marking;
3656 }
3657 SpecializationStats::print();
3658 }
3660 void CMSCollector::checkpointRootsInitialWork(bool asynch) {
3661 assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
3662 assert(_collectorState == InitialMarking, "just checking");
3664 // If there has not been a GC[n-1] since last GC[n] cycle completed,
3665 // precede our marking with a collection of all
3666 // younger generations to keep floating garbage to a minimum.
3667 // XXX: we won't do this for now -- it's an optimization to be done later.
3669 // already have locks
3670 assert_lock_strong(bitMapLock());
3671 assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");
3673 // Setup the verification and class unloading state for this
3674 // CMS collection cycle.
3675 setup_cms_unloading_and_verification_state();
3677 NOT_PRODUCT(GCTraceTime t("\ncheckpointRootsInitialWork",
3678 PrintGCDetails && Verbose, true, _gc_timer_cm, _gc_tracer_cm->gc_id());)
3679 if (UseAdaptiveSizePolicy) {
3680 size_policy()->checkpoint_roots_initial_begin();
3681 }
3683 // Reset all the PLAB chunk arrays if necessary.
3684 if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
3685 reset_survivor_plab_arrays();
3686 }
3688 ResourceMark rm;
3689 HandleMark hm;
3691 FalseClosure falseClosure;
3692 // In the case of a synchronous collection, we will elide the
3693 // remark step, so it's important to catch all the nmethod oops
3694 // in this step.
3695 // The final 'true' flag to gen_process_strong_roots will ensure this.
3696 // If 'async' is true, we can relax the nmethod tracing.
3697 MarkRefsIntoClosure notOlder(_span, &_markBitMap);
3698 GenCollectedHeap* gch = GenCollectedHeap::heap();
3700 verify_work_stacks_empty();
3701 verify_overflow_empty();
3703 gch->ensure_parsability(false); // fill TLABs, but no need to retire them
3704 // Update the saved marks which may affect the root scans.
3705 gch->save_marks();
3707 // weak reference processing has not started yet.
3708 ref_processor()->set_enqueuing_is_done(false);
3710 // Need to remember all newly created CLDs,
3711 // so that we can guarantee that the remark finds them.
3712 ClassLoaderDataGraph::remember_new_clds(true);
3714 // Whenever a CLD is found, it will be claimed before proceeding to mark
3715 // the klasses. The claimed marks need to be cleared before marking starts.
3716 ClassLoaderDataGraph::clear_claimed_marks();
3718 if (CMSPrintEdenSurvivorChunks) {
3719 print_eden_and_survivor_chunk_arrays();
3720 }
3722 {
3723 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
3724 if (CMSParallelInitialMarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
3725 // The parallel version.
3726 FlexibleWorkGang* workers = gch->workers();
3727 assert(workers != NULL, "Need parallel worker threads.");
3728 int n_workers = workers->active_workers();
3729 CMSParInitialMarkTask tsk(this, n_workers);
3730 gch->set_par_threads(n_workers);
3731 initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
3732 if (n_workers > 1) {
3733 GenCollectedHeap::StrongRootsScope srs(gch);
3734 workers->run_task(&tsk);
3735 } else {
3736 GenCollectedHeap::StrongRootsScope srs(gch);
3737 tsk.work(0);
3738 }
3739 gch->set_par_threads(0);
3740 } else {
3741 // The serial version.
3742 CMKlassClosure klass_closure(&notOlder);
3743 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3744 gch->gen_process_strong_roots(_cmsGen->level(),
3745 true, // younger gens are roots
3746 true, // activate StrongRootsScope
3747 false, // not scavenging
3748 SharedHeap::ScanningOption(roots_scanning_options()),
3749 &notOlder,
3750 true, // walk all of code cache if (so & SO_CodeCache)
3751 NULL,
3752 &klass_closure);
3753 }
3754 }
3756 // Clear mod-union table; it will be dirtied in the prologue of
3757 // CMS generation per each younger generation collection.
3759 assert(_modUnionTable.isAllClear(),
3760 "Was cleared in most recent final checkpoint phase"
3761 " or no bits are set in the gc_prologue before the start of the next "
3762 "subsequent marking phase.");
3764 assert(_ct->klass_rem_set()->mod_union_is_clear(), "Must be");
3766 // Save the end of the used_region of the constituent generations
3767 // to be used to limit the extent of sweep in each generation.
3768 save_sweep_limits();
3769 if (UseAdaptiveSizePolicy) {
3770 size_policy()->checkpoint_roots_initial_end(gch->gc_cause());
3771 }
3772 verify_overflow_empty();
3773 }
3775 bool CMSCollector::markFromRoots(bool asynch) {
3776 // we might be tempted to assert that:
3777 // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
3778 // "inconsistent argument?");
3779 // However that wouldn't be right, because it's possible that
3780 // a safepoint is indeed in progress as a younger generation
3781 // stop-the-world GC happens even as we mark in this generation.
3782 assert(_collectorState == Marking, "inconsistent state?");
3783 check_correct_thread_executing();
3784 verify_overflow_empty();
3786 bool res;
3787 if (asynch) {
3789 // Start the timers for adaptive size policy for the concurrent phases
3790 // Do it here so that the foreground MS can use the concurrent
3791 // timer since a foreground MS might have the sweep done concurrently
3792 // or STW.
3793 if (UseAdaptiveSizePolicy) {
3794 size_policy()->concurrent_marking_begin();
3795 }
3797 // Weak ref discovery note: We may be discovering weak
3798 // refs in this generation concurrent (but interleaved) with
3799 // weak ref discovery by a younger generation collector.
3801 CMSTokenSyncWithLocks ts(true, bitMapLock());
3802 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
3803 CMSPhaseAccounting pa(this, "mark", _gc_tracer_cm->gc_id(), !PrintGCDetails);
3804 res = markFromRootsWork(asynch);
3805 if (res) {
3806 _collectorState = Precleaning;
3807 } else { // We failed and a foreground collection wants to take over
3808 assert(_foregroundGCIsActive, "internal state inconsistency");
3809 assert(_restart_addr == NULL, "foreground will restart from scratch");
3810 if (PrintGCDetails) {
3811 gclog_or_tty->print_cr("bailing out to foreground collection");
3812 }
3813 }
3814 if (UseAdaptiveSizePolicy) {
3815 size_policy()->concurrent_marking_end();
3816 }
3817 } else {
3818 assert(SafepointSynchronize::is_at_safepoint(),
3819 "inconsistent with asynch == false");
3820 if (UseAdaptiveSizePolicy) {
3821 size_policy()->ms_collection_marking_begin();
3822 }
3823 // already have locks
3824 res = markFromRootsWork(asynch);
3825 _collectorState = FinalMarking;
3826 if (UseAdaptiveSizePolicy) {
3827 GenCollectedHeap* gch = GenCollectedHeap::heap();
3828 size_policy()->ms_collection_marking_end(gch->gc_cause());
3829 }
3830 }
3831 verify_overflow_empty();
3832 return res;
3833 }
3835 bool CMSCollector::markFromRootsWork(bool asynch) {
3836 // iterate over marked bits in bit map, doing a full scan and mark
3837 // from these roots using the following algorithm:
3838 // . if oop is to the right of the current scan pointer,
3839 // mark corresponding bit (we'll process it later)
3840 // . else (oop is to left of current scan pointer)
3841 // push oop on marking stack
3842 // . drain the marking stack
3844 // Note that when we do a marking step we need to hold the
3845 // bit map lock -- recall that direct allocation (by mutators)
3846 // and promotion (by younger generation collectors) is also
3847 // marking the bit map. [the so-called allocate live policy.]
3848 // Because the implementation of bit map marking is not
3849 // robust wrt simultaneous marking of bits in the same word,
3850 // we need to make sure that there is no such interference
3851 // between concurrent such updates.
3853 // already have locks
3854 assert_lock_strong(bitMapLock());
3856 verify_work_stacks_empty();
3857 verify_overflow_empty();
3858 bool result = false;
3859 if (CMSConcurrentMTEnabled && ConcGCThreads > 0) {
3860 result = do_marking_mt(asynch);
3861 } else {
3862 result = do_marking_st(asynch);
3863 }
3864 return result;
3865 }
3867 // Forward decl
3868 class CMSConcMarkingTask;
3870 class CMSConcMarkingTerminator: public ParallelTaskTerminator {
3871 CMSCollector* _collector;
3872 CMSConcMarkingTask* _task;
3873 public:
3874 virtual void yield();
3876 // "n_threads" is the number of threads to be terminated.
3877 // "queue_set" is a set of work queues of other threads.
3878 // "collector" is the CMS collector associated with this task terminator.
3879 // "yield" indicates whether we need the gang as a whole to yield.
3880 CMSConcMarkingTerminator(int n_threads, TaskQueueSetSuper* queue_set, CMSCollector* collector) :
3881 ParallelTaskTerminator(n_threads, queue_set),
3882 _collector(collector) { }
3884 void set_task(CMSConcMarkingTask* task) {
3885 _task = task;
3886 }
3887 };
3889 class CMSConcMarkingTerminatorTerminator: public TerminatorTerminator {
3890 CMSConcMarkingTask* _task;
3891 public:
3892 bool should_exit_termination();
3893 void set_task(CMSConcMarkingTask* task) {
3894 _task = task;
3895 }
3896 };
3898 // MT Concurrent Marking Task
3899 class CMSConcMarkingTask: public YieldingFlexibleGangTask {
3900 CMSCollector* _collector;
3901 int _n_workers; // requested/desired # workers
3902 bool _asynch;
3903 bool _result;
3904 CompactibleFreeListSpace* _cms_space;
3905 char _pad_front[64]; // padding to ...
3906 HeapWord* _global_finger; // ... avoid sharing cache line
3907 char _pad_back[64];
3908 HeapWord* _restart_addr;
3910 // Exposed here for yielding support
3911 Mutex* const _bit_map_lock;
3913 // The per thread work queues, available here for stealing
3914 OopTaskQueueSet* _task_queues;
3916 // Termination (and yielding) support
3917 CMSConcMarkingTerminator _term;
3918 CMSConcMarkingTerminatorTerminator _term_term;
3920 public:
3921 CMSConcMarkingTask(CMSCollector* collector,
3922 CompactibleFreeListSpace* cms_space,
3923 bool asynch,
3924 YieldingFlexibleWorkGang* workers,
3925 OopTaskQueueSet* task_queues):
3926 YieldingFlexibleGangTask("Concurrent marking done multi-threaded"),
3927 _collector(collector),
3928 _cms_space(cms_space),
3929 _asynch(asynch), _n_workers(0), _result(true),
3930 _task_queues(task_queues),
3931 _term(_n_workers, task_queues, _collector),
3932 _bit_map_lock(collector->bitMapLock())
3933 {
3934 _requested_size = _n_workers;
3935 _term.set_task(this);
3936 _term_term.set_task(this);
3937 _restart_addr = _global_finger = _cms_space->bottom();
3938 }
3941 OopTaskQueueSet* task_queues() { return _task_queues; }
3943 OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
3945 HeapWord** global_finger_addr() { return &_global_finger; }
3947 CMSConcMarkingTerminator* terminator() { return &_term; }
3949 virtual void set_for_termination(int active_workers) {
3950 terminator()->reset_for_reuse(active_workers);
3951 }
3953 void work(uint worker_id);
3954 bool should_yield() {
3955 return ConcurrentMarkSweepThread::should_yield()
3956 && !_collector->foregroundGCIsActive()
3957 && _asynch;
3958 }
3960 virtual void coordinator_yield(); // stuff done by coordinator
3961 bool result() { return _result; }
3963 void reset(HeapWord* ra) {
3964 assert(_global_finger >= _cms_space->end(), "Postcondition of ::work(i)");
3965 _restart_addr = _global_finger = ra;
3966 _term.reset_for_reuse();
3967 }
3969 static bool get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3970 OopTaskQueue* work_q);
3972 private:
3973 void do_scan_and_mark(int i, CompactibleFreeListSpace* sp);
3974 void do_work_steal(int i);
3975 void bump_global_finger(HeapWord* f);
3976 };
3978 bool CMSConcMarkingTerminatorTerminator::should_exit_termination() {
3979 assert(_task != NULL, "Error");
3980 return _task->yielding();
3981 // Note that we do not need the disjunct || _task->should_yield() above
3982 // because we want terminating threads to yield only if the task
3983 // is already in the midst of yielding, which happens only after at least one
3984 // thread has yielded.
3985 }
3987 void CMSConcMarkingTerminator::yield() {
3988 if (_task->should_yield()) {
3989 _task->yield();
3990 } else {
3991 ParallelTaskTerminator::yield();
3992 }
3993 }
3995 ////////////////////////////////////////////////////////////////
3996 // Concurrent Marking Algorithm Sketch
3997 ////////////////////////////////////////////////////////////////
3998 // Until all tasks exhausted (both spaces):
3999 // -- claim next available chunk
4000 // -- bump global finger via CAS
4001 // -- find first object that starts in this chunk
4002 // and start scanning bitmap from that position
4003 // -- scan marked objects for oops
4004 // -- CAS-mark target, and if successful:
4005 // . if target oop is above global finger (volatile read)
4006 // nothing to do
4007 // . if target oop is in chunk and above local finger
4008 // then nothing to do
4009 // . else push on work-queue
4010 // -- Deal with possible overflow issues:
4011 // . local work-queue overflow causes stuff to be pushed on
4012 // global (common) overflow queue
4013 // . always first empty local work queue
4014 // . then get a batch of oops from global work queue if any
4015 // . then do work stealing
4016 // -- When all tasks claimed (both spaces)
4017 // and local work queue empty,
4018 // then in a loop do:
4019 // . check global overflow stack; steal a batch of oops and trace
4020 // . try to steal from other threads if GOS is empty
4021 // . if neither is available, offer termination
4022 // -- Terminate and return result
4023 //
4024 void CMSConcMarkingTask::work(uint worker_id) {
4025 elapsedTimer _timer;
4026 ResourceMark rm;
4027 HandleMark hm;
4029 DEBUG_ONLY(_collector->verify_overflow_empty();)
4031 // Before we begin work, our work queue should be empty
4032 assert(work_queue(worker_id)->size() == 0, "Expected to be empty");
4033 // Scan the bitmap covering _cms_space, tracing through grey objects.
4034 _timer.start();
4035 do_scan_and_mark(worker_id, _cms_space);
4036 _timer.stop();
4037 if (PrintCMSStatistics != 0) {
4038 gclog_or_tty->print_cr("Finished cms space scanning in %dth thread: %3.3f sec",
4039 worker_id, _timer.seconds());
4040 // XXX: need xxx/xxx type of notation, two timers
4041 }
4043 // ... do work stealing
4044 _timer.reset();
4045 _timer.start();
4046 do_work_steal(worker_id);
4047 _timer.stop();
4048 if (PrintCMSStatistics != 0) {
4049 gclog_or_tty->print_cr("Finished work stealing in %dth thread: %3.3f sec",
4050 worker_id, _timer.seconds());
4051 // XXX: need xxx/xxx type of notation, two timers
4052 }
4053 assert(_collector->_markStack.isEmpty(), "Should have been emptied");
4054 assert(work_queue(worker_id)->size() == 0, "Should have been emptied");
4055 // Note that under the current task protocol, the
4056 // following assertion is true even of the spaces
4057 // expanded since the completion of the concurrent
4058 // marking. XXX This will likely change under a strict
4059 // ABORT semantics.
4060 // After perm removal the comparison was changed to
4061 // greater than or equal to from strictly greater than.
4062 // Before perm removal the highest address sweep would
4063 // have been at the end of perm gen but now is at the
4064 // end of the tenured gen.
4065 assert(_global_finger >= _cms_space->end(),
4066 "All tasks have been completed");
4067 DEBUG_ONLY(_collector->verify_overflow_empty();)
4068 }
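// Advance the shared _global_finger to at least f. Several workers may race
// to bump the finger, so we retry the CAS until either our update succeeds
// or another worker has already moved the finger to or beyond f.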
4070 void CMSConcMarkingTask::bump_global_finger(HeapWord* f) {
4071 HeapWord* read = _global_finger;
4072 HeapWord* cur = read;
4073 while (f > read) {
4074 cur = read;
4075 read = (HeapWord*) Atomic::cmpxchg_ptr(f, &_global_finger, cur);
4076 if (cur == read) {
4077 // our cas succeeded
4078 assert(_global_finger >= f, "protocol consistency");
4079 break;
4080 }
4081 }
4082 }
4084 // This is really inefficient, and should be redone by
4085 // using (not yet available) block-read and -write interfaces to the
4086 // stack and the work_queue. XXX FIX ME !!!
4087 bool CMSConcMarkingTask::get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
4088 OopTaskQueue* work_q) {
4089 // Fast lock-free check
4090 if (ovflw_stk->length() == 0) {
4091 return false;
4092 }
4093 assert(work_q->size() == 0, "Shouldn't steal");
4094 MutexLockerEx ml(ovflw_stk->par_lock(),
4095 Mutex::_no_safepoint_check_flag);
4096 // Grab up to 1/4 the size of the work queue
4097 size_t num = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
4098 (size_t)ParGCDesiredObjsFromOverflowList);
4099 num = MIN2(num, ovflw_stk->length());
4100 for (int i = (int) num; i > 0; i--) {
4101 oop cur = ovflw_stk->pop();
4102 assert(cur != NULL, "Counted wrong?");
4103 work_q->push(cur);
4104 }
4105 return num > 0;
4106 }
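// Claim fixed-size chunks of the space via the sequential subtask mechanism.
// For each claimed chunk we first bump the global finger past it, then locate
// the first object that starts inside the chunk and mark transitively over
// the corresponding sub-span of the bitmap.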
4108 void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
4109 SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
4110 int n_tasks = pst->n_tasks();
4111 // We allow that there may be no tasks to do here because
4112 // we are restarting after a stack overflow.
4113 assert(pst->valid() || n_tasks == 0, "Uninitialized use?");
4114 uint nth_task = 0;
4116 HeapWord* aligned_start = sp->bottom();
4117 if (sp->used_region().contains(_restart_addr)) {
4118 // Align down to a card boundary for the start of 0th task
4119 // for this space.
4120 aligned_start =
4121 (HeapWord*)align_size_down((uintptr_t)_restart_addr,
4122 CardTableModRefBS::card_size);
4123 }
4125 size_t chunk_size = sp->marking_task_size();
4126 while (!pst->is_task_claimed(/* reference */ nth_task)) {
4127 // Having claimed the nth task in this space,
4128 // compute the chunk that it corresponds to:
4129 MemRegion span = MemRegion(aligned_start + nth_task*chunk_size,
4130 aligned_start + (nth_task+1)*chunk_size);
4131 // Try and bump the global finger via a CAS;
4132 // note that we need to do the global finger bump
4133 // _before_ taking the intersection below, because
4134 // the task corresponding to that region will be
4135 // deemed done even if the used_region() expands
4136 // because of allocation -- as it almost certainly will
4137 // during start-up while the threads yield in the
4138 // closure below.
4139 HeapWord* finger = span.end();
4140 bump_global_finger(finger); // atomically
4141 // There are null tasks here corresponding to chunks
4142 // beyond the "top" address of the space.
4143 span = span.intersection(sp->used_region());
4144 if (!span.is_empty()) { // Non-null task
4145 HeapWord* prev_obj;
4146 assert(!span.contains(_restart_addr) || nth_task == 0,
4147 "Inconsistency");
4148 if (nth_task == 0) {
4149 // For the 0th task, we'll not need to compute a block_start.
4150 if (span.contains(_restart_addr)) {
4151 // In the case of a restart because of stack overflow,
4152 // we might additionally skip a chunk prefix.
4153 prev_obj = _restart_addr;
4154 } else {
4155 prev_obj = span.start();
4156 }
4157 } else {
4158 // We want to skip the first object because
4159 // the protocol is to scan any object in its entirety
4160 // that _starts_ in this span; a fortiori, any
4161 // object starting in an earlier span is scanned
4162 // as part of an earlier claimed task.
4163 // Below we use the "careful" version of block_start
4164 // so we do not try to navigate uninitialized objects.
4165 prev_obj = sp->block_start_careful(span.start());
4166 // Below we use a variant of block_size that uses the
4167 // Printezis bits to avoid waiting for allocated
4168 // objects to become initialized/parsable.
4169 while (prev_obj < span.start()) {
4170 size_t sz = sp->block_size_no_stall(prev_obj, _collector);
4171 if (sz > 0) {
4172 prev_obj += sz;
4173 } else {
4174 // In this case we may end up doing a bit of redundant
4175 // scanning, but that appears unavoidable, short of
4176 // locking the free list locks; see bug 6324141.
4177 break;
4178 }
4179 }
4180 }
4181 if (prev_obj < span.end()) {
4182 MemRegion my_span = MemRegion(prev_obj, span.end());
4183 // Do the marking work within a non-empty span --
4184 // the last argument to the constructor indicates whether the
4185 // iteration should be incremental with periodic yields.
4186 Par_MarkFromRootsClosure cl(this, _collector, my_span,
4187 &_collector->_markBitMap,
4188 work_queue(i),
4189 &_collector->_markStack,
4190 _asynch);
4191 _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end());
4192 } // else nothing to do for this task
4193 } // else nothing to do for this task
4194 }
4195 // We'd be tempted to assert here that since there are no
4196 // more tasks left to claim in this space, the global_finger
4197 // must exceed space->top() and a fortiori space->end(). However,
4198 // that would not quite be correct because the bumping of
4199 // global_finger occurs strictly after the claiming of a task,
4200 // so by the time we reach here the global finger may not yet
4201 // have been bumped up by the thread that claimed the last
4202 // task.
4203 pst->all_tasks_completed();
4204 }
4206 class Par_ConcMarkingClosure: public CMSOopClosure {
4207 private:
4208 CMSCollector* _collector;
4209 CMSConcMarkingTask* _task;
4210 MemRegion _span;
4211 CMSBitMap* _bit_map;
4212 CMSMarkStack* _overflow_stack;
4213 OopTaskQueue* _work_queue;
4214 protected:
4215 DO_OOP_WORK_DEFN
4216 public:
4217 Par_ConcMarkingClosure(CMSCollector* collector, CMSConcMarkingTask* task, OopTaskQueue* work_queue,
4218 CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
4219 CMSOopClosure(collector->ref_processor()),
4220 _collector(collector),
4221 _task(task),
4222 _span(collector->_span),
4223 _work_queue(work_queue),
4224 _bit_map(bit_map),
4225 _overflow_stack(overflow_stack)
4226 { }
4227 virtual void do_oop(oop* p);
4228 virtual void do_oop(narrowOop* p);
4230 void trim_queue(size_t max);
4231 void handle_stack_overflow(HeapWord* lost);
4232 void do_yield_check() {
4233 if (_task->should_yield()) {
4234 _task->yield();
4235 }
4236 }
4237 };
4239 // Grey object scanning during work stealing phase --
4240 // the salient assumption here is that any references
4241 // that are in these stolen objects being scanned must
4242 // already have been initialized (else they would not have
4243 // been published), so we do not need to check for
4244 // uninitialized objects before pushing here.
4245 void Par_ConcMarkingClosure::do_oop(oop obj) {
4246 assert(obj->is_oop_or_null(true), "expected an oop or NULL");
4247 HeapWord* addr = (HeapWord*)obj;
4248 // Check if oop points into the CMS generation
4249 // and is not marked
4250 if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
4251 // a white object ...
4252 // If we manage to "claim" the object, by being the
4253 // first thread to mark it, then we push it on our
4254 // marking stack
4255 if (_bit_map->par_mark(addr)) { // ... now grey
4256 // push on work queue (grey set)
4257 bool simulate_overflow = false;
4258 NOT_PRODUCT(
4259 if (CMSMarkStackOverflowALot &&
4260 _collector->simulate_overflow()) {
4261 // simulate a stack overflow
4262 simulate_overflow = true;
4263 }
4264 )
4265 if (simulate_overflow ||
4266 !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
4267 // stack overflow
4268 if (PrintCMSStatistics != 0) {
4269 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
4270 SIZE_FORMAT, _overflow_stack->capacity());
4271 }
4272 // We cannot assert that the overflow stack is full because
4273 // it may have been emptied since.
4274 assert(simulate_overflow ||
4275 _work_queue->size() == _work_queue->max_elems(),
4276 "Else push should have succeeded");
4277 handle_stack_overflow(addr);
4278 }
4279 } // Else, some other thread got there first
4280 do_yield_check();
4281 }
4282 }
4284 void Par_ConcMarkingClosure::do_oop(oop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
4285 void Par_ConcMarkingClosure::do_oop(narrowOop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
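// Drain the local work queue down to max entries; each popped entry is a grey
// object in the span, and tracing it (via do_oop() above) may push more work.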
4287 void Par_ConcMarkingClosure::trim_queue(size_t max) {
4288 while (_work_queue->size() > max) {
4289 oop new_oop;
4290 if (_work_queue->pop_local(new_oop)) {
4291 assert(new_oop->is_oop(), "Should be an oop");
4292 assert(_bit_map->isMarked((HeapWord*)new_oop), "Grey object");
4293 assert(_span.contains((HeapWord*)new_oop), "Not in span");
4294 new_oop->oop_iterate(this); // do_oop() above
4295 do_yield_check();
4296 }
4297 }
4298 }
4300 // Upon stack overflow, we discard (part of) the stack,
4301 // remembering the least address amongst those discarded
4302 // in CMSCollector's _restart_address.
4303 void Par_ConcMarkingClosure::handle_stack_overflow(HeapWord* lost) {
4304 // We need to do this under a mutex to prevent other
4305 // workers from interfering with the work done below.
4306 MutexLockerEx ml(_overflow_stack->par_lock(),
4307 Mutex::_no_safepoint_check_flag);
4308 // Remember the least grey address discarded
4309 HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
4310 _collector->lower_restart_addr(ra);
4311 _overflow_stack->reset(); // discard stack contents
4312 _overflow_stack->expand(); // expand the stack if possible
4313 }
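// Work stealing: repeatedly drain the local queue, then try to refill it from
// the shared overflow stack; failing that, steal from other workers' queues.
// Termination is offered only when none of these sources yields any work.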
4316 void CMSConcMarkingTask::do_work_steal(int i) {
4317 OopTaskQueue* work_q = work_queue(i);
4318 oop obj_to_scan;
4319 CMSBitMap* bm = &(_collector->_markBitMap);
4320 CMSMarkStack* ovflw = &(_collector->_markStack);
4321 int* seed = _collector->hash_seed(i);
4322 Par_ConcMarkingClosure cl(_collector, this, work_q, bm, ovflw);
4323 while (true) {
4324 cl.trim_queue(0);
4325 assert(work_q->size() == 0, "Should have been emptied above");
4326 if (get_work_from_overflow_stack(ovflw, work_q)) {
4327 // Can't assert below because the work obtained from the
4328 // overflow stack may already have been stolen from us.
4329 // assert(work_q->size() > 0, "Work from overflow stack");
4330 continue;
4331 } else if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
4332 assert(obj_to_scan->is_oop(), "Should be an oop");
4333 assert(bm->isMarked((HeapWord*)obj_to_scan), "Grey object");
4334 obj_to_scan->oop_iterate(&cl);
4335 } else if (terminator()->offer_termination(&_term_term)) {
4336 assert(work_q->size() == 0, "Impossible!");
4337 break;
4338 } else if (yielding() || should_yield()) {
4339 yield();
4340 }
4341 }
4342 }
4344 // This is run by the CMS (coordinator) thread.
4345 void CMSConcMarkingTask::coordinator_yield() {
4346 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
4347 "CMS thread should hold CMS token");
4348 // First give up the locks, then yield, then re-lock
4349 // We should probably use a constructor/destructor idiom to
4350 // do this unlock/lock or modify the MutexUnlocker class to
4351 // serve our purpose. XXX
4352 assert_lock_strong(_bit_map_lock);
4353 _bit_map_lock->unlock();
4354 ConcurrentMarkSweepThread::desynchronize(true);
4355 ConcurrentMarkSweepThread::acknowledge_yield_request();
4356 _collector->stopTimer();
4357 if (PrintCMSStatistics != 0) {
4358 _collector->incrementYields();
4359 }
4360 _collector->icms_wait();
4362 // It is possible for whichever thread initiated the yield request
4363 // not to get a chance to wake up and take the bitmap lock between
4364 // this thread releasing it and reacquiring it. So, while the
4365 // should_yield() flag is on, let's sleep for a bit to give the
4366 // other thread a chance to wake up. The limit imposed on the number
4367 // of iterations is defensive, to avoid any unforeseen circumstances
4368 // putting us into an infinite loop. Since it's always been this
4369 // (coordinator_yield()) method that was observed to cause the
4370 // problem, we are using a parameter (CMSCoordinatorYieldSleepCount)
4371 // which is by default non-zero. For the other seven methods that
4372 // also perform the yield operation, we are using a different
4373 // parameter (CMSYieldSleepCount) which is by default zero. This way we
4374 // can enable the sleeping for those methods too, if necessary.
4375 // See 6442774.
4376 //
4377 // We really need to reconsider the synchronization between the GC
4378 // thread and the yield-requesting threads in the future and we
4379 // should really use wait/notify, which is the recommended
4380 // way of doing this type of interaction. Additionally, we should
4381 // consolidate the eight methods that do the yield operation, which
4382 // are almost identical, into one for better maintainability and
4383 // readability. See 6445193.
4384 //
4385 // Tony 2006.06.29
4386 for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
4387 ConcurrentMarkSweepThread::should_yield() &&
4388 !CMSCollector::foregroundGCIsActive(); ++i) {
4389 os::sleep(Thread::current(), 1, false);
4390 ConcurrentMarkSweepThread::acknowledge_yield_request();
4391 }
4393 ConcurrentMarkSweepThread::synchronize(true);
4394 _bit_map_lock->lock_without_safepoint_check();
4395 _collector->startTimer();
4396 }
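// Multi-threaded concurrent marking, coordinated by the CMS thread: start
// the worker gang on a CMSConcMarkingTask and, whenever the task yields,
// run the coordinator yield protocol above before letting the workers
// continue. Returns false if marking is abandoned to a foreground collection.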
4398 bool CMSCollector::do_marking_mt(bool asynch) {
4399 assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition");
4400 int num_workers = AdaptiveSizePolicy::calc_active_conc_workers(
4401 conc_workers()->total_workers(),
4402 conc_workers()->active_workers(),
4403 Threads::number_of_non_daemon_threads());
4404 conc_workers()->set_active_workers(num_workers);
4406 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
4408 CMSConcMarkingTask tsk(this,
4409 cms_space,
4410 asynch,
4411 conc_workers(),
4412 task_queues());
4414 // Since the actual number of workers we get may be different
4415 // from the number we requested above, do we need to do anything different
4416 // below? In particular, maybe we need to subclass the SequentialSubTasksDone
4417 // class?? XXX
4418 cms_space ->initialize_sequential_subtasks_for_marking(num_workers);
4420 // Refs discovery is already non-atomic.
4421 assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic");
4422 assert(ref_processor()->discovery_is_mt(), "Discovery should be MT");
4423 conc_workers()->start_task(&tsk);
4424 while (tsk.yielded()) {
4425 tsk.coordinator_yield();
4426 conc_workers()->continue_task(&tsk);
4427 }
4428 // If the task was aborted, _restart_addr will be non-NULL
4429 assert(tsk.completed() || _restart_addr != NULL, "Inconsistency");
4430 while (_restart_addr != NULL) {
4431 // XXX For now we do not make use of ABORTED state and have not
4432 // yet implemented the right abort semantics (even in the original
4433 // single-threaded CMS case). That needs some more investigation
4434 // and is deferred for now; see CR# TBF. 07252005YSR. XXX
4435 assert(!CMSAbortSemantics || tsk.aborted(), "Inconsistency");
4436 // If _restart_addr is non-NULL, a marking stack overflow
4437 // occurred; we need to do a fresh marking iteration from the
4438 // indicated restart address.
4439 if (_foregroundGCIsActive && asynch) {
4440 // We may be running into repeated stack overflows, having
4441 // reached the limit of the stack size, while making very
4442 // slow forward progress. It may be best to bail out and
4443 // let the foreground collector do its job.
4444 // Clear _restart_addr, so that foreground GC
4445 // works from scratch. This avoids the headache of
4446 // a "rescan" which would otherwise be needed because
4447 // of the dirty mod union table & card table.
4448 _restart_addr = NULL;
4449 return false;
4450 }
4451 // Adjust the task to restart from _restart_addr
4452 tsk.reset(_restart_addr);
4453 cms_space ->initialize_sequential_subtasks_for_marking(num_workers,
4454 _restart_addr);
4455 _restart_addr = NULL;
4456 // Get the workers going again
4457 conc_workers()->start_task(&tsk);
4458 while (tsk.yielded()) {
4459 tsk.coordinator_yield();
4460 conc_workers()->continue_task(&tsk);
4461 }
4462 }
4463 assert(tsk.completed(), "Inconsistency");
4464 assert(tsk.result() == true, "Inconsistency");
4465 return true;
4466 }
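// Single-threaded concurrent marking: iterate over the grey (marked) bits
// in the mark bitmap, restarting from _restart_addr after any marking stack
// overflow. Returns false if marking is abandoned to a foreground collection.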
4468 bool CMSCollector::do_marking_st(bool asynch) {
4469 ResourceMark rm;
4470 HandleMark hm;
4472 // Temporarily make refs discovery single threaded (non-MT)
4473 ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
4474 MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap,
4475 &_markStack, CMSYield && asynch);
4476 // the last argument to iterate indicates whether the iteration
4477 // should be incremental with periodic yields.
4478 _markBitMap.iterate(&markFromRootsClosure);
4479 // If _restart_addr is non-NULL, a marking stack overflow
4480 // occurred; we need to do a fresh iteration from the
4481 // indicated restart address.
4482 while (_restart_addr != NULL) {
4483 if (_foregroundGCIsActive && asynch) {
4484 // We may be running into repeated stack overflows, having
4485 // reached the limit of the stack size, while making very
4486 // slow forward progress. It may be best to bail out and
4487 // let the foreground collector do its job.
4488 // Clear _restart_addr, so that foreground GC
4489 // works from scratch. This avoids the headache of
4490 // a "rescan" which would otherwise be needed because
4491 // of the dirty mod union table & card table.
4492 _restart_addr = NULL;
4493 return false; // indicating failure to complete marking
4494 }
4495 // Deal with stack overflow:
4496 // we restart marking from _restart_addr
4497 HeapWord* ra = _restart_addr;
4498 markFromRootsClosure.reset(ra);
4499 _restart_addr = NULL;
4500 _markBitMap.iterate(&markFromRootsClosure, ra, _span.end());
4501 }
4502 return true;
4503 }
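// Concurrent precleaning: scrub the discovered reference lists and rescan
// objects on cards dirtied since the initial mark (via the mod union table
// and card table), so that the subsequent stop-the-world remark pause has
// less work to do.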
4505 void CMSCollector::preclean() {
4506 check_correct_thread_executing();
4507 assert(Thread::current()->is_ConcurrentGC_thread(), "Wrong thread");
4508 verify_work_stacks_empty();
4509 verify_overflow_empty();
4510 _abort_preclean = false;
4511 if (CMSPrecleaningEnabled) {
4512 if (!CMSEdenChunksRecordAlways) {
4513 _eden_chunk_index = 0;
4514 }
4515 size_t used = get_eden_used();
4516 size_t capacity = get_eden_capacity();
4517 // Don't start sampling unless we will get sufficiently
4518 // many samples.
4519 if (used < (capacity/(CMSScheduleRemarkSamplingRatio * 100)
4520 * CMSScheduleRemarkEdenPenetration)) {
4521 _start_sampling = true;
4522 } else {
4523 _start_sampling = false;
4524 }
4525 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
4526 CMSPhaseAccounting pa(this, "preclean", _gc_tracer_cm->gc_id(), !PrintGCDetails);
4527 preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
4528 }
4529 CMSTokenSync x(true); // is cms thread
4530 if (CMSPrecleaningEnabled) {
4531 sample_eden();
4532 _collectorState = AbortablePreclean;
4533 } else {
4534 _collectorState = FinalMarking;
4535 }
4536 verify_work_stacks_empty();
4537 verify_overflow_empty();
4538 }
4540 // Try and schedule the remark such that young gen
4541 // occupancy is CMSScheduleRemarkEdenPenetration %.
4542 void CMSCollector::abortable_preclean() {
4543 check_correct_thread_executing();
4544 assert(CMSPrecleaningEnabled, "Inconsistent control state");
4545 assert(_collectorState == AbortablePreclean, "Inconsistent control state");
4547 // If Eden's current occupancy is below this threshold,
4548 // immediately schedule the remark; else preclean
4549 // past the next scavenge in an effort to
4550 // schedule the pause as described above. By choosing
4551 // CMSScheduleRemarkEdenSizeThreshold >= max eden size
4552 // we will never do an actual abortable preclean cycle.
4553 if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
4554 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
4555 CMSPhaseAccounting pa(this, "abortable-preclean", _gc_tracer_cm->gc_id(), !PrintGCDetails);
4556 // We need more smarts in the abortable preclean
4557 // loop below to deal with cases where allocation
4558 // in young gen is very very slow, and our precleaning
4559 // is running a losing race against a horde of
4560 // mutators intent on flooding us with CMS updates
4561 // (dirty cards).
4562 // One, admittedly dumb, strategy is to give up
4563 // after a certain number of abortable precleaning loops
4564 // or after a certain maximum time. We want to make
4565 // this smarter in the next iteration.
4566 // XXX FIX ME!!! YSR
4567 size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0;
4568 while (!(should_abort_preclean() ||
4569 ConcurrentMarkSweepThread::should_terminate())) {
4570 workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2);
4571 cumworkdone += workdone;
4572 loops++;
4573 // Voluntarily terminate abortable preclean phase if we have
4574 // been at it for too long.
4575 if ((CMSMaxAbortablePrecleanLoops != 0) &&
4576 loops >= CMSMaxAbortablePrecleanLoops) {
4577 if (PrintGCDetails) {
4578 gclog_or_tty->print(" CMS: abort preclean due to loops ");
4579 }
4580 break;
4581 }
4582 if (pa.wallclock_millis() > CMSMaxAbortablePrecleanTime) {
4583 if (PrintGCDetails) {
4584 gclog_or_tty->print(" CMS: abort preclean due to time ");
4585 }
4586 break;
4587 }
4588 // If we are doing little work each iteration, we should
4589 // take a short break.
4590 if (workdone < CMSAbortablePrecleanMinWorkPerIteration) {
4591 // Sleep for some time, waiting for work to accumulate
4592 stopTimer();
4593 cmsThread()->wait_on_cms_lock(CMSAbortablePrecleanWaitMillis);
4594 startTimer();
4595 waited++;
4596 }
4597 }
4598 if (PrintCMSStatistics > 0) {
4599 gclog_or_tty->print(" [%d iterations, %d waits, %d cards] ",
4600 loops, waited, cumworkdone);
4601 }
4602 }
4603 CMSTokenSync x(true); // is cms thread
4604 if (_collectorState != Idling) {
4605 assert(_collectorState == AbortablePreclean,
4606 "Spontaneous state transition?");
4607 _collectorState = FinalMarking;
4608 } // Else, a foreground collection completed this CMS cycle.
4609 return;
4610 }
4612 // Respond to an Eden sampling opportunity
4613 void CMSCollector::sample_eden() {
4614 // Make sure a young gc cannot sneak in between our
4615 // reading and recording of a sample.
4616 assert(Thread::current()->is_ConcurrentGC_thread(),
4617 "Only the cms thread may collect Eden samples");
4618 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
4619 "Should collect samples while holding CMS token");
4620 if (!_start_sampling) {
4621 return;
4622 }
4623 // When CMSEdenChunksRecordAlways is true, the eden chunk array
4624 // is populated by the young generation.
4625 if (_eden_chunk_array != NULL && !CMSEdenChunksRecordAlways) {
4626 if (_eden_chunk_index < _eden_chunk_capacity) {
4627 _eden_chunk_array[_eden_chunk_index] = *_top_addr; // take sample
4628 assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
4629 "Unexpected state of Eden");
4630 // We'd like to check that what we just sampled is an oop-start address;
4631 // however, we cannot do that here since the object may not yet have been
4632 // initialized. So we'll instead do the check when we _use_ this sample
4633 // later.
4634 if (_eden_chunk_index == 0 ||
4635 (pointer_delta(_eden_chunk_array[_eden_chunk_index],
4636 _eden_chunk_array[_eden_chunk_index-1])
4637 >= CMSSamplingGrain)) {
4638 _eden_chunk_index++; // commit sample
4639 }
4640 }
4641 }
4642 if ((_collectorState == AbortablePreclean) && !_abort_preclean) {
4643 size_t used = get_eden_used();
4644 size_t capacity = get_eden_capacity();
4645 assert(used <= capacity, "Unexpected state of Eden");
4646 if (used > (capacity/100 * CMSScheduleRemarkEdenPenetration)) {
4647 _abort_preclean = true;
4648 }
4649 }
4650 }
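// A single pass of precleaning work; the returned card count is used by the
// abortable preclean loop above as a measure of useful work done per pass.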
4653 size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
4654 assert(_collectorState == Precleaning ||
4655 _collectorState == AbortablePreclean, "incorrect state");
4656 ResourceMark rm;
4657 HandleMark hm;
4659 // Precleaning is currently not MT but the reference processor
4660 // may be set for MT. Disable it temporarily here.
4661 ReferenceProcessor* rp = ref_processor();
4662 ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false);
4664 // Do one pass of scrubbing the discovered reference lists
4665 // to remove any reference objects with strongly-reachable
4666 // referents.
4667 if (clean_refs) {
4668 CMSPrecleanRefsYieldClosure yield_cl(this);
4669 assert(rp->span().equals(_span), "Spans should be equal");
4670 CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
4671 &_markStack, true /* preclean */);
4672 CMSDrainMarkingStackClosure complete_trace(this,
4673 _span, &_markBitMap, &_markStack,
4674 &keep_alive, true /* preclean */);
4676 // We don't want this step to interfere with a young
4677 // collection because we don't want to take CPU
4678 // or memory bandwidth away from the young GC threads
4679 // (which may be as many as there are CPUs).
4680 // Note that we don't need to protect ourselves from
4681 // interference with mutators because they can't
4682 // manipulate the discovered reference lists nor affect
4683 // the computed reachability of the referents, the
4684 // only properties manipulated by the precleaning
4685 // of these reference lists.
4686 stopTimer();
4687 CMSTokenSyncWithLocks x(true /* is cms thread */,
4688 bitMapLock());
4689 startTimer();
4690 sample_eden();
4692 // The following will yield to allow foreground
4693 // collection to proceed promptly. XXX YSR:
4694 // The code in this method may need further
4695 // tweaking for better performance and some restructuring
4696 // for cleaner interfaces.
4697 GCTimer *gc_timer = NULL; // Currently not tracing concurrent phases
4698 rp->preclean_discovered_references(
4699 rp->is_alive_non_header(), &keep_alive, &complete_trace, &yield_cl,
4700 gc_timer, _gc_tracer_cm->gc_id());
4701 }
4703 if (clean_survivor) { // preclean the active survivor space(s)
4704 assert(_young_gen->kind() == Generation::DefNew ||
4705 _young_gen->kind() == Generation::ParNew ||
4706 _young_gen->kind() == Generation::ASParNew,
4707 "incorrect type for cast");
4708 DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
4709 PushAndMarkClosure pam_cl(this, _span, ref_processor(),
4710 &_markBitMap, &_modUnionTable,
4711 &_markStack, true /* precleaning phase */);
4712 stopTimer();
4713 CMSTokenSyncWithLocks ts(true /* is cms thread */,
4714 bitMapLock());
4715 startTimer();
4716 unsigned int before_count =
4717 GenCollectedHeap::heap()->total_collections();
4718 SurvivorSpacePrecleanClosure
4719 sss_cl(this, _span, &_markBitMap, &_markStack,
4720 &pam_cl, before_count, CMSYield);
4721 dng->from()->object_iterate_careful(&sss_cl);
4722 dng->to()->object_iterate_careful(&sss_cl);
4723 }
4724 MarkRefsIntoAndScanClosure
4725 mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
4726 &_markStack, this, CMSYield,
4727 true /* precleaning phase */);
4728 // CAUTION: The following closure has persistent state that may need to
4729 // be reset upon a decrease in the sequence of addresses it
4730 // processes.
4731 ScanMarkedObjectsAgainCarefullyClosure
4732 smoac_cl(this, _span,
4733 &_markBitMap, &_markStack, &mrias_cl, CMSYield);
4735 // Preclean dirty cards in ModUnionTable and CardTable using
4736 // appropriate convergence criterion;
4737 // repeat CMSPrecleanIter times unless we find that
4738 // we are losing.
4739 assert(CMSPrecleanIter < 10, "CMSPrecleanIter is too large");
4740 assert(CMSPrecleanNumerator < CMSPrecleanDenominator,
4741 "Bad convergence multiplier");
4742 assert(CMSPrecleanThreshold >= 100,
4743 "Unreasonably low CMSPrecleanThreshold");
4745 size_t numIter, cumNumCards, lastNumCards, curNumCards;
4746 for (numIter = 0, cumNumCards = lastNumCards = curNumCards = 0;
4747 numIter < CMSPrecleanIter;
4748 numIter++, lastNumCards = curNumCards, cumNumCards += curNumCards) {
4749 curNumCards = preclean_mod_union_table(_cmsGen, &smoac_cl);
4750 if (Verbose && PrintGCDetails) {
4751 gclog_or_tty->print(" (modUnionTable: %d cards)", curNumCards);
4752 }
4753 // Either there are very few dirty cards, so re-mark
4754 // pause will be small anyway, or our pre-cleaning isn't
4755 // that much faster than the rate at which cards are being
4756 // dirtied, so we might as well stop and re-mark since
4757 // precleaning won't improve our re-mark time by much.
4758 if (curNumCards <= CMSPrecleanThreshold ||
4759 (numIter > 0 &&
4760 (curNumCards * CMSPrecleanDenominator >
4761 lastNumCards * CMSPrecleanNumerator))) {
4762 numIter++;
4763 cumNumCards += curNumCards;
4764 break;
4765 }
4766 }
4768 preclean_klasses(&mrias_cl, _cmsGen->freelistLock());
4770 curNumCards = preclean_card_table(_cmsGen, &smoac_cl);
4771 cumNumCards += curNumCards;
4772 if (PrintGCDetails && PrintCMSStatistics != 0) {
4773 gclog_or_tty->print_cr(" (cardTable: %d cards, re-scanned %d cards, %d iterations)",
4774 curNumCards, cumNumCards, numIter);
4775 }
4776 return cumNumCards; // as a measure of useful work done
4777 }
4779 // PRECLEANING NOTES:
4780 // Precleaning involves:
4781 // . reading the bits of the modUnionTable and clearing the set bits.
4782 // . For the cards corresponding to the set bits, we scan the
4783 // objects on those cards. This means we need the free_list_lock
4784 // so that we can safely iterate over the CMS space when scanning
4785 // for oops.
4786 // . When we scan the objects, we'll be both reading and setting
4787 // marks in the marking bit map, so we'll need the marking bit map.
4788 // . For protecting _collector_state transitions, we take the CGC_lock.
4789 // Note that any races in the reading of card table entries by the
4790 // CMS thread on the one hand and the clearing of those entries by the
4791 // VM thread or the setting of those entries by the mutator threads on the
4792 // other are quite benign. However, for efficiency it makes sense to keep
4793 // the VM thread from racing with the CMS thread while the latter is
4794 // transferring dirty card info to the modUnionTable. We therefore also use the
4795 // CGC_lock to protect the reading of the card table and the mod union
4796 // table by the CMS thread.
4797 // . We run concurrently with mutator updates, so scanning
4798 // needs to be done carefully -- we should not try to scan
4799 // potentially uninitialized objects.
4800 //
4801 // Locking strategy: While holding the CGC_lock, we scan over and
4802 // reset a maximal dirty range of the mod union / card tables, then lock
4803 // the free_list_lock and bitmap lock to do a full marking, then
4804 // release these locks; and repeat the cycle. This allows for a
4805 // certain amount of fairness in the sharing of these locks between
4806 // the CMS collector on the one hand, and the VM thread and the
4807 // mutators on the other.
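// A sketch of one iteration of that cycle, as implemented in
// preclean_mod_union_table() and preclean_card_table() below:
//   1. under CMSTokenSync (CGC_lock): get and reset a maximal contiguous
//      dirty range of the mod union table / card table;
//   2. under the free_list_lock and bitmap lock: carefully scan the objects
//      on that range, redirtying any unscanned suffix;
//   3. drop the locks (yielding if requested) and continue from the end of
//      the range just processed.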
4809 // NOTE: preclean_mod_union_table() and preclean_card_table()
4810 // further below are largely identical; if you need to modify
4811 // one of these methods, please check the other method too.
4813 size_t CMSCollector::preclean_mod_union_table(
4814 ConcurrentMarkSweepGeneration* gen,
4815 ScanMarkedObjectsAgainCarefullyClosure* cl) {
4816 verify_work_stacks_empty();
4817 verify_overflow_empty();
4819 // strategy: starting with the first card, accumulate contiguous
4820 // ranges of dirty cards; clear these cards, then scan the region
4821 // covered by these cards.
4823 // Since all of the MUT is committed ahead, we can just use
4824 // that, in case the generations expand while we are precleaning.
4825 // It might also be fine to just use the committed part of the
4826 // generation, but we might potentially miss cards when the
4827 // generation is rapidly expanding while we are in the midst
4828 // of precleaning.
4829 HeapWord* startAddr = gen->reserved().start();
4830 HeapWord* endAddr = gen->reserved().end();
4832 cl->setFreelistLock(gen->freelistLock()); // needed for yielding
4834 size_t numDirtyCards, cumNumDirtyCards;
4835 HeapWord *nextAddr, *lastAddr;
4836 for (cumNumDirtyCards = numDirtyCards = 0,
4837 nextAddr = lastAddr = startAddr;
4838 nextAddr < endAddr;
4839 nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
4841 ResourceMark rm;
4842 HandleMark hm;
4844 MemRegion dirtyRegion;
4845 {
4846 stopTimer();
4847 // Potential yield point
4848 CMSTokenSync ts(true);
4849 startTimer();
4850 sample_eden();
4851 // Get dirty region starting at nextAddr (inclusive),
4852 // simultaneously clearing it.
4853 dirtyRegion =
4854 _modUnionTable.getAndClearMarkedRegion(nextAddr, endAddr);
4855 assert(dirtyRegion.start() >= nextAddr,
4856 "returned region inconsistent?");
4857 }
4858 // Remember where the next search should begin.
4859 // The returned region (if non-empty) is a right open interval,
4860 // so lastAddr is obtained from the right end of that
4861 // interval.
4862 lastAddr = dirtyRegion.end();
4863 // Should do something more transparent and less hacky XXX
4864 numDirtyCards =
4865 _modUnionTable.heapWordDiffToOffsetDiff(dirtyRegion.word_size());
4867 // We'll scan the cards in the dirty region (with periodic
4868 // yields for foreground GC as needed).
4869 if (!dirtyRegion.is_empty()) {
4870 assert(numDirtyCards > 0, "consistency check");
4871 HeapWord* stop_point = NULL;
4872 stopTimer();
4873 // Potential yield point
4874 CMSTokenSyncWithLocks ts(true, gen->freelistLock(),
4875 bitMapLock());
4876 startTimer();
4877 {
4878 verify_work_stacks_empty();
4879 verify_overflow_empty();
4880 sample_eden();
4881 stop_point =
4882 gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4883 }
4884 if (stop_point != NULL) {
4885 // The careful iteration stopped early either because it found an
4886 // uninitialized object, or because we were in the midst of an
4887 // "abortable preclean", which should now be aborted. Redirty
4888 // the bits corresponding to the partially-scanned or unscanned
4889 // cards. We'll either restart at the next block boundary or
4890 // abort the preclean.
4891 assert((_collectorState == AbortablePreclean && should_abort_preclean()),
4892 "Should only be AbortablePreclean.");
4893 _modUnionTable.mark_range(MemRegion(stop_point, dirtyRegion.end()));
4894 if (should_abort_preclean()) {
4895 break; // out of preclean loop
4896 } else {
4897 // Compute the next address at which preclean should pick up;
4898 // might need bitMapLock in order to read P-bits.
4899 lastAddr = next_card_start_after_block(stop_point);
4900 }
4901 }
4902 } else {
4903 assert(lastAddr == endAddr, "consistency check");
4904 assert(numDirtyCards == 0, "consistency check");
4905 break;
4906 }
4907 }
4908 verify_work_stacks_empty();
4909 verify_overflow_empty();
4910 return cumNumDirtyCards;
4911 }
4913 // NOTE: preclean_mod_union_table() above and preclean_card_table()
4914 // below are largely identical; if you need to modify
4915 // one of these methods, please check the other method too.
4917 size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* gen,
4918 ScanMarkedObjectsAgainCarefullyClosure* cl) {
4919 // strategy: it's similar to preclean_mod_union_table() above, in that
4920 // we accumulate contiguous ranges of dirty cards, mark these cards
4921 // precleaned, then scan the region covered by these cards.
4922 HeapWord* endAddr = (HeapWord*)(gen->_virtual_space.high());
4923 HeapWord* startAddr = (HeapWord*)(gen->_virtual_space.low());
4925 cl->setFreelistLock(gen->freelistLock()); // needed for yielding
4927 size_t numDirtyCards, cumNumDirtyCards;
4928 HeapWord *lastAddr, *nextAddr;
4930 for (cumNumDirtyCards = numDirtyCards = 0,
4931 nextAddr = lastAddr = startAddr;
4932 nextAddr < endAddr;
4933 nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
4935 ResourceMark rm;
4936 HandleMark hm;
4938 MemRegion dirtyRegion;
4939 {
4940 // See comments in "Precleaning notes" above on why we
4941 // do this locking. XXX Could the locking overheads be
4942 // too high when dirty cards are sparse? [I don't think so.]
4943 stopTimer();
4944 CMSTokenSync x(true); // is cms thread
4945 startTimer();
4946 sample_eden();
4947 // Get and clear dirty region from card table
4948 dirtyRegion = _ct->ct_bs()->dirty_card_range_after_reset(
4949 MemRegion(nextAddr, endAddr),
4950 true,
4951 CardTableModRefBS::precleaned_card_val());
4953 assert(dirtyRegion.start() >= nextAddr,
4954 "returned region inconsistent?");
4955 }
4956 lastAddr = dirtyRegion.end();
4957 numDirtyCards =
4958 dirtyRegion.word_size()/CardTableModRefBS::card_size_in_words;
4960 if (!dirtyRegion.is_empty()) {
4961 stopTimer();
4962 CMSTokenSyncWithLocks ts(true, gen->freelistLock(), bitMapLock());
4963 startTimer();
4964 sample_eden();
4965 verify_work_stacks_empty();
4966 verify_overflow_empty();
4967 HeapWord* stop_point =
4968 gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4969 if (stop_point != NULL) {
4970 assert((_collectorState == AbortablePreclean && should_abort_preclean()),
4971 "Should only be AbortablePreclean.");
4972 _ct->ct_bs()->invalidate(MemRegion(stop_point, dirtyRegion.end()));
4973 if (should_abort_preclean()) {
4974 break; // out of preclean loop
4975 } else {
4976 // Compute the next address at which preclean should pick up.
4977 lastAddr = next_card_start_after_block(stop_point);
4978 }
4979 }
4980 } else {
4981 break;
4982 }
4983 }
4984 verify_work_stacks_empty();
4985 verify_overflow_empty();
4986 return cumNumDirtyCards;
4987 }
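// Applies the concurrent marking klass closure to each Klass whose oops were
// modified (accumulated) during concurrent marking, clearing the
// accumulated-modified flag as it goes.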
4989 class PrecleanKlassClosure : public KlassClosure {
4990 CMKlassClosure _cm_klass_closure;
4991 public:
4992 PrecleanKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
4993 void do_klass(Klass* k) {
4994 if (k->has_accumulated_modified_oops()) {
4995 k->clear_accumulated_modified_oops();
4997 _cm_klass_closure.do_klass(k);
4998 }
4999 }
5000 };
5002 // The freelist lock is needed to prevent asserts; is it really needed?
5003 void CMSCollector::preclean_klasses(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock) {
5005 cl->set_freelistLock(freelistLock);
5007 CMSTokenSyncWithLocks ts(true, freelistLock, bitMapLock());
5009 // SSS: Add equivalent to ScanMarkedObjectsAgainCarefullyClosure::do_yield_check and should_abort_preclean?
5010 // SSS: We should probably check if precleaning should be aborted, at suitable intervals?
5011 PrecleanKlassClosure preclean_klass_closure(cl);
5012 ClassLoaderDataGraph::classes_do(&preclean_klass_closure);
5014 verify_work_stacks_empty();
5015 verify_overflow_empty();
5016 }
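// The final checkpoint (remark) pause: runs at a safepoint with the world
// stopped, optionally preceded by a scavenge when CMSScavengeBeforeRemark
// is set.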
5018 void CMSCollector::checkpointRootsFinal(bool asynch,
5019 bool clear_all_soft_refs, bool init_mark_was_synchronous) {
5020 assert(_collectorState == FinalMarking, "incorrect state transition?");
5021 check_correct_thread_executing();
5022 // world is stopped at this checkpoint
5023 assert(SafepointSynchronize::is_at_safepoint(),
5024 "world should be stopped");
5025 TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
5027 verify_work_stacks_empty();
5028 verify_overflow_empty();
5030 SpecializationStats::clear();
5031 if (PrintGCDetails) {
5032 gclog_or_tty->print("[YG occupancy: "SIZE_FORMAT" K ("SIZE_FORMAT" K)]",
5033 _young_gen->used() / K,
5034 _young_gen->capacity() / K);
5035 }
5036 if (asynch) {
5037 if (CMSScavengeBeforeRemark) {
5038 GenCollectedHeap* gch = GenCollectedHeap::heap();
5039 // Temporarily set flag to false, GCH->do_collection will
5040 // expect it to be false and will set it to true
5041 FlagSetting fl(gch->_is_gc_active, false);
5042 NOT_PRODUCT(GCTraceTime t("Scavenge-Before-Remark",
5043 PrintGCDetails && Verbose, true, _gc_timer_cm, _gc_tracer_cm->gc_id());)
5044 int level = _cmsGen->level() - 1;
5045 if (level >= 0) {
5046 gch->do_collection(true, // full (i.e. force, see below)
5047 false, // !clear_all_soft_refs
5048 0, // size
5049 false, // is_tlab
5050 level // max_level
5051 );
5052 }
5053 }
5054 FreelistLocker x(this);
5055 MutexLockerEx y(bitMapLock(),
5056 Mutex::_no_safepoint_check_flag);
5057 assert(!init_mark_was_synchronous, "but that's impossible!");
5058 checkpointRootsFinalWork(asynch, clear_all_soft_refs, false);
5059 } else {
5060 // already have all the locks
5061 checkpointRootsFinalWork(asynch, clear_all_soft_refs,
5062 init_mark_was_synchronous);
5063 }
5064 verify_work_stacks_empty();
5065 verify_overflow_empty();
5066 SpecializationStats::print();
5067 }
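// The bulk of the remark work: rescan roots and dirty (mod union) cards,
// process discovered references, and transition the collector to Sweeping.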
5069 void CMSCollector::checkpointRootsFinalWork(bool asynch,
5070 bool clear_all_soft_refs, bool init_mark_was_synchronous) {
5072 NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());)
5074 assert(haveFreelistLocks(), "must have free list locks");
5075 assert_lock_strong(bitMapLock());
5077 if (UseAdaptiveSizePolicy) {
5078 size_policy()->checkpoint_roots_final_begin();
5079 }
5081 ResourceMark rm;
5082 HandleMark hm;
5084 GenCollectedHeap* gch = GenCollectedHeap::heap();
5086 if (should_unload_classes()) {
5087 CodeCache::gc_prologue();
5088 }
5089 assert(haveFreelistLocks(), "must have free list locks");
5090 assert_lock_strong(bitMapLock());
5092 if (!init_mark_was_synchronous) {
5093 // We might assume that we need not fill TLAB's when
5094 // CMSScavengeBeforeRemark is set, because we may have just done
5095 // a scavenge which would have filled all TLAB's -- and besides
5096 // Eden would be empty. This however may not always be the case --
5097 // for instance although we asked for a scavenge, it may not have
5098 // happened because of a JNI critical section. We probably need
5099 // a policy for deciding whether we can in that case wait until
5100 // the critical section releases and then do the remark following
5101 // the scavenge, and skip it here. In the absence of that policy,
5102 // or of an indication of whether the scavenge did indeed occur,
5103 // we cannot rely on TLAB's having been filled and must do
5104 // so here just in case a scavenge did not happen.
5105 gch->ensure_parsability(false); // fill TLAB's, but no need to retire them
5106 // Update the saved marks which may affect the root scans.
5107 gch->save_marks();
5109 if (CMSPrintEdenSurvivorChunks) {
5110 print_eden_and_survivor_chunk_arrays();
5111 }
5113 {
5114 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
5116 // Note on the role of the mod union table:
5117 // Since the marker in "markFromRoots" marks concurrently with
5118 // mutators, it is possible for some reachable objects not to have been
5119 // scanned. For instance, an only reference to an object A was
5120 // placed in object B after the marker scanned B. Unless B is rescanned,
5121 // A would be collected. Such updates to references in marked objects
5122 // are detected via the mod union table which is the set of all cards
5123 // dirtied since the first checkpoint in this GC cycle and prior to
5124 // the most recent young generation GC, minus those cleaned up by the
5125 // concurrent precleaning.
5126 if (CMSParallelRemarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
5127 GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5128 do_remark_parallel();
5129 } else {
5130 GCTraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
5131 _gc_timer_cm, _gc_tracer_cm->gc_id());
5132 do_remark_non_parallel();
5133 }
5134 }
5135 } else {
5136 assert(!asynch, "Can't have init_mark_was_synchronous in asynch mode");
5137 // The initial mark was stop-world, so there's no rescanning to
5138 // do; go straight on to the next step below.
5139 }
5140 verify_work_stacks_empty();
5141 verify_overflow_empty();
5143 {
5144 NOT_PRODUCT(GCTraceTime ts("refProcessingWork", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());)
5145 refProcessingWork(asynch, clear_all_soft_refs);
5146 }
5147 verify_work_stacks_empty();
5148 verify_overflow_empty();
5150 if (should_unload_classes()) {
5151 CodeCache::gc_epilogue();
5152 }
5153 JvmtiExport::gc_epilogue();
5155 // If we encountered any (marking stack / work queue) overflow
5156 // events during the current CMS cycle, take appropriate
5157 // remedial measures, where possible, so as to try and avoid
5158 // recurrence of that condition.
5159 assert(_markStack.isEmpty(), "No grey objects");
5160 size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
5161 _ser_kac_ovflw + _ser_kac_preclean_ovflw;
5162 if (ser_ovflw > 0) {
5163 if (PrintCMSStatistics != 0) {
5164 gclog_or_tty->print_cr("Marking stack overflow (benign) "
5165 "(pmc_pc="SIZE_FORMAT", pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT
5166 ", kac_preclean="SIZE_FORMAT")",
5167 _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw,
5168 _ser_kac_ovflw, _ser_kac_preclean_ovflw);
5169 }
5170 _markStack.expand();
5171 _ser_pmc_remark_ovflw = 0;
5172 _ser_pmc_preclean_ovflw = 0;
5173 _ser_kac_preclean_ovflw = 0;
5174 _ser_kac_ovflw = 0;
5175 }
5176 if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) {
5177 if (PrintCMSStatistics != 0) {
5178 gclog_or_tty->print_cr("Work queue overflow (benign) "
5179 "(pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT")",
5180 _par_pmc_remark_ovflw, _par_kac_ovflw);
5181 }
5182 _par_pmc_remark_ovflw = 0;
5183 _par_kac_ovflw = 0;
5184 }
5185 if (PrintCMSStatistics != 0) {
5186 if (_markStack._hit_limit > 0) {
5187 gclog_or_tty->print_cr(" (benign) Hit max stack size limit ("SIZE_FORMAT")",
5188 _markStack._hit_limit);
5189 }
5190 if (_markStack._failed_double > 0) {
5191 gclog_or_tty->print_cr(" (benign) Failed stack doubling ("SIZE_FORMAT"),"
5192 " current capacity "SIZE_FORMAT,
5193 _markStack._failed_double,
5194 _markStack.capacity());
5195 }
5196 }
5197 _markStack._hit_limit = 0;
5198 _markStack._failed_double = 0;
5200 if ((VerifyAfterGC || VerifyDuringGC) &&
5201 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5202 verify_after_remark();
5203 }
5205 _gc_tracer_cm->report_object_count_after_gc(&_is_alive_closure);
5207 // Change under the freelistLocks.
5208 _collectorState = Sweeping;
5209 // Call isAllClear() under bitMapLock
5210 assert(_modUnionTable.isAllClear(),
5211 "Should be clear by end of the final marking");
5212 assert(_ct->klass_rem_set()->mod_union_is_clear(),
5213 "Should be clear by end of the final marking");
5214 if (UseAdaptiveSizePolicy) {
5215 size_policy()->checkpoint_roots_final_end(gch->gc_cause());
5216 }
5217 }
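// Parallel initial-mark work: each worker marks from its share of the young
// generation spaces and then from the remaining strong roots.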
5219 void CMSParInitialMarkTask::work(uint worker_id) {
5220 elapsedTimer _timer;
5221 ResourceMark rm;
5222 HandleMark hm;
5224 // ---------- scan from roots --------------
5225 _timer.start();
5226 GenCollectedHeap* gch = GenCollectedHeap::heap();
5227 Par_MarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
5228 CMKlassClosure klass_closure(&par_mri_cl);
5230 // ---------- young gen roots --------------
5231 {
5232 work_on_young_gen_roots(worker_id, &par_mri_cl);
5233 _timer.stop();
5234 if (PrintCMSStatistics != 0) {
5235 gclog_or_tty->print_cr(
5236 "Finished young gen initial mark scan work in %dth thread: %3.3f sec",
5237 worker_id, _timer.seconds());
5238 }
5239 }
5241 // ---------- remaining roots --------------
5242 _timer.reset();
5243 _timer.start();
5244 gch->gen_process_strong_roots(_collector->_cmsGen->level(),
5245 false, // yg was scanned above
5246 false, // this is parallel code
5247 false, // not scavenging
5248 SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
5249 &par_mri_cl,
5250 true, // walk all of code cache if (so & SO_CodeCache)
5251 NULL,
5252 &klass_closure);
5253 assert(_collector->should_unload_classes()
5254 || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_CodeCache),
5255 "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5256 _timer.stop();
5257 if (PrintCMSStatistics != 0) {
5258 gclog_or_tty->print_cr(
5259 "Finished remaining root initial mark scan work in %dth thread: %3.3f sec",
5260 worker_id, _timer.seconds());
5261 }
5262 }
5264 // Parallel remark task
5265 class CMSParRemarkTask: public CMSParMarkTask {
5266 CompactibleFreeListSpace* _cms_space;
5268 // The per-thread work queues, available here for stealing.
5269 OopTaskQueueSet* _task_queues;
5270 ParallelTaskTerminator _term;
5272 public:
5273 // A value of 0 passed to n_workers will cause the number of
5274 // workers to be taken from the active workers in the work gang.
5275 CMSParRemarkTask(CMSCollector* collector,
5276 CompactibleFreeListSpace* cms_space,
5277 int n_workers, FlexibleWorkGang* workers,
5278 OopTaskQueueSet* task_queues):
5279 CMSParMarkTask("Rescan roots and grey objects in parallel",
5280 collector, n_workers),
5281 _cms_space(cms_space),
5282 _task_queues(task_queues),
5283 _term(n_workers, task_queues) { }
5285 OopTaskQueueSet* task_queues() { return _task_queues; }
5287 OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
5289 ParallelTaskTerminator* terminator() { return &_term; }
5290 int n_workers() { return _n_workers; }
5292 void work(uint worker_id);
5294 private:
5295 // ... of dirty cards in old space
5296 void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i,
5297 Par_MarkRefsIntoAndScanClosure* cl);
5299 // ... work stealing for the above
5300 void do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl, int* seed);
5301 };
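// Rescans, during remark, those klasses whose oops were dirtied either
// before (accumulated) or during the current concurrent marking phase.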
5303 class RemarkKlassClosure : public KlassClosure {
5304 CMKlassClosure _cm_klass_closure;
5305 public:
5306 RemarkKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
5307 void do_klass(Klass* k) {
5308 // Check if we have modified any oops in the Klass during the concurrent marking.
5309 if (k->has_accumulated_modified_oops()) {
5310 k->clear_accumulated_modified_oops();
5312 // We could have transferred the current modified marks to the accumulated marks,
5313 // like we do with the Card Table to Mod Union Table. But it's not really necessary.
5314 } else if (k->has_modified_oops()) {
5315 // Don't clear anything, this info is needed by the next young collection.
5316 } else {
5317 // No modified oops in the Klass.
5318 return;
5319 }
5321 // The klass has modified fields, need to scan the klass.
5322 _cm_klass_closure.do_klass(k);
5323 }
5324 };
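// Scan (or rescan) the young generation spaces, using the eden and survivor
// chunk arrays recorded by sampling to partition the work among the gang.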
5326 void CMSParMarkTask::work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl) {
5327 DefNewGeneration* dng = _collector->_young_gen->as_DefNewGeneration();
5328 EdenSpace* eden_space = dng->eden();
5329 ContiguousSpace* from_space = dng->from();
5330 ContiguousSpace* to_space = dng->to();
5332 HeapWord** eca = _collector->_eden_chunk_array;
5333 size_t ect = _collector->_eden_chunk_index;
5334 HeapWord** sca = _collector->_survivor_chunk_array;
5335 size_t sct = _collector->_survivor_chunk_index;
5337 assert(ect <= _collector->_eden_chunk_capacity, "out of bounds");
5338 assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds");
5340 do_young_space_rescan(worker_id, cl, to_space, NULL, 0);
5341 do_young_space_rescan(worker_id, cl, from_space, sca, sct);
5342 do_young_space_rescan(worker_id, cl, eden_space, eca, ect);
5343 }
5345 // work_queue(i) is passed to the closure
5346 // Par_MarkRefsIntoAndScanClosure. The "i" parameter
5347 // also is passed to do_dirty_card_rescan_tasks() and to
5348 // do_work_steal() to select the i-th task_queue.
5350 void CMSParRemarkTask::work(uint worker_id) {
5351 elapsedTimer _timer;
5352 ResourceMark rm;
5353 HandleMark hm;
5355 // ---------- rescan from roots --------------
5356 _timer.start();
5357 GenCollectedHeap* gch = GenCollectedHeap::heap();
5358 Par_MarkRefsIntoAndScanClosure par_mrias_cl(_collector,
5359 _collector->_span, _collector->ref_processor(),
5360 &(_collector->_markBitMap),
5361 work_queue(worker_id));
5363 // Rescan young gen roots first since these are likely
5364 // coarsely partitioned and may, on that account, constitute
5365 // the critical path; thus, it's best to start off that
5366 // work first.
5367 // ---------- young gen roots --------------
5368 {
5369 work_on_young_gen_roots(worker_id, &par_mrias_cl);
5370 _timer.stop();
5371 if (PrintCMSStatistics != 0) {
5372 gclog_or_tty->print_cr(
5373 "Finished young gen rescan work in %dth thread: %3.3f sec",
5374 worker_id, _timer.seconds());
5375 }
5376 }
5378 // ---------- remaining roots --------------
5379 _timer.reset();
5380 _timer.start();
5381 gch->gen_process_strong_roots(_collector->_cmsGen->level(),
5382 false, // yg was scanned above
5383 false, // this is parallel code
5384 false, // not scavenging
5385 SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
5386 &par_mrias_cl,
5387 true, // walk all of code cache if (so & SO_CodeCache)
5388 NULL,
5389 NULL); // The dirty klasses will be handled below
5390 assert(_collector->should_unload_classes()
5391 || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_CodeCache),
5392 "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5393 _timer.stop();
5394 if (PrintCMSStatistics != 0) {
5395 gclog_or_tty->print_cr(
5396 "Finished remaining root rescan work in %dth thread: %3.3f sec",
5397 worker_id, _timer.seconds());
5398 }
5400 // ---------- unhandled CLD scanning ----------
5401 if (worker_id == 0) { // Single threaded at the moment.
5402 _timer.reset();
5403 _timer.start();
5405 // Scan all new class loader data objects and new dependencies that were
5406 // introduced during concurrent marking.
5407 ResourceMark rm;
5408 GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
5409 for (int i = 0; i < array->length(); i++) {
5410 par_mrias_cl.do_class_loader_data(array->at(i));
5411 }
5413 // We don't need to keep track of new CLDs anymore.
5414 ClassLoaderDataGraph::remember_new_clds(false);
5416 _timer.stop();
5417 if (PrintCMSStatistics != 0) {
5418 gclog_or_tty->print_cr(
5419 "Finished unhandled CLD scanning work in %dth thread: %3.3f sec",
5420 worker_id, _timer.seconds());
5421 }
5422 }
5424 // ---------- dirty klass scanning ----------
5425 if (worker_id == 0) { // Single threaded at the moment.
5426 _timer.reset();
5427 _timer.start();
5429 // Scan all classes that were dirtied during the concurrent marking phase.
5430 RemarkKlassClosure remark_klass_closure(&par_mrias_cl);
5431 ClassLoaderDataGraph::classes_do(&remark_klass_closure);
5433 _timer.stop();
5434 if (PrintCMSStatistics != 0) {
5435 gclog_or_tty->print_cr(
5436 "Finished dirty klass scanning work in %dth thread: %3.3f sec",
5437 worker_id, _timer.seconds());
5438 }
5439 }
5441 // We might have added oops to ClassLoaderData::_handles during the
5442 // concurrent marking phase. These oops point to newly allocated objects
5443 // that are guaranteed to be kept alive. Either by the direct allocation
5444 // code, or when the young collector processes the strong roots. Hence,
5445 // we don't have to revisit the _handles block during the remark phase.
5447 // ---------- rescan dirty cards ------------
5448 _timer.reset();
5449 _timer.start();
5451 // Do the rescan tasks for the CMS generation's space
5452 // (cms_space).
5453 // "worker_id" is passed to select the task_queue for "worker_id"
5454 do_dirty_card_rescan_tasks(_cms_space, worker_id, &par_mrias_cl);
5455 _timer.stop();
5456 if (PrintCMSStatistics != 0) {
5457 gclog_or_tty->print_cr(
5458 "Finished dirty card rescan work in %dth thread: %3.3f sec",
5459 worker_id, _timer.seconds());
5460 }
5462 // ---------- steal work from other threads ...
5463 // ---------- ... and drain overflow list.
5464 _timer.reset();
5465 _timer.start();
5466 do_work_steal(worker_id, &par_mrias_cl, _collector->hash_seed(worker_id));
5467 _timer.stop();
5468 if (PrintCMSStatistics != 0) {
5469 gclog_or_tty->print_cr(
5470 "Finished work stealing in %dth thread: %3.3f sec",
5471 worker_id, _timer.seconds());
5472 }
5473 }
5475 // Note that the worker_id parameter is not used.
5476 void
5477 CMSParMarkTask::do_young_space_rescan(uint worker_id,
5478 OopsInGenClosure* cl, ContiguousSpace* space,
5479 HeapWord** chunk_array, size_t chunk_top) {
5480 // Until all tasks completed:
5481 // . claim an unclaimed task
5482 // . compute region boundaries corresponding to task claimed
5483 // using chunk_array
5484 // . par_oop_iterate(cl) over that region
5486 ResourceMark rm;
5487 HandleMark hm;
5489 SequentialSubTasksDone* pst = space->par_seq_tasks();
5491 uint nth_task = 0;
5492 uint n_tasks = pst->n_tasks();
5494 if (n_tasks > 0) {
5495 assert(pst->valid(), "Uninitialized use?");
5496 HeapWord *start, *end;
5497 while (!pst->is_task_claimed(/* reference */ nth_task)) {
5498 // We claimed task # nth_task; compute its boundaries.
5499 if (chunk_top == 0) { // no samples were taken
5500 assert(nth_task == 0 && n_tasks == 1, "Can have only 1 EdenSpace task");
5501 start = space->bottom();
5502 end = space->top();
5503 } else if (nth_task == 0) {
5504 start = space->bottom();
5505 end = chunk_array[nth_task];
5506 } else if (nth_task < (uint)chunk_top) {
5507 assert(nth_task >= 1, "Control point invariant");
5508 start = chunk_array[nth_task - 1];
5509 end = chunk_array[nth_task];
5510 } else {
5511 assert(nth_task == (uint)chunk_top, "Control point invariant");
5512 start = chunk_array[chunk_top - 1];
5513 end = space->top();
5514 }
5515 MemRegion mr(start, end);
5516 // Verify that mr is in space
5517 assert(mr.is_empty() || space->used_region().contains(mr),
5518 "Should be in space");
5519 // Verify that "start" is an object boundary
5520 assert(mr.is_empty() || oop(mr.start())->is_oop(),
5521 "Should be an oop");
5522 space->par_oop_iterate(mr, cl);
5523 }
5524 pst->all_tasks_completed();
5525 }
5526 }
5528 void
5529 CMSParRemarkTask::do_dirty_card_rescan_tasks(
5530 CompactibleFreeListSpace* sp, int i,
5531 Par_MarkRefsIntoAndScanClosure* cl) {
5532 // Until all tasks completed:
5533 // . claim an unclaimed task
5534 // . compute region boundaries corresponding to task claimed
5535 // . transfer dirty bits ct->mut for that region
5536 // . apply rescanclosure to dirty mut bits for that region
5538 ResourceMark rm;
5539 HandleMark hm;
5541 OopTaskQueue* work_q = work_queue(i);
5542 ModUnionClosure modUnionClosure(&(_collector->_modUnionTable));
5543 // CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION!
5544 // CAUTION: This closure has state that persists across calls to
5545 // the work method dirty_range_iterate_clear() in that it has
5546 // embedded in it a (subtype of) UpwardsObjectClosure. The
5547 // use of that state in the embedded UpwardsObjectClosure instance
5548 // assumes that the cards are always iterated (even if in parallel
5549 // by several threads) in monotonically increasing order per
5550 // thread. This is true of the implementation below which picks
5551 // card ranges (chunks) in monotonically increasing order globally
5552 // and, a-fortiori, in monotonically increasing order per thread
5553 // (the latter order being a subsequence of the former).
5554 // If the work code below is ever reorganized into a more chaotic
5555 // work-partitioning form than the current "sequential tasks"
5556 // paradigm, the use of that persistent state will have to be
5557 // revisited and modified appropriately. See also related
5558 // bug 4756801 work on which should examine this code to make
5559 // sure that the changes there do not run counter to the
5560 // assumptions made here and necessary for correctness and
5561 // efficiency. Note also that this code might yield inefficient
5562 // behaviour in the case of very large objects that span one or
5563 // more work chunks. Such objects would potentially be scanned
5564 // several times redundantly. Work on 4756801 should try and
5565 // address that performance anomaly if at all possible. XXX
5566 MemRegion full_span = _collector->_span;
5567 CMSBitMap* bm = &(_collector->_markBitMap); // shared
5568 MarkFromDirtyCardsClosure
5569 greyRescanClosure(_collector, full_span, // entire span of interest
5570 sp, bm, work_q, cl);
5572 SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
5573 assert(pst->valid(), "Uninitialized use?");
5574 uint nth_task = 0;
5575 const int alignment = CardTableModRefBS::card_size * BitsPerWord;
5576 MemRegion span = sp->used_region();
5577 HeapWord* start_addr = span.start();
5578 HeapWord* end_addr = (HeapWord*)round_to((intptr_t)span.end(),
5579 alignment);
5580 const size_t chunk_size = sp->rescan_task_size(); // in HeapWord units
5581 assert((HeapWord*)round_to((intptr_t)start_addr, alignment) ==
5582 start_addr, "Check alignment");
5583 assert((size_t)round_to((intptr_t)chunk_size, alignment) ==
5584 chunk_size, "Check alignment");
5586 while (!pst->is_task_claimed(/* reference */ nth_task)) {
5587 // Having claimed the nth_task, compute corresponding mem-region,
5588 // which is a-fortiori aligned correctly (i.e. at a MUT boundary).
5589 // The alignment restriction ensures that we do not need any
5590 // synchronization with other gang-workers while setting or
5591 // clearing bits in this chunk of the MUT.
5592 MemRegion this_span = MemRegion(start_addr + nth_task*chunk_size,
5593 start_addr + (nth_task+1)*chunk_size);
5594 // The last chunk's end might be way beyond end of the
5595 // used region. In that case pull back appropriately.
5596 if (this_span.end() > end_addr) {
5597 this_span.set_end(end_addr);
5598 assert(!this_span.is_empty(), "Program logic (calculation of n_tasks)");
5599 }
5600 // Iterate over the dirty cards covering this chunk, marking them
5601 // precleaned, and setting the corresponding bits in the mod union
5602 // table. Since we have been careful to partition at Card and MUT-word
5603 // boundaries no synchronization is needed between parallel threads.
5604 _collector->_ct->ct_bs()->dirty_card_iterate(this_span,
5605 &modUnionClosure);
5607 // Having transferred these marks into the modUnionTable,
5608 // rescan the marked objects on the dirty cards in the modUnionTable.
5609 // Even if this is at a synchronous collection, the initial marking
5610 // may have been done during an asynchronous collection so there
5611 // may be dirty bits in the mod-union table.
5612 _collector->_modUnionTable.dirty_range_iterate_clear(
5613 this_span, &greyRescanClosure);
5614 _collector->_modUnionTable.verifyNoOneBitsInRange(
5615 this_span.start(),
5616 this_span.end());
5617 }
5618 pst->all_tasks_completed(); // declare that i am done
5619 }
5621 // . see if we can share work_queues with ParNew? XXX
5622 void
5623 CMSParRemarkTask::do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl,
5624 int* seed) {
5625 OopTaskQueue* work_q = work_queue(i);
5626 NOT_PRODUCT(int num_steals = 0;)
5627 oop obj_to_scan;
5628 CMSBitMap* bm = &(_collector->_markBitMap);
5630 while (true) {
5631 // Completely finish any left over work from (an) earlier round(s)
5632 cl->trim_queue(0);
5633 size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
5634 (size_t)ParGCDesiredObjsFromOverflowList);
5635 // Now check if there's any work in the overflow list
5636 // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
5637 // only affects the number of attempts made to get work from the
5638 // overflow list and does not affect the number of workers. Just
5639 // pass ParallelGCThreads so this behavior is unchanged.
5640 if (_collector->par_take_from_overflow_list(num_from_overflow_list,
5641 work_q,
5642 ParallelGCThreads)) {
5643 // found something in global overflow list;
5644 // not yet ready to go stealing work from others.
5645 // We'd like to assert(work_q->size() != 0, ...)
5646 // because we just took work from the overflow list,
5647 // but of course we can't since all of that could have
5648 // been already stolen from us.
5649 // "He giveth and He taketh away."
5650 continue;
5651 }
5652 // Verify that we have no work before we resort to stealing
5653 assert(work_q->size() == 0, "Have work, shouldn't steal");
5654 // Try to steal from other queues that have work
5655 if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
5656 NOT_PRODUCT(num_steals++;)
5657 assert(obj_to_scan->is_oop(), "Oops, not an oop!");
5658 assert(bm->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
5659 // Do scanning work
5660 obj_to_scan->oop_iterate(cl);
5661 // Loop around, finish this work, and try to steal some more
5662 } else if (terminator()->offer_termination()) {
5663 break; // nirvana from the infinite cycle
5664 }
5665 }
5666 NOT_PRODUCT(
5667 if (PrintCMSStatistics != 0) {
5668 gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
5669 }
5670 )
5671 assert(work_q->size() == 0 && _collector->overflow_list_is_empty(),
5672 "Else our work is not yet done");
5673 }
5675 // Record object boundaries in _eden_chunk_array by sampling the eden
5676 // top in the slow-path eden object allocation code path and record
5677 // the boundaries, if CMSEdenChunksRecordAlways is true. If
5678 // CMSEdenChunksRecordAlways is false, we use the other asynchronous
5679 // sampling in sample_eden() that activates during the
5680 // preclean phase.
5681 void CMSCollector::sample_eden_chunk() {
5682 if (CMSEdenChunksRecordAlways && _eden_chunk_array != NULL) {
5683 if (_eden_chunk_lock->try_lock()) {
5684 // Record a sample. This is the critical section. The contents
5685 // of the _eden_chunk_array have to be non-decreasing in the
5686 // address order.
5687 _eden_chunk_array[_eden_chunk_index] = *_top_addr;
5688 assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
5689 "Unexpected state of Eden");
5690 if (_eden_chunk_index == 0 ||
5691 ((_eden_chunk_array[_eden_chunk_index] > _eden_chunk_array[_eden_chunk_index-1]) &&
5692 (pointer_delta(_eden_chunk_array[_eden_chunk_index],
5693 _eden_chunk_array[_eden_chunk_index-1]) >= CMSSamplingGrain))) {
5694 _eden_chunk_index++; // commit sample
5695 }
5696 _eden_chunk_lock->unlock();
5697 }
5698 }
5699 }
5701 // Return a thread-local PLAB recording array, as appropriate.
5702 void* CMSCollector::get_data_recorder(int thr_num) {
5703 if (_survivor_plab_array != NULL &&
5704 (CMSPLABRecordAlways ||
5705 (_collectorState > Marking && _collectorState < FinalMarking))) {
5706 assert(thr_num < (int)ParallelGCThreads, "thr_num is out of bounds");
5707 ChunkArray* ca = &_survivor_plab_array[thr_num];
5708 ca->reset(); // clear it so that fresh data is recorded
5709 return (void*) ca;
5710 } else {
5711 return NULL;
5712 }
5713 }
5715 // Reset all the thread-local PLAB recording arrays
5716 void CMSCollector::reset_survivor_plab_arrays() {
5717 for (uint i = 0; i < ParallelGCThreads; i++) {
5718 _survivor_plab_array[i].reset();
5719 }
5720 }
5722 // Merge the per-thread plab arrays into the global survivor chunk
5723 // array which will provide the partitioning of the survivor space
5724 // for CMS initial scan and rescan.
5725 void CMSCollector::merge_survivor_plab_arrays(ContiguousSpace* surv,
5726 int no_of_gc_threads) {
5727 assert(_survivor_plab_array != NULL, "Error");
5728 assert(_survivor_chunk_array != NULL, "Error");
5729 assert(_collectorState == FinalMarking ||
5730 (CMSParallelInitialMarkEnabled && _collectorState == InitialMarking), "Error");
5731 for (int j = 0; j < no_of_gc_threads; j++) {
5732 _cursor[j] = 0;
5733 }
5734 HeapWord* top = surv->top();
5735 size_t i;
5736 for (i = 0; i < _survivor_chunk_capacity; i++) { // all sca entries
5737 HeapWord* min_val = top; // Higher than any PLAB address
5738 uint min_tid = 0; // position of min_val this round
5739 for (int j = 0; j < no_of_gc_threads; j++) {
5740 ChunkArray* cur_sca = &_survivor_plab_array[j];
5741 if (_cursor[j] == cur_sca->end()) {
5742 continue;
5743 }
5744 assert(_cursor[j] < cur_sca->end(), "ctl pt invariant");
5745 HeapWord* cur_val = cur_sca->nth(_cursor[j]);
5746 assert(surv->used_region().contains(cur_val), "Out of bounds value");
5747 if (cur_val < min_val) {
5748 min_tid = j;
5749 min_val = cur_val;
5750 } else {
5751 assert(cur_val < top, "All recorded addresses should be less");
5752 }
5753 }
5754 // At this point min_val and min_tid are respectively
5755 // the least address in _survivor_plab_array[j]->nth(_cursor[j])
5756 // and the thread (j) that witnesses that address.
5757 // We record this address in the _survivor_chunk_array[i]
5758 // and increment _cursor[min_tid] prior to the next round i.
5759 if (min_val == top) {
5760 break;
5761 }
5762 _survivor_chunk_array[i] = min_val;
5763 _cursor[min_tid]++;
5764 }
5765 // We are all done; record the size of the _survivor_chunk_array
5766 _survivor_chunk_index = i; // exclusive: [0, i)
5767 if (PrintCMSStatistics > 0) {
5768 gclog_or_tty->print(" (Survivor:" SIZE_FORMAT "chunks) ", i);
5769 }
5770 // Verify that we used up all the recorded entries
5771 #ifdef ASSERT
5772 size_t total = 0;
5773 for (int j = 0; j < no_of_gc_threads; j++) {
5774 assert(_cursor[j] == _survivor_plab_array[j].end(), "Ctl pt invariant");
5775 total += _cursor[j];
5776 }
5777 assert(total == _survivor_chunk_index, "Ctl Pt Invariant");
5778 // Check that the merged array is in sorted order
5779 if (total > 0) {
5780 for (size_t i = 0; i < total - 1; i++) {
5781 if (PrintCMSStatistics > 0) {
5782 gclog_or_tty->print(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ",
5783 i, _survivor_chunk_array[i]);
5784 }
5785 assert(_survivor_chunk_array[i] < _survivor_chunk_array[i+1],
5786 "Not sorted");
5787 }
5788 }
5789 #endif // ASSERT
5790 }
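// --- Editor's illustrative sketch below; not part of the original file. ---
// The merge above is a k-way merge: every round selects the smallest
// unconsumed address across the per-thread sorted arrays and appends it to
// the global array. A minimal stand-alone version over ints; all names and
// parameters here are hypothetical:
static size_t k_way_merge(int** sources, const size_t* lengths, size_t k,
                          size_t* cursor,  // k zero-initialized cursors
                          int* merged) {   // capacity >= sum of lengths
  size_t out = 0;
  for (;;) {
    size_t min_tid = k;                      // k means "nothing found this round"
    for (size_t j = 0; j < k; j++) {
      if (cursor[j] == lengths[j]) continue; // source j is exhausted
      if (min_tid == k ||
          sources[j][cursor[j]] < sources[min_tid][cursor[min_tid]]) {
        min_tid = j;
      }
    }
    if (min_tid == k) break;                 // all sources exhausted
    merged[out++] = sources[min_tid][cursor[min_tid]++];
  }
  return out;                                // number of merged entries
}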
5792 // Set up the space's par_seq_tasks structure for work claiming
5793 // for parallel initial scan and rescan of young gen.
5794 // See ParRescanTask where this is currently used.
5795 void
5796 CMSCollector::
5797 initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
5798 assert(n_threads > 0, "Unexpected n_threads argument");
5799 DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
5801 // Eden space
5802 if (!dng->eden()->is_empty()) {
5803 SequentialSubTasksDone* pst = dng->eden()->par_seq_tasks();
5804 assert(!pst->valid(), "Clobbering existing data?");
5805 // Each valid entry in [0, _eden_chunk_index) represents a task.
5806 size_t n_tasks = _eden_chunk_index + 1;
5807 assert(n_tasks == 1 || _eden_chunk_array != NULL, "Error");
5808 // Sets the condition for completion of the subtask (how many threads
5809 // need to finish in order to be done).
5810 pst->set_n_threads(n_threads);
5811 pst->set_n_tasks((int)n_tasks);
5812 }
5814 // Merge the survivor plab arrays into _survivor_chunk_array
5815 if (_survivor_plab_array != NULL) {
5816 merge_survivor_plab_arrays(dng->from(), n_threads);
5817 } else {
5818 assert(_survivor_chunk_index == 0, "Error");
5819 }
5821 // To space
5822 {
5823 SequentialSubTasksDone* pst = dng->to()->par_seq_tasks();
5824 assert(!pst->valid(), "Clobbering existing data?");
5825 // Sets the condition for completion of the subtask (how many threads
5826 // need to finish in order to be done).
5827 pst->set_n_threads(n_threads);
5828 pst->set_n_tasks(1);
5829 assert(pst->valid(), "Error");
5830 }
5832 // From space
5833 {
5834 SequentialSubTasksDone* pst = dng->from()->par_seq_tasks();
5835 assert(!pst->valid(), "Clobbering existing data?");
5836 size_t n_tasks = _survivor_chunk_index + 1;
5837 assert(n_tasks == 1 || _survivor_chunk_array != NULL, "Error");
5838 // Sets the condition for completion of the subtask (how many threads
5839 // need to finish in order to be done).
5840 pst->set_n_threads(n_threads);
5841 pst->set_n_tasks((int)n_tasks);
5842 assert(pst->valid(), "Error");
5843 }
5844 }
5846 // Parallel version of remark
5847 void CMSCollector::do_remark_parallel() {
5848 GenCollectedHeap* gch = GenCollectedHeap::heap();
5849 FlexibleWorkGang* workers = gch->workers();
5850 assert(workers != NULL, "Need parallel worker threads.");
5851 // Choose to use the number of GC workers most recently set
5852 // into "active_workers". If active_workers is not set, set it
5853 // to ParallelGCThreads.
5854 int n_workers = workers->active_workers();
5855 if (n_workers == 0) {
5856 assert(n_workers > 0, "Should have been set during scavenge");
5857 n_workers = ParallelGCThreads;
5858 workers->set_active_workers(n_workers);
5859 }
5860 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
5862 CMSParRemarkTask tsk(this,
5863 cms_space,
5864 n_workers, workers, task_queues());
5866 // Set up for parallel process_strong_roots work.
5867 gch->set_par_threads(n_workers);
5868 // We won't be iterating over the cards in the card table updating
5869 // the younger_gen cards, so we shouldn't call the following else
5870 // the verification code as well as subsequent younger_refs_iterate
5871 // code would get confused. XXX
5872 // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
5874 // The young gen rescan work will not be done as part of
5875 // process_strong_roots (which currently doesn't know how to
5876 // parallelize such a scan), but rather will be broken up into
5877 // a set of parallel tasks (via the sampling that the [abortable]
5878 // preclean phase did of EdenSpace, plus the [two] tasks of
5879 // scanning the [two] survivor spaces). Further fine-grain
5880 // parallelization of the scanning of the survivor spaces
5881 // themselves, and of precleaning of the younger gen itself
5882 // is deferred to the future.
5883 initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
5885 // The dirty card rescan work is broken up into a "sequence"
5886 // of parallel tasks (per constituent space) that are dynamically
5887 // claimed by the parallel threads.
5888 cms_space->initialize_sequential_subtasks_for_rescan(n_workers);
5890 // It turns out that even when we're using 1 thread, doing the work in a
5891 // separate thread causes wide variance in run times. We can't help this
5892 // in the multi-threaded case, but we special-case n=1 here to get
5893 // repeatable measurements of the 1-thread overhead of the parallel code.
5894 if (n_workers > 1) {
5895 // Make refs discovery MT-safe, if it isn't already: it may not
5896 // necessarily be so, since it's possible that we are doing
5897 // ST marking.
5898 ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), true);
5899 GenCollectedHeap::StrongRootsScope srs(gch);
5900 workers->run_task(&tsk);
5901 } else {
5902 ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
5903 GenCollectedHeap::StrongRootsScope srs(gch);
5904 tsk.work(0);
5905 }
5907 gch->set_par_threads(0); // 0 ==> non-parallel.
5908 // restore, single-threaded for now, any preserved marks
5909 // as a result of work_q overflow
5910 restore_preserved_marks_if_any();
5911 }
5913 // Non-parallel version of remark
5914 void CMSCollector::do_remark_non_parallel() {
5915 ResourceMark rm;
5916 HandleMark hm;
5917 GenCollectedHeap* gch = GenCollectedHeap::heap();
5918 ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
5920 MarkRefsIntoAndScanClosure
5921 mrias_cl(_span, ref_processor(), &_markBitMap, NULL /* not precleaning */,
5922 &_markStack, this,
5923 false /* should_yield */, false /* not precleaning */);
5924 MarkFromDirtyCardsClosure
5925 markFromDirtyCardsClosure(this, _span,
5926 NULL, // space is set further below
5927 &_markBitMap, &_markStack, &mrias_cl);
5928 {
5929 GCTraceTime t("grey object rescan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5930 // Iterate over the dirty cards, setting the corresponding bits in the
5931 // mod union table.
5932 {
5933 ModUnionClosure modUnionClosure(&_modUnionTable);
5934 _ct->ct_bs()->dirty_card_iterate(
5935 _cmsGen->used_region(),
5936 &modUnionClosure);
5937 }
5938 // Having transferred these marks into the modUnionTable, we just need
5939 // to rescan the marked objects on the dirty cards in the modUnionTable.
5940 // The initial marking may have been done during an asynchronous
5941 // collection so there may be dirty bits in the mod-union table.
5942 const int alignment =
5943 CardTableModRefBS::card_size * BitsPerWord;
5944 {
5945 // ... First handle dirty cards in CMS gen
5946 markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace());
5947 MemRegion ur = _cmsGen->used_region();
5948 HeapWord* lb = ur.start();
5949 HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
5950 MemRegion cms_span(lb, ub);
5951 _modUnionTable.dirty_range_iterate_clear(cms_span,
5952 &markFromDirtyCardsClosure);
5953 verify_work_stacks_empty();
5954 if (PrintCMSStatistics != 0) {
5955 gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in cms gen) ",
5956 markFromDirtyCardsClosure.num_dirty_cards());
5957 }
5958 }
5959 }
5960 if (VerifyDuringGC &&
5961 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5962 HandleMark hm; // Discard invalid handles created during verification
5963 Universe::verify();
5964 }
5965 {
5966 GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5968 verify_work_stacks_empty();
5970 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
5971 GenCollectedHeap::StrongRootsScope srs(gch);
5972 gch->gen_process_strong_roots(_cmsGen->level(),
5973 true, // younger gens as roots
5974 false, // use the local StrongRootsScope
5975 false, // not scavenging
5976 SharedHeap::ScanningOption(roots_scanning_options()),
5977 &mrias_cl,
5978 true, // walk code active on stacks
5979 NULL,
5980 NULL); // The dirty klasses will be handled below
5982 assert(should_unload_classes()
5983 || (roots_scanning_options() & SharedHeap::SO_CodeCache),
5984 "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5985 }
5987 {
5988 GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5990 verify_work_stacks_empty();
5992 // Scan all class loader data objects that might have been introduced
5993 // during concurrent marking.
5994 ResourceMark rm;
5995 GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
5996 for (int i = 0; i < array->length(); i++) {
5997 mrias_cl.do_class_loader_data(array->at(i));
5998 }
6000 // We don't need to keep track of new CLDs anymore.
6001 ClassLoaderDataGraph::remember_new_clds(false);
6003 verify_work_stacks_empty();
6004 }
6006 {
6007 GCTraceTime t("dirty klass scan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
6009 verify_work_stacks_empty();
6011 RemarkKlassClosure remark_klass_closure(&mrias_cl);
6012 ClassLoaderDataGraph::classes_do(&remark_klass_closure);
6014 verify_work_stacks_empty();
6015 }
6017 // We might have added oops to ClassLoaderData::_handles during the
6018 // concurrent marking phase. These oops point to newly allocated objects
6019 // that are guaranteed to be kept alive. Either by the direct allocation
6020 // code, or when the young collector processes the strong roots. Hence,
6021 // we don't have to revisit the _handles block during the remark phase.
6023 verify_work_stacks_empty();
6024 // Restore evacuated mark words, if any, used for overflow list links
6025 if (!CMSOverflowEarlyRestoration) {
6026 restore_preserved_marks_if_any();
6027 }
6028 verify_overflow_empty();
6029 }
6031 ////////////////////////////////////////////////////////
6032 // Parallel Reference Processing Task Proxy Class
6033 ////////////////////////////////////////////////////////
6034 class CMSRefProcTaskProxy: public AbstractGangTaskWOopQueues {
6035 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
6036 CMSCollector* _collector;
6037 CMSBitMap* _mark_bit_map;
6038 const MemRegion _span;
6039 ProcessTask& _task;
6041 public:
6042 CMSRefProcTaskProxy(ProcessTask& task,
6043 CMSCollector* collector,
6044 const MemRegion& span,
6045 CMSBitMap* mark_bit_map,
6046 AbstractWorkGang* workers,
6047 OopTaskQueueSet* task_queues):
6048 // XXX Should superclass AGTWOQ also know about AWG since it knows
6049 // about the task_queues used by the AWG? Then it could initialize
6050 // the terminator() object. See 6984287. The set_for_termination()
6051 // below is a temporary band-aid for the regression in 6984287.
6052 AbstractGangTaskWOopQueues("Process referents by policy in parallel",
6053 task_queues),
6054 _task(task),
6055 _collector(collector), _span(span), _mark_bit_map(mark_bit_map)
6056 {
6057 assert(_collector->_span.equals(_span) && !_span.is_empty(),
6058 "Inconsistency in _span");
6059 set_for_termination(workers->active_workers());
6060 }
6062 OopTaskQueueSet* task_queues() { return queues(); }
6064 OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
6066 void do_work_steal(int i,
6067 CMSParDrainMarkingStackClosure* drain,
6068 CMSParKeepAliveClosure* keep_alive,
6069 int* seed);
6071 virtual void work(uint worker_id);
6072 };
6074 void CMSRefProcTaskProxy::work(uint worker_id) {
6075 assert(_collector->_span.equals(_span), "Inconsistency in _span");
6076 CMSParKeepAliveClosure par_keep_alive(_collector, _span,
6077 _mark_bit_map,
6078 work_queue(worker_id));
6079 CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span,
6080 _mark_bit_map,
6081 work_queue(worker_id));
6082 CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map);
6083 _task.work(worker_id, is_alive_closure, par_keep_alive, par_drain_stack);
6084 if (_task.marks_oops_alive()) {
6085 do_work_steal(worker_id, &par_drain_stack, &par_keep_alive,
6086 _collector->hash_seed(worker_id));
6087 }
6088 assert(work_queue(worker_id)->size() == 0, "work_queue should be empty");
6089 assert(_collector->_overflow_list == NULL, "non-empty _overflow_list");
6090 }
6092 class CMSRefEnqueueTaskProxy: public AbstractGangTask {
6093 typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
6094 EnqueueTask& _task;
6096 public:
6097 CMSRefEnqueueTaskProxy(EnqueueTask& task)
6098 : AbstractGangTask("Enqueue reference objects in parallel"),
6099 _task(task)
6100 { }
6102 virtual void work(uint worker_id)
6103 {
6104 _task.work(worker_id);
6105 }
6106 };
6108 CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector,
6109 MemRegion span, CMSBitMap* bit_map, OopTaskQueue* work_queue):
6110 _span(span),
6111 _bit_map(bit_map),
6112 _work_queue(work_queue),
6113 _mark_and_push(collector, span, bit_map, work_queue),
6114 _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
6115 (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads)))
6116 { }
6118 // . see if we can share work_queues with ParNew? XXX
6119 void CMSRefProcTaskProxy::do_work_steal(int i,
6120 CMSParDrainMarkingStackClosure* drain,
6121 CMSParKeepAliveClosure* keep_alive,
6122 int* seed) {
6123 OopTaskQueue* work_q = work_queue(i);
6124 NOT_PRODUCT(int num_steals = 0;)
6125 oop obj_to_scan;
6127 while (true) {
6128 // Completely finish any left over work from (an) earlier round(s)
6129 drain->trim_queue(0);
6130 size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
6131 (size_t)ParGCDesiredObjsFromOverflowList);
6132 // Now check if there's any work in the overflow list
6133 // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
6134 // only affects the number of attempts made to get work from the
6135 // overflow list and does not affect the number of workers. Just
6136 // pass ParallelGCThreads so this behavior is unchanged.
6137 if (_collector->par_take_from_overflow_list(num_from_overflow_list,
6138 work_q,
6139 ParallelGCThreads)) {
6140 // Found something in global overflow list;
6141 // not yet ready to go stealing work from others.
6142 // We'd like to assert(work_q->size() != 0, ...)
6143 // because we just took work from the overflow list,
6144 // but of course we can't, since all of that might have
6145 // been already stolen from us.
6146 continue;
6147 }
6148 // Verify that we have no work before we resort to stealing
6149 assert(work_q->size() == 0, "Have work, shouldn't steal");
6150 // Try to steal from other queues that have work
6151 if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
6152 NOT_PRODUCT(num_steals++;)
6153 assert(obj_to_scan->is_oop(), "Oops, not an oop!");
6154 assert(_mark_bit_map->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
6155 // Do scanning work
6156 obj_to_scan->oop_iterate(keep_alive);
6157 // Loop around, finish this work, and try to steal some more
6158 } else if (terminator()->offer_termination()) {
6159 break; // nirvana from the infinite cycle
6160 }
6161 }
6162 NOT_PRODUCT(
6163 if (PrintCMSStatistics != 0) {
6164 gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
6165 }
6166 )
6167 }
6169 void CMSRefProcTaskExecutor::execute(ProcessTask& task)
6170 {
6171 GenCollectedHeap* gch = GenCollectedHeap::heap();
6172 FlexibleWorkGang* workers = gch->workers();
6173 assert(workers != NULL, "Need parallel worker threads.");
6174 CMSRefProcTaskProxy rp_task(task, &_collector,
6175 _collector.ref_processor()->span(),
6176 _collector.markBitMap(),
6177 workers, _collector.task_queues());
6178 workers->run_task(&rp_task);
6179 }
6181 void CMSRefProcTaskExecutor::execute(EnqueueTask& task)
6182 {
6184 GenCollectedHeap* gch = GenCollectedHeap::heap();
6185 FlexibleWorkGang* workers = gch->workers();
6186 assert(workers != NULL, "Need parallel worker threads.");
6187 CMSRefEnqueueTaskProxy enq_task(task);
6188 workers->run_task(&enq_task);
6189 }
6191 void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
6193 ResourceMark rm;
6194 HandleMark hm;
6196 ReferenceProcessor* rp = ref_processor();
6197 assert(rp->span().equals(_span), "Spans should be equal");
6198 assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
6199 // Process weak references.
6200 rp->setup_policy(clear_all_soft_refs);
6201 verify_work_stacks_empty();
6203 CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
6204 &_markStack, false /* !preclean */);
6205 CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
6206 _span, &_markBitMap, &_markStack,
6207 &cmsKeepAliveClosure, false /* !preclean */);
6208 {
6209 GCTraceTime t("weak refs processing", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
6211 ReferenceProcessorStats stats;
6212 if (rp->processing_is_mt()) {
6213 // Set the degree of MT here. If the discovery is done MT, there
6214 // may have been a different number of threads doing the discovery
6215 // and a different number of discovered lists may have Ref objects.
6216 // That is OK as long as the Reference lists are balanced (see
6217 // balance_all_queues() and balance_queues()).
6218 GenCollectedHeap* gch = GenCollectedHeap::heap();
6219 int active_workers = ParallelGCThreads;
6220 FlexibleWorkGang* workers = gch->workers();
6221 if (workers != NULL) {
6222 active_workers = workers->active_workers();
6223 // The expectation is that active_workers will have already
6224 // been set to a reasonable value. If it has not been set,
6225 // investigate.
6226 assert(active_workers > 0, "Should have been set during scavenge");
6227 }
6228 rp->set_active_mt_degree(active_workers);
6229 CMSRefProcTaskExecutor task_executor(*this);
6230 stats = rp->process_discovered_references(&_is_alive_closure,
6231 &cmsKeepAliveClosure,
6232 &cmsDrainMarkingStackClosure,
6233 &task_executor,
6234 _gc_timer_cm,
6235 _gc_tracer_cm->gc_id());
6236 } else {
6237 stats = rp->process_discovered_references(&_is_alive_closure,
6238 &cmsKeepAliveClosure,
6239 &cmsDrainMarkingStackClosure,
6240 NULL,
6241 _gc_timer_cm,
6242 _gc_tracer_cm->gc_id());
6243 }
6244 _gc_tracer_cm->report_gc_reference_stats(stats);
6246 }
6248 // This is the point where the entire marking should have completed.
6249 verify_work_stacks_empty();
6251 if (should_unload_classes()) {
6252 {
6253 GCTraceTime t("class unloading", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
6255 // Unload classes and purge the SystemDictionary.
6256 bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
6258 // Unload nmethods.
6259 CodeCache::do_unloading(&_is_alive_closure, purged_class);
6261 // Prune dead klasses from subklass/sibling/implementor lists.
6262 Klass::clean_weak_klass_links(&_is_alive_closure);
6263 }
6265 {
6266 GCTraceTime t("scrub symbol table", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
6267 // Clean up unreferenced symbols in symbol table.
6268 SymbolTable::unlink();
6269 }
6270 }
6272 // CMS doesn't use the StringTable as hard roots when class unloading is turned off.
6273 // Need to check if we really scanned the StringTable.
6274 if ((roots_scanning_options() & SharedHeap::SO_Strings) == 0) {
6275 GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
6276 // Delete entries for dead interned strings.
6277 StringTable::unlink(&_is_alive_closure);
6278 }
6280 // Restore any preserved marks as a result of mark stack or
6281 // work queue overflow
6282 restore_preserved_marks_if_any(); // done single-threaded for now
6284 rp->set_enqueuing_is_done(true);
6285 if (rp->processing_is_mt()) {
6286 rp->balance_all_queues();
6287 CMSRefProcTaskExecutor task_executor(*this);
6288 rp->enqueue_discovered_references(&task_executor);
6289 } else {
6290 rp->enqueue_discovered_references(NULL);
6291 }
6292 rp->verify_no_references_recorded();
6293 assert(!rp->discovery_enabled(), "should have been disabled");
6294 }
6296 #ifndef PRODUCT
6297 void CMSCollector::check_correct_thread_executing() {
6298 Thread* t = Thread::current();
6299 // Only the VM thread or the CMS thread should be here.
6300 assert(t->is_ConcurrentGC_thread() || t->is_VM_thread(),
6301 "Unexpected thread type");
6302 // If this is the vm thread, the foreground process
6303 // should not be waiting. Note that _foregroundGCIsActive is
6304 // true while the foreground collector is waiting.
6305 if (_foregroundGCShouldWait) {
6306 // We cannot be the VM thread
6307 assert(t->is_ConcurrentGC_thread(),
6308 "Should be CMS thread");
6309 } else {
6310 // We can be the CMS thread only if we are in a stop-world
6311 // phase of CMS collection.
6312 if (t->is_ConcurrentGC_thread()) {
6313 assert(_collectorState == InitialMarking ||
6314 _collectorState == FinalMarking,
6315 "Should be a stop-world phase");
6316 // The CMS thread should be holding the CMS_token.
6317 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6318 "Potential interference with concurrently "
6319 "executing VM thread");
6320 }
6321 }
6322 }
6323 #endif
6325 void CMSCollector::sweep(bool asynch) {
6326 assert(_collectorState == Sweeping, "just checking");
6327 check_correct_thread_executing();
6328 verify_work_stacks_empty();
6329 verify_overflow_empty();
6330 increment_sweep_count();
6331 TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
6333 _inter_sweep_timer.stop();
6334 _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
6335 size_policy()->avg_cms_free_at_sweep()->sample(_cmsGen->free());
6337 assert(!_intra_sweep_timer.is_active(), "Should not be active");
6338 _intra_sweep_timer.reset();
6339 _intra_sweep_timer.start();
6340 if (asynch) {
6341 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
6342 CMSPhaseAccounting pa(this, "sweep", _gc_tracer_cm->gc_id(), !PrintGCDetails);
6343 // First sweep the old gen
6344 {
6345 CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
6346 bitMapLock());
6347 sweepWork(_cmsGen, asynch);
6348 }
6350 // Update Universe::_heap_*_at_gc figures.
6351 // We need all the free list locks to make the abstract state
6352 // transition from Sweeping to Resetting. See detailed note
6353 // further below.
6354 {
6355 CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock());
6356 // Update heap occupancy information which is used as
6357 // input to soft ref clearing policy at the next gc.
6358 Universe::update_heap_info_at_gc();
6359 _collectorState = Resizing;
6360 }
6361 } else {
6362 // already have needed locks
6363 sweepWork(_cmsGen, asynch);
6364 // Update heap occupancy information which is used as
6365 // input to soft ref clearing policy at the next gc.
6366 Universe::update_heap_info_at_gc();
6367 _collectorState = Resizing;
6368 }
6369 verify_work_stacks_empty();
6370 verify_overflow_empty();
6372 if (should_unload_classes()) {
6373 // Delay purge to the beginning of the next safepoint. Metaspace::contains
6374 // requires that the virtual spaces are stable and not deleted.
6375 ClassLoaderDataGraph::set_should_purge(true);
6376 }
6378 _intra_sweep_timer.stop();
6379 _intra_sweep_estimate.sample(_intra_sweep_timer.seconds());
6381 _inter_sweep_timer.reset();
6382 _inter_sweep_timer.start();
6384 // We need to use a monotonically non-decreasing time in ms
6385 // or we will see time-warp warnings and os::javaTimeMillis()
6386 // does not guarantee monotonicity.
6387 jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
6388 update_time_of_last_gc(now);
6390 // NOTE on abstract state transitions:
6391 // Mutators allocate-live and/or mark the mod-union table dirty
6392 // based on the state of the collection. The former is done in
6393 // the interval [Marking, Sweeping] and the latter in the interval
6394 // [Marking, Sweeping). Thus the transitions into the Marking state
6395 // and out of the Sweeping state must be synchronously visible
6396 // globally to the mutators.
6397 // The transition into the Marking state happens with the world
6398 // stopped so the mutators will globally see it. Sweeping is
6399 // done asynchronously by the background collector so the transition
6400 // from the Sweeping state to the Resizing state must be done
6401 // under the freelistLock (as is the check for whether to
6402 // allocate-live and whether to dirty the mod-union table).
6403 assert(_collectorState == Resizing, "Change of collector state to"
6404 " Resizing must be done under the freelistLocks (plural)");
6406 // Now that sweeping has been completed, we clear
6407 // the incremental_collection_failed flag,
6408 // thus inviting a younger gen collection to promote into
6409 // this generation. If such a promotion may still fail,
6410 // the flag will be set again when a young collection is
6411 // attempted.
6412 GenCollectedHeap* gch = GenCollectedHeap::heap();
6413 gch->clear_incremental_collection_failed(); // Worth retrying as fresh space may have been freed up
6414 gch->update_full_collections_completed(_collection_count_start);
6415 }
6417 // FIX ME!!! Looks like this belongs in CFLSpace, with
6418 // CMSGen merely delegating to it.
6419 void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
6420 double nearLargestPercent = FLSLargestBlockCoalesceProximity;
6421 HeapWord* minAddr = _cmsSpace->bottom();
6422 HeapWord* largestAddr =
6423 (HeapWord*) _cmsSpace->dictionary()->find_largest_dict();
6424 if (largestAddr == NULL) {
6425 // The dictionary appears to be empty. In this case
6426 // try to coalesce at the end of the heap.
6427 largestAddr = _cmsSpace->end();
6428 }
6429 size_t largestOffset = pointer_delta(largestAddr, minAddr);
6430 size_t nearLargestOffset =
6431 (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
6432 if (PrintFLSStatistics != 0) {
6433 gclog_or_tty->print_cr(
6434 "CMS: Large Block: " PTR_FORMAT ";"
6435 " Proximity: " PTR_FORMAT " -> " PTR_FORMAT,
6436 largestAddr,
6437 _cmsSpace->nearLargestChunk(), minAddr + nearLargestOffset);
6438 }
6439 _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
6440 }
6442 bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) {
6443 return addr >= _cmsSpace->nearLargestChunk();
6444 }
6446 FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
6447 return _cmsSpace->find_chunk_at_end();
6448 }
6450 void ConcurrentMarkSweepGeneration::update_gc_stats(int current_level,
6451 bool full) {
6452 // The next lower level has been collected. Gather any statistics
6453 // that are of interest at this point.
6454 if (!full && (current_level + 1) == level()) {
6455 // Gather statistics on the young generation collection.
6456 collector()->stats().record_gc0_end(used());
6457 }
6458 }
6460 CMSAdaptiveSizePolicy* ConcurrentMarkSweepGeneration::size_policy() {
6461 GenCollectedHeap* gch = GenCollectedHeap::heap();
6462 assert(gch->kind() == CollectedHeap::GenCollectedHeap,
6463 "Wrong type of heap");
6464 CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
6465 gch->gen_policy()->size_policy();
6466 assert(sp->is_gc_cms_adaptive_size_policy(),
6467 "Wrong type of size policy");
6468 return sp;
6469 }
6471 void ConcurrentMarkSweepGeneration::rotate_debug_collection_type() {
6472 if (PrintGCDetails && Verbose) {
6473 gclog_or_tty->print("Rotate from %d ", _debug_collection_type);
6474 }
6475 _debug_collection_type = (CollectionTypes) (_debug_collection_type + 1);
6476 _debug_collection_type =
6477 (CollectionTypes) (_debug_collection_type % Unknown_collection_type);
6478 if (PrintGCDetails && Verbose) {
6479 gclog_or_tty->print_cr("to %d ", _debug_collection_type);
6480 }
6481 }
6483 void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen,
6484 bool asynch) {
6485 // We iterate over the space(s) underlying this generation,
6486 // checking the mark bit map to see if the bits corresponding
6487 // to specific blocks are marked or not. Blocks that are
6488 // marked are live and are not swept up. All remaining blocks
6489 // are swept up, with coalescing on-the-fly as we sweep up
6490 // contiguous free and/or garbage blocks:
6491 // We need to ensure that the sweeper synchronizes with allocators
6492 // and stop-the-world collectors. In particular, the following
6493 // locks are used:
6494 // . CMS token: if this is held, a stop the world collection cannot occur
6495 // . freelistLock: if this is held no allocation can occur from this
6496 // generation by another thread
6497 // . bitMapLock: if this is held, no other thread can access or update
6498 //     the mark bit map
6500 // Note that we need to hold the freelistLock if we use
6501 // block iterate below; else the iterator might go awry if
6502 // a mutator (or promotion) causes block contents to change
6503 // (for instance if the allocator divvies up a block).
6504 // If we hold the free list lock, for all practical purposes
6505 // young generation GC's can't occur (they'll usually need to
6506 // promote), so we might as well prevent all young generation
6507 // GC's while we do a sweeping step. For the same reason, we might
6508 // as well take the bit map lock for the entire duration
6510 // check that we hold the requisite locks
6511 assert(have_cms_token(), "Should hold cms token");
6512 assert( (asynch && ConcurrentMarkSweepThread::cms_thread_has_cms_token())
6513 || (!asynch && ConcurrentMarkSweepThread::vm_thread_has_cms_token()),
6514 "Should possess CMS token to sweep");
6515 assert_lock_strong(gen->freelistLock());
6516 assert_lock_strong(bitMapLock());
6518 assert(!_inter_sweep_timer.is_active(), "Was switched off in an outer context");
6519 assert(_intra_sweep_timer.is_active(), "Was switched on in an outer context");
6520 gen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
6521 _inter_sweep_estimate.padded_average(),
6522 _intra_sweep_estimate.padded_average());
6523 gen->setNearLargestChunk();
6525 {
6526 SweepClosure sweepClosure(this, gen, &_markBitMap,
6527 CMSYield && asynch);
6528 gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
6529 // We need to free-up/coalesce garbage/blocks from a
6530 // co-terminal free run. This is done in the SweepClosure
6531 // destructor; so, do not remove this scope, else the
6532 // end-of-sweep-census below will be off by a little bit.
6533 }
6534 gen->cmsSpace()->sweep_completed();
6535 gen->cmsSpace()->endSweepFLCensus(sweep_count());
6536 if (should_unload_classes()) { // unloaded classes this cycle,
6537 _concurrent_cycles_since_last_unload = 0; // ... reset count
6538 } else { // did not unload classes,
6539 _concurrent_cycles_since_last_unload++; // ... increment count
6540 }
6541 }
6543 // Reset CMS data structures (for now just the marking bit map)
6544 // preparatory for the next cycle.
6545 void CMSCollector::reset(bool asynch) {
6546 GenCollectedHeap* gch = GenCollectedHeap::heap();
6547 CMSAdaptiveSizePolicy* sp = size_policy();
6548 AdaptiveSizePolicyOutput(sp, gch->total_collections());
6549 if (asynch) {
6550 CMSTokenSyncWithLocks ts(true, bitMapLock());
6552 // If the state is not "Resetting", the foreground thread
6553 // has done a collection and the resetting.
6554 if (_collectorState != Resetting) {
6555 assert(_collectorState == Idling, "The state should only change"
6556 " because the foreground collector has finished the collection");
6557 return;
6558 }
6560 // Clear the mark bitmap (no grey objects to start with)
6561 // for the next cycle.
6562 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
6563 CMSPhaseAccounting cmspa(this, "reset", _gc_tracer_cm->gc_id(), !PrintGCDetails);
6565 HeapWord* curAddr = _markBitMap.startWord();
6566 while (curAddr < _markBitMap.endWord()) {
6567 size_t remaining = pointer_delta(_markBitMap.endWord(), curAddr);
6568 MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
6569 _markBitMap.clear_large_range(chunk);
6570 if (ConcurrentMarkSweepThread::should_yield() &&
6571 !foregroundGCIsActive() &&
6572 CMSYield) {
6573 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6574 "CMS thread should hold CMS token");
6575 assert_lock_strong(bitMapLock());
6576 bitMapLock()->unlock();
6577 ConcurrentMarkSweepThread::desynchronize(true);
6578 ConcurrentMarkSweepThread::acknowledge_yield_request();
6579 stopTimer();
6580 if (PrintCMSStatistics != 0) {
6581 incrementYields();
6582 }
6583 icms_wait();
6585 // See the comment in coordinator_yield()
6586 for (unsigned i = 0; i < CMSYieldSleepCount &&
6587 ConcurrentMarkSweepThread::should_yield() &&
6588 !CMSCollector::foregroundGCIsActive(); ++i) {
6589 os::sleep(Thread::current(), 1, false);
6590 ConcurrentMarkSweepThread::acknowledge_yield_request();
6591 }
6593 ConcurrentMarkSweepThread::synchronize(true);
6594 bitMapLock()->lock_without_safepoint_check();
6595 startTimer();
6596 }
6597 curAddr = chunk.end();
6598 }
6599 // A successful mostly concurrent collection has been done.
6600 // Because only the full (i.e., concurrent mode failure) collections
6601 // are being measured for gc overhead limits, clean the "near" flag
6602 // and count.
6603 sp->reset_gc_overhead_limit_count();
6604 _collectorState = Idling;
6605 } else {
6606 // already have the lock
6607 assert(_collectorState == Resetting, "just checking");
6608 assert_lock_strong(bitMapLock());
6609 _markBitMap.clear_all();
6610 _collectorState = Idling;
6611 }
6613 // Stop incremental mode after a cycle completes, so that any future cycles
6614 // are triggered by allocation.
6615 stop_icms();
6617 NOT_PRODUCT(
6618 if (RotateCMSCollectionTypes) {
6619 _cmsGen->rotate_debug_collection_type();
6620 }
6621 )
6623 register_gc_end();
6624 }
6626 void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
6627 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
6628 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
6629 GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer_cm->gc_id());
6630 TraceCollectorStats tcs(counters());
6632 switch (op) {
6633 case CMS_op_checkpointRootsInitial: {
6634 SvcGCMarker sgcm(SvcGCMarker::OTHER);
6635 checkpointRootsInitial(true); // asynch
6636 if (PrintGC) {
6637 _cmsGen->printOccupancy("initial-mark");
6638 }
6639 break;
6640 }
6641 case CMS_op_checkpointRootsFinal: {
6642 SvcGCMarker sgcm(SvcGCMarker::OTHER);
6643 checkpointRootsFinal(true, // asynch
6644 false, // !clear_all_soft_refs
6645 false); // !init_mark_was_synchronous
6646 if (PrintGC) {
6647 _cmsGen->printOccupancy("remark");
6648 }
6649 break;
6650 }
6651 default:
6652 fatal("No such CMS_op");
6653 }
6654 }
6656 #ifndef PRODUCT
6657 size_t const CMSCollector::skip_header_HeapWords() {
6658 return FreeChunk::header_size();
6659 }
6661 // Try and collect here conditions that should hold when
6662 // CMS thread is exiting. The idea is that the foreground GC
6663 // thread should not be blocked if it wants to terminate
6664 // the CMS thread and yet continue to run the VM for a while
6665 // after that.
6666 void CMSCollector::verify_ok_to_terminate() const {
6667 assert(Thread::current()->is_ConcurrentGC_thread(),
6668 "should be called by CMS thread");
6669 assert(!_foregroundGCShouldWait, "should be false");
6670 // We could check here that all the various low-level locks
6671 // are not held by the CMS thread, but that is overkill; see
6672 // also CMSThread::verify_ok_to_terminate() where the CGC_lock
6673 // is checked.
6674 }
6675 #endif
6677 size_t CMSCollector::block_size_using_printezis_bits(HeapWord* addr) const {
6678 assert(_markBitMap.isMarked(addr) && _markBitMap.isMarked(addr + 1),
6679 "missing Printezis mark?");
6680 HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
6681 size_t size = pointer_delta(nextOneAddr + 1, addr);
6682 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6683 "alignment problem");
6684 assert(size >= 3, "Necessary for Printezis marks to work");
6685 return size;
6686 }
6688 // A variant of the above (block_size_using_printezis_bits()) except
6689 // that we return 0 if the P-bits are not yet set.
6690 size_t CMSCollector::block_size_if_printezis_bits(HeapWord* addr) const {
6691 if (_markBitMap.isMarked(addr + 1)) {
6692 assert(_markBitMap.isMarked(addr), "P-bit can be set only for marked objects");
6693 HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
6694 size_t size = pointer_delta(nextOneAddr + 1, addr);
6695 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6696 "alignment problem");
6697 assert(size >= 3, "Necessary for Printezis marks to work");
6698 return size;
6699 }
6700 return 0;
6701 }
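// --- Editor's illustrative sketch below; not part of the original file. ---
// The two routines above recover a block size purely from the bit map: when
// an object's klass pointer is not yet installed, the bit at addr+1 is set
// (the "Printezis" or P-bit) and a further bit marks the object's last word,
// so the size is (last marked word + 1) - addr. A toy model over a plain
// boolean array; the name and parameters are hypothetical:
static size_t printezis_size(const bool* bits, size_t n_bits, size_t addr) {
  // precondition: bits[addr] and bits[addr + 1] are set (the P-bit pair)
  size_t next = addr + 2;
  while (next < n_bits && !bits[next]) next++;  // next marked word = last word
  return (next + 1) - addr;                     // block spans [addr, next]
}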
6703 HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const {
6704 size_t sz = 0;
6705 oop p = (oop)addr;
6706 if (p->klass_or_null() != NULL) {
6707 sz = CompactibleFreeListSpace::adjustObjectSize(p->size());
6708 } else {
6709 sz = block_size_using_printezis_bits(addr);
6710 }
6711 assert(sz > 0, "size must be nonzero");
6712 HeapWord* next_block = addr + sz;
6713 HeapWord* next_card = (HeapWord*)round_to((uintptr_t)next_block,
6714 CardTableModRefBS::card_size);
6715 assert(round_down((uintptr_t)addr, CardTableModRefBS::card_size) <
6716 round_down((uintptr_t)next_card, CardTableModRefBS::card_size),
6717 "must be different cards");
6718 return next_card;
6719 }
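// --- Editor's illustrative sketch below; not part of the original file. ---
// The round_to() call above reduces to aligning an address up to the next
// multiple of the card size, which is a power of two. A stand-alone
// restatement; the name and parameters are hypothetical:
static uintptr_t align_up_to_card(uintptr_t addr, uintptr_t card_size) {
  // assumes card_size is a power of two, as for CardTableModRefBS::card_size
  return (addr + card_size - 1) & ~(card_size - 1);
}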
6722 // CMS Bit Map Wrapper /////////////////////////////////////////
6724 // Construct a CMS bit map infrastructure, but don't create the
6725 // bit vector itself. That is done by a separate call to CMSBitMap::allocate()
6726 // further below.
6727 CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name):
6728 _bm(),
6729 _shifter(shifter),
6730 _lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true) : NULL)
6731 {
6732 _bmStartWord = 0;
6733 _bmWordSize = 0;
6734 }
6736 bool CMSBitMap::allocate(MemRegion mr) {
6737 _bmStartWord = mr.start();
6738 _bmWordSize = mr.word_size();
6739 ReservedSpace brs(ReservedSpace::allocation_align_size_up(
6740 (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
6741 if (!brs.is_reserved()) {
6742 warning("CMS bit map allocation failure");
6743 return false;
6744 }
6745 // For now we'll just commit all of the bit map up front.
6746 // Later on we'll try to be more parsimonious with swap.
6747 if (!_virtual_space.initialize(brs, brs.size())) {
6748 warning("CMS bit map backing store failure");
6749 return false;
6750 }
6751 assert(_virtual_space.committed_size() == brs.size(),
6752 "didn't reserve backing store for all of CMS bit map?");
6753 _bm.set_map((BitMap::bm_word_t*)_virtual_space.low());
6754 assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
6755 _bmWordSize, "inconsistency in bit map sizing");
6756 _bm.set_size(_bmWordSize >> _shifter);
6758 // bm.clear(); // can we rely on getting zero'd memory? verify below
6759 assert(isAllClear(),
6760 "Expected zero'd memory from ReservedSpace constructor");
6761 assert(_bm.size() == heapWordDiffToOffsetDiff(sizeInWords()),
6762 "consistency check");
6763 return true;
6764 }
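// --- Editor's illustrative sketch below; not part of the original file. ---
// The backing-store sizing in allocate() above: one bit covers 2^_shifter
// heap words, so the byte size is the covered word count shifted down by
// (_shifter + LogBitsPerByte), plus one byte of slack before alignment.
// A stand-alone restatement; the name and parameters are hypothetical:
static size_t bitmap_backing_bytes(size_t covered_words, int shifter) {
  const int log_bits_per_byte = 3;  // 8 bits per byte
  return (covered_words >> (shifter + log_bits_per_byte)) + 1;
}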
6766 void CMSBitMap::dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl) {
6767 HeapWord *next_addr, *end_addr, *last_addr;
6768 assert_locked();
6769 assert(covers(mr), "out-of-range error");
6770 // XXX assert that start and end are appropriately aligned
6771 for (next_addr = mr.start(), end_addr = mr.end();
6772 next_addr < end_addr; next_addr = last_addr) {
6773 MemRegion dirty_region = getAndClearMarkedRegion(next_addr, end_addr);
6774 last_addr = dirty_region.end();
6775 if (!dirty_region.is_empty()) {
6776 cl->do_MemRegion(dirty_region);
6777 } else {
6778 assert(last_addr == end_addr, "program logic");
6779 return;
6780 }
6781 }
6782 }
6784 void CMSBitMap::print_on_error(outputStream* st, const char* prefix) const {
6785 _bm.print_on_error(st, prefix);
6786 }
6788 #ifndef PRODUCT
6789 void CMSBitMap::assert_locked() const {
6790 CMSLockVerifier::assert_locked(lock());
6791 }
6793 bool CMSBitMap::covers(MemRegion mr) const {
6794 // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
6795 assert((size_t)_bm.size() == (_bmWordSize >> _shifter),
6796 "size inconsistency");
6797 return (mr.start() >= _bmStartWord) &&
6798 (mr.end() <= endWord());
6799 }
6801 bool CMSBitMap::covers(HeapWord* start, size_t size) const {
6802 return (start >= _bmStartWord && (start + size) <= endWord());
6803 }
6805 void CMSBitMap::verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) {
6806 // verify that there are no 1 bits in the interval [left, right)
6807 FalseBitMapClosure falseBitMapClosure;
6808 iterate(&falseBitMapClosure, left, right);
6809 }
6811 void CMSBitMap::region_invariant(MemRegion mr)
6812 {
6813 assert_locked();
6814 // mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
6815 assert(!mr.is_empty(), "unexpected empty region");
6816 assert(covers(mr), "mr should be covered by bit map");
6817 // convert address range into offset range
6818 size_t start_ofs = heapWordToOffset(mr.start());
6819 // Make sure that end() is appropriately aligned
6820 assert(mr.end() == (HeapWord*)round_to((intptr_t)mr.end(),
6821 (1 << (_shifter+LogHeapWordSize))),
6822 "Misaligned mr.end()");
6823 size_t end_ofs = heapWordToOffset(mr.end());
6824 assert(end_ofs > start_ofs, "Should mark at least one bit");
6825 }
6827 #endif
6829 bool CMSMarkStack::allocate(size_t size) {
6830 // allocate a stack of the requisite depth
6831 ReservedSpace rs(ReservedSpace::allocation_align_size_up(
6832 size * sizeof(oop)));
6833 if (!rs.is_reserved()) {
6834 warning("CMSMarkStack allocation failure");
6835 return false;
6836 }
6837 if (!_virtual_space.initialize(rs, rs.size())) {
6838 warning("CMSMarkStack backing store failure");
6839 return false;
6840 }
6841 assert(_virtual_space.committed_size() == rs.size(),
6842 "didn't reserve backing store for all of CMS stack?");
6843 _base = (oop*)(_virtual_space.low());
6844 _index = 0;
6845 _capacity = size;
6846 NOT_PRODUCT(_max_depth = 0);
6847 return true;
6848 }
6850 // XXX FIX ME !!! In the MT case we come in here holding a
6851 // leaf lock. For printing we need to take a further lock
6852 // which has lower rank. We need to recalibrate the two
6853 // lock-ranks involved in order to be able to print the
6854 // messages below. (Or defer the printing to the caller.
6855 // For now we take the expedient path of just disabling the
6856 // messages for the problematic case.)
6857 void CMSMarkStack::expand() {
6858 assert(_capacity <= MarkStackSizeMax, "stack bigger than permitted");
6859 if (_capacity == MarkStackSizeMax) {
6860 if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
6861 // We print a warning message only once per CMS cycle.
6862 gclog_or_tty->print_cr(" (benign) Hit CMSMarkStack max size limit");
6863 }
6864 return;
6865 }
6866 // Double capacity if possible
6867 size_t new_capacity = MIN2(_capacity*2, MarkStackSizeMax);
6868 // Do not give up existing stack until we have managed to
6869 // get the double capacity that we desired.
6870 ReservedSpace rs(ReservedSpace::allocation_align_size_up(
6871 new_capacity * sizeof(oop)));
6872 if (rs.is_reserved()) {
6873 // Release the backing store associated with old stack
6874 _virtual_space.release();
6875 // Reinitialize virtual space for new stack
6876 if (!_virtual_space.initialize(rs, rs.size())) {
6877 fatal("Not enough swap for expanded marking stack");
6878 }
6879 _base = (oop*)(_virtual_space.low());
6880 _index = 0;
6881 _capacity = new_capacity;
6882 } else if (_failed_double++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
6883 // Failed to double capacity, continue;
6884 // we print a detail message only once per CMS cycle.
6885 gclog_or_tty->print(" (benign) Failed to expand marking stack from "SIZE_FORMAT"K to "
6886 SIZE_FORMAT"K",
6887 _capacity / K, new_capacity / K);
6888 }
6889 }
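// --- Editor's illustrative sketch below; not part of the original file. ---
// The growth policy in expand() above: double the capacity, clamp it to the
// configured maximum, and keep the old stack if the larger reservation
// fails. A stand-alone restatement of that decision; the name and
// parameters are hypothetical:
static size_t next_mark_stack_capacity(size_t current, size_t max_capacity,
                                       bool reservation_succeeded) {
  if (current == max_capacity) {
    return current;                                   // already at the limit
  }
  size_t doubled = (current * 2 < max_capacity) ? current * 2 : max_capacity;
  return reservation_succeeded ? doubled : current;   // fall back on failure
}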
6892 // Closures
6893 // XXX: there seems to be a lot of code duplication here;
6894 // should refactor and consolidate common code.
6896 // This closure is used to mark refs into the CMS generation in
6897 // the CMS bit map. Called at the first checkpoint. This closure
6898 // assumes that we do not need to re-mark dirty cards; if the CMS
6899 // generation on which this is used is not the oldest
6900 // generation then this will lose younger_gen cards!
6902 MarkRefsIntoClosure::MarkRefsIntoClosure(
6903 MemRegion span, CMSBitMap* bitMap):
6904 _span(span),
6905 _bitMap(bitMap)
6906 {
6907 assert(_ref_processor == NULL, "deliberately left NULL");
6908 assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
6909 }
6911 void MarkRefsIntoClosure::do_oop(oop obj) {
6912 // if p points into _span, then mark corresponding bit in _markBitMap
6913 assert(obj->is_oop(), "expected an oop");
6914 HeapWord* addr = (HeapWord*)obj;
6915 if (_span.contains(addr)) {
6916 // this should be made more efficient
6917 _bitMap->mark(addr);
6918 }
6919 }
6921 void MarkRefsIntoClosure::do_oop(oop* p) { MarkRefsIntoClosure::do_oop_work(p); }
6922 void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
6924 Par_MarkRefsIntoClosure::Par_MarkRefsIntoClosure(
6925 MemRegion span, CMSBitMap* bitMap):
6926 _span(span),
6927 _bitMap(bitMap)
6928 {
6929 assert(_ref_processor == NULL, "deliberately left NULL");
6930 assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
6931 }
6933 void Par_MarkRefsIntoClosure::do_oop(oop obj) {
6934 // if p points into _span, then mark corresponding bit in _markBitMap
6935 assert(obj->is_oop(), "expected an oop");
6936 HeapWord* addr = (HeapWord*)obj;
6937 if (_span.contains(addr)) {
6938 // this should be made more efficient
6939 _bitMap->par_mark(addr);
6940 }
6941 }
6943 void Par_MarkRefsIntoClosure::do_oop(oop* p) { Par_MarkRefsIntoClosure::do_oop_work(p); }
6944 void Par_MarkRefsIntoClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoClosure::do_oop_work(p); }
6946 // A variant of the above, used for CMS marking verification.
6947 MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
6948 MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm):
6949 _span(span),
6950 _verification_bm(verification_bm),
6951 _cms_bm(cms_bm)
6952 {
6953 assert(_ref_processor == NULL, "deliberately left NULL");
6954 assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
6955 }
6957 void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
6958 // if p points into _span, then mark corresponding bit in _markBitMap
6959 assert(obj->is_oop(), "expected an oop");
6960 HeapWord* addr = (HeapWord*)obj;
6961 if (_span.contains(addr)) {
6962 _verification_bm->mark(addr);
6963 if (!_cms_bm->isMarked(addr)) {
6964 oop(addr)->print();
6965 gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", addr);
6966 fatal("... aborting");
6967 }
6968 }
6969 }
6971 void MarkRefsIntoVerifyClosure::do_oop(oop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
6972 void MarkRefsIntoVerifyClosure::do_oop(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
6974 //////////////////////////////////////////////////
6975 // MarkRefsIntoAndScanClosure
6976 //////////////////////////////////////////////////
6978 MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
6979 ReferenceProcessor* rp,
6980 CMSBitMap* bit_map,
6981 CMSBitMap* mod_union_table,
6982 CMSMarkStack* mark_stack,
6983 CMSCollector* collector,
6984 bool should_yield,
6985 bool concurrent_precleaning):
6986 _collector(collector),
6987 _span(span),
6988 _bit_map(bit_map),
6989 _mark_stack(mark_stack),
6990 _pushAndMarkClosure(collector, span, rp, bit_map, mod_union_table,
6991 mark_stack, concurrent_precleaning),
6992 _yield(should_yield),
6993 _concurrent_precleaning(concurrent_precleaning),
6994 _freelistLock(NULL)
6995 {
6996 _ref_processor = rp;
6997 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
6998 }
7000 // This closure is used to mark refs into the CMS generation at the
7001 // second (final) checkpoint, and to scan and transitively follow
7002 // the unmarked oops. It is also used during the concurrent precleaning
7003 // phase while scanning objects on dirty cards in the CMS generation.
7004 // The marks are made in the marking bit map and the marking stack is
7005 // used for keeping the (newly) grey objects during the scan.
7006 // The parallel version (Par_...) appears further below.
7007 void MarkRefsIntoAndScanClosure::do_oop(oop obj) {
7008 if (obj != NULL) {
7009 assert(obj->is_oop(), "expected an oop");
7010 HeapWord* addr = (HeapWord*)obj;
7011 assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
7012 assert(_collector->overflow_list_is_empty(),
7013 "overflow list should be empty");
7014 if (_span.contains(addr) &&
7015 !_bit_map->isMarked(addr)) {
7016 // mark bit map (object is now grey)
7017 _bit_map->mark(addr);
7018 // push on marking stack (stack should be empty), and drain the
7019 // stack by applying this closure to the oops in the oops popped
7020 // from the stack (i.e. blacken the grey objects)
7021 bool res = _mark_stack->push(obj);
7022 assert(res, "Should have space to push on empty stack");
7023 do {
7024 oop new_oop = _mark_stack->pop();
7025 assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
7026 assert(_bit_map->isMarked((HeapWord*)new_oop),
7027 "only grey objects on this stack");
7028 // iterate over the oops in this oop, marking and pushing
7029 // the ones in CMS heap (i.e. in _span).
7030 new_oop->oop_iterate(&_pushAndMarkClosure);
7031 // check if it's time to yield
7032 do_yield_check();
7033 } while (!_mark_stack->isEmpty() ||
7034 (!_concurrent_precleaning && take_from_overflow_list()));
7035 // if marking stack is empty, and we are not doing this
7036 // during precleaning, then check the overflow list
7037 }
7038 assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
7039 assert(_collector->overflow_list_is_empty(),
7040 "overflow list was drained above");
7041 // We could restore evacuated mark words, if any, used for
7042 // overflow list links here because the overflow list is
7043 // provably empty here. That would reduce the maximum
7044 // size requirements for preserved_{oop,mark}_stack.
7045 // But we'll just postpone it until we are all done
7046 // so we can just stream through.
7047 if (!_concurrent_precleaning && CMSOverflowEarlyRestoration) {
7048 _collector->restore_preserved_marks_if_any();
7049 assert(_collector->no_preserved_marks(), "No preserved marks");
7050 }
7051 assert(!CMSOverflowEarlyRestoration || _collector->no_preserved_marks(),
7052 "All preserved marks should have been restored above");
7053 }
7054 }
7056 void MarkRefsIntoAndScanClosure::do_oop(oop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
7057 void MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
7059 void MarkRefsIntoAndScanClosure::do_yield_work() {
7060 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7061 "CMS thread should hold CMS token");
7062 assert_lock_strong(_freelistLock);
7063 assert_lock_strong(_bit_map->lock());
7064 // relinquish the free_list_lock and bitMaplock()
7065 _bit_map->lock()->unlock();
7066 _freelistLock->unlock();
7067 ConcurrentMarkSweepThread::desynchronize(true);
7068 ConcurrentMarkSweepThread::acknowledge_yield_request();
7069 _collector->stopTimer();
7070 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
7071 if (PrintCMSStatistics != 0) {
7072 _collector->incrementYields();
7073 }
7074 _collector->icms_wait();
7076 // See the comment in coordinator_yield()
7077 for (unsigned i = 0;
7078 i < CMSYieldSleepCount &&
7079 ConcurrentMarkSweepThread::should_yield() &&
7080 !CMSCollector::foregroundGCIsActive();
7081 ++i) {
7082 os::sleep(Thread::current(), 1, false);
7083 ConcurrentMarkSweepThread::acknowledge_yield_request();
7084 }
7086 ConcurrentMarkSweepThread::synchronize(true);
7087 _freelistLock->lock_without_safepoint_check();
7088 _bit_map->lock()->lock_without_safepoint_check();
7089 _collector->startTimer();
7090 }
7092 ///////////////////////////////////////////////////////////
7093 // Par_MarkRefsIntoAndScanClosure: a parallel version of
7094 // MarkRefsIntoAndScanClosure
7095 ///////////////////////////////////////////////////////////
7096 Par_MarkRefsIntoAndScanClosure::Par_MarkRefsIntoAndScanClosure(
7097 CMSCollector* collector, MemRegion span, ReferenceProcessor* rp,
7098 CMSBitMap* bit_map, OopTaskQueue* work_queue):
7099 _span(span),
7100 _bit_map(bit_map),
7101 _work_queue(work_queue),
7102 _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
7103 (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads))),
7104 _par_pushAndMarkClosure(collector, span, rp, bit_map, work_queue)
7105 {
7106 _ref_processor = rp;
7107 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7108 }
7110 // This closure is used to mark refs into the CMS generation at the
7111 // second (final) checkpoint, and to scan and transitively follow
7112 // the unmarked oops. The marks are made in the marking bit map and
7113 // the work_queue is used for keeping the (newly) grey objects during
7114 // the scan phase whence they are also available for stealing by parallel
7115 // threads. Since the marking bit map is shared, updates are
7116 // synchronized (via CAS).
7117 void Par_MarkRefsIntoAndScanClosure::do_oop(oop obj) {
7118 if (obj != NULL) {
7119 // Ignore mark word because this could be an already marked oop
7120 // that may be chained at the end of the overflow list.
7121 assert(obj->is_oop(true), "expected an oop");
7122 HeapWord* addr = (HeapWord*)obj;
7123 if (_span.contains(addr) &&
7124 !_bit_map->isMarked(addr)) {
7125 // mark bit map (object will become grey):
7126 // It is possible for several threads to be
7127 // trying to "claim" this object concurrently;
7128 // the unique thread that succeeds in marking the
7129 // object first will do the subsequent push on
7130 // to the work queue (or overflow list).
7131 if (_bit_map->par_mark(addr)) {
7132 // push on work_queue (which may not be empty), and trim the
7133 // queue to an appropriate length by applying this closure to
7134 // the oops in the oops popped from the stack (i.e. blacken the
7135 // grey objects)
7136 bool res = _work_queue->push(obj);
7137 assert(res, "Low water mark should be less than capacity?");
7138 trim_queue(_low_water_mark);
7139 } // Else, another thread claimed the object
7140 }
7141 }
7142 }
7144 void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
7145 void Par_MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
7147 // This closure is used to rescan the marked objects on the dirty cards
7148 // in the mod union table and the card table proper.
7149 size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
7150 oop p, MemRegion mr) {
7152 size_t size = 0;
7153 HeapWord* addr = (HeapWord*)p;
7154 DEBUG_ONLY(_collector->verify_work_stacks_empty();)
7155 assert(_span.contains(addr), "we are scanning the CMS generation");
7156 // check if it's time to yield
7157 if (do_yield_check()) {
7158 // We yielded for some foreground stop-world work,
7159 // and we have been asked to abort this ongoing preclean cycle.
7160 return 0;
7161 }
7162 if (_bitMap->isMarked(addr)) {
7163 // it's marked; is it potentially uninitialized?
7164 if (p->klass_or_null() != NULL) {
7165 // an initialized object; ignore mark word in verification below
7166 // since we are running concurrent with mutators
7167 assert(p->is_oop(true), "should be an oop");
7168 if (p->is_objArray()) {
7169 // objArrays are precisely marked; restrict scanning
7170 // to dirty cards only.
7171 size = CompactibleFreeListSpace::adjustObjectSize(
7172 p->oop_iterate(_scanningClosure, mr));
7173 } else {
7174 // A non-array may have been imprecisely marked; we need
7175 // to scan object in its entirety.
7176 size = CompactibleFreeListSpace::adjustObjectSize(
7177 p->oop_iterate(_scanningClosure));
7178 }
7179 #ifdef ASSERT
7180 size_t direct_size =
7181 CompactibleFreeListSpace::adjustObjectSize(p->size());
7182 assert(size == direct_size, "Inconsistency in size");
7183 assert(size >= 3, "Necessary for Printezis marks to work");
7184 if (!_bitMap->isMarked(addr+1)) {
7185 _bitMap->verifyNoOneBitsInRange(addr+2, addr+size);
7186 } else {
7187 _bitMap->verifyNoOneBitsInRange(addr+2, addr+size-1);
7188 assert(_bitMap->isMarked(addr+size-1),
7189 "inconsistent Printezis mark");
7190 }
7191 #endif // ASSERT
7192 } else {
7193       // an uninitialized object
7194 assert(_bitMap->isMarked(addr+1), "missing Printezis mark?");
7195 HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
7196 size = pointer_delta(nextOneAddr + 1, addr);
7197 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
7198 "alignment problem");
7199 // Note that pre-cleaning needn't redirty the card. OopDesc::set_klass()
7200 // will dirty the card when the klass pointer is installed in the
7201 // object (signalling the completion of initialization).
7202 }
7203 } else {
7204 // Either a not yet marked object or an uninitialized object
7205 if (p->klass_or_null() == NULL) {
7206 // An uninitialized object, skip to the next card, since
7207 // we may not be able to read its P-bits yet.
7208 assert(size == 0, "Initial value");
7209 } else {
7210 // An object not (yet) reached by marking: we merely need to
7211 // compute its size so as to go look at the next block.
7212 assert(p->is_oop(true), "should be an oop");
7213 size = CompactibleFreeListSpace::adjustObjectSize(p->size());
7214 }
7215 }
7216 DEBUG_ONLY(_collector->verify_work_stacks_empty();)
7217 return size;
7218 }
7220 void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
7221 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7222 "CMS thread should hold CMS token");
7223 assert_lock_strong(_freelistLock);
7224 assert_lock_strong(_bitMap->lock());
7225   // relinquish the freelistLock and the bitMap lock
7226 _bitMap->lock()->unlock();
7227 _freelistLock->unlock();
7228 ConcurrentMarkSweepThread::desynchronize(true);
7229 ConcurrentMarkSweepThread::acknowledge_yield_request();
7230 _collector->stopTimer();
7231 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
7232 if (PrintCMSStatistics != 0) {
7233 _collector->incrementYields();
7234 }
7235 _collector->icms_wait();
7237 // See the comment in coordinator_yield()
7238 for (unsigned i = 0; i < CMSYieldSleepCount &&
7239 ConcurrentMarkSweepThread::should_yield() &&
7240 !CMSCollector::foregroundGCIsActive(); ++i) {
7241 os::sleep(Thread::current(), 1, false);
7242 ConcurrentMarkSweepThread::acknowledge_yield_request();
7243 }
7245 ConcurrentMarkSweepThread::synchronize(true);
7246 _freelistLock->lock_without_safepoint_check();
7247 _bitMap->lock()->lock_without_safepoint_check();
7248 _collector->startTimer();
7249 }
7252 //////////////////////////////////////////////////////////////////
7253 // SurvivorSpacePrecleanClosure
7254 //////////////////////////////////////////////////////////////////
7255 // This (single-threaded) closure is used to preclean the oops in
7256 // the survivor spaces.
7257 size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {
7259 HeapWord* addr = (HeapWord*)p;
7260 DEBUG_ONLY(_collector->verify_work_stacks_empty();)
7261 assert(!_span.contains(addr), "we are scanning the survivor spaces");
7262   assert(p->klass_or_null() != NULL, "object should be initialized");
7263 // an initialized object; ignore mark word in verification below
7264 // since we are running concurrent with mutators
7265 assert(p->is_oop(true), "should be an oop");
7266 // Note that we do not yield while we iterate over
7267 // the interior oops of p, pushing the relevant ones
7268 // on our marking stack.
7269 size_t size = p->oop_iterate(_scanning_closure);
7270 do_yield_check();
7271 // Observe that below, we do not abandon the preclean
7272 // phase as soon as we should; rather we empty the
7273 // marking stack before returning. This is to satisfy
7274 // some existing assertions. In general, it may be a
7275 // good idea to abort immediately and complete the marking
7276 // from the grey objects at a later time.
7277 while (!_mark_stack->isEmpty()) {
7278 oop new_oop = _mark_stack->pop();
7279 assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
7280 assert(_bit_map->isMarked((HeapWord*)new_oop),
7281 "only grey objects on this stack");
7282 // iterate over the oops in this oop, marking and pushing
7283 // the ones in CMS heap (i.e. in _span).
7284 new_oop->oop_iterate(_scanning_closure);
7285 // check if it's time to yield
7286 do_yield_check();
7287 }
7288 unsigned int after_count =
7289 GenCollectedHeap::heap()->total_collections();
7290 bool abort = (_before_count != after_count) ||
7291 _collector->should_abort_preclean();
7292 return abort ? 0 : size;
7293 }
7295 void SurvivorSpacePrecleanClosure::do_yield_work() {
7296 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7297 "CMS thread should hold CMS token");
7298 assert_lock_strong(_bit_map->lock());
7299 // Relinquish the bit map lock
7300 _bit_map->lock()->unlock();
7301 ConcurrentMarkSweepThread::desynchronize(true);
7302 ConcurrentMarkSweepThread::acknowledge_yield_request();
7303 _collector->stopTimer();
7304 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
7305 if (PrintCMSStatistics != 0) {
7306 _collector->incrementYields();
7307 }
7308 _collector->icms_wait();
7310 // See the comment in coordinator_yield()
7311 for (unsigned i = 0; i < CMSYieldSleepCount &&
7312 ConcurrentMarkSweepThread::should_yield() &&
7313 !CMSCollector::foregroundGCIsActive(); ++i) {
7314 os::sleep(Thread::current(), 1, false);
7315 ConcurrentMarkSweepThread::acknowledge_yield_request();
7316 }
7318 ConcurrentMarkSweepThread::synchronize(true);
7319 _bit_map->lock()->lock_without_safepoint_check();
7320 _collector->startTimer();
7321 }
7323 // This closure is used to rescan the marked objects on the dirty cards
7324 // in the mod union table and the card table proper. In the parallel
7325 // case, although the bitMap is shared, we do a single read so the
7326 // isMarked() query is "safe".
7327 bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
7328 // Ignore mark word because we are running concurrent with mutators
7329 assert(p->is_oop_or_null(true), "expected an oop or null");
7330 HeapWord* addr = (HeapWord*)p;
7331 assert(_span.contains(addr), "we are scanning the CMS generation");
7332 bool is_obj_array = false;
7333 #ifdef ASSERT
7334 if (!_parallel) {
7335 assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
7336 assert(_collector->overflow_list_is_empty(),
7337 "overflow list should be empty");
7339 }
7340 #endif // ASSERT
7341 if (_bit_map->isMarked(addr)) {
7342 // Obj arrays are precisely marked, non-arrays are not;
7343 // so we scan objArrays precisely and non-arrays in their
7344 // entirety.
7345 if (p->is_objArray()) {
7346 is_obj_array = true;
7347 if (_parallel) {
7348 p->oop_iterate(_par_scan_closure, mr);
7349 } else {
7350 p->oop_iterate(_scan_closure, mr);
7351 }
7352 } else {
7353 if (_parallel) {
7354 p->oop_iterate(_par_scan_closure);
7355 } else {
7356 p->oop_iterate(_scan_closure);
7357 }
7358 }
7359 }
7360 #ifdef ASSERT
7361 if (!_parallel) {
7362 assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
7363 assert(_collector->overflow_list_is_empty(),
7364 "overflow list should be empty");
7366 }
7367 #endif // ASSERT
7368 return is_obj_array;
7369 }
7371 MarkFromRootsClosure::MarkFromRootsClosure(CMSCollector* collector,
7372 MemRegion span,
7373 CMSBitMap* bitMap, CMSMarkStack* markStack,
7374 bool should_yield, bool verifying):
7375 _collector(collector),
7376 _span(span),
7377 _bitMap(bitMap),
7378 _mut(&collector->_modUnionTable),
7379 _markStack(markStack),
7380 _yield(should_yield),
7381 _skipBits(0)
7382 {
7383 assert(_markStack->isEmpty(), "stack should be empty");
7384 _finger = _bitMap->startWord();
7385 _threshold = _finger;
7386 assert(_collector->_restart_addr == NULL, "Sanity check");
7387 assert(_span.contains(_finger), "Out of bounds _finger?");
7388 DEBUG_ONLY(_verifying = verifying;)
7389 }
7391 void MarkFromRootsClosure::reset(HeapWord* addr) {
7392 assert(_markStack->isEmpty(), "would cause duplicates on stack");
7393 assert(_span.contains(addr), "Out of bounds _finger?");
7394 _finger = addr;
7395 _threshold = (HeapWord*)round_to(
7396 (intptr_t)_finger, CardTableModRefBS::card_size);
7397 }
7399 // Should revisit to see if this should be restructured for
7400 // greater efficiency.
7401 bool MarkFromRootsClosure::do_bit(size_t offset) {
7402 if (_skipBits > 0) {
7403 _skipBits--;
7404 return true;
7405 }
7406 // convert offset into a HeapWord*
7407 HeapWord* addr = _bitMap->startWord() + offset;
7408 assert(_bitMap->endWord() && addr < _bitMap->endWord(),
7409 "address out of range");
7410 assert(_bitMap->isMarked(addr), "tautology");
7411 if (_bitMap->isMarked(addr+1)) {
7412 // this is an allocated but not yet initialized object
7413 assert(_skipBits == 0, "tautology");
7414 _skipBits = 2; // skip next two marked bits ("Printezis-marks")
7415 oop p = oop(addr);
7416 if (p->klass_or_null() == NULL) {
7417 DEBUG_ONLY(if (!_verifying) {)
7418 // We re-dirty the cards on which this object lies and increase
7419 // the _threshold so that we'll come back to scan this object
7420 // during the preclean or remark phase. (CMSCleanOnEnter)
7421 if (CMSCleanOnEnter) {
7422 size_t sz = _collector->block_size_using_printezis_bits(addr);
7423 HeapWord* end_card_addr = (HeapWord*)round_to(
7424 (intptr_t)(addr+sz), CardTableModRefBS::card_size);
7425 MemRegion redirty_range = MemRegion(addr, end_card_addr);
7426 assert(!redirty_range.is_empty(), "Arithmetical tautology");
7427 // Bump _threshold to end_card_addr; note that
7428 // _threshold cannot possibly exceed end_card_addr, anyhow.
7429 // This prevents future clearing of the card as the scan proceeds
7430 // to the right.
7431 assert(_threshold <= end_card_addr,
7432 "Because we are just scanning into this object");
7433 if (_threshold < end_card_addr) {
7434 _threshold = end_card_addr;
7435 }
7436 if (p->klass_or_null() != NULL) {
7437 // Redirty the range of cards...
7438 _mut->mark_range(redirty_range);
7439 } // ...else the setting of klass will dirty the card anyway.
7440 }
7441 DEBUG_ONLY(})
7442 return true;
7443 }
7444 }
7445 scanOopsInOop(addr);
7446 return true;
7447 }
7449 // We take a break if we've been at this for a while,
7450 // so as to avoid monopolizing the locks involved.
7451 void MarkFromRootsClosure::do_yield_work() {
7452 // First give up the locks, then yield, then re-lock
7453 // We should probably use a constructor/destructor idiom to
7454 // do this unlock/lock or modify the MutexUnlocker class to
7455 // serve our purpose. XXX
7456 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7457 "CMS thread should hold CMS token");
7458 assert_lock_strong(_bitMap->lock());
7459 _bitMap->lock()->unlock();
7460 ConcurrentMarkSweepThread::desynchronize(true);
7461 ConcurrentMarkSweepThread::acknowledge_yield_request();
7462 _collector->stopTimer();
7463 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
7464 if (PrintCMSStatistics != 0) {
7465 _collector->incrementYields();
7466 }
7467 _collector->icms_wait();
7469 // See the comment in coordinator_yield()
7470 for (unsigned i = 0; i < CMSYieldSleepCount &&
7471 ConcurrentMarkSweepThread::should_yield() &&
7472 !CMSCollector::foregroundGCIsActive(); ++i) {
7473 os::sleep(Thread::current(), 1, false);
7474 ConcurrentMarkSweepThread::acknowledge_yield_request();
7475 }
7477 ConcurrentMarkSweepThread::synchronize(true);
7478 _bitMap->lock()->lock_without_safepoint_check();
7479 _collector->startTimer();
7480 }
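// A possible shape (purely a sketch, not part of this change) for the
// constructor/destructor idiom suggested in the XXX comment above: a scoped
// helper that drops the bit map lock on construction and reacquires it on
// destruction, so the unlock/relock pairs in these do_yield_work() methods
// cannot get out of balance:
//
//   class CMSBitMapLockYielder : public StackObj {
//     Mutex* _lock;
//    public:
//     CMSBitMapLockYielder(Mutex* lock) : _lock(lock) { _lock->unlock(); }
//     ~CMSBitMapLockYielder() { _lock->lock_without_safepoint_check(); }
//   };
//
// The yield work would then run inside the helper's scope, with the
// destructor retaking the lock when the scope ends.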
7482 void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
7483 assert(_bitMap->isMarked(ptr), "expected bit to be set");
7484 assert(_markStack->isEmpty(),
7485 "should drain stack to limit stack usage");
7486 // convert ptr to an oop preparatory to scanning
7487 oop obj = oop(ptr);
7488 // Ignore mark word in verification below, since we
7489 // may be running concurrent with mutators.
7490 assert(obj->is_oop(true), "should be an oop");
7491 assert(_finger <= ptr, "_finger runneth ahead");
7492 // advance the finger to right end of this object
7493 _finger = ptr + obj->size();
7494 assert(_finger > ptr, "we just incremented it above");
7495 // On large heaps, it may take us some time to get through
7496 // the marking phase (especially if running iCMS). During
7497 // this time it's possible that a lot of mutations have
7498 // accumulated in the card table and the mod union table --
7499 // these mutation records are redundant until we have
7500 // actually traced into the corresponding card.
7501 // Here, we check whether advancing the finger would make
7502 // us cross into a new card, and if so clear corresponding
7503 // cards in the MUT (preclean them in the card-table in the
7504 // future).
7506 DEBUG_ONLY(if (!_verifying) {)
7507 // The clean-on-enter optimization is disabled by default,
7508 // until we fix 6178663.
7509 if (CMSCleanOnEnter && (_finger > _threshold)) {
7510 // [_threshold, _finger) represents the interval
7511 // of cards to be cleared in MUT (or precleaned in card table).
7512 // The set of cards to be cleared is all those that overlap
7513 // with the interval [_threshold, _finger); note that
7514 // _threshold is always kept card-aligned but _finger isn't
7515 // always card-aligned.
7516 HeapWord* old_threshold = _threshold;
7517 assert(old_threshold == (HeapWord*)round_to(
7518 (intptr_t)old_threshold, CardTableModRefBS::card_size),
7519 "_threshold should always be card-aligned");
7520 _threshold = (HeapWord*)round_to(
7521 (intptr_t)_finger, CardTableModRefBS::card_size);
7522 MemRegion mr(old_threshold, _threshold);
7523 assert(!mr.is_empty(), "Control point invariant");
7524 assert(_span.contains(mr), "Should clear within span");
7525 _mut->clear_range(mr);
7526 }
7527 DEBUG_ONLY(})
7528 // Note: the finger doesn't advance while we drain
7529 // the stack below.
7530 PushOrMarkClosure pushOrMarkClosure(_collector,
7531 _span, _bitMap, _markStack,
7532 _finger, this);
7533 bool res = _markStack->push(obj);
7534 assert(res, "Empty non-zero size stack should have space for single push");
7535 while (!_markStack->isEmpty()) {
7536 oop new_oop = _markStack->pop();
7537 // Skip verifying header mark word below because we are
7538 // running concurrent with mutators.
7539 assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
7540 // now scan this oop's oops
7541 new_oop->oop_iterate(&pushOrMarkClosure);
7542 do_yield_check();
7543 }
7544 assert(_markStack->isEmpty(), "tautology, emphasizing post-condition");
7545 }
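// Worked example of the MUT clearing in scanOopsInOop() above (hypothetical
// addresses, assuming the usual 512-byte cards): if the old, card-aligned
// _threshold is 0x8000 and the finger has advanced to 0x8450, the new
// _threshold becomes round_to(0x8450, 512) == 0x8600 and the region cleared
// in the mod union table is [0x8000, 0x8600), i.e. the three cards that
// overlap the interval [old _threshold, _finger).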
7547 Par_MarkFromRootsClosure::Par_MarkFromRootsClosure(CMSConcMarkingTask* task,
7548 CMSCollector* collector, MemRegion span,
7549 CMSBitMap* bit_map,
7550 OopTaskQueue* work_queue,
7551 CMSMarkStack* overflow_stack,
7552 bool should_yield):
7553 _collector(collector),
7554 _whole_span(collector->_span),
7555 _span(span),
7556 _bit_map(bit_map),
7557 _mut(&collector->_modUnionTable),
7558 _work_queue(work_queue),
7559 _overflow_stack(overflow_stack),
7560 _yield(should_yield),
7561 _skip_bits(0),
7562 _task(task)
7563 {
7564 assert(_work_queue->size() == 0, "work_queue should be empty");
7565 _finger = span.start();
7566 _threshold = _finger; // XXX Defer clear-on-enter optimization for now
7567 assert(_span.contains(_finger), "Out of bounds _finger?");
7568 }
7570 // Should revisit to see if this should be restructured for
7571 // greater efficiency.
7572 bool Par_MarkFromRootsClosure::do_bit(size_t offset) {
7573 if (_skip_bits > 0) {
7574 _skip_bits--;
7575 return true;
7576 }
7577 // convert offset into a HeapWord*
7578 HeapWord* addr = _bit_map->startWord() + offset;
7579 assert(_bit_map->endWord() && addr < _bit_map->endWord(),
7580 "address out of range");
7581 assert(_bit_map->isMarked(addr), "tautology");
7582 if (_bit_map->isMarked(addr+1)) {
7583 // this is an allocated object that might not yet be initialized
7584 assert(_skip_bits == 0, "tautology");
7585 _skip_bits = 2; // skip next two marked bits ("Printezis-marks")
7586 oop p = oop(addr);
7587 if (p->klass_or_null() == NULL) {
7588 // in the case of Clean-on-Enter optimization, redirty card
7589 // and avoid clearing card by increasing the threshold.
7590 return true;
7591 }
7592 }
7593 scan_oops_in_oop(addr);
7594 return true;
7595 }
7597 void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
7598 assert(_bit_map->isMarked(ptr), "expected bit to be set");
7599 // Should we assert that our work queue is empty or
7600 // below some drain limit?
7601 assert(_work_queue->size() == 0,
7602 "should drain stack to limit stack usage");
7603 // convert ptr to an oop preparatory to scanning
7604 oop obj = oop(ptr);
7605 // Ignore mark word in verification below, since we
7606 // may be running concurrent with mutators.
7607 assert(obj->is_oop(true), "should be an oop");
7608 assert(_finger <= ptr, "_finger runneth ahead");
7609 // advance the finger to right end of this object
7610 _finger = ptr + obj->size();
7611 assert(_finger > ptr, "we just incremented it above");
7612 // On large heaps, it may take us some time to get through
7613 // the marking phase (especially if running iCMS). During
7614 // this time it's possible that a lot of mutations have
7615 // accumulated in the card table and the mod union table --
7616 // these mutation records are redundant until we have
7617 // actually traced into the corresponding card.
7618 // Here, we check whether advancing the finger would make
7619 // us cross into a new card, and if so clear corresponding
7620 // cards in the MUT (preclean them in the card-table in the
7621 // future).
7623 // The clean-on-enter optimization is disabled by default,
7624 // until we fix 6178663.
7625 if (CMSCleanOnEnter && (_finger > _threshold)) {
7626 // [_threshold, _finger) represents the interval
7627 // of cards to be cleared in MUT (or precleaned in card table).
7628 // The set of cards to be cleared is all those that overlap
7629 // with the interval [_threshold, _finger); note that
7630 // _threshold is always kept card-aligned but _finger isn't
7631 // always card-aligned.
7632 HeapWord* old_threshold = _threshold;
7633 assert(old_threshold == (HeapWord*)round_to(
7634 (intptr_t)old_threshold, CardTableModRefBS::card_size),
7635 "_threshold should always be card-aligned");
7636 _threshold = (HeapWord*)round_to(
7637 (intptr_t)_finger, CardTableModRefBS::card_size);
7638 MemRegion mr(old_threshold, _threshold);
7639 assert(!mr.is_empty(), "Control point invariant");
7640 assert(_span.contains(mr), "Should clear within span"); // _whole_span ??
7641 _mut->clear_range(mr);
7642 }
7644 // Note: the local finger doesn't advance while we drain
7645 // the stack below, but the global finger sure can and will.
7646 HeapWord** gfa = _task->global_finger_addr();
7647 Par_PushOrMarkClosure pushOrMarkClosure(_collector,
7648 _span, _bit_map,
7649 _work_queue,
7650 _overflow_stack,
7651 _finger,
7652 gfa, this);
7653 bool res = _work_queue->push(obj); // overflow could occur here
7654 assert(res, "Will hold once we use workqueues");
7655 while (true) {
7656 oop new_oop;
7657 if (!_work_queue->pop_local(new_oop)) {
7658 // We emptied our work_queue; check if there's stuff that can
7659 // be gotten from the overflow stack.
7660 if (CMSConcMarkingTask::get_work_from_overflow_stack(
7661 _overflow_stack, _work_queue)) {
7662 do_yield_check();
7663 continue;
7664 } else { // done
7665 break;
7666 }
7667 }
7668 // Skip verifying header mark word below because we are
7669 // running concurrent with mutators.
7670 assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
7671 // now scan this oop's oops
7672 new_oop->oop_iterate(&pushOrMarkClosure);
7673 do_yield_check();
7674 }
7675 assert(_work_queue->size() == 0, "tautology, emphasizing post-condition");
7676 }
7678 // Yield in response to a request from VM Thread or
7679 // from mutators.
7680 void Par_MarkFromRootsClosure::do_yield_work() {
7681 assert(_task != NULL, "sanity");
7682 _task->yield();
7683 }
7685 // A variant of the above used for verifying CMS marking work.
7686 MarkFromRootsVerifyClosure::MarkFromRootsVerifyClosure(CMSCollector* collector,
7687 MemRegion span,
7688 CMSBitMap* verification_bm, CMSBitMap* cms_bm,
7689 CMSMarkStack* mark_stack):
7690 _collector(collector),
7691 _span(span),
7692 _verification_bm(verification_bm),
7693 _cms_bm(cms_bm),
7694 _mark_stack(mark_stack),
7695 _pam_verify_closure(collector, span, verification_bm, cms_bm,
7696 mark_stack)
7697 {
7698 assert(_mark_stack->isEmpty(), "stack should be empty");
7699 _finger = _verification_bm->startWord();
7700 assert(_collector->_restart_addr == NULL, "Sanity check");
7701 assert(_span.contains(_finger), "Out of bounds _finger?");
7702 }
7704 void MarkFromRootsVerifyClosure::reset(HeapWord* addr) {
7705 assert(_mark_stack->isEmpty(), "would cause duplicates on stack");
7706 assert(_span.contains(addr), "Out of bounds _finger?");
7707 _finger = addr;
7708 }
7710 // Should revisit to see if this should be restructured for
7711 // greater efficiency.
7712 bool MarkFromRootsVerifyClosure::do_bit(size_t offset) {
7713 // convert offset into a HeapWord*
7714 HeapWord* addr = _verification_bm->startWord() + offset;
7715 assert(_verification_bm->endWord() && addr < _verification_bm->endWord(),
7716 "address out of range");
7717 assert(_verification_bm->isMarked(addr), "tautology");
7718 assert(_cms_bm->isMarked(addr), "tautology");
7720 assert(_mark_stack->isEmpty(),
7721 "should drain stack to limit stack usage");
7722 // convert addr to an oop preparatory to scanning
7723 oop obj = oop(addr);
7724 assert(obj->is_oop(), "should be an oop");
7725 assert(_finger <= addr, "_finger runneth ahead");
7726 // advance the finger to right end of this object
7727 _finger = addr + obj->size();
7728 assert(_finger > addr, "we just incremented it above");
7729 // Note: the finger doesn't advance while we drain
7730 // the stack below.
7731 bool res = _mark_stack->push(obj);
7732 assert(res, "Empty non-zero size stack should have space for single push");
7733 while (!_mark_stack->isEmpty()) {
7734 oop new_oop = _mark_stack->pop();
7735 assert(new_oop->is_oop(), "Oops! expected to pop an oop");
7736 // now scan this oop's oops
7737 new_oop->oop_iterate(&_pam_verify_closure);
7738 }
7739 assert(_mark_stack->isEmpty(), "tautology, emphasizing post-condition");
7740 return true;
7741 }
7743 PushAndMarkVerifyClosure::PushAndMarkVerifyClosure(
7744 CMSCollector* collector, MemRegion span,
7745 CMSBitMap* verification_bm, CMSBitMap* cms_bm,
7746 CMSMarkStack* mark_stack):
7747 CMSOopClosure(collector->ref_processor()),
7748 _collector(collector),
7749 _span(span),
7750 _verification_bm(verification_bm),
7751 _cms_bm(cms_bm),
7752 _mark_stack(mark_stack)
7753 { }
7755 void PushAndMarkVerifyClosure::do_oop(oop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
7756 void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
7758 // Upon stack overflow, we discard (part of) the stack,
7759 // remembering the least address amongst those discarded
7760 // in CMSCollector's _restart_address.
7761 void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
7762 // Remember the least grey address discarded
7763 HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost);
7764 _collector->lower_restart_addr(ra);
7765 _mark_stack->reset(); // discard stack contents
7766 _mark_stack->expand(); // expand the stack if possible
7767 }
7769 void PushAndMarkVerifyClosure::do_oop(oop obj) {
7770 assert(obj->is_oop_or_null(), "expected an oop or NULL");
7771 HeapWord* addr = (HeapWord*)obj;
7772 if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
7773 // Oop lies in _span and isn't yet grey or black
7774 _verification_bm->mark(addr); // now grey
7775 if (!_cms_bm->isMarked(addr)) {
7776 oop(addr)->print();
7777 gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)",
7778 addr);
7779 fatal("... aborting");
7780 }
7782 if (!_mark_stack->push(obj)) { // stack overflow
7783 if (PrintCMSStatistics != 0) {
7784 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7785 SIZE_FORMAT, _mark_stack->capacity());
7786 }
7787 assert(_mark_stack->isFull(), "Else push should have succeeded");
7788 handle_stack_overflow(addr);
7789 }
7790 // anything including and to the right of _finger
7791 // will be scanned as we iterate over the remainder of the
7792 // bit map
7793 }
7794 }
7796 PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
7797 MemRegion span,
7798 CMSBitMap* bitMap, CMSMarkStack* markStack,
7799 HeapWord* finger, MarkFromRootsClosure* parent) :
7800 CMSOopClosure(collector->ref_processor()),
7801 _collector(collector),
7802 _span(span),
7803 _bitMap(bitMap),
7804 _markStack(markStack),
7805 _finger(finger),
7806 _parent(parent)
7807 { }
7809 Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
7810 MemRegion span,
7811 CMSBitMap* bit_map,
7812 OopTaskQueue* work_queue,
7813 CMSMarkStack* overflow_stack,
7814 HeapWord* finger,
7815 HeapWord** global_finger_addr,
7816 Par_MarkFromRootsClosure* parent) :
7817 CMSOopClosure(collector->ref_processor()),
7818 _collector(collector),
7819 _whole_span(collector->_span),
7820 _span(span),
7821 _bit_map(bit_map),
7822 _work_queue(work_queue),
7823 _overflow_stack(overflow_stack),
7824 _finger(finger),
7825 _global_finger_addr(global_finger_addr),
7826 _parent(parent)
7827 { }
7829 // Assumes thread-safe access by callers, who are
7830 // responsible for mutual exclusion.
7831 void CMSCollector::lower_restart_addr(HeapWord* low) {
7832 assert(_span.contains(low), "Out of bounds addr");
7833 if (_restart_addr == NULL) {
7834 _restart_addr = low;
7835 } else {
7836 _restart_addr = MIN2(_restart_addr, low);
7837 }
7838 }
7840 // Upon stack overflow, we discard (part of) the stack,
7841 // remembering the least address amongst those discarded
7842 // in CMSCollector's _restart_address.
7843 void PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
7844 // Remember the least grey address discarded
7845 HeapWord* ra = (HeapWord*)_markStack->least_value(lost);
7846 _collector->lower_restart_addr(ra);
7847 _markStack->reset(); // discard stack contents
7848 _markStack->expand(); // expand the stack if possible
7849 }
7851 // Upon stack overflow, we discard (part of) the stack,
7852 // remembering the least address amongst those discarded
7853 // in CMSCollector's _restart_address.
7854 void Par_PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
7855 // We need to do this under a mutex to prevent other
7856 // workers from interfering with the work done below.
7857 MutexLockerEx ml(_overflow_stack->par_lock(),
7858 Mutex::_no_safepoint_check_flag);
7859 // Remember the least grey address discarded
7860 HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
7861 _collector->lower_restart_addr(ra);
7862 _overflow_stack->reset(); // discard stack contents
7863 _overflow_stack->expand(); // expand the stack if possible
7864 }
7866 void CMKlassClosure::do_klass(Klass* k) {
7867 assert(_oop_closure != NULL, "Not initialized?");
7868 k->oops_do(_oop_closure);
7869 }
7871 void PushOrMarkClosure::do_oop(oop obj) {
7872 // Ignore mark word because we are running concurrent with mutators.
7873 assert(obj->is_oop_or_null(true), "expected an oop or NULL");
7874 HeapWord* addr = (HeapWord*)obj;
7875 if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
7876 // Oop lies in _span and isn't yet grey or black
7877 _bitMap->mark(addr); // now grey
7878 if (addr < _finger) {
7879 // the bit map iteration has already either passed, or
7880 // sampled, this bit in the bit map; we'll need to
7881 // use the marking stack to scan this oop's oops.
7882 bool simulate_overflow = false;
7883 NOT_PRODUCT(
7884 if (CMSMarkStackOverflowALot &&
7885 _collector->simulate_overflow()) {
7886 // simulate a stack overflow
7887 simulate_overflow = true;
7888 }
7889 )
7890 if (simulate_overflow || !_markStack->push(obj)) { // stack overflow
7891 if (PrintCMSStatistics != 0) {
7892 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7893 SIZE_FORMAT, _markStack->capacity());
7894 }
7895 assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded");
7896 handle_stack_overflow(addr);
7897 }
7898 }
7899 // anything including and to the right of _finger
7900 // will be scanned as we iterate over the remainder of the
7901 // bit map
7902 do_yield_check();
7903 }
7904 }
7906 void PushOrMarkClosure::do_oop(oop* p) { PushOrMarkClosure::do_oop_work(p); }
7907 void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
7909 void Par_PushOrMarkClosure::do_oop(oop obj) {
7910 // Ignore mark word because we are running concurrent with mutators.
7911 assert(obj->is_oop_or_null(true), "expected an oop or NULL");
7912 HeapWord* addr = (HeapWord*)obj;
7913 if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
7914     // Oop lies in _whole_span and isn't yet grey or black
7915 // We read the global_finger (volatile read) strictly after marking oop
7916 bool res = _bit_map->par_mark(addr); // now grey
7917 volatile HeapWord** gfa = (volatile HeapWord**)_global_finger_addr;
7918 // Should we push this marked oop on our stack?
7919 // -- if someone else marked it, nothing to do
7920 // -- if target oop is above global finger nothing to do
7921 // -- if target oop is in chunk and above local finger
7922 // then nothing to do
7923 // -- else push on work queue
7924 if ( !res // someone else marked it, they will deal with it
7925 || (addr >= *gfa) // will be scanned in a later task
7926 || (_span.contains(addr) && addr >= _finger)) { // later in this chunk
7927 return;
7928 }
7929 // the bit map iteration has already either passed, or
7930 // sampled, this bit in the bit map; we'll need to
7931 // use the marking stack to scan this oop's oops.
7932 bool simulate_overflow = false;
7933 NOT_PRODUCT(
7934 if (CMSMarkStackOverflowALot &&
7935 _collector->simulate_overflow()) {
7936 // simulate a stack overflow
7937 simulate_overflow = true;
7938 }
7939 )
7940 if (simulate_overflow ||
7941 !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
7942 // stack overflow
7943 if (PrintCMSStatistics != 0) {
7944 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7945 SIZE_FORMAT, _overflow_stack->capacity());
7946 }
7947 // We cannot assert that the overflow stack is full because
7948 // it may have been emptied since.
7949 assert(simulate_overflow ||
7950 _work_queue->size() == _work_queue->max_elems(),
7951 "Else push should have succeeded");
7952 handle_stack_overflow(addr);
7953 }
7954 do_yield_check();
7955 }
7956 }
7958 void Par_PushOrMarkClosure::do_oop(oop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
7959 void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
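// An illustrative walk through the push decision above (hypothetical
// addresses): suppose this worker's chunk _span is [0x1000, 0x2000), its
// local _finger is at 0x1400 and the global finger is at 0x3000. A newly
// marked oop at 0x1600 lies in the chunk at or above the local finger, so
// this worker's own bit map iteration will reach it and no push is needed;
// an oop at 0x3800 lies at or above the global finger and will be covered
// by a later task. An oop at 0x1200 (already passed by the local finger) or
// at 0x2800 (in another, already-claimed chunk below the global finger)
// would otherwise be missed, so it must be pushed on the work queue or the
// overflow stack.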
7961 PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
7962 MemRegion span,
7963 ReferenceProcessor* rp,
7964 CMSBitMap* bit_map,
7965 CMSBitMap* mod_union_table,
7966 CMSMarkStack* mark_stack,
7967 bool concurrent_precleaning):
7968 CMSOopClosure(rp),
7969 _collector(collector),
7970 _span(span),
7971 _bit_map(bit_map),
7972 _mod_union_table(mod_union_table),
7973 _mark_stack(mark_stack),
7974 _concurrent_precleaning(concurrent_precleaning)
7975 {
7976 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7977 }
7979 // Grey object rescan during pre-cleaning and second checkpoint phases --
7980 // the non-parallel version (the parallel version appears further below.)
7981 void PushAndMarkClosure::do_oop(oop obj) {
7982 // Ignore mark word verification. If during concurrent precleaning,
7983 // the object monitor may be locked. If during the checkpoint
7984 // phases, the object may already have been reached by a different
7985 // path and may be at the end of the global overflow list (so
7986 // the mark word may be NULL).
7987 assert(obj->is_oop_or_null(true /* ignore mark word */),
7988 "expected an oop or NULL");
7989 HeapWord* addr = (HeapWord*)obj;
7990 // Check if oop points into the CMS generation
7991 // and is not marked
7992 if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
7993 // a white object ...
7994 _bit_map->mark(addr); // ... now grey
7995 // push on the marking stack (grey set)
7996 bool simulate_overflow = false;
7997 NOT_PRODUCT(
7998 if (CMSMarkStackOverflowALot &&
7999 _collector->simulate_overflow()) {
8000 // simulate a stack overflow
8001 simulate_overflow = true;
8002 }
8003 )
8004 if (simulate_overflow || !_mark_stack->push(obj)) {
8005 if (_concurrent_precleaning) {
8006 // During precleaning we can just dirty the appropriate card(s)
8007 // in the mod union table, thus ensuring that the object remains
8008 // in the grey set and continue. In the case of object arrays
8009 // we need to dirty all of the cards that the object spans,
8010 // since the rescan of object arrays will be limited to the
8011 // dirty cards.
8012         // Note that no one can be interfering with us in this action
8013 // of dirtying the mod union table, so no locking or atomics
8014 // are required.
8015 if (obj->is_objArray()) {
8016 size_t sz = obj->size();
8017 HeapWord* end_card_addr = (HeapWord*)round_to(
8018 (intptr_t)(addr+sz), CardTableModRefBS::card_size);
8019 MemRegion redirty_range = MemRegion(addr, end_card_addr);
8020 assert(!redirty_range.is_empty(), "Arithmetical tautology");
8021 _mod_union_table->mark_range(redirty_range);
8022 } else {
8023 _mod_union_table->mark(addr);
8024 }
8025 _collector->_ser_pmc_preclean_ovflw++;
8026 } else {
8027 // During the remark phase, we need to remember this oop
8028 // in the overflow list.
8029 _collector->push_on_overflow_list(obj);
8030 _collector->_ser_pmc_remark_ovflw++;
8031 }
8032 }
8033 }
8034 }
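// Worked example for the objArray overflow case above (hypothetical numbers,
// assuming 512-byte cards and a 64-bit HeapWord): an object array at address
// 0x10000 of 100 words (800 bytes) that cannot be pushed during precleaning
// gets end_card_addr == round_to(0x10320, 512) == 0x10400, so the mod union
// table is dirtied over [0x10000, 0x10400) -- both cards the array spans --
// and the dirty-card rescan will revisit the whole array later.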
8036 Par_PushAndMarkClosure::Par_PushAndMarkClosure(CMSCollector* collector,
8037 MemRegion span,
8038 ReferenceProcessor* rp,
8039 CMSBitMap* bit_map,
8040 OopTaskQueue* work_queue):
8041 CMSOopClosure(rp),
8042 _collector(collector),
8043 _span(span),
8044 _bit_map(bit_map),
8045 _work_queue(work_queue)
8046 {
8047 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
8048 }
8050 void PushAndMarkClosure::do_oop(oop* p) { PushAndMarkClosure::do_oop_work(p); }
8051 void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
8053 // Grey object rescan during second checkpoint phase --
8054 // the parallel version.
8055 void Par_PushAndMarkClosure::do_oop(oop obj) {
8056 // In the assert below, we ignore the mark word because
8057 // this oop may point to an already visited object that is
8058 // on the overflow stack (in which case the mark word has
8059 // been hijacked for chaining into the overflow stack --
8060 // if this is the last object in the overflow stack then
8061 // its mark word will be NULL). Because this object may
8062 // have been subsequently popped off the global overflow
8063 // stack, and the mark word possibly restored to the prototypical
8064   // value, by the time we get to examine this failing assert in
8065 // the debugger, is_oop_or_null(false) may subsequently start
8066 // to hold.
8067 assert(obj->is_oop_or_null(true),
8068 "expected an oop or NULL");
8069 HeapWord* addr = (HeapWord*)obj;
8070 // Check if oop points into the CMS generation
8071 // and is not marked
8072 if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
8073 // a white object ...
8074 // If we manage to "claim" the object, by being the
8075 // first thread to mark it, then we push it on our
8076 // marking stack
8077 if (_bit_map->par_mark(addr)) { // ... now grey
8078 // push on work queue (grey set)
8079 bool simulate_overflow = false;
8080 NOT_PRODUCT(
8081 if (CMSMarkStackOverflowALot &&
8082 _collector->par_simulate_overflow()) {
8083 // simulate a stack overflow
8084 simulate_overflow = true;
8085 }
8086 )
8087 if (simulate_overflow || !_work_queue->push(obj)) {
8088 _collector->par_push_on_overflow_list(obj);
8089 _collector->_par_pmc_remark_ovflw++; // imprecise OK: no need to CAS
8090 }
8091 } // Else, some other thread got there first
8092 }
8093 }
8095 void Par_PushAndMarkClosure::do_oop(oop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
8096 void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
8098 void CMSPrecleanRefsYieldClosure::do_yield_work() {
8099 Mutex* bml = _collector->bitMapLock();
8100 assert_lock_strong(bml);
8101 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
8102 "CMS thread should hold CMS token");
8104 bml->unlock();
8105 ConcurrentMarkSweepThread::desynchronize(true);
8107 ConcurrentMarkSweepThread::acknowledge_yield_request();
8109 _collector->stopTimer();
8110 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
8111 if (PrintCMSStatistics != 0) {
8112 _collector->incrementYields();
8113 }
8114 _collector->icms_wait();
8116 // See the comment in coordinator_yield()
8117 for (unsigned i = 0; i < CMSYieldSleepCount &&
8118 ConcurrentMarkSweepThread::should_yield() &&
8119 !CMSCollector::foregroundGCIsActive(); ++i) {
8120 os::sleep(Thread::current(), 1, false);
8121 ConcurrentMarkSweepThread::acknowledge_yield_request();
8122 }
8124 ConcurrentMarkSweepThread::synchronize(true);
8125 bml->lock();
8127 _collector->startTimer();
8128 }
8130 bool CMSPrecleanRefsYieldClosure::should_return() {
8131 if (ConcurrentMarkSweepThread::should_yield()) {
8132 do_yield_work();
8133 }
8134 return _collector->foregroundGCIsActive();
8135 }
8137 void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) {
8138 assert(((size_t)mr.start())%CardTableModRefBS::card_size_in_words == 0,
8139 "mr should be aligned to start at a card boundary");
8140 // We'd like to assert:
8141 // assert(mr.word_size()%CardTableModRefBS::card_size_in_words == 0,
8142 // "mr should be a range of cards");
8143 // However, that would be too strong in one case -- the last
8144 // partition ends at _unallocated_block which, in general, can be
8145 // an arbitrary boundary, not necessarily card aligned.
8146 if (PrintCMSStatistics != 0) {
8147 _num_dirty_cards +=
8148 mr.word_size()/CardTableModRefBS::card_size_in_words;
8149 }
8150 _space->object_iterate_mem(mr, &_scan_cl);
8151 }
8153 SweepClosure::SweepClosure(CMSCollector* collector,
8154 ConcurrentMarkSweepGeneration* g,
8155 CMSBitMap* bitMap, bool should_yield) :
8156 _collector(collector),
8157 _g(g),
8158 _sp(g->cmsSpace()),
8159 _limit(_sp->sweep_limit()),
8160 _freelistLock(_sp->freelistLock()),
8161 _bitMap(bitMap),
8162 _yield(should_yield),
8163 _inFreeRange(false), // No free range at beginning of sweep
8164 _freeRangeInFreeLists(false), // No free range at beginning of sweep
8165 _lastFreeRangeCoalesced(false),
8166 _freeFinger(g->used_region().start())
8167 {
8168 NOT_PRODUCT(
8169 _numObjectsFreed = 0;
8170 _numWordsFreed = 0;
8171 _numObjectsLive = 0;
8172 _numWordsLive = 0;
8173 _numObjectsAlreadyFree = 0;
8174 _numWordsAlreadyFree = 0;
8175 _last_fc = NULL;
8177 _sp->initializeIndexedFreeListArrayReturnedBytes();
8178 _sp->dictionary()->initialize_dict_returned_bytes();
8179 )
8180 assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
8181 "sweep _limit out of bounds");
8182 if (CMSTraceSweeper) {
8183 gclog_or_tty->print_cr("\n====================\nStarting new sweep with limit " PTR_FORMAT,
8184 _limit);
8185 }
8186 }
8188 void SweepClosure::print_on(outputStream* st) const {
8189 tty->print_cr("_sp = [" PTR_FORMAT "," PTR_FORMAT ")",
8190 _sp->bottom(), _sp->end());
8191 tty->print_cr("_limit = " PTR_FORMAT, _limit);
8192 tty->print_cr("_freeFinger = " PTR_FORMAT, _freeFinger);
8193 NOT_PRODUCT(tty->print_cr("_last_fc = " PTR_FORMAT, _last_fc);)
8194 tty->print_cr("_inFreeRange = %d, _freeRangeInFreeLists = %d, _lastFreeRangeCoalesced = %d",
8195 _inFreeRange, _freeRangeInFreeLists, _lastFreeRangeCoalesced);
8196 }
8198 #ifndef PRODUCT
8199 // Assertion checking only: no useful work in product mode --
8200 // however, if any of the flags below become product flags,
8201 // you may need to review this code to see if it needs to be
8202 // enabled in product mode.
8203 SweepClosure::~SweepClosure() {
8204 assert_lock_strong(_freelistLock);
8205 assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
8206 "sweep _limit out of bounds");
8207 if (inFreeRange()) {
8208 warning("inFreeRange() should have been reset; dumping state of SweepClosure");
8209 print();
8210 ShouldNotReachHere();
8211 }
8212 if (Verbose && PrintGC) {
8213 gclog_or_tty->print("Collected "SIZE_FORMAT" objects, " SIZE_FORMAT " bytes",
8214 _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
8215 gclog_or_tty->print_cr("\nLive "SIZE_FORMAT" objects, "
8216 SIZE_FORMAT" bytes "
8217 "Already free "SIZE_FORMAT" objects, "SIZE_FORMAT" bytes",
8218 _numObjectsLive, _numWordsLive*sizeof(HeapWord),
8219 _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
8220 size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree)
8221 * sizeof(HeapWord);
8222 gclog_or_tty->print_cr("Total sweep: "SIZE_FORMAT" bytes", totalBytes);
8224 if (PrintCMSStatistics && CMSVerifyReturnedBytes) {
8225 size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
8226 size_t dict_returned_bytes = _sp->dictionary()->sum_dict_returned_bytes();
8227 size_t returned_bytes = indexListReturnedBytes + dict_returned_bytes;
8228 gclog_or_tty->print("Returned "SIZE_FORMAT" bytes", returned_bytes);
8229 gclog_or_tty->print(" Indexed List Returned "SIZE_FORMAT" bytes",
8230 indexListReturnedBytes);
8231 gclog_or_tty->print_cr(" Dictionary Returned "SIZE_FORMAT" bytes",
8232 dict_returned_bytes);
8233 }
8234 }
8235 if (CMSTraceSweeper) {
8236 gclog_or_tty->print_cr("end of sweep with _limit = " PTR_FORMAT "\n================",
8237 _limit);
8238 }
8239 }
8240 #endif // PRODUCT
8242 void SweepClosure::initialize_free_range(HeapWord* freeFinger,
8243 bool freeRangeInFreeLists) {
8244 if (CMSTraceSweeper) {
8245 gclog_or_tty->print("---- Start free range at 0x%x with free block (%d)\n",
8246 freeFinger, freeRangeInFreeLists);
8247 }
8248 assert(!inFreeRange(), "Trampling existing free range");
8249 set_inFreeRange(true);
8250 set_lastFreeRangeCoalesced(false);
8252 set_freeFinger(freeFinger);
8253 set_freeRangeInFreeLists(freeRangeInFreeLists);
8254 if (CMSTestInFreeList) {
8255 if (freeRangeInFreeLists) {
8256 FreeChunk* fc = (FreeChunk*) freeFinger;
8257 assert(fc->is_free(), "A chunk on the free list should be free.");
8258 assert(fc->size() > 0, "Free range should have a size");
8259 assert(_sp->verify_chunk_in_free_list(fc), "Chunk is not in free lists");
8260 }
8261 }
8262 }
8264 // Note that the sweeper runs concurrently with mutators. Thus,
8265 // it is possible for direct allocation in this generation to happen
8266 // in the middle of the sweep. Note that the sweeper also coalesces
8267 // contiguous free blocks. Thus, unless the sweeper and the allocator
8268 // synchronize appropriately freshly allocated blocks may get swept up.
8269 // synchronize appropriately, freshly allocated blocks may get swept up.
8270 // it is sweeping. Thus blocks that are determined to be free are
8271 // indeed free. There is however one additional complication:
8272 // blocks that have been allocated since the final checkpoint and
8273 // mark, will not have been marked and so would be treated as
8274 // unreachable and swept up. To prevent this, the allocator marks
8275 // the bit map when allocating during the sweep phase. This leads,
8276 // however, to a further complication -- objects may have been allocated
8277 // but not yet initialized -- in the sense that the header isn't yet
8278 // installed. The sweeper can not then determine the size of the block
8279 // in order to skip over it. To deal with this case, we use a technique
8280 // (due to Printezis) to encode such uninitialized block sizes in the
8281 // bit map. Since the bit map uses a bit per every HeapWord, but the
8282 // CMS generation has a minimum object size of 3 HeapWords, it follows
8283 // that "normal marks" won't be adjacent in the bit map (there will
8284 // always be at least two 0 bits between successive 1 bits). We make use
8285 // of these "unused" bits to represent uninitialized blocks -- the bit
8286 // corresponding to the start of the uninitialized object and the next
8287 // bit are both set. Finally, a 1 bit marks the end of the object that
8288 // started with the two consecutive 1 bits to indicate its potentially
8289 // uninitialized state.
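// Worked example of the encoding described above (illustrative word indices
// only): an uninitialized block starting at word 100 that will eventually be
// 5 words long is represented by marked bits at words 100, 101 and 104.
// A scanner that sees bits 100 and 101 both set searches for the next marked
// word at or beyond 102, finds it at 104, and recovers the size as
// 104 + 1 - 100 == 5 words -- the computation used when the block size is
// read from the Printezis bits (e.g. in do_object_careful_m() above and
// do_live_chunk() below).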
8291 size_t SweepClosure::do_blk_careful(HeapWord* addr) {
8292 FreeChunk* fc = (FreeChunk*)addr;
8293 size_t res;
8295 // Check if we are done sweeping. Below we check "addr >= _limit" rather
8296 // than "addr == _limit" because although _limit was a block boundary when
8297 // we started the sweep, it may no longer be one because heap expansion
8298 // may have caused us to coalesce the block ending at the address _limit
8299 // with a newly expanded chunk (this happens when _limit was set to the
8300 // previous _end of the space), so we may have stepped past _limit:
8301 // see the following Zeno-like trail of CRs 6977970, 7008136, 7042740.
8302 if (addr >= _limit) { // we have swept up to or past the limit: finish up
8303 assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
8304 "sweep _limit out of bounds");
8305 assert(addr < _sp->end(), "addr out of bounds");
8306 // Flush any free range we might be holding as a single
8307 // coalesced chunk to the appropriate free list.
8308 if (inFreeRange()) {
8309 assert(freeFinger() >= _sp->bottom() && freeFinger() < _limit,
8310 err_msg("freeFinger() " PTR_FORMAT" is out-of-bounds", freeFinger()));
8311 flush_cur_free_chunk(freeFinger(),
8312 pointer_delta(addr, freeFinger()));
8313 if (CMSTraceSweeper) {
8314 gclog_or_tty->print("Sweep: last chunk: ");
8315 gclog_or_tty->print("put_free_blk 0x%x ("SIZE_FORMAT") "
8316 "[coalesced:"SIZE_FORMAT"]\n",
8317 freeFinger(), pointer_delta(addr, freeFinger()),
8318 lastFreeRangeCoalesced());
8319 }
8320 }
8322 // help the iterator loop finish
8323 return pointer_delta(_sp->end(), addr);
8324 }
8326 assert(addr < _limit, "sweep invariant");
8327 // check if we should yield
8328 do_yield_check(addr);
8329 if (fc->is_free()) {
8330 // Chunk that is already free
8331 res = fc->size();
8332 do_already_free_chunk(fc);
8333 debug_only(_sp->verifyFreeLists());
8334 // If we flush the chunk at hand in lookahead_and_flush()
8335 // and it's coalesced with a preceding chunk, then the
8336 // process of "mangling" the payload of the coalesced block
8337 // will cause erasure of the size information from the
8338 // (erstwhile) header of all the coalesced blocks but the
8339 // first, so the first disjunct in the assert will not hold
8340 // in that specific case (in which case the second disjunct
8341 // will hold).
8342 assert(res == fc->size() || ((HeapWord*)fc) + res >= _limit,
8343 "Otherwise the size info doesn't change at this step");
8344 NOT_PRODUCT(
8345 _numObjectsAlreadyFree++;
8346 _numWordsAlreadyFree += res;
8347 )
8348 NOT_PRODUCT(_last_fc = fc;)
8349 } else if (!_bitMap->isMarked(addr)) {
8350 // Chunk is fresh garbage
8351 res = do_garbage_chunk(fc);
8352 debug_only(_sp->verifyFreeLists());
8353 NOT_PRODUCT(
8354 _numObjectsFreed++;
8355 _numWordsFreed += res;
8356 )
8357 } else {
8358 // Chunk that is alive.
8359 res = do_live_chunk(fc);
8360 debug_only(_sp->verifyFreeLists());
8361 NOT_PRODUCT(
8362 _numObjectsLive++;
8363 _numWordsLive += res;
8364 )
8365 }
8366 return res;
8367 }
8369 // For the smart allocation, record following
8370 // split deaths - a free chunk is removed from its free list because
8371 // it is being split into two or more chunks.
8372 // split birth - a free chunk is being added to its free list because
8373 // a larger free chunk has been split and resulted in this free chunk.
8374 // coal death - a free chunk is being removed from its free list because
8375 // it is being coalesced into a large free chunk.
8376 // coal birth - a free chunk is being added to its free list because
8377 //     it was created when two or more free chunks were coalesced into
8378 // this free chunk.
8379 //
8380 // These statistics are used to determine the desired number of free
8381 // chunks of a given size. The desired number is chosen to be relative
8382 // to the end of a CMS sweep. The desired number at the end of a sweep
8383 // is the
8384 // count-at-end-of-previous-sweep (an amount that was enough)
8385 // - count-at-beginning-of-current-sweep (the excess)
8386 // + split-births (gains in this size during interval)
8387 // - split-deaths (demands on this size during interval)
8388 // where the interval is from the end of one sweep to the end of the
8389 // next.
8390 //
8391 // When sweeping the sweeper maintains an accumulated chunk which is
8392 // the chunk that is made up of chunks that have been coalesced. That
8393 // will be termed the left-hand chunk. A new chunk of garbage that
8394 // is being considered for coalescing will be referred to as the
8395 // right-hand chunk.
8396 //
8397 // When making a decision on whether to coalesce a right-hand chunk with
8398 // the current left-hand chunk, the current count vs. the desired count
8399 // of the left-hand chunk is considered. Also if the right-hand chunk
8400 // is near the large chunk at the end of the heap (see
8401 // ConcurrentMarkSweepGeneration::isNearLargestChunk()), then the
8402 // left-hand chunk is coalesced.
8403 //
8404 // When making a decision about whether to split a chunk, the desired count
8405 // vs. the current count of the candidate to be split is also considered.
8406 // If the candidate is underpopulated (currently fewer chunks than desired)
8407 // a chunk of an overpopulated (currently more chunks than desired) size may
8408 // be chosen. The "hint" associated with a free list, if non-null, points
8409 // to a free list which may be overpopulated.
8410 //
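// Worked example of the desired-count formula above (hypothetical numbers):
// if the previous sweep ended with 40 free chunks of a given size, the
// current sweep starts with 25 of them, 10 more were created in the interval
// by splitting larger chunks, and 5 were themselves split to satisfy demand,
// then the desired count at the end of this sweep is
// 40 - 25 + 10 - 5 == 20 chunks of that size.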
8412 void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
8413 const size_t size = fc->size();
8414 // Chunks that cannot be coalesced are not in the
8415 // free lists.
8416 if (CMSTestInFreeList && !fc->cantCoalesce()) {
8417 assert(_sp->verify_chunk_in_free_list(fc),
8418 "free chunk should be in free lists");
8419 }
8420 // a chunk that is already free, should not have been
8421 // marked in the bit map
8422 HeapWord* const addr = (HeapWord*) fc;
8423 assert(!_bitMap->isMarked(addr), "free chunk should be unmarked");
8424 // Verify that the bit map has no bits marked between
8425 // addr and purported end of this block.
8426 _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
8428 // Some chunks cannot be coalesced under any circumstances.
8429 // See the definition of cantCoalesce().
8430 if (!fc->cantCoalesce()) {
8431 // This chunk can potentially be coalesced.
8432 if (_sp->adaptive_freelists()) {
8433 // All the work is done in
8434 do_post_free_or_garbage_chunk(fc, size);
8435 } else { // Not adaptive free lists
8436 // this is a free chunk that can potentially be coalesced by the sweeper;
8437 if (!inFreeRange()) {
8438 // if the next chunk is a free block that can't be coalesced
8439 // it doesn't make sense to remove this chunk from the free lists
8440 FreeChunk* nextChunk = (FreeChunk*)(addr + size);
8441 assert((HeapWord*)nextChunk <= _sp->end(), "Chunk size out of bounds?");
8442 if ((HeapWord*)nextChunk < _sp->end() && // There is another free chunk to the right ...
8443 nextChunk->is_free() && // ... which is free...
8444 nextChunk->cantCoalesce()) { // ... but can't be coalesced
8445 // nothing to do
8446 } else {
8447 // Potentially the start of a new free range:
8448 // Don't eagerly remove it from the free lists.
8449 // No need to remove it if it will just be put
8450 // back again. (Also from a pragmatic point of view
8451 // if it is a free block in a region that is beyond
8452 // any allocated blocks, an assertion will fail)
8453 // Remember the start of a free run.
8454 initialize_free_range(addr, true);
8455 // end - can coalesce with next chunk
8456 }
8457 } else {
8458         // in the midst of a free range; we are coalescing
8459 print_free_block_coalesced(fc);
8460 if (CMSTraceSweeper) {
8461 gclog_or_tty->print(" -- pick up free block 0x%x (%d)\n", fc, size);
8462 }
8463 // remove it from the free lists
8464 _sp->removeFreeChunkFromFreeLists(fc);
8465 set_lastFreeRangeCoalesced(true);
8466 // If the chunk is being coalesced and the current free range is
8467 // in the free lists, remove the current free range so that it
8468 // will be returned to the free lists in its entirety - all
8469 // the coalesced pieces included.
8470 if (freeRangeInFreeLists()) {
8471 FreeChunk* ffc = (FreeChunk*) freeFinger();
8472 assert(ffc->size() == pointer_delta(addr, freeFinger()),
8473 "Size of free range is inconsistent with chunk size.");
8474 if (CMSTestInFreeList) {
8475 assert(_sp->verify_chunk_in_free_list(ffc),
8476 "free range is not in free lists");
8477 }
8478 _sp->removeFreeChunkFromFreeLists(ffc);
8479 set_freeRangeInFreeLists(false);
8480 }
8481 }
8482 }
8483 // Note that if the chunk is not coalescable (the else arm
8484 // below), we unconditionally flush, without needing to do
8485 // a "lookahead," as we do below.
8486 if (inFreeRange()) lookahead_and_flush(fc, size);
8487 } else {
8488 // Code path common to both original and adaptive free lists.
8490     // can't coalesce with previous block; this should be treated
8491 // as the end of a free run if any
8492 if (inFreeRange()) {
8493 // we kicked some butt; time to pick up the garbage
8494 assert(freeFinger() < addr, "freeFinger points too high");
8495 flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
8496 }
8497 // else, nothing to do, just continue
8498 }
8499 }
8501 size_t SweepClosure::do_garbage_chunk(FreeChunk* fc) {
8502 // This is a chunk of garbage. It is not in any free list.
8503 // Add it to a free list or let it possibly be coalesced into
8504 // a larger chunk.
8505 HeapWord* const addr = (HeapWord*) fc;
8506 const size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
8508 if (_sp->adaptive_freelists()) {
8509 // Verify that the bit map has no bits marked between
8510 // addr and purported end of just dead object.
8511 _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
8513 do_post_free_or_garbage_chunk(fc, size);
8514 } else {
8515 if (!inFreeRange()) {
8516 // start of a new free range
8517 assert(size > 0, "A free range should have a size");
8518 initialize_free_range(addr, false);
8519 } else {
8520 // this will be swept up when we hit the end of the
8521 // free range
8522 if (CMSTraceSweeper) {
8523 gclog_or_tty->print(" -- pick up garbage 0x%x (%d) \n", fc, size);
8524 }
8525 // If the chunk is being coalesced and the current free range is
8526 // in the free lists, remove the current free range so that it
8527 // will be returned to the free lists in its entirety - all
8528 // the coalesced pieces included.
8529 if (freeRangeInFreeLists()) {
8530 FreeChunk* ffc = (FreeChunk*)freeFinger();
8531 assert(ffc->size() == pointer_delta(addr, freeFinger()),
8532 "Size of free range is inconsistent with chunk size.");
8533 if (CMSTestInFreeList) {
8534 assert(_sp->verify_chunk_in_free_list(ffc),
8535 "free range is not in free lists");
8536 }
8537 _sp->removeFreeChunkFromFreeLists(ffc);
8538 set_freeRangeInFreeLists(false);
8539 }
8540 set_lastFreeRangeCoalesced(true);
8541 }
8542 // this will be swept up when we hit the end of the free range
8544 // Verify that the bit map has no bits marked between
8545 // addr and purported end of just dead object.
8546 _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
8547 }
8548 assert(_limit >= addr + size,
8549          "A fresh chunk of garbage can't possibly straddle over _limit");
8550 if (inFreeRange()) lookahead_and_flush(fc, size);
8551 return size;
8552 }
8554 size_t SweepClosure::do_live_chunk(FreeChunk* fc) {
8555 HeapWord* addr = (HeapWord*) fc;
8556 // The sweeper has just found a live object. Return any accumulated
8557 // left hand chunk to the free lists.
8558 if (inFreeRange()) {
8559 assert(freeFinger() < addr, "freeFinger points too high");
8560 flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
8561 }
8563 // This object is live: we'd normally expect this to be
8564 // an oop, and would like to assert the following:
8565 // assert(oop(addr)->is_oop(), "live block should be an oop");
8566 // However, as we commented above, this may be an object whose
8567 // header hasn't yet been initialized.
8568 size_t size;
8569 assert(_bitMap->isMarked(addr), "Tautology for this control point");
8570 if (_bitMap->isMarked(addr + 1)) {
8571 // Determine the size from the bit map, rather than trying to
8572 // compute it from the object header.
8573 HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
8574 size = pointer_delta(nextOneAddr + 1, addr);
8575 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
8576 "alignment problem");
8578 #ifdef ASSERT
8579 if (oop(addr)->klass_or_null() != NULL) {
8580 // Ignore mark word because we are running concurrent with mutators
8581 assert(oop(addr)->is_oop(true), "live block should be an oop");
8582 assert(size ==
8583 CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
8584 "P-mark and computed size do not agree");
8585 }
8586 #endif
8588 } else {
8589 // This should be an initialized object that's alive.
8590 assert(oop(addr)->klass_or_null() != NULL,
8591 "Should be an initialized object");
8592 // Ignore mark word because we are running concurrent with mutators
8593 assert(oop(addr)->is_oop(true), "live block should be an oop");
8594 // Verify that the bit map has no bits marked between
8595 // addr and purported end of this block.
8596 size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
8597 assert(size >= 3, "Necessary for Printezis marks to work");
8598 assert(!_bitMap->isMarked(addr+1), "Tautology for this control point");
8599 DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);)
8600 }
8601 return size;
8602 }
8604 void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
8605 size_t chunkSize) {
8606 // do_post_free_or_garbage_chunk() should only be called in the case
8607 // of the adaptive free list allocator.
8608 const bool fcInFreeLists = fc->is_free();
8609 assert(_sp->adaptive_freelists(), "Should only be used in this case.");
8610 assert((HeapWord*)fc <= _limit, "sweep invariant");
8611 if (CMSTestInFreeList && fcInFreeLists) {
8612 assert(_sp->verify_chunk_in_free_list(fc), "free chunk is not in free lists");
8613 }
8615 if (CMSTraceSweeper) {
8616 gclog_or_tty->print_cr(" -- pick up another chunk at " PTR_FORMAT " (" SIZE_FORMAT ")", fc, chunkSize);
8617 }
8619 HeapWord* const fc_addr = (HeapWord*) fc;
8621 bool coalesce;
8622 const size_t left = pointer_delta(fc_addr, freeFinger());
8623 const size_t right = chunkSize;
8624 switch (FLSCoalescePolicy) {
8625 // numeric value forms a coalescing aggressiveness metric
8626 case 0: { // never coalesce
8627 coalesce = false;
8628 break;
8629 }
8630 case 1: { // coalesce if left & right chunks on overpopulated lists
8631 coalesce = _sp->coalOverPopulated(left) &&
8632 _sp->coalOverPopulated(right);
8633 break;
8634 }
8635 case 2: { // coalesce if left chunk on overpopulated list (default)
8636 coalesce = _sp->coalOverPopulated(left);
8637 break;
8638 }
8639 case 3: { // coalesce if left OR right chunk on overpopulated list
8640 coalesce = _sp->coalOverPopulated(left) ||
8641 _sp->coalOverPopulated(right);
8642 break;
8643 }
8644 case 4: { // always coalesce
8645 coalesce = true;
8646 break;
8647 }
8648 default:
8649 ShouldNotReachHere();
8650 }
8652 // Should the current free range be coalesced?
8653 // If the chunk is in a free range and either we decided to coalesce above
8654 // or the chunk is near the large block at the end of the heap
8655 // (isNearLargestChunk() returns true), then coalesce this chunk.
8656 const bool doCoalesce = inFreeRange()
8657 && (coalesce || _g->isNearLargestChunk(fc_addr));
8658 if (doCoalesce) {
8659 // Coalesce the current free range on the left with the new
8660 // chunk on the right. If either is on a free list,
8661 // it must be removed from the list and stashed in the closure.
8662 if (freeRangeInFreeLists()) {
8663 FreeChunk* const ffc = (FreeChunk*)freeFinger();
8664 assert(ffc->size() == pointer_delta(fc_addr, freeFinger()),
8665 "Size of free range is inconsistent with chunk size.");
8666 if (CMSTestInFreeList) {
8667 assert(_sp->verify_chunk_in_free_list(ffc),
8668 "Chunk is not in free lists");
8669 }
8670 _sp->coalDeath(ffc->size());
8671 _sp->removeFreeChunkFromFreeLists(ffc);
8672 set_freeRangeInFreeLists(false);
8673 }
8674 if (fcInFreeLists) {
8675 _sp->coalDeath(chunkSize);
8676 assert(fc->size() == chunkSize,
8677 "The chunk has the wrong size or is not in the free lists");
8678 _sp->removeFreeChunkFromFreeLists(fc);
8679 }
8680 set_lastFreeRangeCoalesced(true);
8681 print_free_block_coalesced(fc);
8682 } else { // not in a free range and/or should not coalesce
8683 // Return the current free range and start a new one.
8684 if (inFreeRange()) {
8685 // In a free range but cannot coalesce with the right hand chunk.
8686 // Put the current free range into the free lists.
8687 flush_cur_free_chunk(freeFinger(),
8688 pointer_delta(fc_addr, freeFinger()));
8689 }
8690 // Set up for new free range. Pass along whether the right hand
8691 // chunk is in the free lists.
8692 initialize_free_range((HeapWord*)fc, fcInFreeLists);
8693 }
8694 }
8696 // Lookahead flush:
8697 // If we are tracking a free range, and this is the last chunk that
8698 // we'll look at because its end crosses past _limit, we'll preemptively
8699 // flush it along with any free range we may be holding on to. Note that
8700 // this can be the case only for an already free or freshly garbage
8701 // chunk. If this block is an object, it can never straddle
8702 // over _limit. The "straddling" occurs when _limit is set at
8703 // the previous end of the space when this cycle started, and
8704 // a subsequent heap expansion caused the previously co-terminal
8705 // free block to be coalesced with the newly expanded portion,
8706 // thus rendering _limit a non-block-boundary making it dangerous
8707 // for the sweeper to step over and examine.
8708 void SweepClosure::lookahead_and_flush(FreeChunk* fc, size_t chunk_size) {
8709 assert(inFreeRange(), "Should only be called if currently in a free range.");
8710 HeapWord* const eob = ((HeapWord*)fc) + chunk_size;
8711 assert(_sp->used_region().contains(eob - 1),
8712 err_msg("eob = " PTR_FORMAT " eob-1 = " PTR_FORMAT " _limit = " PTR_FORMAT
8713 " out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")"
8714 " when examining fc = " PTR_FORMAT "(" SIZE_FORMAT ")",
8715 eob, eob-1, _limit, _sp->bottom(), _sp->end(), fc, chunk_size));
8716 if (eob >= _limit) {
8717 assert(eob == _limit || fc->is_free(), "Only a free chunk should allow us to cross over the limit");
8718 if (CMSTraceSweeper) {
8719 gclog_or_tty->print_cr("_limit " PTR_FORMAT " reached or crossed by block "
8720 "[" PTR_FORMAT "," PTR_FORMAT ") in space "
8721 "[" PTR_FORMAT "," PTR_FORMAT ")",
8722 _limit, fc, eob, _sp->bottom(), _sp->end());
8723 }
8724 // Return the storage we are tracking back into the free lists.
8725 if (CMSTraceSweeper) {
8726 gclog_or_tty->print_cr("Flushing ... ");
8727 }
8728 assert(freeFinger() < eob, "Error");
8729 flush_cur_free_chunk(freeFinger(), pointer_delta(eob, freeFinger()));
8730 }
8731 }
8733 void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
8734 assert(inFreeRange(), "Should only be called if currently in a free range.");
8735 assert(size > 0,
8736 "A zero sized chunk cannot be added to the free lists.");
8737 if (!freeRangeInFreeLists()) {
8738 if (CMSTestInFreeList) {
8739 FreeChunk* fc = (FreeChunk*) chunk;
8740 fc->set_size(size);
8741 assert(!_sp->verify_chunk_in_free_list(fc),
8742 "chunk should not be in free lists yet");
8743 }
8744 if (CMSTraceSweeper) {
8745 gclog_or_tty->print_cr(" -- add free block " PTR_FORMAT " (" SIZE_FORMAT ") to free lists",
8746 chunk, size);
8747 }
8748 // A new free range is going to be starting. The current
8749 // free range has not been added to the free lists yet or
8750 // was removed so add it back.
8751 // If the current free range was coalesced, then the death
8752 // of the free range was recorded. Record a birth now.
8753 if (lastFreeRangeCoalesced()) {
8754 _sp->coalBirth(size);
8755 }
8756 _sp->addChunkAndRepairOffsetTable(chunk, size,
8757 lastFreeRangeCoalesced());
8758 } else if (CMSTraceSweeper) {
8759 gclog_or_tty->print_cr("Already in free list: nothing to flush");
8760 }
8761 set_inFreeRange(false);
8762 set_freeRangeInFreeLists(false);
8763 }
8765 // We take a break if we've been at this for a while,
8766 // so as to avoid monopolizing the locks involved.
8767 void SweepClosure::do_yield_work(HeapWord* addr) {
8768 // Return current free chunk being used for coalescing (if any)
8769 // to the appropriate freelist. After yielding, the next
8770 // free block encountered will start a coalescing range of
8771 // free blocks. If the next free block is adjacent to the
8772 // chunk just flushed, they will need to wait for the next
8773 // sweep to be coalesced.
8774 if (inFreeRange()) {
8775 flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
8776 }
8778 // First give up the locks, then yield, then re-lock.
8779 // We should probably use a constructor/destructor idiom to
8780 // do this unlock/lock or modify the MutexUnlocker class to
8781 // serve our purpose. XXX
8782 assert_lock_strong(_bitMap->lock());
8783 assert_lock_strong(_freelistLock);
8784 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
8785 "CMS thread should hold CMS token");
8786 _bitMap->lock()->unlock();
8787 _freelistLock->unlock();
8788 ConcurrentMarkSweepThread::desynchronize(true);
8789 ConcurrentMarkSweepThread::acknowledge_yield_request();
8790 _collector->stopTimer();
8791 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
8792 if (PrintCMSStatistics != 0) {
8793 _collector->incrementYields();
8794 }
8795 _collector->icms_wait();
8797 // See the comment in coordinator_yield()
8798 for (unsigned i = 0; i < CMSYieldSleepCount &&
8799 ConcurrentMarkSweepThread::should_yield() &&
8800 !CMSCollector::foregroundGCIsActive(); ++i) {
8801 os::sleep(Thread::current(), 1, false);
8802 ConcurrentMarkSweepThread::acknowledge_yield_request();
8803 }
8805 ConcurrentMarkSweepThread::synchronize(true);
8806 _freelistLock->lock();
8807 _bitMap->lock()->lock_without_safepoint_check();
8808 _collector->startTimer();
8809 }
8811 #ifndef PRODUCT
8812 // This is actually very useful in a product build if it can
8813 // be called from the debugger. Compile it into the product
8814 // as needed.
8815 bool debug_verify_chunk_in_free_list(FreeChunk* fc) {
8816 return debug_cms_space->verify_chunk_in_free_list(fc);
8817 }
8818 #endif
8820 void SweepClosure::print_free_block_coalesced(FreeChunk* fc) const {
8821 if (CMSTraceSweeper) {
8822 gclog_or_tty->print_cr("Sweep:coal_free_blk " PTR_FORMAT " (" SIZE_FORMAT ")",
8823 fc, fc->size());
8824 }
8825 }
8827 // CMSIsAliveClosure
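// Objects outside the span being collected are assumed to be live;
// objects inside the span are live iff they are marked in the CMS bit map.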
8828 bool CMSIsAliveClosure::do_object_b(oop obj) {
8829 HeapWord* addr = (HeapWord*)obj;
8830 return addr != NULL &&
8831 (!_span.contains(addr) || _bit_map->isMarked(addr));
8832 }
8835 CMSKeepAliveClosure::CMSKeepAliveClosure( CMSCollector* collector,
8836 MemRegion span,
8837 CMSBitMap* bit_map, CMSMarkStack* mark_stack,
8838 bool cpc):
8839 _collector(collector),
8840 _span(span),
8841 _bit_map(bit_map),
8842 _mark_stack(mark_stack),
8843 _concurrent_precleaning(cpc) {
8844 assert(!_span.is_empty(), "Empty span could spell trouble");
8845 }
8848 // CMSKeepAliveClosure: the serial version
8849 void CMSKeepAliveClosure::do_oop(oop obj) {
8850 HeapWord* addr = (HeapWord*)obj;
8851 if (_span.contains(addr) &&
8852 !_bit_map->isMarked(addr)) {
8853 _bit_map->mark(addr);
8854 bool simulate_overflow = false;
8855 NOT_PRODUCT(
8856 if (CMSMarkStackOverflowALot &&
8857 _collector->simulate_overflow()) {
8858 // simulate a stack overflow
8859 simulate_overflow = true;
8860 }
8861 )
8862 if (simulate_overflow || !_mark_stack->push(obj)) {
8863 if (_concurrent_precleaning) {
8864 // We dirty the overflown object and let the remark
8865 // phase deal with it.
8866 assert(_collector->overflow_list_is_empty(), "Error");
8867 // In the case of object arrays, we need to dirty all of
8868 // the cards that the object spans. No locking or atomics
8869 // are needed since no one else can be mutating the mod union
8870 // table.
8871 if (obj->is_objArray()) {
8872 size_t sz = obj->size();
8873 HeapWord* end_card_addr =
8874 (HeapWord*)round_to((intptr_t)(addr+sz), CardTableModRefBS::card_size);
8875 MemRegion redirty_range = MemRegion(addr, end_card_addr);
8876 assert(!redirty_range.is_empty(), "Arithmetical tautology");
8877 _collector->_modUnionTable.mark_range(redirty_range);
8878 } else {
8879 _collector->_modUnionTable.mark(addr);
8880 }
8881 _collector->_ser_kac_preclean_ovflw++;
8882 } else {
8883 _collector->push_on_overflow_list(obj);
8884 _collector->_ser_kac_ovflw++;
8885 }
8886 }
8887 }
8888 }
8890 void CMSKeepAliveClosure::do_oop(oop* p) { CMSKeepAliveClosure::do_oop_work(p); }
8891 void CMSKeepAliveClosure::do_oop(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
8893 // CMSParKeepAliveClosure: a parallel version of the above.
8894 // The work queues are private to each closure (thread),
8895 // but (may be) available for stealing by other threads.
8896 void CMSParKeepAliveClosure::do_oop(oop obj) {
8897 HeapWord* addr = (HeapWord*)obj;
8898 if (_span.contains(addr) &&
8899 !_bit_map->isMarked(addr)) {
8900 // In general, during recursive tracing, several threads
8901 // may be concurrently getting here; the first one to
8902 // "tag" it, claims it.
8903 if (_bit_map->par_mark(addr)) {
8904 bool res = _work_queue->push(obj);
8905 assert(res, "Low water mark should be much less than capacity");
8906 // Do a recursive trim in the hope that this will keep
8907 // stack usage lower, but leave some oops for potential stealers
8908 trim_queue(_low_water_mark);
8909 } // Else, another thread got there first
8910 }
8911 }
8913 void CMSParKeepAliveClosure::do_oop(oop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
8914 void CMSParKeepAliveClosure::do_oop(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
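// Trim our work queue so that its length does not exceed "max" on return;
// each oop popped off is scanned with the mark-and-push closure, which may
// in turn push more work onto the queue.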
8916 void CMSParKeepAliveClosure::trim_queue(uint max) {
8917 while (_work_queue->size() > max) {
8918 oop new_oop;
8919 if (_work_queue->pop_local(new_oop)) {
8920 assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
8921 assert(_bit_map->isMarked((HeapWord*)new_oop),
8922 "no white objects on this stack!");
8923 assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
8924 // iterate over the oops in this oop, marking and pushing
8925 // the ones in CMS heap (i.e. in _span).
8926 new_oop->oop_iterate(&_mark_and_push);
8927 }
8928 }
8929 }
8931 CMSInnerParMarkAndPushClosure::CMSInnerParMarkAndPushClosure(
8932 CMSCollector* collector,
8933 MemRegion span, CMSBitMap* bit_map,
8934 OopTaskQueue* work_queue):
8935 _collector(collector),
8936 _span(span),
8937 _bit_map(bit_map),
8938 _work_queue(work_queue) { }
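// If the object lies in the span and is not yet marked, atomically mark it
// and push it on this thread's work queue; if the push fails (or a stack
// overflow is being simulated), divert the object to the shared overflow list.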
8940 void CMSInnerParMarkAndPushClosure::do_oop(oop obj) {
8941 HeapWord* addr = (HeapWord*)obj;
8942 if (_span.contains(addr) &&
8943 !_bit_map->isMarked(addr)) {
8944 if (_bit_map->par_mark(addr)) {
8945 bool simulate_overflow = false;
8946 NOT_PRODUCT(
8947 if (CMSMarkStackOverflowALot &&
8948 _collector->par_simulate_overflow()) {
8949 // simulate a stack overflow
8950 simulate_overflow = true;
8951 }
8952 )
8953 if (simulate_overflow || !_work_queue->push(obj)) {
8954 _collector->par_push_on_overflow_list(obj);
8955 _collector->_par_kac_ovflw++;
8956 }
8957 } // Else another thread got there already
8958 }
8959 }
8961 void CMSInnerParMarkAndPushClosure::do_oop(oop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
8962 void CMSInnerParMarkAndPushClosure::do_oop(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
8964 //////////////////////////////////////////////////////////////////
8965 // CMSExpansionCause /////////////////////////////
8966 //////////////////////////////////////////////////////////////////
8967 const char* CMSExpansionCause::to_string(CMSExpansionCause::Cause cause) {
8968 switch (cause) {
8969 case _no_expansion:
8970 return "No expansion";
8971 case _satisfy_free_ratio:
8972 return "Free ratio";
8973 case _satisfy_promotion:
8974 return "Satisfy promotion";
8975 case _satisfy_allocation:
8976 return "allocation";
8977 case _allocate_par_lab:
8978 return "Par LAB";
8979 case _allocate_par_spooling_space:
8980 return "Par Spooling Space";
8981 case _adaptive_size_policy:
8982 return "Ergonomics";
8983 default:
8984 return "unknown";
8985 }
8986 }
8988 void CMSDrainMarkingStackClosure::do_void() {
8989 // the max number to take from overflow list at a time
8990 const size_t num = _mark_stack->capacity()/4;
8991 assert(!_concurrent_precleaning || _collector->overflow_list_is_empty(),
8992 "Overflow list should be NULL during concurrent phases");
8993 while (!_mark_stack->isEmpty() ||
8994 // if stack is empty, check the overflow list
8995 _collector->take_from_overflow_list(num, _mark_stack)) {
8996 oop obj = _mark_stack->pop();
8997 HeapWord* addr = (HeapWord*)obj;
8998 assert(_span.contains(addr), "Should be within span");
8999 assert(_bit_map->isMarked(addr), "Should be marked");
9000 assert(obj->is_oop(), "Should be an oop");
9001 obj->oop_iterate(_keep_alive);
9002 }
9003 }
9005 void CMSParDrainMarkingStackClosure::do_void() {
9006 // drain queue
9007 trim_queue(0);
9008 }
9010 // Trim our work_queue so its length is below max at return
9011 void CMSParDrainMarkingStackClosure::trim_queue(uint max) {
9012 while (_work_queue->size() > max) {
9013 oop new_oop;
9014 if (_work_queue->pop_local(new_oop)) {
9015 assert(new_oop->is_oop(), "Expected an oop");
9016 assert(_bit_map->isMarked((HeapWord*)new_oop),
9017 "no white objects on this stack!");
9018 assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
9019 // iterate over the oops in this oop, marking and pushing
9020 // the ones in CMS heap (i.e. in _span).
9021 new_oop->oop_iterate(&_mark_and_push);
9022 }
9023 }
9024 }
9026 ////////////////////////////////////////////////////////////////////
9027 // Support for Marking Stack Overflow list handling and related code
9028 ////////////////////////////////////////////////////////////////////
9029 // Much of the following code is similar in shape and spirit to the
9030 // code used in ParNewGC. We should try and share that code
9031 // as much as possible in the future.
9033 #ifndef PRODUCT
9034 // Debugging support for CMSStackOverflowALot
9036 // It's OK to call this multi-threaded; the worst thing
9037 // that can happen is that we'll get a bunch of closely
9038 // spaced simulated overflows, but that's OK, in fact
9039 // probably good as it would exercise the overflow code
9040 // under contention.
9041 bool CMSCollector::simulate_overflow() {
9042 if (_overflow_counter-- <= 0) { // just being defensive
9043 _overflow_counter = CMSMarkStackOverflowInterval;
9044 return true;
9045 } else {
9046 return false;
9047 }
9048 }
9050 bool CMSCollector::par_simulate_overflow() {
9051 return simulate_overflow();
9052 }
9053 #endif
9055 // Single-threaded
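// Transfer up to "num" oops from the overflow list onto "stack". The
// overflow list is threaded through the mark words of the objects on it;
// each mark word consumed here is reset to the default prototype value.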
9056 bool CMSCollector::take_from_overflow_list(size_t num, CMSMarkStack* stack) {
9057 assert(stack->isEmpty(), "Expected precondition");
9058 assert(stack->capacity() > num, "Shouldn't bite more than can chew");
9059 size_t i = num;
9060 oop cur = _overflow_list;
9061 const markOop proto = markOopDesc::prototype();
9062 NOT_PRODUCT(ssize_t n = 0;)
9063 for (oop next; i > 0 && cur != NULL; cur = next, i--) {
9064 next = oop(cur->mark());
9065 cur->set_mark(proto); // until proven otherwise
9066 assert(cur->is_oop(), "Should be an oop");
9067 bool res = stack->push(cur);
9068 assert(res, "Bit off more than can chew?");
9069 NOT_PRODUCT(n++;)
9070 }
9071 _overflow_list = cur;
9072 #ifndef PRODUCT
9073 assert(_num_par_pushes >= n, "Too many pops?");
9074 _num_par_pushes -= n;
9075 #endif
9076 return !stack->isEmpty();
9077 }
9079 #define BUSY (cast_to_oop<intptr_t>(0x1aff1aff))
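// BUSY is a sentinel (non-oop) value installed at the head of the overflow
// list by a thread that is in the middle of claiming the list, so that other
// threads can tell the list is temporarily unavailable.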
9080 // (MT-safe) Get a prefix of at most "num" from the list.
9081 // The overflow list is chained through the mark word of
9082 // each object in the list. We fetch the entire list,
9083 // break off a prefix of the right size and return the
9084 // remainder. If other threads try to take objects from
9085 // the overflow list at that time, they will wait for
9086 // some time to see if data becomes available. If (and
9087 // only if) another thread places one or more object(s)
9088 // on the global list before we have returned the suffix
9089 // to the global list, we will walk down our local list
9090 // to find its end and append the global list to
9091 // our suffix before returning it. This suffix walk can
9092 // prove to be expensive (quadratic in the amount of traffic)
9093 // when there are many objects in the overflow list and
9094 // there is much producer-consumer contention on the list.
9095 // *NOTE*: The overflow list manipulation code here and
9096 // in ParNewGeneration:: are very similar in shape,
9097 // except that in the ParNew case we use the old (from/eden)
9098 // copy of the object to thread the list via its klass word.
9099 // Because of the common code, if you make any changes in
9100 // the code below, please check the ParNew version to see if
9101 // similar changes might be needed.
9102 // CR 6797058 has been filed to consolidate the common code.
9103 bool CMSCollector::par_take_from_overflow_list(size_t num,
9104 OopTaskQueue* work_q,
9105 int no_of_gc_threads) {
9106 assert(work_q->size() == 0, "First empty local work queue");
9107 assert(num < work_q->max_elems(), "Can't bite more than we can chew");
9108 if (_overflow_list == NULL) {
9109 return false;
9110 }
9111 // Grab the entire list; we'll put back a suffix
9112 oop prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
9113 Thread* tid = Thread::current();
9114 // Before "no_of_gc_threads" was introduced CMSOverflowSpinCount was
9115 // set to ParallelGCThreads.
9116 size_t CMSOverflowSpinCount = (size_t) no_of_gc_threads; // was ParallelGCThreads;
9117 size_t sleep_time_millis = MAX2((size_t)1, num/100);
9118 // If the list is busy, we spin for a short while,
9119 // sleeping between attempts to get the list.
9120 for (size_t spin = 0; prefix == BUSY && spin < CMSOverflowSpinCount; spin++) {
9121 os::sleep(tid, sleep_time_millis, false);
9122 if (_overflow_list == NULL) {
9123 // Nothing left to take
9124 return false;
9125 } else if (_overflow_list != BUSY) {
9126 // Try and grab the prefix
9127 prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
9128 }
9129 }
9130 // If the list was found to be empty, or we spun long
9131 // enough, we give up and return empty-handed. If we leave
9132 // the list in the BUSY state below, it must be the case that
9133 // some other thread holds the overflow list and will set it
9134 // to a non-BUSY state in the future.
9135 if (prefix == NULL || prefix == BUSY) {
9136 // Nothing to take or waited long enough
9137 if (prefix == NULL) {
9138 // Write back the NULL in case we overwrote it with BUSY above
9139 // and it is still the same value.
9140 (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
9141 }
9142 return false;
9143 }
9144 assert(prefix != NULL && prefix != BUSY, "Error");
9145 size_t i = num;
9146 oop cur = prefix;
9147 // Walk down the first "num" objects, unless we reach the end.
9148 for (; i > 1 && cur->mark() != NULL; cur = oop(cur->mark()), i--);
9149 if (cur->mark() == NULL) {
9150 // We have "num" or fewer elements in the list, so there
9151 // is nothing to return to the global list.
9152 // Write back the NULL in lieu of the BUSY we wrote
9153 // above, if it is still the same value.
9154 if (_overflow_list == BUSY) {
9155 (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
9156 }
9157 } else {
9158 // Chop off the suffix and return it to the global list.
9159 assert(cur->mark() != BUSY, "Error");
9160 oop suffix_head = cur->mark(); // suffix will be put back on global list
9161 cur->set_mark(NULL); // break off suffix
9162 // It's possible that the list is still in the empty (busy) state
9163 // we left it in a short while ago; in that case we may be
9164 // able to place back the suffix without incurring the cost
9165 // of a walk down the list.
9166 oop observed_overflow_list = _overflow_list;
9167 oop cur_overflow_list = observed_overflow_list;
9168 bool attached = false;
9169 while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
9170 observed_overflow_list =
9171 (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
9172 if (cur_overflow_list == observed_overflow_list) {
9173 attached = true;
9174 break;
9175 } else cur_overflow_list = observed_overflow_list;
9176 }
9177 if (!attached) {
9178 // Too bad, someone else sneaked in (at least) an element; we'll need
9179 // to do a splice. Find tail of suffix so we can prepend suffix to global
9180 // list.
9181 for (cur = suffix_head; cur->mark() != NULL; cur = (oop)(cur->mark()));
9182 oop suffix_tail = cur;
9183 assert(suffix_tail != NULL && suffix_tail->mark() == NULL,
9184 "Tautology");
9185 observed_overflow_list = _overflow_list;
9186 do {
9187 cur_overflow_list = observed_overflow_list;
9188 if (cur_overflow_list != BUSY) {
9189 // Do the splice ...
9190 suffix_tail->set_mark(markOop(cur_overflow_list));
9191 } else { // cur_overflow_list == BUSY
9192 suffix_tail->set_mark(NULL);
9193 }
9194 // ... and try to place spliced list back on overflow_list ...
9195 observed_overflow_list =
9196 (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
9197 } while (cur_overflow_list != observed_overflow_list);
9198 // ... until we have succeeded in doing so.
9199 }
9200 }
9202 // Push the prefix elements on work_q
9203 assert(prefix != NULL, "control point invariant");
9204 const markOop proto = markOopDesc::prototype();
9205 oop next;
9206 NOT_PRODUCT(ssize_t n = 0;)
9207 for (cur = prefix; cur != NULL; cur = next) {
9208 next = oop(cur->mark());
9209 cur->set_mark(proto); // until proven otherwise
9210 assert(cur->is_oop(), "Should be an oop");
9211 bool res = work_q->push(cur);
9212 assert(res, "Bit off more than we can chew?");
9213 NOT_PRODUCT(n++;)
9214 }
9215 #ifndef PRODUCT
9216 assert(_num_par_pushes >= n, "Too many pops?");
9217 Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
9218 #endif
9219 return true;
9220 }
9222 // Single-threaded
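// Prepend p to the overflow list, using p's mark word as the link field;
// the original mark word is preserved first if it must survive (see
// preserve_mark_if_necessary()).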
9223 void CMSCollector::push_on_overflow_list(oop p) {
9224 NOT_PRODUCT(_num_par_pushes++;)
9225 assert(p->is_oop(), "Not an oop");
9226 preserve_mark_if_necessary(p);
9227 p->set_mark((markOop)_overflow_list);
9228 _overflow_list = p;
9229 }
9231 // Multi-threaded; use CAS to prepend to overflow list
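// The object's mark word serves as the link field. If the list head is
// currently BUSY (claimed by a taker), the new object is linked to NULL
// instead, and the CAS loop retries until the head is successfully swapped.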
9232 void CMSCollector::par_push_on_overflow_list(oop p) {
9233 NOT_PRODUCT(Atomic::inc_ptr(&_num_par_pushes);)
9234 assert(p->is_oop(), "Not an oop");
9235 par_preserve_mark_if_necessary(p);
9236 oop observed_overflow_list = _overflow_list;
9237 oop cur_overflow_list;
9238 do {
9239 cur_overflow_list = observed_overflow_list;
9240 if (cur_overflow_list != BUSY) {
9241 p->set_mark(markOop(cur_overflow_list));
9242 } else {
9243 p->set_mark(NULL);
9244 }
9245 observed_overflow_list =
9246 (oop) Atomic::cmpxchg_ptr(p, &_overflow_list, cur_overflow_list);
9247 } while (cur_overflow_list != observed_overflow_list);
9248 }
9249 #undef BUSY
9251 // Single threaded
9252 // General Note on GrowableArray: pushes may silently fail
9253 // because we are (temporarily) out of C-heap for expanding
9254 // the stack. The problem is quite ubiquitous and affects
9255 // a lot of code in the JVM. The prudent thing for GrowableArray
9256 // to do (for now) is to exit with an error. However, that may
9257 // be too draconian in some cases because the caller may be
9258 // able to recover without much harm. For such cases, we
9259 // should probably introduce a "soft_push" method which returns
9260 // an indication of success or failure with the assumption that
9261 // the caller may be able to recover from a failure; code in
9262 // the VM can then be changed, incrementally, to deal with such
9263 // failures where possible, thus, incrementally hardening the VM
9264 // in such low resource situations.
9265 void CMSCollector::preserve_mark_work(oop p, markOop m) {
9266 _preserved_oop_stack.push(p);
9267 _preserved_mark_stack.push(m);
9268 assert(m == p->mark(), "Mark word changed");
9269 assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
9270 "bijection");
9271 }
9273 // Single threaded
9274 void CMSCollector::preserve_mark_if_necessary(oop p) {
9275 markOop m = p->mark();
9276 if (m->must_be_preserved(p)) {
9277 preserve_mark_work(p, m);
9278 }
9279 }
9281 void CMSCollector::par_preserve_mark_if_necessary(oop p) {
9282 markOop m = p->mark();
9283 if (m->must_be_preserved(p)) {
9284 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
9285 // Even though we read the mark word without holding
9286 // the lock, we are assured that it will not change
9287 // because we "own" this oop, so no other thread can
9288 // be trying to push it on the overflow list; see
9289 // the assertion in preserve_mark_work() that checks
9290 // that m == p->mark().
9291 preserve_mark_work(p, m);
9292 }
9293 }
9295 // We should be able to do this multi-threaded,
9296 // a chunk of stack being a task (this is
9297 // correct because each oop only ever appears
9298 // once in the overflow list). However, it's
9299 // not very easy to completely overlap this with
9300 // other operations, so will generally not be done
9301 // until all work's been completed. Because we
9302 // expect the preserved oop stack (set) to be small,
9303 // it's probably fine to do this single-threaded.
9304 // We can explore cleverer concurrent/overlapped/parallel
9305 // processing of preserved marks if we feel the
9306 // need for this in the future. Stack overflow should
9307 // be so rare in practice and, when it happens, its
9308 // effect on performance so great that this will
9309 // likely just be in the noise anyway.
9310 void CMSCollector::restore_preserved_marks_if_any() {
9311 assert(SafepointSynchronize::is_at_safepoint(),
9312 "world should be stopped");
9313 assert(Thread::current()->is_ConcurrentGC_thread() ||
9314 Thread::current()->is_VM_thread(),
9315 "should be single-threaded");
9316 assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
9317 "bijection");
9319 while (!_preserved_oop_stack.is_empty()) {
9320 oop p = _preserved_oop_stack.pop();
9321 assert(p->is_oop(), "Should be an oop");
9322 assert(_span.contains(p), "oop should be in _span");
9323 assert(p->mark() == markOopDesc::prototype(),
9324 "Set when taken from overflow list");
9325 markOop m = _preserved_mark_stack.pop();
9326 p->set_mark(m);
9327 }
9328 assert(_preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty(),
9329 "stacks were cleared above");
9330 }
9332 #ifndef PRODUCT
9333 bool CMSCollector::no_preserved_marks() const {
9334 return _preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty();
9335 }
9336 #endif
9338 CMSAdaptiveSizePolicy* ASConcurrentMarkSweepGeneration::cms_size_policy() const
9339 {
9340 GenCollectedHeap* gch = (GenCollectedHeap*) GenCollectedHeap::heap();
9341 CMSAdaptiveSizePolicy* size_policy =
9342 (CMSAdaptiveSizePolicy*) gch->gen_policy()->size_policy();
9343 assert(size_policy->is_gc_cms_adaptive_size_policy(),
9344 "Wrong type for size policy");
9345 return size_policy;
9346 }
9348 void ASConcurrentMarkSweepGeneration::resize(size_t cur_promo_size,
9349 size_t desired_promo_size) {
9350 if (cur_promo_size < desired_promo_size) {
9351 size_t expand_bytes = desired_promo_size - cur_promo_size;
9352 if (PrintAdaptiveSizePolicy && Verbose) {
9353 gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
9354 "Expanding tenured generation by " SIZE_FORMAT " (bytes)",
9355 expand_bytes);
9356 }
9357 expand(expand_bytes,
9358 MinHeapDeltaBytes,
9359 CMSExpansionCause::_adaptive_size_policy);
9360 } else if (desired_promo_size < cur_promo_size) {
9361 size_t shrink_bytes = cur_promo_size - desired_promo_size;
9362 if (PrintAdaptiveSizePolicy && Verbose) {
9363 gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
9364 "Shrinking tenured generation by " SIZE_FORMAT " (bytes)",
9365 shrink_bytes);
9366 }
9367 shrink(shrink_bytes);
9368 }
9369 }
9371 CMSGCAdaptivePolicyCounters* ASConcurrentMarkSweepGeneration::gc_adaptive_policy_counters() {
9372 GenCollectedHeap* gch = GenCollectedHeap::heap();
9373 CMSGCAdaptivePolicyCounters* counters =
9374 (CMSGCAdaptivePolicyCounters*) gch->collector_policy()->counters();
9375 assert(counters->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
9376 "Wrong kind of counters");
9377 return counters;
9378 }
9381 void ASConcurrentMarkSweepGeneration::update_counters() {
9382 if (UsePerfData) {
9383 _space_counters->update_all();
9384 _gen_counters->update_all();
9385 CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
9386 GenCollectedHeap* gch = GenCollectedHeap::heap();
9387 CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
9388 assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
9389 "Wrong gc statistics type");
9390 counters->update_counters(gc_stats_l);
9391 }
9392 }
9394 void ASConcurrentMarkSweepGeneration::update_counters(size_t used) {
9395 if (UsePerfData) {
9396 _space_counters->update_used(used);
9397 _space_counters->update_capacity();
9398 _gen_counters->update_all();
9400 CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
9401 GenCollectedHeap* gch = GenCollectedHeap::heap();
9402 CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
9403 assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
9404 "Wrong gc statistics type");
9405 counters->update_counters(gc_stats_l);
9406 }
9407 }
9409 void ASConcurrentMarkSweepGeneration::shrink_by(size_t desired_bytes) {
9410 assert_locked_or_safepoint(Heap_lock);
9411 assert_lock_strong(freelistLock());
9412 HeapWord* old_end = _cmsSpace->end();
9413 HeapWord* unallocated_start = _cmsSpace->unallocated_block();
9414 assert(old_end >= unallocated_start, "Miscalculation of unallocated_start");
9415 FreeChunk* chunk_at_end = find_chunk_at_end();
9416 if (chunk_at_end == NULL) {
9417 // No room to shrink
9418 if (PrintGCDetails && Verbose) {
9419 gclog_or_tty->print_cr("No room to shrink: old_end "
9420 PTR_FORMAT " unallocated_start " PTR_FORMAT
9421 " chunk_at_end " PTR_FORMAT,
9422 old_end, unallocated_start, chunk_at_end);
9423 }
9424 return;
9425 } else {
9427 // Find the chunk at the end of the space and determine
9428 // how much it can be shrunk.
9429 size_t shrinkable_size_in_bytes = chunk_at_end->size();
9430 size_t aligned_shrinkable_size_in_bytes =
9431 align_size_down(shrinkable_size_in_bytes, os::vm_page_size());
9432 assert(unallocated_start <= (HeapWord*) chunk_at_end->end(),
9433 "Inconsistent chunk at end of space");
9434 size_t bytes = MIN2(desired_bytes, aligned_shrinkable_size_in_bytes);
9435 size_t word_size_before = heap_word_size(_virtual_space.committed_size());
9437 // Shrink the underlying space
9438 _virtual_space.shrink_by(bytes);
9439 if (PrintGCDetails && Verbose) {
9440 gclog_or_tty->print_cr("ConcurrentMarkSweepGeneration::shrink_by:"
9441 " desired_bytes " SIZE_FORMAT
9442 " shrinkable_size_in_bytes " SIZE_FORMAT
9443 " aligned_shrinkable_size_in_bytes " SIZE_FORMAT
9444 " bytes " SIZE_FORMAT,
9445 desired_bytes, shrinkable_size_in_bytes,
9446 aligned_shrinkable_size_in_bytes, bytes);
9447 gclog_or_tty->print_cr(" old_end " SIZE_FORMAT
9448 " unallocated_start " SIZE_FORMAT,
9449 old_end, unallocated_start);
9450 }
9452 // If the space did shrink (shrinking is not guaranteed),
9453 // shrink the chunk at the end by the appropriate amount.
9454 if (((HeapWord*)_virtual_space.high()) < old_end) {
9455 size_t new_word_size =
9456 heap_word_size(_virtual_space.committed_size());
9458 // Have to remove the chunk from the dictionary because it is changing
9459 // size and might be someplace elsewhere in the dictionary.
9461 // Get the chunk at end, shrink it, and put it
9462 // back.
9463 _cmsSpace->removeChunkFromDictionary(chunk_at_end);
9464 size_t word_size_change = word_size_before - new_word_size;
9465 size_t chunk_at_end_old_size = chunk_at_end->size();
9466 assert(chunk_at_end_old_size >= word_size_change,
9467 "Shrink is too large");
9468 chunk_at_end->set_size(chunk_at_end_old_size -
9469 word_size_change);
9470 _cmsSpace->freed((HeapWord*) chunk_at_end->end(),
9471 word_size_change);
9473 _cmsSpace->returnChunkToDictionary(chunk_at_end);
9475 MemRegion mr(_cmsSpace->bottom(), new_word_size);
9476 _bts->resize(new_word_size); // resize the block offset shared array
9477 Universe::heap()->barrier_set()->resize_covered_region(mr);
9478 _cmsSpace->assert_locked();
9479 _cmsSpace->set_end((HeapWord*)_virtual_space.high());
9481 NOT_PRODUCT(_cmsSpace->dictionary()->verify());
9483 // update the space and generation capacity counters
9484 if (UsePerfData) {
9485 _space_counters->update_capacity();
9486 _gen_counters->update_all();
9487 }
9489 if (Verbose && PrintGCDetails) {
9490 size_t new_mem_size = _virtual_space.committed_size();
9491 size_t old_mem_size = new_mem_size + bytes;
9492 gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
9493 name(), old_mem_size/K, bytes/K, new_mem_size/K);
9494 }
9495 }
9497 assert(_cmsSpace->unallocated_block() <= _cmsSpace->end(),
9498 "Inconsistency at end of space");
9499 assert(chunk_at_end->end() == (uintptr_t*) _cmsSpace->end(),
9500 "Shrinking is inconsistent");
9501 return;
9502 }
9503 }
9504 // Transfer some number of overflown objects to usual marking
9505 // stack. Return true if some objects were transferred.
9506 bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
9507 size_t num = MIN2((size_t)(_mark_stack->capacity() - _mark_stack->length())/4,
9508 (size_t)ParGCDesiredObjsFromOverflowList);
9510 bool res = _collector->take_from_overflow_list(num, _mark_stack);
9511 assert(_collector->overflow_list_is_empty() || res,
9512 "If list is not empty, we should have taken something");
9513 assert(!res || !_mark_stack->isEmpty(),
9514 "If we took something, it should now be on our stack");
9515 return res;
9516 }
9518 size_t MarkDeadObjectsClosure::do_blk(HeapWord* addr) {
9519 size_t res = _sp->block_size_no_stall(addr, _collector);
9520 if (_sp->block_is_obj(addr)) {
9521 if (_live_bit_map->isMarked(addr)) {
9522 // It can't have been dead in a previous cycle
9523 guarantee(!_dead_bit_map->isMarked(addr), "No resurrection!");
9524 } else {
9525 _dead_bit_map->mark(addr); // mark the dead object
9526 }
9527 }
9528 // Could be 0, if the block size could not be computed without stalling.
9529 return res;
9530 }
9532 TraceCMSMemoryManagerStats::TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase, GCCause::Cause cause): TraceMemoryManagerStats() {
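  // The three CMS phases are reported as parts of a single collection:
  // InitialMarking records the GC begin time and pre-GC usage, FinalMarking
  // only accumulates GC time, and Sweeping records peak and post-GC usage,
  // the GC end time, and counts the collection.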
9534 switch (phase) {
9535 case CMSCollector::InitialMarking:
9536 initialize(true /* fullGC */ ,
9537 cause /* cause of the GC */,
9538 true /* recordGCBeginTime */,
9539 true /* recordPreGCUsage */,
9540 false /* recordPeakUsage */,
9541 false /* recordPostGCusage */,
9542 true /* recordAccumulatedGCTime */,
9543 false /* recordGCEndTime */,
9544 false /* countCollection */ );
9545 break;
9547 case CMSCollector::FinalMarking:
9548 initialize(true /* fullGC */ ,
9549 cause /* cause of the GC */,
9550 false /* recordGCBeginTime */,
9551 false /* recordPreGCUsage */,
9552 false /* recordPeakUsage */,
9553 false /* recordPostGCusage */,
9554 true /* recordAccumulatedGCTime */,
9555 false /* recordGCEndTime */,
9556 false /* countCollection */ );
9557 break;
9559 case CMSCollector::Sweeping:
9560 initialize(true /* fullGC */ ,
9561 cause /* cause of the GC */,
9562 false /* recordGCBeginTime */,
9563 false /* recordPreGCUsage */,
9564 true /* recordPeakUsage */,
9565 true /* recordPostGCusage */,
9566 false /* recordAccumulatedGCTime */,
9567 true /* recordGCEndTime */,
9568 true /* countCollection */ );
9569 break;
9571 default:
9572 ShouldNotReachHere();
9573 }
9574 }