Tue, 23 Nov 2010 13:22:55 -0800
6989984: Use standard include model for Hospot
Summary: Replaced MakeDeps and the includeDB files with more standardized solutions.
Reviewed-by: coleenp, kvn, kamg
1 /*
2 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "classfile/symbolTable.hpp"
27 #include "classfile/systemDictionary.hpp"
28 #include "code/codeCache.hpp"
29 #include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
30 #include "gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp"
31 #include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
32 #include "gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp"
33 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
34 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
35 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
36 #include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp"
37 #include "gc_implementation/parNew/parNewGeneration.hpp"
38 #include "gc_implementation/shared/collectorCounters.hpp"
39 #include "gc_implementation/shared/isGCActiveMark.hpp"
40 #include "gc_interface/collectedHeap.inline.hpp"
41 #include "memory/cardTableRS.hpp"
42 #include "memory/collectorPolicy.hpp"
43 #include "memory/gcLocker.inline.hpp"
44 #include "memory/genCollectedHeap.hpp"
45 #include "memory/genMarkSweep.hpp"
46 #include "memory/genOopClosures.inline.hpp"
47 #include "memory/iterator.hpp"
48 #include "memory/referencePolicy.hpp"
49 #include "memory/resourceArea.hpp"
50 #include "oops/oop.inline.hpp"
51 #include "prims/jvmtiExport.hpp"
52 #include "runtime/globals_extension.hpp"
53 #include "runtime/handles.inline.hpp"
54 #include "runtime/java.hpp"
55 #include "runtime/vmThread.hpp"
56 #include "services/memoryService.hpp"
57 #include "services/runtimeService.hpp"
59 // statics
60 CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
61 bool CMSCollector::_full_gc_requested = false;
63 //////////////////////////////////////////////////////////////////
64 // In support of CMS/VM thread synchronization
65 //////////////////////////////////////////////////////////////////
66 // We split use of the CGC_lock into 2 "levels".
67 // The low-level locking is of the usual CGC_lock monitor. We introduce
68 // a higher level "token" (hereafter "CMS token") built on top of the
69 // low level monitor (hereafter "CGC lock").
70 // The token-passing protocol gives priority to the VM thread. The
71 // CMS-lock doesn't provide any fairness guarantees, but clients
72 // should ensure that it is only held for very short, bounded
73 // durations.
74 //
75 // When either of the CMS thread or the VM thread is involved in
76 // collection operations during which it does not want the other
77 // thread to interfere, it obtains the CMS token.
78 //
79 // If either thread tries to get the token while the other has
80 // it, that thread waits. However, if the VM thread and CMS thread
81 // both want the token, then the VM thread gets priority while the
82 // CMS thread waits. This ensures, for instance, that the "concurrent"
83 // phases of the CMS thread's work do not block out the VM thread
84 // for long periods of time as the CMS thread continues to hog
85 // the token. (See bug 4616232).
86 //
87 // The baton-passing functions are, however, controlled by the
88 // flags _foregroundGCShouldWait and _foregroundGCIsActive,
89 // and here the low-level CMS lock, not the high level token,
90 // ensures mutual exclusion.
91 //
92 // Two important conditions that we have to satisfy:
93 // 1. if a thread does a low-level wait on the CMS lock, then it
94 // relinquishes the CMS token if it were holding that token
95 // when it acquired the low-level CMS lock.
96 // 2. any low-level notifications on the low-level lock
97 // should only be sent when a thread has relinquished the token.
98 //
99 // In the absence of either property, we'd have potential deadlock.
100 //
101 // We protect each of the CMS (concurrent and sequential) phases
102 // with the CMS _token_, not the CMS _lock_.
103 //
104 // The only code protected by CMS lock is the token acquisition code
105 // itself, see ConcurrentMarkSweepThread::[de]synchronize(), and the
106 // baton-passing code.
107 //
108 // Unfortunately, i couldn't come up with a good abstraction to factor and
109 // hide the naked CGC_lock manipulation in the baton-passing code
110 // further below. That's something we should try to do. Also, the proof
111 // of correctness of this 2-level locking scheme is far from obvious,
112 // and potentially quite slippery. We have an uneasy supsicion, for instance,
113 // that there may be a theoretical possibility of delay/starvation in the
114 // low-level lock/wait/notify scheme used for the baton-passing because of
115 // potential intereference with the priority scheme embodied in the
116 // CMS-token-passing protocol. See related comments at a CGC_lock->wait()
117 // invocation further below and marked with "XXX 20011219YSR".
118 // Indeed, as we note elsewhere, this may become yet more slippery
119 // in the presence of multiple CMS and/or multiple VM threads. XXX
121 class CMSTokenSync: public StackObj {
122 private:
123 bool _is_cms_thread;
124 public:
125 CMSTokenSync(bool is_cms_thread):
126 _is_cms_thread(is_cms_thread) {
127 assert(is_cms_thread == Thread::current()->is_ConcurrentGC_thread(),
128 "Incorrect argument to constructor");
129 ConcurrentMarkSweepThread::synchronize(_is_cms_thread);
130 }
132 ~CMSTokenSync() {
133 assert(_is_cms_thread ?
134 ConcurrentMarkSweepThread::cms_thread_has_cms_token() :
135 ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
136 "Incorrect state");
137 ConcurrentMarkSweepThread::desynchronize(_is_cms_thread);
138 }
139 };
141 // Convenience class that does a CMSTokenSync, and then acquires
142 // upto three locks.
143 class CMSTokenSyncWithLocks: public CMSTokenSync {
144 private:
145 // Note: locks are acquired in textual declaration order
146 // and released in the opposite order
147 MutexLockerEx _locker1, _locker2, _locker3;
148 public:
149 CMSTokenSyncWithLocks(bool is_cms_thread, Mutex* mutex1,
150 Mutex* mutex2 = NULL, Mutex* mutex3 = NULL):
151 CMSTokenSync(is_cms_thread),
152 _locker1(mutex1, Mutex::_no_safepoint_check_flag),
153 _locker2(mutex2, Mutex::_no_safepoint_check_flag),
154 _locker3(mutex3, Mutex::_no_safepoint_check_flag)
155 { }
156 };
159 // Wrapper class to temporarily disable icms during a foreground cms collection.
160 class ICMSDisabler: public StackObj {
161 public:
162 // The ctor disables icms and wakes up the thread so it notices the change;
163 // the dtor re-enables icms. Note that the CMSCollector methods will check
164 // CMSIncrementalMode.
165 ICMSDisabler() { CMSCollector::disable_icms(); CMSCollector::start_icms(); }
166 ~ICMSDisabler() { CMSCollector::enable_icms(); }
167 };
169 //////////////////////////////////////////////////////////////////
170 // Concurrent Mark-Sweep Generation /////////////////////////////
171 //////////////////////////////////////////////////////////////////
173 NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;)
175 // This struct contains per-thread things necessary to support parallel
176 // young-gen collection.
177 class CMSParGCThreadState: public CHeapObj {
178 public:
179 CFLS_LAB lab;
180 PromotionInfo promo;
182 // Constructor.
183 CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
184 promo.setSpace(cfls);
185 }
186 };
188 ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
189 ReservedSpace rs, size_t initial_byte_size, int level,
190 CardTableRS* ct, bool use_adaptive_freelists,
191 FreeBlockDictionary::DictionaryChoice dictionaryChoice) :
192 CardGeneration(rs, initial_byte_size, level, ct),
193 _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
194 _debug_collection_type(Concurrent_collection_type)
195 {
196 HeapWord* bottom = (HeapWord*) _virtual_space.low();
197 HeapWord* end = (HeapWord*) _virtual_space.high();
199 _direct_allocated_words = 0;
200 NOT_PRODUCT(
201 _numObjectsPromoted = 0;
202 _numWordsPromoted = 0;
203 _numObjectsAllocated = 0;
204 _numWordsAllocated = 0;
205 )
207 _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end),
208 use_adaptive_freelists,
209 dictionaryChoice);
210 NOT_PRODUCT(debug_cms_space = _cmsSpace;)
211 if (_cmsSpace == NULL) {
212 vm_exit_during_initialization(
213 "CompactibleFreeListSpace allocation failure");
214 }
215 _cmsSpace->_gen = this;
217 _gc_stats = new CMSGCStats();
219 // Verify the assumption that FreeChunk::_prev and OopDesc::_klass
220 // offsets match. The ability to tell free chunks from objects
221 // depends on this property.
222 debug_only(
223 FreeChunk* junk = NULL;
224 assert(UseCompressedOops ||
225 junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
226 "Offset of FreeChunk::_prev within FreeChunk must match"
227 " that of OopDesc::_klass within OopDesc");
228 )
229 if (CollectedHeap::use_parallel_gc_threads()) {
230 typedef CMSParGCThreadState* CMSParGCThreadStatePtr;
231 _par_gc_thread_states =
232 NEW_C_HEAP_ARRAY(CMSParGCThreadStatePtr, ParallelGCThreads);
233 if (_par_gc_thread_states == NULL) {
234 vm_exit_during_initialization("Could not allocate par gc structs");
235 }
236 for (uint i = 0; i < ParallelGCThreads; i++) {
237 _par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace());
238 if (_par_gc_thread_states[i] == NULL) {
239 vm_exit_during_initialization("Could not allocate par gc structs");
240 }
241 }
242 } else {
243 _par_gc_thread_states = NULL;
244 }
245 _incremental_collection_failed = false;
246 // The "dilatation_factor" is the expansion that can occur on
247 // account of the fact that the minimum object size in the CMS
248 // generation may be larger than that in, say, a contiguous young
249 // generation.
250 // Ideally, in the calculation below, we'd compute the dilatation
251 // factor as: MinChunkSize/(promoting_gen's min object size)
252 // Since we do not have such a general query interface for the
253 // promoting generation, we'll instead just use the mimimum
254 // object size (which today is a header's worth of space);
255 // note that all arithmetic is in units of HeapWords.
256 assert(MinChunkSize >= CollectedHeap::min_fill_size(), "just checking");
257 assert(_dilatation_factor >= 1.0, "from previous assert");
258 }
261 // The field "_initiating_occupancy" represents the occupancy percentage
262 // at which we trigger a new collection cycle. Unless explicitly specified
263 // via CMSInitiating[Perm]OccupancyFraction (argument "io" below), it
264 // is calculated by:
265 //
266 // Let "f" be MinHeapFreeRatio in
267 //
268 // _intiating_occupancy = 100-f +
269 // f * (CMSTrigger[Perm]Ratio/100)
270 // where CMSTrigger[Perm]Ratio is the argument "tr" below.
271 //
272 // That is, if we assume the heap is at its desired maximum occupancy at the
273 // end of a collection, we let CMSTrigger[Perm]Ratio of the (purported) free
274 // space be allocated before initiating a new collection cycle.
275 //
276 void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, intx tr) {
277 assert(io <= 100 && tr >= 0 && tr <= 100, "Check the arguments");
278 if (io >= 0) {
279 _initiating_occupancy = (double)io / 100.0;
280 } else {
281 _initiating_occupancy = ((100 - MinHeapFreeRatio) +
282 (double)(tr * MinHeapFreeRatio) / 100.0)
283 / 100.0;
284 }
285 }
287 void ConcurrentMarkSweepGeneration::ref_processor_init() {
288 assert(collector() != NULL, "no collector");
289 collector()->ref_processor_init();
290 }
292 void CMSCollector::ref_processor_init() {
293 if (_ref_processor == NULL) {
294 // Allocate and initialize a reference processor
295 _ref_processor = ReferenceProcessor::create_ref_processor(
296 _span, // span
297 _cmsGen->refs_discovery_is_atomic(), // atomic_discovery
298 _cmsGen->refs_discovery_is_mt(), // mt_discovery
299 &_is_alive_closure,
300 ParallelGCThreads,
301 ParallelRefProcEnabled);
302 // Initialize the _ref_processor field of CMSGen
303 _cmsGen->set_ref_processor(_ref_processor);
305 // Allocate a dummy ref processor for perm gen.
306 ReferenceProcessor* rp2 = new ReferenceProcessor();
307 if (rp2 == NULL) {
308 vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
309 }
310 _permGen->set_ref_processor(rp2);
311 }
312 }
314 CMSAdaptiveSizePolicy* CMSCollector::size_policy() {
315 GenCollectedHeap* gch = GenCollectedHeap::heap();
316 assert(gch->kind() == CollectedHeap::GenCollectedHeap,
317 "Wrong type of heap");
318 CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
319 gch->gen_policy()->size_policy();
320 assert(sp->is_gc_cms_adaptive_size_policy(),
321 "Wrong type of size policy");
322 return sp;
323 }
325 CMSGCAdaptivePolicyCounters* CMSCollector::gc_adaptive_policy_counters() {
326 CMSGCAdaptivePolicyCounters* results =
327 (CMSGCAdaptivePolicyCounters*) collector_policy()->counters();
328 assert(
329 results->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
330 "Wrong gc policy counter kind");
331 return results;
332 }
335 void ConcurrentMarkSweepGeneration::initialize_performance_counters() {
337 const char* gen_name = "old";
339 // Generation Counters - generation 1, 1 subspace
340 _gen_counters = new GenerationCounters(gen_name, 1, 1, &_virtual_space);
342 _space_counters = new GSpaceCounters(gen_name, 0,
343 _virtual_space.reserved_size(),
344 this, _gen_counters);
345 }
347 CMSStats::CMSStats(ConcurrentMarkSweepGeneration* cms_gen, unsigned int alpha):
348 _cms_gen(cms_gen)
349 {
350 assert(alpha <= 100, "bad value");
351 _saved_alpha = alpha;
353 // Initialize the alphas to the bootstrap value of 100.
354 _gc0_alpha = _cms_alpha = 100;
356 _cms_begin_time.update();
357 _cms_end_time.update();
359 _gc0_duration = 0.0;
360 _gc0_period = 0.0;
361 _gc0_promoted = 0;
363 _cms_duration = 0.0;
364 _cms_period = 0.0;
365 _cms_allocated = 0;
367 _cms_used_at_gc0_begin = 0;
368 _cms_used_at_gc0_end = 0;
369 _allow_duty_cycle_reduction = false;
370 _valid_bits = 0;
371 _icms_duty_cycle = CMSIncrementalDutyCycle;
372 }
374 double CMSStats::cms_free_adjustment_factor(size_t free) const {
375 // TBD: CR 6909490
376 return 1.0;
377 }
379 void CMSStats::adjust_cms_free_adjustment_factor(bool fail, size_t free) {
380 }
382 // If promotion failure handling is on use
383 // the padded average size of the promotion for each
384 // young generation collection.
385 double CMSStats::time_until_cms_gen_full() const {
386 size_t cms_free = _cms_gen->cmsSpace()->free();
387 GenCollectedHeap* gch = GenCollectedHeap::heap();
388 size_t expected_promotion = MIN2(gch->get_gen(0)->capacity(),
389 (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
390 if (cms_free > expected_promotion) {
391 // Start a cms collection if there isn't enough space to promote
392 // for the next minor collection. Use the padded average as
393 // a safety factor.
394 cms_free -= expected_promotion;
396 // Adjust by the safety factor.
397 double cms_free_dbl = (double)cms_free;
398 double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor)/100.0;
399 // Apply a further correction factor which tries to adjust
400 // for recent occurance of concurrent mode failures.
401 cms_adjustment = cms_adjustment * cms_free_adjustment_factor(cms_free);
402 cms_free_dbl = cms_free_dbl * cms_adjustment;
404 if (PrintGCDetails && Verbose) {
405 gclog_or_tty->print_cr("CMSStats::time_until_cms_gen_full: cms_free "
406 SIZE_FORMAT " expected_promotion " SIZE_FORMAT,
407 cms_free, expected_promotion);
408 gclog_or_tty->print_cr(" cms_free_dbl %f cms_consumption_rate %f",
409 cms_free_dbl, cms_consumption_rate() + 1.0);
410 }
411 // Add 1 in case the consumption rate goes to zero.
412 return cms_free_dbl / (cms_consumption_rate() + 1.0);
413 }
414 return 0.0;
415 }
417 // Compare the duration of the cms collection to the
418 // time remaining before the cms generation is empty.
419 // Note that the time from the start of the cms collection
420 // to the start of the cms sweep (less than the total
421 // duration of the cms collection) can be used. This
422 // has been tried and some applications experienced
423 // promotion failures early in execution. This was
424 // possibly because the averages were not accurate
425 // enough at the beginning.
426 double CMSStats::time_until_cms_start() const {
427 // We add "gc0_period" to the "work" calculation
428 // below because this query is done (mostly) at the
429 // end of a scavenge, so we need to conservatively
430 // account for that much possible delay
431 // in the query so as to avoid concurrent mode failures
432 // due to starting the collection just a wee bit too
433 // late.
434 double work = cms_duration() + gc0_period();
435 double deadline = time_until_cms_gen_full();
436 // If a concurrent mode failure occurred recently, we want to be
437 // more conservative and halve our expected time_until_cms_gen_full()
438 if (work > deadline) {
439 if (Verbose && PrintGCDetails) {
440 gclog_or_tty->print(
441 " CMSCollector: collect because of anticipated promotion "
442 "before full %3.7f + %3.7f > %3.7f ", cms_duration(),
443 gc0_period(), time_until_cms_gen_full());
444 }
445 return 0.0;
446 }
447 return work - deadline;
448 }
450 // Return a duty cycle based on old_duty_cycle and new_duty_cycle, limiting the
451 // amount of change to prevent wild oscillation.
452 unsigned int CMSStats::icms_damped_duty_cycle(unsigned int old_duty_cycle,
453 unsigned int new_duty_cycle) {
454 assert(old_duty_cycle <= 100, "bad input value");
455 assert(new_duty_cycle <= 100, "bad input value");
457 // Note: use subtraction with caution since it may underflow (values are
458 // unsigned). Addition is safe since we're in the range 0-100.
459 unsigned int damped_duty_cycle = new_duty_cycle;
460 if (new_duty_cycle < old_duty_cycle) {
461 const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 5U);
462 if (new_duty_cycle + largest_delta < old_duty_cycle) {
463 damped_duty_cycle = old_duty_cycle - largest_delta;
464 }
465 } else if (new_duty_cycle > old_duty_cycle) {
466 const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 15U);
467 if (new_duty_cycle > old_duty_cycle + largest_delta) {
468 damped_duty_cycle = MIN2(old_duty_cycle + largest_delta, 100U);
469 }
470 }
471 assert(damped_duty_cycle <= 100, "invalid duty cycle computed");
473 if (CMSTraceIncrementalPacing) {
474 gclog_or_tty->print(" [icms_damped_duty_cycle(%d,%d) = %d] ",
475 old_duty_cycle, new_duty_cycle, damped_duty_cycle);
476 }
477 return damped_duty_cycle;
478 }
480 unsigned int CMSStats::icms_update_duty_cycle_impl() {
481 assert(CMSIncrementalPacing && valid(),
482 "should be handled in icms_update_duty_cycle()");
484 double cms_time_so_far = cms_timer().seconds();
485 double scaled_duration = cms_duration_per_mb() * _cms_used_at_gc0_end / M;
486 double scaled_duration_remaining = fabsd(scaled_duration - cms_time_so_far);
488 // Avoid division by 0.
489 double time_until_full = MAX2(time_until_cms_gen_full(), 0.01);
490 double duty_cycle_dbl = 100.0 * scaled_duration_remaining / time_until_full;
492 unsigned int new_duty_cycle = MIN2((unsigned int)duty_cycle_dbl, 100U);
493 if (new_duty_cycle > _icms_duty_cycle) {
494 // Avoid very small duty cycles (1 or 2); 0 is allowed.
495 if (new_duty_cycle > 2) {
496 _icms_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle,
497 new_duty_cycle);
498 }
499 } else if (_allow_duty_cycle_reduction) {
500 // The duty cycle is reduced only once per cms cycle (see record_cms_end()).
501 new_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle, new_duty_cycle);
502 // Respect the minimum duty cycle.
503 unsigned int min_duty_cycle = (unsigned int)CMSIncrementalDutyCycleMin;
504 _icms_duty_cycle = MAX2(new_duty_cycle, min_duty_cycle);
505 }
507 if (PrintGCDetails || CMSTraceIncrementalPacing) {
508 gclog_or_tty->print(" icms_dc=%d ", _icms_duty_cycle);
509 }
511 _allow_duty_cycle_reduction = false;
512 return _icms_duty_cycle;
513 }
515 #ifndef PRODUCT
516 void CMSStats::print_on(outputStream *st) const {
517 st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
518 st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
519 gc0_duration(), gc0_period(), gc0_promoted());
520 st->print(",cms_dur=%g,cms_dur_per_mb=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
521 cms_duration(), cms_duration_per_mb(),
522 cms_period(), cms_allocated());
523 st->print(",cms_since_beg=%g,cms_since_end=%g",
524 cms_time_since_begin(), cms_time_since_end());
525 st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
526 _cms_used_at_gc0_begin, _cms_used_at_gc0_end);
527 if (CMSIncrementalMode) {
528 st->print(",dc=%d", icms_duty_cycle());
529 }
531 if (valid()) {
532 st->print(",promo_rate=%g,cms_alloc_rate=%g",
533 promotion_rate(), cms_allocation_rate());
534 st->print(",cms_consumption_rate=%g,time_until_full=%g",
535 cms_consumption_rate(), time_until_cms_gen_full());
536 }
537 st->print(" ");
538 }
539 #endif // #ifndef PRODUCT
541 CMSCollector::CollectorState CMSCollector::_collectorState =
542 CMSCollector::Idling;
543 bool CMSCollector::_foregroundGCIsActive = false;
544 bool CMSCollector::_foregroundGCShouldWait = false;
546 CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
547 ConcurrentMarkSweepGeneration* permGen,
548 CardTableRS* ct,
549 ConcurrentMarkSweepPolicy* cp):
550 _cmsGen(cmsGen),
551 _permGen(permGen),
552 _ct(ct),
553 _ref_processor(NULL), // will be set later
554 _conc_workers(NULL), // may be set later
555 _abort_preclean(false),
556 _start_sampling(false),
557 _between_prologue_and_epilogue(false),
558 _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
559 _perm_gen_verify_bit_map(0, -1 /* no mutex */, "No_lock"),
560 _modUnionTable((CardTableModRefBS::card_shift - LogHeapWordSize),
561 -1 /* lock-free */, "No_lock" /* dummy */),
562 _modUnionClosure(&_modUnionTable),
563 _modUnionClosurePar(&_modUnionTable),
564 // Adjust my span to cover old (cms) gen and perm gen
565 _span(cmsGen->reserved()._union(permGen->reserved())),
566 // Construct the is_alive_closure with _span & markBitMap
567 _is_alive_closure(_span, &_markBitMap),
568 _restart_addr(NULL),
569 _overflow_list(NULL),
570 _stats(cmsGen),
571 _eden_chunk_array(NULL), // may be set in ctor body
572 _eden_chunk_capacity(0), // -- ditto --
573 _eden_chunk_index(0), // -- ditto --
574 _survivor_plab_array(NULL), // -- ditto --
575 _survivor_chunk_array(NULL), // -- ditto --
576 _survivor_chunk_capacity(0), // -- ditto --
577 _survivor_chunk_index(0), // -- ditto --
578 _ser_pmc_preclean_ovflw(0),
579 _ser_kac_preclean_ovflw(0),
580 _ser_pmc_remark_ovflw(0),
581 _par_pmc_remark_ovflw(0),
582 _ser_kac_ovflw(0),
583 _par_kac_ovflw(0),
584 #ifndef PRODUCT
585 _num_par_pushes(0),
586 #endif
587 _collection_count_start(0),
588 _verifying(false),
589 _icms_start_limit(NULL),
590 _icms_stop_limit(NULL),
591 _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
592 _completed_initialization(false),
593 _collector_policy(cp),
594 _should_unload_classes(false),
595 _concurrent_cycles_since_last_unload(0),
596 _roots_scanning_options(0),
597 _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
598 _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding)
599 {
600 if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
601 ExplicitGCInvokesConcurrent = true;
602 }
603 // Now expand the span and allocate the collection support structures
604 // (MUT, marking bit map etc.) to cover both generations subject to
605 // collection.
607 // First check that _permGen is adjacent to _cmsGen and above it.
608 assert( _cmsGen->reserved().word_size() > 0
609 && _permGen->reserved().word_size() > 0,
610 "generations should not be of zero size");
611 assert(_cmsGen->reserved().intersection(_permGen->reserved()).is_empty(),
612 "_cmsGen and _permGen should not overlap");
613 assert(_cmsGen->reserved().end() == _permGen->reserved().start(),
614 "_cmsGen->end() different from _permGen->start()");
616 // For use by dirty card to oop closures.
617 _cmsGen->cmsSpace()->set_collector(this);
618 _permGen->cmsSpace()->set_collector(this);
620 // Allocate MUT and marking bit map
621 {
622 MutexLockerEx x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag);
623 if (!_markBitMap.allocate(_span)) {
624 warning("Failed to allocate CMS Bit Map");
625 return;
626 }
627 assert(_markBitMap.covers(_span), "_markBitMap inconsistency?");
628 }
629 {
630 _modUnionTable.allocate(_span);
631 assert(_modUnionTable.covers(_span), "_modUnionTable inconsistency?");
632 }
634 if (!_markStack.allocate(MarkStackSize)) {
635 warning("Failed to allocate CMS Marking Stack");
636 return;
637 }
638 if (!_revisitStack.allocate(CMSRevisitStackSize)) {
639 warning("Failed to allocate CMS Revisit Stack");
640 return;
641 }
643 // Support for multi-threaded concurrent phases
644 if (CollectedHeap::use_parallel_gc_threads() && CMSConcurrentMTEnabled) {
645 if (FLAG_IS_DEFAULT(ConcGCThreads)) {
646 // just for now
647 FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3)/4);
648 }
649 if (ConcGCThreads > 1) {
650 _conc_workers = new YieldingFlexibleWorkGang("Parallel CMS Threads",
651 ConcGCThreads, true);
652 if (_conc_workers == NULL) {
653 warning("GC/CMS: _conc_workers allocation failure: "
654 "forcing -CMSConcurrentMTEnabled");
655 CMSConcurrentMTEnabled = false;
656 } else {
657 _conc_workers->initialize_workers();
658 }
659 } else {
660 CMSConcurrentMTEnabled = false;
661 }
662 }
663 if (!CMSConcurrentMTEnabled) {
664 ConcGCThreads = 0;
665 } else {
666 // Turn off CMSCleanOnEnter optimization temporarily for
667 // the MT case where it's not fixed yet; see 6178663.
668 CMSCleanOnEnter = false;
669 }
670 assert((_conc_workers != NULL) == (ConcGCThreads > 1),
671 "Inconsistency");
673 // Parallel task queues; these are shared for the
674 // concurrent and stop-world phases of CMS, but
675 // are not shared with parallel scavenge (ParNew).
676 {
677 uint i;
678 uint num_queues = (uint) MAX2(ParallelGCThreads, ConcGCThreads);
680 if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled
681 || ParallelRefProcEnabled)
682 && num_queues > 0) {
683 _task_queues = new OopTaskQueueSet(num_queues);
684 if (_task_queues == NULL) {
685 warning("task_queues allocation failure.");
686 return;
687 }
688 _hash_seed = NEW_C_HEAP_ARRAY(int, num_queues);
689 if (_hash_seed == NULL) {
690 warning("_hash_seed array allocation failure");
691 return;
692 }
694 typedef Padded<OopTaskQueue> PaddedOopTaskQueue;
695 for (i = 0; i < num_queues; i++) {
696 PaddedOopTaskQueue *q = new PaddedOopTaskQueue();
697 if (q == NULL) {
698 warning("work_queue allocation failure.");
699 return;
700 }
701 _task_queues->register_queue(i, q);
702 }
703 for (i = 0; i < num_queues; i++) {
704 _task_queues->queue(i)->initialize();
705 _hash_seed[i] = 17; // copied from ParNew
706 }
707 }
708 }
710 _cmsGen ->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);
711 _permGen->init_initiating_occupancy(CMSInitiatingPermOccupancyFraction, CMSTriggerPermRatio);
713 // Clip CMSBootstrapOccupancy between 0 and 100.
714 _bootstrap_occupancy = ((double)MIN2((uintx)100, MAX2((uintx)0, CMSBootstrapOccupancy)))
715 /(double)100;
717 _full_gcs_since_conc_gc = 0;
719 // Now tell CMS generations the identity of their collector
720 ConcurrentMarkSweepGeneration::set_collector(this);
722 // Create & start a CMS thread for this CMS collector
723 _cmsThread = ConcurrentMarkSweepThread::start(this);
724 assert(cmsThread() != NULL, "CMS Thread should have been created");
725 assert(cmsThread()->collector() == this,
726 "CMS Thread should refer to this gen");
727 assert(CGC_lock != NULL, "Where's the CGC_lock?");
729 // Support for parallelizing young gen rescan
730 GenCollectedHeap* gch = GenCollectedHeap::heap();
731 _young_gen = gch->prev_gen(_cmsGen);
732 if (gch->supports_inline_contig_alloc()) {
733 _top_addr = gch->top_addr();
734 _end_addr = gch->end_addr();
735 assert(_young_gen != NULL, "no _young_gen");
736 _eden_chunk_index = 0;
737 _eden_chunk_capacity = (_young_gen->max_capacity()+CMSSamplingGrain)/CMSSamplingGrain;
738 _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity);
739 if (_eden_chunk_array == NULL) {
740 _eden_chunk_capacity = 0;
741 warning("GC/CMS: _eden_chunk_array allocation failure");
742 }
743 }
744 assert(_eden_chunk_array != NULL || _eden_chunk_capacity == 0, "Error");
746 // Support for parallelizing survivor space rescan
747 if (CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) {
748 const size_t max_plab_samples =
749 ((DefNewGeneration*)_young_gen)->max_survivor_size()/MinTLABSize;
751 _survivor_plab_array = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads);
752 _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, 2*max_plab_samples);
753 _cursor = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads);
754 if (_survivor_plab_array == NULL || _survivor_chunk_array == NULL
755 || _cursor == NULL) {
756 warning("Failed to allocate survivor plab/chunk array");
757 if (_survivor_plab_array != NULL) {
758 FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array);
759 _survivor_plab_array = NULL;
760 }
761 if (_survivor_chunk_array != NULL) {
762 FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array);
763 _survivor_chunk_array = NULL;
764 }
765 if (_cursor != NULL) {
766 FREE_C_HEAP_ARRAY(size_t, _cursor);
767 _cursor = NULL;
768 }
769 } else {
770 _survivor_chunk_capacity = 2*max_plab_samples;
771 for (uint i = 0; i < ParallelGCThreads; i++) {
772 HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples);
773 if (vec == NULL) {
774 warning("Failed to allocate survivor plab array");
775 for (int j = i; j > 0; j--) {
776 FREE_C_HEAP_ARRAY(HeapWord*, _survivor_plab_array[j-1].array());
777 }
778 FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array);
779 FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array);
780 _survivor_plab_array = NULL;
781 _survivor_chunk_array = NULL;
782 _survivor_chunk_capacity = 0;
783 break;
784 } else {
785 ChunkArray* cur =
786 ::new (&_survivor_plab_array[i]) ChunkArray(vec,
787 max_plab_samples);
788 assert(cur->end() == 0, "Should be 0");
789 assert(cur->array() == vec, "Should be vec");
790 assert(cur->capacity() == max_plab_samples, "Error");
791 }
792 }
793 }
794 }
795 assert( ( _survivor_plab_array != NULL
796 && _survivor_chunk_array != NULL)
797 || ( _survivor_chunk_capacity == 0
798 && _survivor_chunk_index == 0),
799 "Error");
801 // Choose what strong roots should be scanned depending on verification options
802 // and perm gen collection mode.
803 if (!CMSClassUnloadingEnabled) {
804 // If class unloading is disabled we want to include all classes into the root set.
805 add_root_scanning_option(SharedHeap::SO_AllClasses);
806 } else {
807 add_root_scanning_option(SharedHeap::SO_SystemClasses);
808 }
810 NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
811 _gc_counters = new CollectorCounters("CMS", 1);
812 _completed_initialization = true;
813 _inter_sweep_timer.start(); // start of time
814 #ifdef SPARC
815 // Issue a stern warning, but allow use for experimentation and debugging.
816 if (VM_Version::is_sun4v() && UseMemSetInBOT) {
817 assert(!FLAG_IS_DEFAULT(UseMemSetInBOT), "Error");
818 warning("Experimental flag -XX:+UseMemSetInBOT is known to cause instability"
819 " on sun4v; please understand that you are using at your own risk!");
820 }
821 #endif
822 }
824 const char* ConcurrentMarkSweepGeneration::name() const {
825 return "concurrent mark-sweep generation";
826 }
827 void ConcurrentMarkSweepGeneration::update_counters() {
828 if (UsePerfData) {
829 _space_counters->update_all();
830 _gen_counters->update_all();
831 }
832 }
834 // this is an optimized version of update_counters(). it takes the
835 // used value as a parameter rather than computing it.
836 //
837 void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
838 if (UsePerfData) {
839 _space_counters->update_used(used);
840 _space_counters->update_capacity();
841 _gen_counters->update_all();
842 }
843 }
845 void ConcurrentMarkSweepGeneration::print() const {
846 Generation::print();
847 cmsSpace()->print();
848 }
850 #ifndef PRODUCT
851 void ConcurrentMarkSweepGeneration::print_statistics() {
852 cmsSpace()->printFLCensus(0);
853 }
854 #endif
856 void ConcurrentMarkSweepGeneration::printOccupancy(const char *s) {
857 GenCollectedHeap* gch = GenCollectedHeap::heap();
858 if (PrintGCDetails) {
859 if (Verbose) {
860 gclog_or_tty->print(" [%d %s-%s: "SIZE_FORMAT"("SIZE_FORMAT")]",
861 level(), short_name(), s, used(), capacity());
862 } else {
863 gclog_or_tty->print(" [%d %s-%s: "SIZE_FORMAT"K("SIZE_FORMAT"K)]",
864 level(), short_name(), s, used() / K, capacity() / K);
865 }
866 }
867 if (Verbose) {
868 gclog_or_tty->print(" "SIZE_FORMAT"("SIZE_FORMAT")",
869 gch->used(), gch->capacity());
870 } else {
871 gclog_or_tty->print(" "SIZE_FORMAT"K("SIZE_FORMAT"K)",
872 gch->used() / K, gch->capacity() / K);
873 }
874 }
876 size_t
877 ConcurrentMarkSweepGeneration::contiguous_available() const {
878 // dld proposes an improvement in precision here. If the committed
879 // part of the space ends in a free block we should add that to
880 // uncommitted size in the calculation below. Will make this
881 // change later, staying with the approximation below for the
882 // time being. -- ysr.
883 return MAX2(_virtual_space.uncommitted_size(), unsafe_max_alloc_nogc());
884 }
886 size_t
887 ConcurrentMarkSweepGeneration::unsafe_max_alloc_nogc() const {
888 return _cmsSpace->max_alloc_in_words() * HeapWordSize;
889 }
891 size_t ConcurrentMarkSweepGeneration::max_available() const {
892 return free() + _virtual_space.uncommitted_size();
893 }
895 bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
896 size_t available = max_available();
897 size_t av_promo = (size_t)gc_stats()->avg_promoted()->padded_average();
898 bool res = (available >= av_promo) || (available >= max_promotion_in_bytes);
899 if (PrintGC && Verbose) {
900 gclog_or_tty->print_cr(
901 "CMS: promo attempt is%s safe: available("SIZE_FORMAT") %s av_promo("SIZE_FORMAT"),"
902 "max_promo("SIZE_FORMAT")",
903 res? "":" not", available, res? ">=":"<",
904 av_promo, max_promotion_in_bytes);
905 }
906 return res;
907 }
909 // At a promotion failure dump information on block layout in heap
910 // (cms old generation).
911 void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
912 if (CMSDumpAtPromotionFailure) {
913 cmsSpace()->dump_at_safepoint_with_locks(collector(), gclog_or_tty);
914 }
915 }
917 CompactibleSpace*
918 ConcurrentMarkSweepGeneration::first_compaction_space() const {
919 return _cmsSpace;
920 }
922 void ConcurrentMarkSweepGeneration::reset_after_compaction() {
923 // Clear the promotion information. These pointers can be adjusted
924 // along with all the other pointers into the heap but
925 // compaction is expected to be a rare event with
926 // a heap using cms so don't do it without seeing the need.
927 if (CollectedHeap::use_parallel_gc_threads()) {
928 for (uint i = 0; i < ParallelGCThreads; i++) {
929 _par_gc_thread_states[i]->promo.reset();
930 }
931 }
932 }
934 void ConcurrentMarkSweepGeneration::space_iterate(SpaceClosure* blk, bool usedOnly) {
935 blk->do_space(_cmsSpace);
936 }
938 void ConcurrentMarkSweepGeneration::compute_new_size() {
939 assert_locked_or_safepoint(Heap_lock);
941 // If incremental collection failed, we just want to expand
942 // to the limit.
943 if (incremental_collection_failed()) {
944 clear_incremental_collection_failed();
945 grow_to_reserved();
946 return;
947 }
949 size_t expand_bytes = 0;
950 double free_percentage = ((double) free()) / capacity();
951 double desired_free_percentage = (double) MinHeapFreeRatio / 100;
952 double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;
954 // compute expansion delta needed for reaching desired free percentage
955 if (free_percentage < desired_free_percentage) {
956 size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
957 assert(desired_capacity >= capacity(), "invalid expansion size");
958 expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
959 }
960 if (expand_bytes > 0) {
961 if (PrintGCDetails && Verbose) {
962 size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
963 gclog_or_tty->print_cr("\nFrom compute_new_size: ");
964 gclog_or_tty->print_cr(" Free fraction %f", free_percentage);
965 gclog_or_tty->print_cr(" Desired free fraction %f",
966 desired_free_percentage);
967 gclog_or_tty->print_cr(" Maximum free fraction %f",
968 maximum_free_percentage);
969 gclog_or_tty->print_cr(" Capactiy "SIZE_FORMAT, capacity()/1000);
970 gclog_or_tty->print_cr(" Desired capacity "SIZE_FORMAT,
971 desired_capacity/1000);
972 int prev_level = level() - 1;
973 if (prev_level >= 0) {
974 size_t prev_size = 0;
975 GenCollectedHeap* gch = GenCollectedHeap::heap();
976 Generation* prev_gen = gch->_gens[prev_level];
977 prev_size = prev_gen->capacity();
978 gclog_or_tty->print_cr(" Younger gen size "SIZE_FORMAT,
979 prev_size/1000);
980 }
981 gclog_or_tty->print_cr(" unsafe_max_alloc_nogc "SIZE_FORMAT,
982 unsafe_max_alloc_nogc()/1000);
983 gclog_or_tty->print_cr(" contiguous available "SIZE_FORMAT,
984 contiguous_available()/1000);
985 gclog_or_tty->print_cr(" Expand by "SIZE_FORMAT" (bytes)",
986 expand_bytes);
987 }
988 // safe if expansion fails
989 expand(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
990 if (PrintGCDetails && Verbose) {
991 gclog_or_tty->print_cr(" Expanded free fraction %f",
992 ((double) free()) / capacity());
993 }
994 }
995 }
997 Mutex* ConcurrentMarkSweepGeneration::freelistLock() const {
998 return cmsSpace()->freelistLock();
999 }
1001 HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size,
1002 bool tlab) {
1003 CMSSynchronousYieldRequest yr;
1004 MutexLockerEx x(freelistLock(),
1005 Mutex::_no_safepoint_check_flag);
1006 return have_lock_and_allocate(size, tlab);
1007 }
1009 HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
1010 bool tlab /* ignored */) {
1011 assert_lock_strong(freelistLock());
1012 size_t adjustedSize = CompactibleFreeListSpace::adjustObjectSize(size);
1013 HeapWord* res = cmsSpace()->allocate(adjustedSize);
1014 // Allocate the object live (grey) if the background collector has
1015 // started marking. This is necessary because the marker may
1016 // have passed this address and consequently this object will
1017 // not otherwise be greyed and would be incorrectly swept up.
1018 // Note that if this object contains references, the writing
1019 // of those references will dirty the card containing this object
1020 // allowing the object to be blackened (and its references scanned)
1021 // either during a preclean phase or at the final checkpoint.
1022 if (res != NULL) {
1023 // We may block here with an uninitialized object with
1024 // its mark-bit or P-bits not yet set. Such objects need
1025 // to be safely navigable by block_start().
1026 assert(oop(res)->klass_or_null() == NULL, "Object should be uninitialized here.");
1027 assert(!((FreeChunk*)res)->isFree(), "Error, block will look free but show wrong size");
1028 collector()->direct_allocated(res, adjustedSize);
1029 _direct_allocated_words += adjustedSize;
1030 // allocation counters
1031 NOT_PRODUCT(
1032 _numObjectsAllocated++;
1033 _numWordsAllocated += (int)adjustedSize;
1034 )
1035 }
1036 return res;
1037 }
1039 // In the case of direct allocation by mutators in a generation that
1040 // is being concurrently collected, the object must be allocated
1041 // live (grey) if the background collector has started marking.
1042 // This is necessary because the marker may
1043 // have passed this address and consequently this object will
1044 // not otherwise be greyed and would be incorrectly swept up.
1045 // Note that if this object contains references, the writing
1046 // of those references will dirty the card containing this object
1047 // allowing the object to be blackened (and its references scanned)
1048 // either during a preclean phase or at the final checkpoint.
1049 void CMSCollector::direct_allocated(HeapWord* start, size_t size) {
1050 assert(_markBitMap.covers(start, size), "Out of bounds");
1051 if (_collectorState >= Marking) {
1052 MutexLockerEx y(_markBitMap.lock(),
1053 Mutex::_no_safepoint_check_flag);
1054 // [see comments preceding SweepClosure::do_blk() below for details]
1055 // 1. need to mark the object as live so it isn't collected
1056 // 2. need to mark the 2nd bit to indicate the object may be uninitialized
1057 // 3. need to mark the end of the object so marking, precleaning or sweeping
1058 // can skip over uninitialized or unparsable objects. An allocated
1059 // object is considered uninitialized for our purposes as long as
1060 // its klass word is NULL. (Unparsable objects are those which are
1061 // initialized in the sense just described, but whose sizes can still
1062 // not be correctly determined. Note that the class of unparsable objects
1063 // can only occur in the perm gen. All old gen objects are parsable
1064 // as soon as they are initialized.)
1065 _markBitMap.mark(start); // object is live
1066 _markBitMap.mark(start + 1); // object is potentially uninitialized?
1067 _markBitMap.mark(start + size - 1);
1068 // mark end of object
1069 }
1070 // check that oop looks uninitialized
1071 assert(oop(start)->klass_or_null() == NULL, "_klass should be NULL");
1072 }
1074 void CMSCollector::promoted(bool par, HeapWord* start,
1075 bool is_obj_array, size_t obj_size) {
1076 assert(_markBitMap.covers(start), "Out of bounds");
1077 // See comment in direct_allocated() about when objects should
1078 // be allocated live.
1079 if (_collectorState >= Marking) {
1080 // we already hold the marking bit map lock, taken in
1081 // the prologue
1082 if (par) {
1083 _markBitMap.par_mark(start);
1084 } else {
1085 _markBitMap.mark(start);
1086 }
1087 // We don't need to mark the object as uninitialized (as
1088 // in direct_allocated above) because this is being done with the
1089 // world stopped and the object will be initialized by the
1090 // time the marking, precleaning or sweeping get to look at it.
1091 // But see the code for copying objects into the CMS generation,
1092 // where we need to ensure that concurrent readers of the
1093 // block offset table are able to safely navigate a block that
1094 // is in flux from being free to being allocated (and in
1095 // transition while being copied into) and subsequently
1096 // becoming a bona-fide object when the copy/promotion is complete.
1097 assert(SafepointSynchronize::is_at_safepoint(),
1098 "expect promotion only at safepoints");
1100 if (_collectorState < Sweeping) {
1101 // Mark the appropriate cards in the modUnionTable, so that
1102 // this object gets scanned before the sweep. If this is
1103 // not done, CMS generation references in the object might
1104 // not get marked.
1105 // For the case of arrays, which are otherwise precisely
1106 // marked, we need to dirty the entire array, not just its head.
1107 if (is_obj_array) {
1108 // The [par_]mark_range() method expects mr.end() below to
1109 // be aligned to the granularity of a bit's representation
1110 // in the heap. In the case of the MUT below, that's a
1111 // card size.
1112 MemRegion mr(start,
1113 (HeapWord*)round_to((intptr_t)(start + obj_size),
1114 CardTableModRefBS::card_size /* bytes */));
1115 if (par) {
1116 _modUnionTable.par_mark_range(mr);
1117 } else {
1118 _modUnionTable.mark_range(mr);
1119 }
1120 } else { // not an obj array; we can just mark the head
1121 if (par) {
1122 _modUnionTable.par_mark(start);
1123 } else {
1124 _modUnionTable.mark(start);
1125 }
1126 }
1127 }
1128 }
1129 }
1131 static inline size_t percent_of_space(Space* space, HeapWord* addr)
1132 {
1133 size_t delta = pointer_delta(addr, space->bottom());
1134 return (size_t)(delta * 100.0 / (space->capacity() / HeapWordSize));
1135 }
1137 void CMSCollector::icms_update_allocation_limits()
1138 {
1139 Generation* gen0 = GenCollectedHeap::heap()->get_gen(0);
1140 EdenSpace* eden = gen0->as_DefNewGeneration()->eden();
1142 const unsigned int duty_cycle = stats().icms_update_duty_cycle();
1143 if (CMSTraceIncrementalPacing) {
1144 stats().print();
1145 }
1147 assert(duty_cycle <= 100, "invalid duty cycle");
1148 if (duty_cycle != 0) {
1149 // The duty_cycle is a percentage between 0 and 100; convert to words and
1150 // then compute the offset from the endpoints of the space.
1151 size_t free_words = eden->free() / HeapWordSize;
1152 double free_words_dbl = (double)free_words;
1153 size_t duty_cycle_words = (size_t)(free_words_dbl * duty_cycle / 100.0);
1154 size_t offset_words = (free_words - duty_cycle_words) / 2;
1156 _icms_start_limit = eden->top() + offset_words;
1157 _icms_stop_limit = eden->end() - offset_words;
1159 // The limits may be adjusted (shifted to the right) by
1160 // CMSIncrementalOffset, to allow the application more mutator time after a
1161 // young gen gc (when all mutators were stopped) and before CMS starts and
1162 // takes away one or more cpus.
1163 if (CMSIncrementalOffset != 0) {
1164 double adjustment_dbl = free_words_dbl * CMSIncrementalOffset / 100.0;
1165 size_t adjustment = (size_t)adjustment_dbl;
1166 HeapWord* tmp_stop = _icms_stop_limit + adjustment;
1167 if (tmp_stop > _icms_stop_limit && tmp_stop < eden->end()) {
1168 _icms_start_limit += adjustment;
1169 _icms_stop_limit = tmp_stop;
1170 }
1171 }
1172 }
1173 if (duty_cycle == 0 || (_icms_start_limit == _icms_stop_limit)) {
1174 _icms_start_limit = _icms_stop_limit = eden->end();
1175 }
1177 // Install the new start limit.
1178 eden->set_soft_end(_icms_start_limit);
1180 if (CMSTraceIncrementalMode) {
1181 gclog_or_tty->print(" icms alloc limits: "
1182 PTR_FORMAT "," PTR_FORMAT
1183 " (" SIZE_FORMAT "%%," SIZE_FORMAT "%%) ",
1184 _icms_start_limit, _icms_stop_limit,
1185 percent_of_space(eden, _icms_start_limit),
1186 percent_of_space(eden, _icms_stop_limit));
1187 if (Verbose) {
1188 gclog_or_tty->print("eden: ");
1189 eden->print_on(gclog_or_tty);
1190 }
1191 }
1192 }
1194 // Any changes here should try to maintain the invariant
1195 // that if this method is called with _icms_start_limit
1196 // and _icms_stop_limit both NULL, then it should return NULL
1197 // and not notify the icms thread.
1198 HeapWord*
1199 CMSCollector::allocation_limit_reached(Space* space, HeapWord* top,
1200 size_t word_size)
1201 {
1202 // A start_limit equal to end() means the duty cycle is 0, so treat that as a
1203 // nop.
1204 if (CMSIncrementalMode && _icms_start_limit != space->end()) {
1205 if (top <= _icms_start_limit) {
1206 if (CMSTraceIncrementalMode) {
1207 space->print_on(gclog_or_tty);
1208 gclog_or_tty->stamp();
1209 gclog_or_tty->print_cr(" start limit top=" PTR_FORMAT
1210 ", new limit=" PTR_FORMAT
1211 " (" SIZE_FORMAT "%%)",
1212 top, _icms_stop_limit,
1213 percent_of_space(space, _icms_stop_limit));
1214 }
1215 ConcurrentMarkSweepThread::start_icms();
1216 assert(top < _icms_stop_limit, "Tautology");
1217 if (word_size < pointer_delta(_icms_stop_limit, top)) {
1218 return _icms_stop_limit;
1219 }
1221 // The allocation will cross both the _start and _stop limits, so do the
1222 // stop notification also and return end().
1223 if (CMSTraceIncrementalMode) {
1224 space->print_on(gclog_or_tty);
1225 gclog_or_tty->stamp();
1226 gclog_or_tty->print_cr(" +stop limit top=" PTR_FORMAT
1227 ", new limit=" PTR_FORMAT
1228 " (" SIZE_FORMAT "%%)",
1229 top, space->end(),
1230 percent_of_space(space, space->end()));
1231 }
1232 ConcurrentMarkSweepThread::stop_icms();
1233 return space->end();
1234 }
1236 if (top <= _icms_stop_limit) {
1237 if (CMSTraceIncrementalMode) {
1238 space->print_on(gclog_or_tty);
1239 gclog_or_tty->stamp();
1240 gclog_or_tty->print_cr(" stop limit top=" PTR_FORMAT
1241 ", new limit=" PTR_FORMAT
1242 " (" SIZE_FORMAT "%%)",
1243 top, space->end(),
1244 percent_of_space(space, space->end()));
1245 }
1246 ConcurrentMarkSweepThread::stop_icms();
1247 return space->end();
1248 }
1250 if (CMSTraceIncrementalMode) {
1251 space->print_on(gclog_or_tty);
1252 gclog_or_tty->stamp();
1253 gclog_or_tty->print_cr(" end limit top=" PTR_FORMAT
1254 ", new limit=" PTR_FORMAT,
1255 top, NULL);
1256 }
1257 }
1259 return NULL;
1260 }
1262 oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
1263 assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1264 // allocate, copy and if necessary update promoinfo --
1265 // delegate to underlying space.
1266 assert_lock_strong(freelistLock());
1268 #ifndef PRODUCT
1269 if (Universe::heap()->promotion_should_fail()) {
1270 return NULL;
1271 }
1272 #endif // #ifndef PRODUCT
1274 oop res = _cmsSpace->promote(obj, obj_size);
1275 if (res == NULL) {
1276 // expand and retry
1277 size_t s = _cmsSpace->expansionSpaceRequired(obj_size); // HeapWords
1278 expand(s*HeapWordSize, MinHeapDeltaBytes,
1279 CMSExpansionCause::_satisfy_promotion);
1280 // Since there's currently no next generation, we don't try to promote
1281 // into a more senior generation.
1282 assert(next_gen() == NULL, "assumption, based upon which no attempt "
1283 "is made to pass on a possibly failing "
1284 "promotion to next generation");
1285 res = _cmsSpace->promote(obj, obj_size);
1286 }
1287 if (res != NULL) {
1288 // See comment in allocate() about when objects should
1289 // be allocated live.
1290 assert(obj->is_oop(), "Will dereference klass pointer below");
1291 collector()->promoted(false, // Not parallel
1292 (HeapWord*)res, obj->is_objArray(), obj_size);
1293 // promotion counters
1294 NOT_PRODUCT(
1295 _numObjectsPromoted++;
1296 _numWordsPromoted +=
1297 (int)(CompactibleFreeListSpace::adjustObjectSize(obj->size()));
1298 )
1299 }
1300 return res;
1301 }
1304 HeapWord*
1305 ConcurrentMarkSweepGeneration::allocation_limit_reached(Space* space,
1306 HeapWord* top,
1307 size_t word_sz)
1308 {
1309 return collector()->allocation_limit_reached(space, top, word_sz);
1310 }
1312 // IMPORTANT: Notes on object size recognition in CMS.
1313 // ---------------------------------------------------
1314 // A block of storage in the CMS generation is always in
1315 // one of three states. A free block (FREE), an allocated
1316 // object (OBJECT) whose size() method reports the correct size,
1317 // and an intermediate state (TRANSIENT) in which its size cannot
1318 // be accurately determined.
1319 // STATE IDENTIFICATION: (32 bit and 64 bit w/o COOPS)
1320 // -----------------------------------------------------
1321 // FREE: klass_word & 1 == 1; mark_word holds block size
1322 //
1323 // OBJECT: klass_word installed; klass_word != 0 && klass_word & 1 == 0;
1324 // obj->size() computes correct size
1325 // [Perm Gen objects needs to be "parsable" before they can be navigated]
1326 //
1327 // TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
1328 //
1329 // STATE IDENTIFICATION: (64 bit+COOPS)
1330 // ------------------------------------
1331 // FREE: mark_word & CMS_FREE_BIT == 1; mark_word & ~CMS_FREE_BIT gives block_size
1332 //
1333 // OBJECT: klass_word installed; klass_word != 0;
1334 // obj->size() computes correct size
1335 // [Perm Gen comment above continues to hold]
1336 //
1337 // TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
1338 //
1339 //
1340 // STATE TRANSITION DIAGRAM
1341 //
1342 // mut / parnew mut / parnew
1343 // FREE --------------------> TRANSIENT ---------------------> OBJECT --|
1344 // ^ |
1345 // |------------------------ DEAD <------------------------------------|
1346 // sweep mut
1347 //
1348 // While a block is in TRANSIENT state its size cannot be determined
1349 // so readers will either need to come back later or stall until
1350 // the size can be determined. Note that for the case of direct
1351 // allocation, P-bits, when available, may be used to determine the
1352 // size of an object that may not yet have been initialized.
1354 // Things to support parallel young-gen collection.
1355 oop
1356 ConcurrentMarkSweepGeneration::par_promote(int thread_num,
1357 oop old, markOop m,
1358 size_t word_sz) {
1359 #ifndef PRODUCT
1360 if (Universe::heap()->promotion_should_fail()) {
1361 return NULL;
1362 }
1363 #endif // #ifndef PRODUCT
1365 CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1366 PromotionInfo* promoInfo = &ps->promo;
1367 // if we are tracking promotions, then first ensure space for
1368 // promotion (including spooling space for saving header if necessary).
1369 // then allocate and copy, then track promoted info if needed.
1370 // When tracking (see PromotionInfo::track()), the mark word may
1371 // be displaced and in this case restoration of the mark word
1372 // occurs in the (oop_since_save_marks_)iterate phase.
1373 if (promoInfo->tracking() && !promoInfo->ensure_spooling_space()) {
1374 // Out of space for allocating spooling buffers;
1375 // try expanding and allocating spooling buffers.
1376 if (!expand_and_ensure_spooling_space(promoInfo)) {
1377 return NULL;
1378 }
1379 }
1380 assert(promoInfo->has_spooling_space(), "Control point invariant");
1381 const size_t alloc_sz = CompactibleFreeListSpace::adjustObjectSize(word_sz);
1382 HeapWord* obj_ptr = ps->lab.alloc(alloc_sz);
1383 if (obj_ptr == NULL) {
1384 obj_ptr = expand_and_par_lab_allocate(ps, alloc_sz);
1385 if (obj_ptr == NULL) {
1386 return NULL;
1387 }
1388 }
1389 oop obj = oop(obj_ptr);
1390 OrderAccess::storestore();
1391 assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1392 assert(!((FreeChunk*)obj_ptr)->isFree(), "Error, block will look free but show wrong size");
1393 // IMPORTANT: See note on object initialization for CMS above.
1394 // Otherwise, copy the object. Here we must be careful to insert the
1395 // klass pointer last, since this marks the block as an allocated object.
1396 // Except with compressed oops it's the mark word.
1397 HeapWord* old_ptr = (HeapWord*)old;
1398 // Restore the mark word copied above.
1399 obj->set_mark(m);
1400 assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1401 assert(!((FreeChunk*)obj_ptr)->isFree(), "Error, block will look free but show wrong size");
1402 OrderAccess::storestore();
1404 if (UseCompressedOops) {
1405 // Copy gap missed by (aligned) header size calculation below
1406 obj->set_klass_gap(old->klass_gap());
1407 }
1408 if (word_sz > (size_t)oopDesc::header_size()) {
1409 Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(),
1410 obj_ptr + oopDesc::header_size(),
1411 word_sz - oopDesc::header_size());
1412 }
1414 // Now we can track the promoted object, if necessary. We take care
1415 // to delay the transition from uninitialized to full object
1416 // (i.e., insertion of klass pointer) until after, so that it
1417 // atomically becomes a promoted object.
1418 if (promoInfo->tracking()) {
1419 promoInfo->track((PromotedObject*)obj, old->klass());
1420 }
1421 assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1422 assert(!((FreeChunk*)obj_ptr)->isFree(), "Error, block will look free but show wrong size");
1423 assert(old->is_oop(), "Will use and dereference old klass ptr below");
1425 // Finally, install the klass pointer (this should be volatile).
1426 OrderAccess::storestore();
1427 obj->set_klass(old->klass());
1428 // We should now be able to calculate the right size for this object
1429 assert(obj->is_oop() && obj->size() == (int)word_sz, "Error, incorrect size computed for promoted object");
1431 collector()->promoted(true, // parallel
1432 obj_ptr, old->is_objArray(), word_sz);
1434 NOT_PRODUCT(
1435 Atomic::inc_ptr(&_numObjectsPromoted);
1436 Atomic::add_ptr(alloc_sz, &_numWordsPromoted);
1437 )
1439 return obj;
1440 }
1442 void
1443 ConcurrentMarkSweepGeneration::
1444 par_promote_alloc_undo(int thread_num,
1445 HeapWord* obj, size_t word_sz) {
1446 // CMS does not support promotion undo.
1447 ShouldNotReachHere();
1448 }
1450 void
1451 ConcurrentMarkSweepGeneration::
1452 par_promote_alloc_done(int thread_num) {
1453 CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1454 ps->lab.retire(thread_num);
1455 }
1457 void
1458 ConcurrentMarkSweepGeneration::
1459 par_oop_since_save_marks_iterate_done(int thread_num) {
1460 CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1461 ParScanWithoutBarrierClosure* dummy_cl = NULL;
1462 ps->promo.promoted_oops_iterate_nv(dummy_cl);
1463 }
1465 // XXXPERM
1466 bool ConcurrentMarkSweepGeneration::should_collect(bool full,
1467 size_t size,
1468 bool tlab)
1469 {
1470 // We allow a STW collection only if a full
1471 // collection was requested.
1472 return full || should_allocate(size, tlab); // FIX ME !!!
1473 // This and promotion failure handling are connected at the
1474 // hip and should be fixed by untying them.
1475 }
1477 bool CMSCollector::shouldConcurrentCollect() {
1478 if (_full_gc_requested) {
1479 if (Verbose && PrintGCDetails) {
1480 gclog_or_tty->print_cr("CMSCollector: collect because of explicit "
1481 " gc request (or gc_locker)");
1482 }
1483 return true;
1484 }
1486 // For debugging purposes, change the type of collection.
1487 // If the rotation is not on the concurrent collection
1488 // type, don't start a concurrent collection.
1489 NOT_PRODUCT(
1490 if (RotateCMSCollectionTypes &&
1491 (_cmsGen->debug_collection_type() !=
1492 ConcurrentMarkSweepGeneration::Concurrent_collection_type)) {
1493 assert(_cmsGen->debug_collection_type() !=
1494 ConcurrentMarkSweepGeneration::Unknown_collection_type,
1495 "Bad cms collection type");
1496 return false;
1497 }
1498 )
1500 FreelistLocker x(this);
1501 // ------------------------------------------------------------------
1502 // Print out lots of information which affects the initiation of
1503 // a collection.
1504 if (PrintCMSInitiationStatistics && stats().valid()) {
1505 gclog_or_tty->print("CMSCollector shouldConcurrentCollect: ");
1506 gclog_or_tty->stamp();
1507 gclog_or_tty->print_cr("");
1508 stats().print_on(gclog_or_tty);
1509 gclog_or_tty->print_cr("time_until_cms_gen_full %3.7f",
1510 stats().time_until_cms_gen_full());
1511 gclog_or_tty->print_cr("free="SIZE_FORMAT, _cmsGen->free());
1512 gclog_or_tty->print_cr("contiguous_available="SIZE_FORMAT,
1513 _cmsGen->contiguous_available());
1514 gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate());
1515 gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate());
1516 gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy());
1517 gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
1518 gclog_or_tty->print_cr("initiatingPermOccupancy=%3.7f", _permGen->initiating_occupancy());
1519 }
1520 // ------------------------------------------------------------------
1522 // If the estimated time to complete a cms collection (cms_duration())
1523 // is less than the estimated time remaining until the cms generation
1524 // is full, start a collection.
1525 if (!UseCMSInitiatingOccupancyOnly) {
1526 if (stats().valid()) {
1527 if (stats().time_until_cms_start() == 0.0) {
1528 return true;
1529 }
1530 } else {
1531 // We want to conservatively collect somewhat early in order
1532 // to try and "bootstrap" our CMS/promotion statistics;
1533 // this branch will not fire after the first successful CMS
1534 // collection because the stats should then be valid.
1535 if (_cmsGen->occupancy() >= _bootstrap_occupancy) {
1536 if (Verbose && PrintGCDetails) {
1537 gclog_or_tty->print_cr(
1538 " CMSCollector: collect for bootstrapping statistics:"
1539 " occupancy = %f, boot occupancy = %f", _cmsGen->occupancy(),
1540 _bootstrap_occupancy);
1541 }
1542 return true;
1543 }
1544 }
1545 }
1547 // Otherwise, we start a collection cycle if either the perm gen or
1548 // old gen want a collection cycle started. Each may use
1549 // an appropriate criterion for making this decision.
1550 // XXX We need to make sure that the gen expansion
1551 // criterion dovetails well with this. XXX NEED TO FIX THIS
1552 if (_cmsGen->should_concurrent_collect()) {
1553 if (Verbose && PrintGCDetails) {
1554 gclog_or_tty->print_cr("CMS old gen initiated");
1555 }
1556 return true;
1557 }
1559 // We start a collection if we believe an incremental collection may fail;
1560 // this is not likely to be productive in practice because it's probably too
1561 // late anyway.
1562 GenCollectedHeap* gch = GenCollectedHeap::heap();
1563 assert(gch->collector_policy()->is_two_generation_policy(),
1564 "You may want to check the correctness of the following");
1565 if (gch->incremental_collection_will_fail()) {
1566 if (PrintGCDetails && Verbose) {
1567 gclog_or_tty->print("CMSCollector: collect because incremental collection will fail ");
1568 }
1569 return true;
1570 }
1572 if (CMSClassUnloadingEnabled && _permGen->should_concurrent_collect()) {
1573 bool res = update_should_unload_classes();
1574 if (res) {
1575 if (Verbose && PrintGCDetails) {
1576 gclog_or_tty->print_cr("CMS perm gen initiated");
1577 }
1578 return true;
1579 }
1580 }
1581 return false;
1582 }
1584 // Clear _expansion_cause fields of constituent generations
1585 void CMSCollector::clear_expansion_cause() {
1586 _cmsGen->clear_expansion_cause();
1587 _permGen->clear_expansion_cause();
1588 }
1590 // We should be conservative in starting a collection cycle. To
1591 // start too eagerly runs the risk of collecting too often in the
1592 // extreme. To collect too rarely falls back on full collections,
1593 // which works, even if not optimum in terms of concurrent work.
1594 // As a work around for too eagerly collecting, use the flag
1595 // UseCMSInitiatingOccupancyOnly. This also has the advantage of
1596 // giving the user an easily understandable way of controlling the
1597 // collections.
1598 // We want to start a new collection cycle if any of the following
1599 // conditions hold:
1600 // . our current occupancy exceeds the configured initiating occupancy
1601 // for this generation, or
1602 // . we recently needed to expand this space and have not, since that
1603 // expansion, done a collection of this generation, or
1604 // . the underlying space believes that it may be a good idea to initiate
1605 // a concurrent collection (this may be based on criteria such as the
1606 // following: the space uses linear allocation and linear allocation is
1607 // going to fail, or there is believed to be excessive fragmentation in
1608 // the generation, etc... or ...
1609 // [.(currently done by CMSCollector::shouldConcurrentCollect() only for
1610 // the case of the old generation, not the perm generation; see CR 6543076):
1611 // we may be approaching a point at which allocation requests may fail because
1612 // we will be out of sufficient free space given allocation rate estimates.]
1613 bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const {
1615 assert_lock_strong(freelistLock());
1616 if (occupancy() > initiating_occupancy()) {
1617 if (PrintGCDetails && Verbose) {
1618 gclog_or_tty->print(" %s: collect because of occupancy %f / %f ",
1619 short_name(), occupancy(), initiating_occupancy());
1620 }
1621 return true;
1622 }
1623 if (UseCMSInitiatingOccupancyOnly) {
1624 return false;
1625 }
1626 if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
1627 if (PrintGCDetails && Verbose) {
1628 gclog_or_tty->print(" %s: collect because expanded for allocation ",
1629 short_name());
1630 }
1631 return true;
1632 }
1633 if (_cmsSpace->should_concurrent_collect()) {
1634 if (PrintGCDetails && Verbose) {
1635 gclog_or_tty->print(" %s: collect because cmsSpace says so ",
1636 short_name());
1637 }
1638 return true;
1639 }
1640 return false;
1641 }
1643 void ConcurrentMarkSweepGeneration::collect(bool full,
1644 bool clear_all_soft_refs,
1645 size_t size,
1646 bool tlab)
1647 {
1648 collector()->collect(full, clear_all_soft_refs, size, tlab);
1649 }
1651 void CMSCollector::collect(bool full,
1652 bool clear_all_soft_refs,
1653 size_t size,
1654 bool tlab)
1655 {
1656 if (!UseCMSCollectionPassing && _collectorState > Idling) {
1657 // For debugging purposes skip the collection if the state
1658 // is not currently idle
1659 if (TraceCMSState) {
1660 gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " skipped full:%d CMS state %d",
1661 Thread::current(), full, _collectorState);
1662 }
1663 return;
1664 }
1666 // The following "if" branch is present for defensive reasons.
1667 // In the current uses of this interface, it can be replaced with:
1668 // assert(!GC_locker.is_active(), "Can't be called otherwise");
1669 // But I am not placing that assert here to allow future
1670 // generality in invoking this interface.
1671 if (GC_locker::is_active()) {
1672 // A consistency test for GC_locker
1673 assert(GC_locker::needs_gc(), "Should have been set already");
1674 // Skip this foreground collection, instead
1675 // expanding the heap if necessary.
1676 // Need the free list locks for the call to free() in compute_new_size()
1677 compute_new_size();
1678 return;
1679 }
1680 acquire_control_and_collect(full, clear_all_soft_refs);
1681 _full_gcs_since_conc_gc++;
1683 }
1685 void CMSCollector::request_full_gc(unsigned int full_gc_count) {
1686 GenCollectedHeap* gch = GenCollectedHeap::heap();
1687 unsigned int gc_count = gch->total_full_collections();
1688 if (gc_count == full_gc_count) {
1689 MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
1690 _full_gc_requested = true;
1691 CGC_lock->notify(); // nudge CMS thread
1692 }
1693 }
1696 // The foreground and background collectors need to coordinate in order
1697 // to make sure that they do not mutually interfere with CMS collections.
1698 // When a background collection is active,
1699 // the foreground collector may need to take over (preempt) and
1700 // synchronously complete an ongoing collection. Depending on the
1701 // frequency of the background collections and the heap usage
1702 // of the application, this preemption can be seldom or frequent.
1703 // There are only certain
1704 // points in the background collection that the "collection-baton"
1705 // can be passed to the foreground collector.
1706 //
1707 // The foreground collector will wait for the baton before
1708 // starting any part of the collection. The foreground collector
1709 // will only wait at one location.
1710 //
1711 // The background collector will yield the baton before starting a new
1712 // phase of the collection (e.g., before initial marking, marking from roots,
1713 // precleaning, final re-mark, sweep etc.) This is normally done at the head
1714 // of the loop which switches the phases. The background collector does some
1715 // of the phases (initial mark, final re-mark) with the world stopped.
1716 // Because of locking involved in stopping the world,
1717 // the foreground collector should not block waiting for the background
1718 // collector when it is doing a stop-the-world phase. The background
1719 // collector will yield the baton at an additional point just before
1720 // it enters a stop-the-world phase. Once the world is stopped, the
1721 // background collector checks the phase of the collection. If the
1722 // phase has not changed, it proceeds with the collection. If the
1723 // phase has changed, it skips that phase of the collection. See
1724 // the comments on the use of the Heap_lock in collect_in_background().
1725 //
1726 // Variable used in baton passing.
1727 // _foregroundGCIsActive - Set to true by the foreground collector when
1728 // it wants the baton. The foreground clears it when it has finished
1729 // the collection.
1730 // _foregroundGCShouldWait - Set to true by the background collector
1731 // when it is running. The foreground collector waits while
1732 // _foregroundGCShouldWait is true.
1733 // CGC_lock - monitor used to protect access to the above variables
1734 // and to notify the foreground and background collectors.
1735 // _collectorState - current state of the CMS collection.
1736 //
1737 // The foreground collector
1738 // acquires the CGC_lock
1739 // sets _foregroundGCIsActive
1740 // waits on the CGC_lock for _foregroundGCShouldWait to be false
1741 // various locks acquired in preparation for the collection
1742 // are released so as not to block the background collector
1743 // that is in the midst of a collection
1744 // proceeds with the collection
1745 // clears _foregroundGCIsActive
1746 // returns
1747 //
1748 // The background collector in a loop iterating on the phases of the
1749 // collection
1750 // acquires the CGC_lock
1751 // sets _foregroundGCShouldWait
1752 // if _foregroundGCIsActive is set
1753 // clears _foregroundGCShouldWait, notifies _CGC_lock
1754 // waits on _CGC_lock for _foregroundGCIsActive to become false
1755 // and exits the loop.
1756 // otherwise
1757 // proceed with that phase of the collection
1758 // if the phase is a stop-the-world phase,
1759 // yield the baton once more just before enqueueing
1760 // the stop-world CMS operation (executed by the VM thread).
1761 // returns after all phases of the collection are done
1762 //
1764 void CMSCollector::acquire_control_and_collect(bool full,
1765 bool clear_all_soft_refs) {
1766 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
1767 assert(!Thread::current()->is_ConcurrentGC_thread(),
1768 "shouldn't try to acquire control from self!");
1770 // Start the protocol for acquiring control of the
1771 // collection from the background collector (aka CMS thread).
1772 assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1773 "VM thread should have CMS token");
1774 // Remember the possibly interrupted state of an ongoing
1775 // concurrent collection
1776 CollectorState first_state = _collectorState;
1778 // Signal to a possibly ongoing concurrent collection that
1779 // we want to do a foreground collection.
1780 _foregroundGCIsActive = true;
1782 // Disable incremental mode during a foreground collection.
1783 ICMSDisabler icms_disabler;
1785 // release locks and wait for a notify from the background collector
1786 // releasing the locks in only necessary for phases which
1787 // do yields to improve the granularity of the collection.
1788 assert_lock_strong(bitMapLock());
1789 // We need to lock the Free list lock for the space that we are
1790 // currently collecting.
1791 assert(haveFreelistLocks(), "Must be holding free list locks");
1792 bitMapLock()->unlock();
1793 releaseFreelistLocks();
1794 {
1795 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1796 if (_foregroundGCShouldWait) {
1797 // We are going to be waiting for action for the CMS thread;
1798 // it had better not be gone (for instance at shutdown)!
1799 assert(ConcurrentMarkSweepThread::cmst() != NULL,
1800 "CMS thread must be running");
1801 // Wait here until the background collector gives us the go-ahead
1802 ConcurrentMarkSweepThread::clear_CMS_flag(
1803 ConcurrentMarkSweepThread::CMS_vm_has_token); // release token
1804 // Get a possibly blocked CMS thread going:
1805 // Note that we set _foregroundGCIsActive true above,
1806 // without protection of the CGC_lock.
1807 CGC_lock->notify();
1808 assert(!ConcurrentMarkSweepThread::vm_thread_wants_cms_token(),
1809 "Possible deadlock");
1810 while (_foregroundGCShouldWait) {
1811 // wait for notification
1812 CGC_lock->wait(Mutex::_no_safepoint_check_flag);
1813 // Possibility of delay/starvation here, since CMS token does
1814 // not know to give priority to VM thread? Actually, i think
1815 // there wouldn't be any delay/starvation, but the proof of
1816 // that "fact" (?) appears non-trivial. XXX 20011219YSR
1817 }
1818 ConcurrentMarkSweepThread::set_CMS_flag(
1819 ConcurrentMarkSweepThread::CMS_vm_has_token);
1820 }
1821 }
1822 // The CMS_token is already held. Get back the other locks.
1823 assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1824 "VM thread should have CMS token");
1825 getFreelistLocks();
1826 bitMapLock()->lock_without_safepoint_check();
1827 if (TraceCMSState) {
1828 gclog_or_tty->print_cr("CMS foreground collector has asked for control "
1829 INTPTR_FORMAT " with first state %d", Thread::current(), first_state);
1830 gclog_or_tty->print_cr(" gets control with state %d", _collectorState);
1831 }
1833 // Check if we need to do a compaction, or if not, whether
1834 // we need to start the mark-sweep from scratch.
1835 bool should_compact = false;
1836 bool should_start_over = false;
1837 decide_foreground_collection_type(clear_all_soft_refs,
1838 &should_compact, &should_start_over);
1840 NOT_PRODUCT(
1841 if (RotateCMSCollectionTypes) {
1842 if (_cmsGen->debug_collection_type() ==
1843 ConcurrentMarkSweepGeneration::MSC_foreground_collection_type) {
1844 should_compact = true;
1845 } else if (_cmsGen->debug_collection_type() ==
1846 ConcurrentMarkSweepGeneration::MS_foreground_collection_type) {
1847 should_compact = false;
1848 }
1849 }
1850 )
1852 if (PrintGCDetails && first_state > Idling) {
1853 GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause();
1854 if (GCCause::is_user_requested_gc(cause) ||
1855 GCCause::is_serviceability_requested_gc(cause)) {
1856 gclog_or_tty->print(" (concurrent mode interrupted)");
1857 } else {
1858 gclog_or_tty->print(" (concurrent mode failure)");
1859 }
1860 }
1862 if (should_compact) {
1863 // If the collection is being acquired from the background
1864 // collector, there may be references on the discovered
1865 // references lists that have NULL referents (being those
1866 // that were concurrently cleared by a mutator) or
1867 // that are no longer active (having been enqueued concurrently
1868 // by the mutator).
1869 // Scrub the list of those references because Mark-Sweep-Compact
1870 // code assumes referents are not NULL and that all discovered
1871 // Reference objects are active.
1872 ref_processor()->clean_up_discovered_references();
1874 do_compaction_work(clear_all_soft_refs);
1876 // Has the GC time limit been exceeded?
1877 DefNewGeneration* young_gen = _young_gen->as_DefNewGeneration();
1878 size_t max_eden_size = young_gen->max_capacity() -
1879 young_gen->to()->capacity() -
1880 young_gen->from()->capacity();
1881 GenCollectedHeap* gch = GenCollectedHeap::heap();
1882 GCCause::Cause gc_cause = gch->gc_cause();
1883 size_policy()->check_gc_overhead_limit(_young_gen->used(),
1884 young_gen->eden()->used(),
1885 _cmsGen->max_capacity(),
1886 max_eden_size,
1887 full,
1888 gc_cause,
1889 gch->collector_policy());
1890 } else {
1891 do_mark_sweep_work(clear_all_soft_refs, first_state,
1892 should_start_over);
1893 }
1894 // Reset the expansion cause, now that we just completed
1895 // a collection cycle.
1896 clear_expansion_cause();
1897 _foregroundGCIsActive = false;
1898 return;
1899 }
1901 // Resize the perm generation and the tenured generation
1902 // after obtaining the free list locks for the
1903 // two generations.
1904 void CMSCollector::compute_new_size() {
1905 assert_locked_or_safepoint(Heap_lock);
1906 FreelistLocker z(this);
1907 _permGen->compute_new_size();
1908 _cmsGen->compute_new_size();
1909 }
1911 // A work method used by foreground collection to determine
1912 // what type of collection (compacting or not, continuing or fresh)
1913 // it should do.
1914 // NOTE: the intent is to make UseCMSCompactAtFullCollection
1915 // and CMSCompactWhenClearAllSoftRefs the default in the future
1916 // and do away with the flags after a suitable period.
1917 void CMSCollector::decide_foreground_collection_type(
1918 bool clear_all_soft_refs, bool* should_compact,
1919 bool* should_start_over) {
1920 // Normally, we'll compact only if the UseCMSCompactAtFullCollection
1921 // flag is set, and we have either requested a System.gc() or
1922 // the number of full gc's since the last concurrent cycle
1923 // has exceeded the threshold set by CMSFullGCsBeforeCompaction,
1924 // or if an incremental collection has failed
1925 GenCollectedHeap* gch = GenCollectedHeap::heap();
1926 assert(gch->collector_policy()->is_two_generation_policy(),
1927 "You may want to check the correctness of the following");
1928 // Inform cms gen if this was due to partial collection failing.
1929 // The CMS gen may use this fact to determine its expansion policy.
1930 if (gch->incremental_collection_will_fail()) {
1931 assert(!_cmsGen->incremental_collection_failed(),
1932 "Should have been noticed, reacted to and cleared");
1933 _cmsGen->set_incremental_collection_failed();
1934 }
1935 *should_compact =
1936 UseCMSCompactAtFullCollection &&
1937 ((_full_gcs_since_conc_gc >= CMSFullGCsBeforeCompaction) ||
1938 GCCause::is_user_requested_gc(gch->gc_cause()) ||
1939 gch->incremental_collection_will_fail());
1940 *should_start_over = false;
1941 if (clear_all_soft_refs && !*should_compact) {
1942 // We are about to do a last ditch collection attempt
1943 // so it would normally make sense to do a compaction
1944 // to reclaim as much space as possible.
1945 if (CMSCompactWhenClearAllSoftRefs) {
1946 // Default: The rationale is that in this case either
1947 // we are past the final marking phase, in which case
1948 // we'd have to start over, or so little has been done
1949 // that there's little point in saving that work. Compaction
1950 // appears to be the sensible choice in either case.
1951 *should_compact = true;
1952 } else {
1953 // We have been asked to clear all soft refs, but not to
1954 // compact. Make sure that we aren't past the final checkpoint
1955 // phase, for that is where we process soft refs. If we are already
1956 // past that phase, we'll need to redo the refs discovery phase and
1957 // if necessary clear soft refs that weren't previously
1958 // cleared. We do so by remembering the phase in which
1959 // we came in, and if we are past the refs processing
1960 // phase, we'll choose to just redo the mark-sweep
1961 // collection from scratch.
1962 if (_collectorState > FinalMarking) {
1963 // We are past the refs processing phase;
1964 // start over and do a fresh synchronous CMS cycle
1965 _collectorState = Resetting; // skip to reset to start new cycle
1966 reset(false /* == !asynch */);
1967 *should_start_over = true;
1968 } // else we can continue a possibly ongoing current cycle
1969 }
1970 }
1971 }
1973 // A work method used by the foreground collector to do
1974 // a mark-sweep-compact.
1975 void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
1976 GenCollectedHeap* gch = GenCollectedHeap::heap();
1977 TraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, gclog_or_tty);
1978 if (PrintGC && Verbose && !(GCCause::is_user_requested_gc(gch->gc_cause()))) {
1979 gclog_or_tty->print_cr("Compact ConcurrentMarkSweepGeneration after %d "
1980 "collections passed to foreground collector", _full_gcs_since_conc_gc);
1981 }
1983 // Sample collection interval time and reset for collection pause.
1984 if (UseAdaptiveSizePolicy) {
1985 size_policy()->msc_collection_begin();
1986 }
1988 // Temporarily widen the span of the weak reference processing to
1989 // the entire heap.
1990 MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
1991 ReferenceProcessorSpanMutator x(ref_processor(), new_span);
1993 // Temporarily, clear the "is_alive_non_header" field of the
1994 // reference processor.
1995 ReferenceProcessorIsAliveMutator y(ref_processor(), NULL);
1997 // Temporarily make reference _processing_ single threaded (non-MT).
1998 ReferenceProcessorMTProcMutator z(ref_processor(), false);
2000 // Temporarily make refs discovery atomic
2001 ReferenceProcessorAtomicMutator w(ref_processor(), true);
2003 ref_processor()->set_enqueuing_is_done(false);
2004 ref_processor()->enable_discovery();
2005 ref_processor()->setup_policy(clear_all_soft_refs);
2006 // If an asynchronous collection finishes, the _modUnionTable is
2007 // all clear. If we are assuming the collection from an asynchronous
2008 // collection, clear the _modUnionTable.
2009 assert(_collectorState != Idling || _modUnionTable.isAllClear(),
2010 "_modUnionTable should be clear if the baton was not passed");
2011 _modUnionTable.clear_all();
2013 // We must adjust the allocation statistics being maintained
2014 // in the free list space. We do so by reading and clearing
2015 // the sweep timer and updating the block flux rate estimates below.
2016 assert(!_intra_sweep_timer.is_active(), "_intra_sweep_timer should be inactive");
2017 if (_inter_sweep_timer.is_active()) {
2018 _inter_sweep_timer.stop();
2019 // Note that we do not use this sample to update the _inter_sweep_estimate.
2020 _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
2021 _inter_sweep_estimate.padded_average(),
2022 _intra_sweep_estimate.padded_average());
2023 }
2025 {
2026 TraceCMSMemoryManagerStats();
2027 }
2028 GenMarkSweep::invoke_at_safepoint(_cmsGen->level(),
2029 ref_processor(), clear_all_soft_refs);
2030 #ifdef ASSERT
2031 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
2032 size_t free_size = cms_space->free();
2033 assert(free_size ==
2034 pointer_delta(cms_space->end(), cms_space->compaction_top())
2035 * HeapWordSize,
2036 "All the free space should be compacted into one chunk at top");
2037 assert(cms_space->dictionary()->totalChunkSize(
2038 debug_only(cms_space->freelistLock())) == 0 ||
2039 cms_space->totalSizeInIndexedFreeLists() == 0,
2040 "All the free space should be in a single chunk");
2041 size_t num = cms_space->totalCount();
2042 assert((free_size == 0 && num == 0) ||
2043 (free_size > 0 && (num == 1 || num == 2)),
2044 "There should be at most 2 free chunks after compaction");
2045 #endif // ASSERT
2046 _collectorState = Resetting;
2047 assert(_restart_addr == NULL,
2048 "Should have been NULL'd before baton was passed");
2049 reset(false /* == !asynch */);
2050 _cmsGen->reset_after_compaction();
2051 _concurrent_cycles_since_last_unload = 0;
2053 if (verifying() && !should_unload_classes()) {
2054 perm_gen_verify_bit_map()->clear_all();
2055 }
2057 // Clear any data recorded in the PLAB chunk arrays.
2058 if (_survivor_plab_array != NULL) {
2059 reset_survivor_plab_arrays();
2060 }
2062 // Adjust the per-size allocation stats for the next epoch.
2063 _cmsGen->cmsSpace()->endSweepFLCensus(sweep_count() /* fake */);
2064 // Restart the "inter sweep timer" for the next epoch.
2065 _inter_sweep_timer.reset();
2066 _inter_sweep_timer.start();
2068 // Sample collection pause time and reset for collection interval.
2069 if (UseAdaptiveSizePolicy) {
2070 size_policy()->msc_collection_end(gch->gc_cause());
2071 }
2073 // For a mark-sweep-compact, compute_new_size() will be called
2074 // in the heap's do_collection() method.
2075 }
2077 // A work method used by the foreground collector to do
2078 // a mark-sweep, after taking over from a possibly on-going
2079 // concurrent mark-sweep collection.
2080 void CMSCollector::do_mark_sweep_work(bool clear_all_soft_refs,
2081 CollectorState first_state, bool should_start_over) {
2082 if (PrintGC && Verbose) {
2083 gclog_or_tty->print_cr("Pass concurrent collection to foreground "
2084 "collector with count %d",
2085 _full_gcs_since_conc_gc);
2086 }
2087 switch (_collectorState) {
2088 case Idling:
2089 if (first_state == Idling || should_start_over) {
2090 // The background GC was not active, or should
2091 // restarted from scratch; start the cycle.
2092 _collectorState = InitialMarking;
2093 }
2094 // If first_state was not Idling, then a background GC
2095 // was in progress and has now finished. No need to do it
2096 // again. Leave the state as Idling.
2097 break;
2098 case Precleaning:
2099 // In the foreground case don't do the precleaning since
2100 // it is not done concurrently and there is extra work
2101 // required.
2102 _collectorState = FinalMarking;
2103 }
2104 if (PrintGCDetails &&
2105 (_collectorState > Idling ||
2106 !GCCause::is_user_requested_gc(GenCollectedHeap::heap()->gc_cause()))) {
2107 gclog_or_tty->print(" (concurrent mode failure)");
2108 }
2109 collect_in_foreground(clear_all_soft_refs);
2111 // For a mark-sweep, compute_new_size() will be called
2112 // in the heap's do_collection() method.
2113 }
2116 void CMSCollector::getFreelistLocks() const {
2117 // Get locks for all free lists in all generations that this
2118 // collector is responsible for
2119 _cmsGen->freelistLock()->lock_without_safepoint_check();
2120 _permGen->freelistLock()->lock_without_safepoint_check();
2121 }
2123 void CMSCollector::releaseFreelistLocks() const {
2124 // Release locks for all free lists in all generations that this
2125 // collector is responsible for
2126 _cmsGen->freelistLock()->unlock();
2127 _permGen->freelistLock()->unlock();
2128 }
2130 bool CMSCollector::haveFreelistLocks() const {
2131 // Check locks for all free lists in all generations that this
2132 // collector is responsible for
2133 assert_lock_strong(_cmsGen->freelistLock());
2134 assert_lock_strong(_permGen->freelistLock());
2135 PRODUCT_ONLY(ShouldNotReachHere());
2136 return true;
2137 }
2139 // A utility class that is used by the CMS collector to
2140 // temporarily "release" the foreground collector from its
2141 // usual obligation to wait for the background collector to
2142 // complete an ongoing phase before proceeding.
2143 class ReleaseForegroundGC: public StackObj {
2144 private:
2145 CMSCollector* _c;
2146 public:
2147 ReleaseForegroundGC(CMSCollector* c) : _c(c) {
2148 assert(_c->_foregroundGCShouldWait, "Else should not need to call");
2149 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2150 // allow a potentially blocked foreground collector to proceed
2151 _c->_foregroundGCShouldWait = false;
2152 if (_c->_foregroundGCIsActive) {
2153 CGC_lock->notify();
2154 }
2155 assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2156 "Possible deadlock");
2157 }
2159 ~ReleaseForegroundGC() {
2160 assert(!_c->_foregroundGCShouldWait, "Usage protocol violation?");
2161 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2162 _c->_foregroundGCShouldWait = true;
2163 }
2164 };
2166 // There are separate collect_in_background and collect_in_foreground because of
2167 // the different locking requirements of the background collector and the
2168 // foreground collector. There was originally an attempt to share
2169 // one "collect" method between the background collector and the foreground
2170 // collector but the if-then-else required made it cleaner to have
2171 // separate methods.
2172 void CMSCollector::collect_in_background(bool clear_all_soft_refs) {
2173 assert(Thread::current()->is_ConcurrentGC_thread(),
2174 "A CMS asynchronous collection is only allowed on a CMS thread.");
2176 GenCollectedHeap* gch = GenCollectedHeap::heap();
2177 {
2178 bool safepoint_check = Mutex::_no_safepoint_check_flag;
2179 MutexLockerEx hl(Heap_lock, safepoint_check);
2180 FreelistLocker fll(this);
2181 MutexLockerEx x(CGC_lock, safepoint_check);
2182 if (_foregroundGCIsActive || !UseAsyncConcMarkSweepGC) {
2183 // The foreground collector is active or we're
2184 // not using asynchronous collections. Skip this
2185 // background collection.
2186 assert(!_foregroundGCShouldWait, "Should be clear");
2187 return;
2188 } else {
2189 assert(_collectorState == Idling, "Should be idling before start.");
2190 _collectorState = InitialMarking;
2191 // Reset the expansion cause, now that we are about to begin
2192 // a new cycle.
2193 clear_expansion_cause();
2194 }
2195 // Decide if we want to enable class unloading as part of the
2196 // ensuing concurrent GC cycle.
2197 update_should_unload_classes();
2198 _full_gc_requested = false; // acks all outstanding full gc requests
2199 // Signal that we are about to start a collection
2200 gch->increment_total_full_collections(); // ... starting a collection cycle
2201 _collection_count_start = gch->total_full_collections();
2202 }
2204 // Used for PrintGC
2205 size_t prev_used;
2206 if (PrintGC && Verbose) {
2207 prev_used = _cmsGen->used(); // XXXPERM
2208 }
2210 // The change of the collection state is normally done at this level;
2211 // the exceptions are phases that are executed while the world is
2212 // stopped. For those phases the change of state is done while the
2213 // world is stopped. For baton passing purposes this allows the
2214 // background collector to finish the phase and change state atomically.
2215 // The foreground collector cannot wait on a phase that is done
2216 // while the world is stopped because the foreground collector already
2217 // has the world stopped and would deadlock.
2218 while (_collectorState != Idling) {
2219 if (TraceCMSState) {
2220 gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
2221 Thread::current(), _collectorState);
2222 }
2223 // The foreground collector
2224 // holds the Heap_lock throughout its collection.
2225 // holds the CMS token (but not the lock)
2226 // except while it is waiting for the background collector to yield.
2227 //
2228 // The foreground collector should be blocked (not for long)
2229 // if the background collector is about to start a phase
2230 // executed with world stopped. If the background
2231 // collector has already started such a phase, the
2232 // foreground collector is blocked waiting for the
2233 // Heap_lock. The stop-world phases (InitialMarking and FinalMarking)
2234 // are executed in the VM thread.
2235 //
2236 // The locking order is
2237 // PendingListLock (PLL) -- if applicable (FinalMarking)
2238 // Heap_lock (both this & PLL locked in VM_CMS_Operation::prologue())
2239 // CMS token (claimed in
2240 // stop_world_and_do() -->
2241 // safepoint_synchronize() -->
2242 // CMSThread::synchronize())
2244 {
2245 // Check if the FG collector wants us to yield.
2246 CMSTokenSync x(true); // is cms thread
2247 if (waitForForegroundGC()) {
2248 // We yielded to a foreground GC, nothing more to be
2249 // done this round.
2250 assert(_foregroundGCShouldWait == false, "We set it to false in "
2251 "waitForForegroundGC()");
2252 if (TraceCMSState) {
2253 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2254 " exiting collection CMS state %d",
2255 Thread::current(), _collectorState);
2256 }
2257 return;
2258 } else {
2259 // The background collector can run but check to see if the
2260 // foreground collector has done a collection while the
2261 // background collector was waiting to get the CGC_lock
2262 // above. If yes, break so that _foregroundGCShouldWait
2263 // is cleared before returning.
2264 if (_collectorState == Idling) {
2265 break;
2266 }
2267 }
2268 }
2270 assert(_foregroundGCShouldWait, "Foreground collector, if active, "
2271 "should be waiting");
2273 switch (_collectorState) {
2274 case InitialMarking:
2275 {
2276 ReleaseForegroundGC x(this);
2277 stats().record_cms_begin();
2279 VM_CMS_Initial_Mark initial_mark_op(this);
2280 VMThread::execute(&initial_mark_op);
2281 }
2282 // The collector state may be any legal state at this point
2283 // since the background collector may have yielded to the
2284 // foreground collector.
2285 break;
2286 case Marking:
2287 // initial marking in checkpointRootsInitialWork has been completed
2288 if (markFromRoots(true)) { // we were successful
2289 assert(_collectorState == Precleaning, "Collector state should "
2290 "have changed");
2291 } else {
2292 assert(_foregroundGCIsActive, "Internal state inconsistency");
2293 }
2294 break;
2295 case Precleaning:
2296 if (UseAdaptiveSizePolicy) {
2297 size_policy()->concurrent_precleaning_begin();
2298 }
2299 // marking from roots in markFromRoots has been completed
2300 preclean();
2301 if (UseAdaptiveSizePolicy) {
2302 size_policy()->concurrent_precleaning_end();
2303 }
2304 assert(_collectorState == AbortablePreclean ||
2305 _collectorState == FinalMarking,
2306 "Collector state should have changed");
2307 break;
2308 case AbortablePreclean:
2309 if (UseAdaptiveSizePolicy) {
2310 size_policy()->concurrent_phases_resume();
2311 }
2312 abortable_preclean();
2313 if (UseAdaptiveSizePolicy) {
2314 size_policy()->concurrent_precleaning_end();
2315 }
2316 assert(_collectorState == FinalMarking, "Collector state should "
2317 "have changed");
2318 break;
2319 case FinalMarking:
2320 {
2321 ReleaseForegroundGC x(this);
2323 VM_CMS_Final_Remark final_remark_op(this);
2324 VMThread::execute(&final_remark_op);
2325 }
2326 assert(_foregroundGCShouldWait, "block post-condition");
2327 break;
2328 case Sweeping:
2329 if (UseAdaptiveSizePolicy) {
2330 size_policy()->concurrent_sweeping_begin();
2331 }
2332 // final marking in checkpointRootsFinal has been completed
2333 sweep(true);
2334 assert(_collectorState == Resizing, "Collector state change "
2335 "to Resizing must be done under the free_list_lock");
2336 _full_gcs_since_conc_gc = 0;
2338 // Stop the timers for adaptive size policy for the concurrent phases
2339 if (UseAdaptiveSizePolicy) {
2340 size_policy()->concurrent_sweeping_end();
2341 size_policy()->concurrent_phases_end(gch->gc_cause(),
2342 gch->prev_gen(_cmsGen)->capacity(),
2343 _cmsGen->free());
2344 }
2346 case Resizing: {
2347 // Sweeping has been completed...
2348 // At this point the background collection has completed.
2349 // Don't move the call to compute_new_size() down
2350 // into code that might be executed if the background
2351 // collection was preempted.
2352 {
2353 ReleaseForegroundGC x(this); // unblock FG collection
2354 MutexLockerEx y(Heap_lock, Mutex::_no_safepoint_check_flag);
2355 CMSTokenSync z(true); // not strictly needed.
2356 if (_collectorState == Resizing) {
2357 compute_new_size();
2358 _collectorState = Resetting;
2359 } else {
2360 assert(_collectorState == Idling, "The state should only change"
2361 " because the foreground collector has finished the collection");
2362 }
2363 }
2364 break;
2365 }
2366 case Resetting:
2367 // CMS heap resizing has been completed
2368 reset(true);
2369 assert(_collectorState == Idling, "Collector state should "
2370 "have changed");
2371 stats().record_cms_end();
2372 // Don't move the concurrent_phases_end() and compute_new_size()
2373 // calls to here because a preempted background collection
2374 // has it's state set to "Resetting".
2375 break;
2376 case Idling:
2377 default:
2378 ShouldNotReachHere();
2379 break;
2380 }
2381 if (TraceCMSState) {
2382 gclog_or_tty->print_cr(" Thread " INTPTR_FORMAT " done - next CMS state %d",
2383 Thread::current(), _collectorState);
2384 }
2385 assert(_foregroundGCShouldWait, "block post-condition");
2386 }
2388 // Should this be in gc_epilogue?
2389 collector_policy()->counters()->update_counters();
2391 {
2392 // Clear _foregroundGCShouldWait and, in the event that the
2393 // foreground collector is waiting, notify it, before
2394 // returning.
2395 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2396 _foregroundGCShouldWait = false;
2397 if (_foregroundGCIsActive) {
2398 CGC_lock->notify();
2399 }
2400 assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2401 "Possible deadlock");
2402 }
2403 if (TraceCMSState) {
2404 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2405 " exiting collection CMS state %d",
2406 Thread::current(), _collectorState);
2407 }
2408 if (PrintGC && Verbose) {
2409 _cmsGen->print_heap_change(prev_used);
2410 }
2411 }
2413 void CMSCollector::collect_in_foreground(bool clear_all_soft_refs) {
2414 assert(_foregroundGCIsActive && !_foregroundGCShouldWait,
2415 "Foreground collector should be waiting, not executing");
2416 assert(Thread::current()->is_VM_thread(), "A foreground collection"
2417 "may only be done by the VM Thread with the world stopped");
2418 assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
2419 "VM thread should have CMS token");
2421 NOT_PRODUCT(TraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose,
2422 true, gclog_or_tty);)
2423 if (UseAdaptiveSizePolicy) {
2424 size_policy()->ms_collection_begin();
2425 }
2426 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
2428 HandleMark hm; // Discard invalid handles created during verification
2430 if (VerifyBeforeGC &&
2431 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2432 Universe::verify(true);
2433 }
2435 // Snapshot the soft reference policy to be used in this collection cycle.
2436 ref_processor()->setup_policy(clear_all_soft_refs);
2438 bool init_mark_was_synchronous = false; // until proven otherwise
2439 while (_collectorState != Idling) {
2440 if (TraceCMSState) {
2441 gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
2442 Thread::current(), _collectorState);
2443 }
2444 switch (_collectorState) {
2445 case InitialMarking:
2446 init_mark_was_synchronous = true; // fact to be exploited in re-mark
2447 checkpointRootsInitial(false);
2448 assert(_collectorState == Marking, "Collector state should have changed"
2449 " within checkpointRootsInitial()");
2450 break;
2451 case Marking:
2452 // initial marking in checkpointRootsInitialWork has been completed
2453 if (VerifyDuringGC &&
2454 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2455 gclog_or_tty->print("Verify before initial mark: ");
2456 Universe::verify(true);
2457 }
2458 {
2459 bool res = markFromRoots(false);
2460 assert(res && _collectorState == FinalMarking, "Collector state should "
2461 "have changed");
2462 break;
2463 }
2464 case FinalMarking:
2465 if (VerifyDuringGC &&
2466 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2467 gclog_or_tty->print("Verify before re-mark: ");
2468 Universe::verify(true);
2469 }
2470 checkpointRootsFinal(false, clear_all_soft_refs,
2471 init_mark_was_synchronous);
2472 assert(_collectorState == Sweeping, "Collector state should not "
2473 "have changed within checkpointRootsFinal()");
2474 break;
2475 case Sweeping:
2476 // final marking in checkpointRootsFinal has been completed
2477 if (VerifyDuringGC &&
2478 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2479 gclog_or_tty->print("Verify before sweep: ");
2480 Universe::verify(true);
2481 }
2482 sweep(false);
2483 assert(_collectorState == Resizing, "Incorrect state");
2484 break;
2485 case Resizing: {
2486 // Sweeping has been completed; the actual resize in this case
2487 // is done separately; nothing to be done in this state.
2488 _collectorState = Resetting;
2489 break;
2490 }
2491 case Resetting:
2492 // The heap has been resized.
2493 if (VerifyDuringGC &&
2494 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2495 gclog_or_tty->print("Verify before reset: ");
2496 Universe::verify(true);
2497 }
2498 reset(false);
2499 assert(_collectorState == Idling, "Collector state should "
2500 "have changed");
2501 break;
2502 case Precleaning:
2503 case AbortablePreclean:
2504 // Elide the preclean phase
2505 _collectorState = FinalMarking;
2506 break;
2507 default:
2508 ShouldNotReachHere();
2509 }
2510 if (TraceCMSState) {
2511 gclog_or_tty->print_cr(" Thread " INTPTR_FORMAT " done - next CMS state %d",
2512 Thread::current(), _collectorState);
2513 }
2514 }
2516 if (UseAdaptiveSizePolicy) {
2517 GenCollectedHeap* gch = GenCollectedHeap::heap();
2518 size_policy()->ms_collection_end(gch->gc_cause());
2519 }
2521 if (VerifyAfterGC &&
2522 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2523 Universe::verify(true);
2524 }
2525 if (TraceCMSState) {
2526 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2527 " exiting collection CMS state %d",
2528 Thread::current(), _collectorState);
2529 }
2530 }
2532 bool CMSCollector::waitForForegroundGC() {
2533 bool res = false;
2534 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2535 "CMS thread should have CMS token");
2536 // Block the foreground collector until the
2537 // background collectors decides whether to
2538 // yield.
2539 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2540 _foregroundGCShouldWait = true;
2541 if (_foregroundGCIsActive) {
2542 // The background collector yields to the
2543 // foreground collector and returns a value
2544 // indicating that it has yielded. The foreground
2545 // collector can proceed.
2546 res = true;
2547 _foregroundGCShouldWait = false;
2548 ConcurrentMarkSweepThread::clear_CMS_flag(
2549 ConcurrentMarkSweepThread::CMS_cms_has_token);
2550 ConcurrentMarkSweepThread::set_CMS_flag(
2551 ConcurrentMarkSweepThread::CMS_cms_wants_token);
2552 // Get a possibly blocked foreground thread going
2553 CGC_lock->notify();
2554 if (TraceCMSState) {
2555 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " waiting at CMS state %d",
2556 Thread::current(), _collectorState);
2557 }
2558 while (_foregroundGCIsActive) {
2559 CGC_lock->wait(Mutex::_no_safepoint_check_flag);
2560 }
2561 ConcurrentMarkSweepThread::set_CMS_flag(
2562 ConcurrentMarkSweepThread::CMS_cms_has_token);
2563 ConcurrentMarkSweepThread::clear_CMS_flag(
2564 ConcurrentMarkSweepThread::CMS_cms_wants_token);
2565 }
2566 if (TraceCMSState) {
2567 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " continuing at CMS state %d",
2568 Thread::current(), _collectorState);
2569 }
2570 return res;
2571 }
2573 // Because of the need to lock the free lists and other structures in
2574 // the collector, common to all the generations that the collector is
2575 // collecting, we need the gc_prologues of individual CMS generations
2576 // delegate to their collector. It may have been simpler had the
2577 // current infrastructure allowed one to call a prologue on a
2578 // collector. In the absence of that we have the generation's
2579 // prologue delegate to the collector, which delegates back
2580 // some "local" work to a worker method in the individual generations
2581 // that it's responsible for collecting, while itself doing any
2582 // work common to all generations it's responsible for. A similar
2583 // comment applies to the gc_epilogue()'s.
2584 // The role of the varaible _between_prologue_and_epilogue is to
2585 // enforce the invocation protocol.
2586 void CMSCollector::gc_prologue(bool full) {
2587 // Call gc_prologue_work() for each CMSGen and PermGen that
2588 // we are responsible for.
2590 // The following locking discipline assumes that we are only called
2591 // when the world is stopped.
2592 assert(SafepointSynchronize::is_at_safepoint(), "world is stopped assumption");
2594 // The CMSCollector prologue must call the gc_prologues for the
2595 // "generations" (including PermGen if any) that it's responsible
2596 // for.
2598 assert( Thread::current()->is_VM_thread()
2599 || ( CMSScavengeBeforeRemark
2600 && Thread::current()->is_ConcurrentGC_thread()),
2601 "Incorrect thread type for prologue execution");
2603 if (_between_prologue_and_epilogue) {
2604 // We have already been invoked; this is a gc_prologue delegation
2605 // from yet another CMS generation that we are responsible for, just
2606 // ignore it since all relevant work has already been done.
2607 return;
2608 }
2610 // set a bit saying prologue has been called; cleared in epilogue
2611 _between_prologue_and_epilogue = true;
2612 // Claim locks for common data structures, then call gc_prologue_work()
2613 // for each CMSGen and PermGen that we are responsible for.
2615 getFreelistLocks(); // gets free list locks on constituent spaces
2616 bitMapLock()->lock_without_safepoint_check();
2618 // Should call gc_prologue_work() for all cms gens we are responsible for
2619 bool registerClosure = _collectorState >= Marking
2620 && _collectorState < Sweeping;
2621 ModUnionClosure* muc = CollectedHeap::use_parallel_gc_threads() ?
2622 &_modUnionClosurePar
2623 : &_modUnionClosure;
2624 _cmsGen->gc_prologue_work(full, registerClosure, muc);
2625 _permGen->gc_prologue_work(full, registerClosure, muc);
2627 if (!full) {
2628 stats().record_gc0_begin();
2629 }
2630 }
2632 void ConcurrentMarkSweepGeneration::gc_prologue(bool full) {
2633 // Delegate to CMScollector which knows how to coordinate between
2634 // this and any other CMS generations that it is responsible for
2635 // collecting.
2636 collector()->gc_prologue(full);
2637 }
2639 // This is a "private" interface for use by this generation's CMSCollector.
2640 // Not to be called directly by any other entity (for instance,
2641 // GenCollectedHeap, which calls the "public" gc_prologue method above).
2642 void ConcurrentMarkSweepGeneration::gc_prologue_work(bool full,
2643 bool registerClosure, ModUnionClosure* modUnionClosure) {
2644 assert(!incremental_collection_failed(), "Shouldn't be set yet");
2645 assert(cmsSpace()->preconsumptionDirtyCardClosure() == NULL,
2646 "Should be NULL");
2647 if (registerClosure) {
2648 cmsSpace()->setPreconsumptionDirtyCardClosure(modUnionClosure);
2649 }
2650 cmsSpace()->gc_prologue();
2651 // Clear stat counters
2652 NOT_PRODUCT(
2653 assert(_numObjectsPromoted == 0, "check");
2654 assert(_numWordsPromoted == 0, "check");
2655 if (Verbose && PrintGC) {
2656 gclog_or_tty->print("Allocated "SIZE_FORMAT" objects, "
2657 SIZE_FORMAT" bytes concurrently",
2658 _numObjectsAllocated, _numWordsAllocated*sizeof(HeapWord));
2659 }
2660 _numObjectsAllocated = 0;
2661 _numWordsAllocated = 0;
2662 )
2663 }
2665 void CMSCollector::gc_epilogue(bool full) {
2666 // The following locking discipline assumes that we are only called
2667 // when the world is stopped.
2668 assert(SafepointSynchronize::is_at_safepoint(),
2669 "world is stopped assumption");
2671 // Currently the CMS epilogue (see CompactibleFreeListSpace) merely checks
2672 // if linear allocation blocks need to be appropriately marked to allow the
2673 // the blocks to be parsable. We also check here whether we need to nudge the
2674 // CMS collector thread to start a new cycle (if it's not already active).
2675 assert( Thread::current()->is_VM_thread()
2676 || ( CMSScavengeBeforeRemark
2677 && Thread::current()->is_ConcurrentGC_thread()),
2678 "Incorrect thread type for epilogue execution");
2680 if (!_between_prologue_and_epilogue) {
2681 // We have already been invoked; this is a gc_epilogue delegation
2682 // from yet another CMS generation that we are responsible for, just
2683 // ignore it since all relevant work has already been done.
2684 return;
2685 }
2686 assert(haveFreelistLocks(), "must have freelist locks");
2687 assert_lock_strong(bitMapLock());
2689 _cmsGen->gc_epilogue_work(full);
2690 _permGen->gc_epilogue_work(full);
2692 if (_collectorState == AbortablePreclean || _collectorState == Precleaning) {
2693 // in case sampling was not already enabled, enable it
2694 _start_sampling = true;
2695 }
2696 // reset _eden_chunk_array so sampling starts afresh
2697 _eden_chunk_index = 0;
2699 size_t cms_used = _cmsGen->cmsSpace()->used();
2700 size_t perm_used = _permGen->cmsSpace()->used();
2702 // update performance counters - this uses a special version of
2703 // update_counters() that allows the utilization to be passed as a
2704 // parameter, avoiding multiple calls to used().
2705 //
2706 _cmsGen->update_counters(cms_used);
2707 _permGen->update_counters(perm_used);
2709 if (CMSIncrementalMode) {
2710 icms_update_allocation_limits();
2711 }
2713 bitMapLock()->unlock();
2714 releaseFreelistLocks();
2716 _between_prologue_and_epilogue = false; // ready for next cycle
2717 }
2719 void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) {
2720 collector()->gc_epilogue(full);
2722 // Also reset promotion tracking in par gc thread states.
2723 if (CollectedHeap::use_parallel_gc_threads()) {
2724 for (uint i = 0; i < ParallelGCThreads; i++) {
2725 _par_gc_thread_states[i]->promo.stopTrackingPromotions(i);
2726 }
2727 }
2728 }
2730 void ConcurrentMarkSweepGeneration::gc_epilogue_work(bool full) {
2731 assert(!incremental_collection_failed(), "Should have been cleared");
2732 cmsSpace()->setPreconsumptionDirtyCardClosure(NULL);
2733 cmsSpace()->gc_epilogue();
2734 // Print stat counters
2735 NOT_PRODUCT(
2736 assert(_numObjectsAllocated == 0, "check");
2737 assert(_numWordsAllocated == 0, "check");
2738 if (Verbose && PrintGC) {
2739 gclog_or_tty->print("Promoted "SIZE_FORMAT" objects, "
2740 SIZE_FORMAT" bytes",
2741 _numObjectsPromoted, _numWordsPromoted*sizeof(HeapWord));
2742 }
2743 _numObjectsPromoted = 0;
2744 _numWordsPromoted = 0;
2745 )
2747 if (PrintGC && Verbose) {
2748 // Call down the chain in contiguous_available needs the freelistLock
2749 // so print this out before releasing the freeListLock.
2750 gclog_or_tty->print(" Contiguous available "SIZE_FORMAT" bytes ",
2751 contiguous_available());
2752 }
2753 }
2755 #ifndef PRODUCT
2756 bool CMSCollector::have_cms_token() {
2757 Thread* thr = Thread::current();
2758 if (thr->is_VM_thread()) {
2759 return ConcurrentMarkSweepThread::vm_thread_has_cms_token();
2760 } else if (thr->is_ConcurrentGC_thread()) {
2761 return ConcurrentMarkSweepThread::cms_thread_has_cms_token();
2762 } else if (thr->is_GC_task_thread()) {
2763 return ConcurrentMarkSweepThread::vm_thread_has_cms_token() &&
2764 ParGCRareEvent_lock->owned_by_self();
2765 }
2766 return false;
2767 }
2768 #endif
2770 // Check reachability of the given heap address in CMS generation,
2771 // treating all other generations as roots.
2772 bool CMSCollector::is_cms_reachable(HeapWord* addr) {
2773 // We could "guarantee" below, rather than assert, but i'll
2774 // leave these as "asserts" so that an adventurous debugger
2775 // could try this in the product build provided some subset of
2776 // the conditions were met, provided they were intersted in the
2777 // results and knew that the computation below wouldn't interfere
2778 // with other concurrent computations mutating the structures
2779 // being read or written.
2780 assert(SafepointSynchronize::is_at_safepoint(),
2781 "Else mutations in object graph will make answer suspect");
2782 assert(have_cms_token(), "Should hold cms token");
2783 assert(haveFreelistLocks(), "must hold free list locks");
2784 assert_lock_strong(bitMapLock());
2786 // Clear the marking bit map array before starting, but, just
2787 // for kicks, first report if the given address is already marked
2788 gclog_or_tty->print_cr("Start: Address 0x%x is%s marked", addr,
2789 _markBitMap.isMarked(addr) ? "" : " not");
2791 if (verify_after_remark()) {
2792 MutexLockerEx x(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2793 bool result = verification_mark_bm()->isMarked(addr);
2794 gclog_or_tty->print_cr("TransitiveMark: Address 0x%x %s marked", addr,
2795 result ? "IS" : "is NOT");
2796 return result;
2797 } else {
2798 gclog_or_tty->print_cr("Could not compute result");
2799 return false;
2800 }
2801 }
2803 ////////////////////////////////////////////////////////
2804 // CMS Verification Support
2805 ////////////////////////////////////////////////////////
2806 // Following the remark phase, the following invariant
2807 // should hold -- each object in the CMS heap which is
2808 // marked in markBitMap() should be marked in the verification_mark_bm().
2810 class VerifyMarkedClosure: public BitMapClosure {
2811 CMSBitMap* _marks;
2812 bool _failed;
2814 public:
2815 VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {}
2817 bool do_bit(size_t offset) {
2818 HeapWord* addr = _marks->offsetToHeapWord(offset);
2819 if (!_marks->isMarked(addr)) {
2820 oop(addr)->print_on(gclog_or_tty);
2821 gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr);
2822 _failed = true;
2823 }
2824 return true;
2825 }
2827 bool failed() { return _failed; }
2828 };
2830 bool CMSCollector::verify_after_remark() {
2831 gclog_or_tty->print(" [Verifying CMS Marking... ");
2832 MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2833 static bool init = false;
2835 assert(SafepointSynchronize::is_at_safepoint(),
2836 "Else mutations in object graph will make answer suspect");
2837 assert(have_cms_token(),
2838 "Else there may be mutual interference in use of "
2839 " verification data structures");
2840 assert(_collectorState > Marking && _collectorState <= Sweeping,
2841 "Else marking info checked here may be obsolete");
2842 assert(haveFreelistLocks(), "must hold free list locks");
2843 assert_lock_strong(bitMapLock());
2846 // Allocate marking bit map if not already allocated
2847 if (!init) { // first time
2848 if (!verification_mark_bm()->allocate(_span)) {
2849 return false;
2850 }
2851 init = true;
2852 }
2854 assert(verification_mark_stack()->isEmpty(), "Should be empty");
2856 // Turn off refs discovery -- so we will be tracing through refs.
2857 // This is as intended, because by this time
2858 // GC must already have cleared any refs that need to be cleared,
2859 // and traced those that need to be marked; moreover,
2860 // the marking done here is not going to intefere in any
2861 // way with the marking information used by GC.
2862 NoRefDiscovery no_discovery(ref_processor());
2864 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
2866 // Clear any marks from a previous round
2867 verification_mark_bm()->clear_all();
2868 assert(verification_mark_stack()->isEmpty(), "markStack should be empty");
2869 verify_work_stacks_empty();
2871 GenCollectedHeap* gch = GenCollectedHeap::heap();
2872 gch->ensure_parsability(false); // fill TLABs, but no need to retire them
2873 // Update the saved marks which may affect the root scans.
2874 gch->save_marks();
2876 if (CMSRemarkVerifyVariant == 1) {
2877 // In this first variant of verification, we complete
2878 // all marking, then check if the new marks-verctor is
2879 // a subset of the CMS marks-vector.
2880 verify_after_remark_work_1();
2881 } else if (CMSRemarkVerifyVariant == 2) {
2882 // In this second variant of verification, we flag an error
2883 // (i.e. an object reachable in the new marks-vector not reachable
2884 // in the CMS marks-vector) immediately, also indicating the
2885 // identify of an object (A) that references the unmarked object (B) --
2886 // presumably, a mutation to A failed to be picked up by preclean/remark?
2887 verify_after_remark_work_2();
2888 } else {
2889 warning("Unrecognized value %d for CMSRemarkVerifyVariant",
2890 CMSRemarkVerifyVariant);
2891 }
2892 gclog_or_tty->print(" done] ");
2893 return true;
2894 }
2896 void CMSCollector::verify_after_remark_work_1() {
2897 ResourceMark rm;
2898 HandleMark hm;
2899 GenCollectedHeap* gch = GenCollectedHeap::heap();
2901 // Mark from roots one level into CMS
2902 MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
2903 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2905 gch->gen_process_strong_roots(_cmsGen->level(),
2906 true, // younger gens are roots
2907 true, // activate StrongRootsScope
2908 true, // collecting perm gen
2909 SharedHeap::ScanningOption(roots_scanning_options()),
2910 ¬Older,
2911 true, // walk code active on stacks
2912 NULL);
2914 // Now mark from the roots
2915 assert(_revisitStack.isEmpty(), "Should be empty");
2916 MarkFromRootsClosure markFromRootsClosure(this, _span,
2917 verification_mark_bm(), verification_mark_stack(), &_revisitStack,
2918 false /* don't yield */, true /* verifying */);
2919 assert(_restart_addr == NULL, "Expected pre-condition");
2920 verification_mark_bm()->iterate(&markFromRootsClosure);
2921 while (_restart_addr != NULL) {
2922 // Deal with stack overflow: by restarting at the indicated
2923 // address.
2924 HeapWord* ra = _restart_addr;
2925 markFromRootsClosure.reset(ra);
2926 _restart_addr = NULL;
2927 verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2928 }
2929 assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2930 verify_work_stacks_empty();
2931 // Should reset the revisit stack above, since no class tree
2932 // surgery is forthcoming.
2933 _revisitStack.reset(); // throwing away all contents
2935 // Marking completed -- now verify that each bit marked in
2936 // verification_mark_bm() is also marked in markBitMap(); flag all
2937 // errors by printing corresponding objects.
2938 VerifyMarkedClosure vcl(markBitMap());
2939 verification_mark_bm()->iterate(&vcl);
2940 if (vcl.failed()) {
2941 gclog_or_tty->print("Verification failed");
2942 Universe::heap()->print_on(gclog_or_tty);
2943 fatal("CMS: failed marking verification after remark");
2944 }
2945 }
2947 void CMSCollector::verify_after_remark_work_2() {
2948 ResourceMark rm;
2949 HandleMark hm;
2950 GenCollectedHeap* gch = GenCollectedHeap::heap();
2952 // Mark from roots one level into CMS
2953 MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
2954 markBitMap());
2955 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2956 gch->gen_process_strong_roots(_cmsGen->level(),
2957 true, // younger gens are roots
2958 true, // activate StrongRootsScope
2959 true, // collecting perm gen
2960 SharedHeap::ScanningOption(roots_scanning_options()),
2961 ¬Older,
2962 true, // walk code active on stacks
2963 NULL);
2965 // Now mark from the roots
2966 assert(_revisitStack.isEmpty(), "Should be empty");
2967 MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
2968 verification_mark_bm(), markBitMap(), verification_mark_stack());
2969 assert(_restart_addr == NULL, "Expected pre-condition");
2970 verification_mark_bm()->iterate(&markFromRootsClosure);
2971 while (_restart_addr != NULL) {
2972 // Deal with stack overflow: by restarting at the indicated
2973 // address.
2974 HeapWord* ra = _restart_addr;
2975 markFromRootsClosure.reset(ra);
2976 _restart_addr = NULL;
2977 verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2978 }
2979 assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2980 verify_work_stacks_empty();
2981 // Should reset the revisit stack above, since no class tree
2982 // surgery is forthcoming.
2983 _revisitStack.reset(); // throwing away all contents
2985 // Marking completed -- now verify that each bit marked in
2986 // verification_mark_bm() is also marked in markBitMap(); flag all
2987 // errors by printing corresponding objects.
2988 VerifyMarkedClosure vcl(markBitMap());
2989 verification_mark_bm()->iterate(&vcl);
2990 assert(!vcl.failed(), "Else verification above should not have succeeded");
2991 }
2993 void ConcurrentMarkSweepGeneration::save_marks() {
2994 // delegate to CMS space
2995 cmsSpace()->save_marks();
2996 for (uint i = 0; i < ParallelGCThreads; i++) {
2997 _par_gc_thread_states[i]->promo.startTrackingPromotions();
2998 }
2999 }
3001 bool ConcurrentMarkSweepGeneration::no_allocs_since_save_marks() {
3002 return cmsSpace()->no_allocs_since_save_marks();
3003 }
3005 #define CMS_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
3006 \
3007 void ConcurrentMarkSweepGeneration:: \
3008 oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
3009 cl->set_generation(this); \
3010 cmsSpace()->oop_since_save_marks_iterate##nv_suffix(cl); \
3011 cl->reset_generation(); \
3012 save_marks(); \
3013 }
3015 ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DEFN)
3017 void
3018 ConcurrentMarkSweepGeneration::object_iterate_since_last_GC(ObjectClosure* blk)
3019 {
3020 // Not currently implemented; need to do the following. -- ysr.
3021 // dld -- I think that is used for some sort of allocation profiler. So it
3022 // really means the objects allocated by the mutator since the last
3023 // GC. We could potentially implement this cheaply by recording only
3024 // the direct allocations in a side data structure.
3025 //
3026 // I think we probably ought not to be required to support these
3027 // iterations at any arbitrary point; I think there ought to be some
3028 // call to enable/disable allocation profiling in a generation/space,
3029 // and the iterator ought to return the objects allocated in the
3030 // gen/space since the enable call, or the last iterator call (which
3031 // will probably be at a GC.) That way, for gens like CM&S that would
3032 // require some extra data structure to support this, we only pay the
3033 // cost when it's in use...
3034 cmsSpace()->object_iterate_since_last_GC(blk);
3035 }
3037 void
3038 ConcurrentMarkSweepGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
3039 cl->set_generation(this);
3040 younger_refs_in_space_iterate(_cmsSpace, cl);
3041 cl->reset_generation();
3042 }
3044 void
3045 ConcurrentMarkSweepGeneration::oop_iterate(MemRegion mr, OopClosure* cl) {
3046 if (freelistLock()->owned_by_self()) {
3047 Generation::oop_iterate(mr, cl);
3048 } else {
3049 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3050 Generation::oop_iterate(mr, cl);
3051 }
3052 }
3054 void
3055 ConcurrentMarkSweepGeneration::oop_iterate(OopClosure* cl) {
3056 if (freelistLock()->owned_by_self()) {
3057 Generation::oop_iterate(cl);
3058 } else {
3059 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3060 Generation::oop_iterate(cl);
3061 }
3062 }
3064 void
3065 ConcurrentMarkSweepGeneration::object_iterate(ObjectClosure* cl) {
3066 if (freelistLock()->owned_by_self()) {
3067 Generation::object_iterate(cl);
3068 } else {
3069 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3070 Generation::object_iterate(cl);
3071 }
3072 }
3074 void
3075 ConcurrentMarkSweepGeneration::safe_object_iterate(ObjectClosure* cl) {
3076 if (freelistLock()->owned_by_self()) {
3077 Generation::safe_object_iterate(cl);
3078 } else {
3079 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3080 Generation::safe_object_iterate(cl);
3081 }
3082 }
3084 void
3085 ConcurrentMarkSweepGeneration::pre_adjust_pointers() {
3086 }
3088 void
3089 ConcurrentMarkSweepGeneration::post_compact() {
3090 }
3092 void
3093 ConcurrentMarkSweepGeneration::prepare_for_verify() {
3094 // Fix the linear allocation blocks to look like free blocks.
3096 // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
3097 // are not called when the heap is verified during universe initialization and
3098 // at vm shutdown.
3099 if (freelistLock()->owned_by_self()) {
3100 cmsSpace()->prepare_for_verify();
3101 } else {
3102 MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
3103 cmsSpace()->prepare_for_verify();
3104 }
3105 }
3107 void
3108 ConcurrentMarkSweepGeneration::verify(bool allow_dirty /* ignored */) {
3109 // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
3110 // are not called when the heap is verified during universe initialization and
3111 // at vm shutdown.
3112 if (freelistLock()->owned_by_self()) {
3113 cmsSpace()->verify(false /* ignored */);
3114 } else {
3115 MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
3116 cmsSpace()->verify(false /* ignored */);
3117 }
3118 }
3120 void CMSCollector::verify(bool allow_dirty /* ignored */) {
3121 _cmsGen->verify(allow_dirty);
3122 _permGen->verify(allow_dirty);
3123 }
3125 #ifndef PRODUCT
3126 bool CMSCollector::overflow_list_is_empty() const {
3127 assert(_num_par_pushes >= 0, "Inconsistency");
3128 if (_overflow_list == NULL) {
3129 assert(_num_par_pushes == 0, "Inconsistency");
3130 }
3131 return _overflow_list == NULL;
3132 }
3134 // The methods verify_work_stacks_empty() and verify_overflow_empty()
3135 // merely consolidate assertion checks that appear to occur together frequently.
3136 void CMSCollector::verify_work_stacks_empty() const {
3137 assert(_markStack.isEmpty(), "Marking stack should be empty");
3138 assert(overflow_list_is_empty(), "Overflow list should be empty");
3139 }
3141 void CMSCollector::verify_overflow_empty() const {
3142 assert(overflow_list_is_empty(), "Overflow list should be empty");
3143 assert(no_preserved_marks(), "No preserved marks");
3144 }
3145 #endif // PRODUCT
3147 // Decide if we want to enable class unloading as part of the
3148 // ensuing concurrent GC cycle. We will collect the perm gen and
3149 // unload classes if it's the case that:
3150 // (1) an explicit gc request has been made and the flag
3151 // ExplicitGCInvokesConcurrentAndUnloadsClasses is set, OR
3152 // (2) (a) class unloading is enabled at the command line, and
3153 // (b) (i) perm gen threshold has been crossed, or
3154 // (ii) old gen is getting really full, or
3155 // (iii) the previous N CMS collections did not collect the
3156 // perm gen
3157 // NOTE: Provided there is no change in the state of the heap between
3158 // calls to this method, it should have idempotent results. Moreover,
3159 // its results should be monotonically increasing (i.e. going from 0 to 1,
3160 // but not 1 to 0) between successive calls between which the heap was
3161 // not collected. For the implementation below, it must thus rely on
3162 // the property that concurrent_cycles_since_last_unload()
3163 // will not decrease unless a collection cycle happened and that
3164 // _permGen->should_concurrent_collect() and _cmsGen->is_too_full() are
3165 // themselves also monotonic in that sense. See check_monotonicity()
3166 // below.
3167 bool CMSCollector::update_should_unload_classes() {
3168 _should_unload_classes = false;
3169 // Condition 1 above
3170 if (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) {
3171 _should_unload_classes = true;
3172 } else if (CMSClassUnloadingEnabled) { // Condition 2.a above
3173 // Disjuncts 2.b.(i,ii,iii) above
3174 _should_unload_classes = (concurrent_cycles_since_last_unload() >=
3175 CMSClassUnloadingMaxInterval)
3176 || _permGen->should_concurrent_collect()
3177 || _cmsGen->is_too_full();
3178 }
3179 return _should_unload_classes;
3180 }
3182 bool ConcurrentMarkSweepGeneration::is_too_full() const {
3183 bool res = should_concurrent_collect();
3184 res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0);
3185 return res;
3186 }
3188 void CMSCollector::setup_cms_unloading_and_verification_state() {
3189 const bool should_verify = VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
3190 || VerifyBeforeExit;
3191 const int rso = SharedHeap::SO_Symbols | SharedHeap::SO_Strings
3192 | SharedHeap::SO_CodeCache;
3194 if (should_unload_classes()) { // Should unload classes this cycle
3195 remove_root_scanning_option(rso); // Shrink the root set appropriately
3196 set_verifying(should_verify); // Set verification state for this cycle
3197 return; // Nothing else needs to be done at this time
3198 }
3200 // Not unloading classes this cycle
3201 assert(!should_unload_classes(), "Inconsitency!");
3202 if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
3203 // We were not verifying, or we _were_ unloading classes in the last cycle,
3204 // AND some verification options are enabled this cycle; in this case,
3205 // we must make sure that the deadness map is allocated if not already so,
3206 // and cleared (if already allocated previously --
3207 // CMSBitMap::sizeInBits() is used to determine if it's allocated).
3208 if (perm_gen_verify_bit_map()->sizeInBits() == 0) {
3209 if (!perm_gen_verify_bit_map()->allocate(_permGen->reserved())) {
3210 warning("Failed to allocate permanent generation verification CMS Bit Map;\n"
3211 "permanent generation verification disabled");
3212 return; // Note that we leave verification disabled, so we'll retry this
3213 // allocation next cycle. We _could_ remember this failure
3214 // and skip further attempts and permanently disable verification
3215 // attempts if that is considered more desirable.
3216 }
3217 assert(perm_gen_verify_bit_map()->covers(_permGen->reserved()),
3218 "_perm_gen_ver_bit_map inconsistency?");
3219 } else {
3220 perm_gen_verify_bit_map()->clear_all();
3221 }
3222 // Include symbols, strings and code cache elements to prevent their resurrection.
3223 add_root_scanning_option(rso);
3224 set_verifying(true);
3225 } else if (verifying() && !should_verify) {
3226 // We were verifying, but some verification flags got disabled.
3227 set_verifying(false);
3228 // Exclude symbols, strings and code cache elements from root scanning to
3229 // reduce IM and RM pauses.
3230 remove_root_scanning_option(rso);
3231 }
3232 }
3235 #ifndef PRODUCT
3236 HeapWord* CMSCollector::block_start(const void* p) const {
3237 const HeapWord* addr = (HeapWord*)p;
3238 if (_span.contains(p)) {
3239 if (_cmsGen->cmsSpace()->is_in_reserved(addr)) {
3240 return _cmsGen->cmsSpace()->block_start(p);
3241 } else {
3242 assert(_permGen->cmsSpace()->is_in_reserved(addr),
3243 "Inconsistent _span?");
3244 return _permGen->cmsSpace()->block_start(p);
3245 }
3246 }
3247 return NULL;
3248 }
3249 #endif
3251 HeapWord*
3252 ConcurrentMarkSweepGeneration::expand_and_allocate(size_t word_size,
3253 bool tlab,
3254 bool parallel) {
3255 CMSSynchronousYieldRequest yr;
3256 assert(!tlab, "Can't deal with TLAB allocation");
3257 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3258 expand(word_size*HeapWordSize, MinHeapDeltaBytes,
3259 CMSExpansionCause::_satisfy_allocation);
3260 if (GCExpandToAllocateDelayMillis > 0) {
3261 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3262 }
3263 return have_lock_and_allocate(word_size, tlab);
3264 }
3266 // YSR: All of this generation expansion/shrinking stuff is an exact copy of
3267 // OneContigSpaceCardGeneration, which makes me wonder if we should move this
3268 // to CardGeneration and share it...
3269 bool ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes) {
3270 return CardGeneration::expand(bytes, expand_bytes);
3271 }
3273 void ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes,
3274 CMSExpansionCause::Cause cause)
3275 {
3277 bool success = expand(bytes, expand_bytes);
3279 // remember why we expanded; this information is used
3280 // by shouldConcurrentCollect() when making decisions on whether to start
3281 // a new CMS cycle.
3282 if (success) {
3283 set_expansion_cause(cause);
3284 if (PrintGCDetails && Verbose) {
3285 gclog_or_tty->print_cr("Expanded CMS gen for %s",
3286 CMSExpansionCause::to_string(cause));
3287 }
3288 }
3289 }
3291 HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) {
3292 HeapWord* res = NULL;
3293 MutexLocker x(ParGCRareEvent_lock);
3294 while (true) {
3295 // Expansion by some other thread might make alloc OK now:
3296 res = ps->lab.alloc(word_sz);
3297 if (res != NULL) return res;
3298 // If there's not enough expansion space available, give up.
3299 if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) {
3300 return NULL;
3301 }
3302 // Otherwise, we try expansion.
3303 expand(word_sz*HeapWordSize, MinHeapDeltaBytes,
3304 CMSExpansionCause::_allocate_par_lab);
3305 // Now go around the loop and try alloc again;
3306 // A competing par_promote might beat us to the expansion space,
3307 // so we may go around the loop again if promotion fails agaion.
3308 if (GCExpandToAllocateDelayMillis > 0) {
3309 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3310 }
3311 }
3312 }
3315 bool ConcurrentMarkSweepGeneration::expand_and_ensure_spooling_space(
3316 PromotionInfo* promo) {
3317 MutexLocker x(ParGCRareEvent_lock);
3318 size_t refill_size_bytes = promo->refillSize() * HeapWordSize;
3319 while (true) {
3320 // Expansion by some other thread might make alloc OK now:
3321 if (promo->ensure_spooling_space()) {
3322 assert(promo->has_spooling_space(),
3323 "Post-condition of successful ensure_spooling_space()");
3324 return true;
3325 }
3326 // If there's not enough expansion space available, give up.
3327 if (_virtual_space.uncommitted_size() < refill_size_bytes) {
3328 return false;
3329 }
3330 // Otherwise, we try expansion.
3331 expand(refill_size_bytes, MinHeapDeltaBytes,
3332 CMSExpansionCause::_allocate_par_spooling_space);
3333 // Now go around the loop and try alloc again;
3334 // A competing allocation might beat us to the expansion space,
3335 // so we may go around the loop again if allocation fails again.
3336 if (GCExpandToAllocateDelayMillis > 0) {
3337 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3338 }
3339 }
3340 }
3344 void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
3345 assert_locked_or_safepoint(Heap_lock);
3346 size_t size = ReservedSpace::page_align_size_down(bytes);
3347 if (size > 0) {
3348 shrink_by(size);
3349 }
3350 }
3352 bool ConcurrentMarkSweepGeneration::grow_by(size_t bytes) {
3353 assert_locked_or_safepoint(Heap_lock);
3354 bool result = _virtual_space.expand_by(bytes);
3355 if (result) {
3356 HeapWord* old_end = _cmsSpace->end();
3357 size_t new_word_size =
3358 heap_word_size(_virtual_space.committed_size());
3359 MemRegion mr(_cmsSpace->bottom(), new_word_size);
3360 _bts->resize(new_word_size); // resize the block offset shared array
3361 Universe::heap()->barrier_set()->resize_covered_region(mr);
3362 // Hmmmm... why doesn't CFLS::set_end verify locking?
3363 // This is quite ugly; FIX ME XXX
3364 _cmsSpace->assert_locked(freelistLock());
3365 _cmsSpace->set_end((HeapWord*)_virtual_space.high());
3367 // update the space and generation capacity counters
3368 if (UsePerfData) {
3369 _space_counters->update_capacity();
3370 _gen_counters->update_all();
3371 }
3373 if (Verbose && PrintGC) {
3374 size_t new_mem_size = _virtual_space.committed_size();
3375 size_t old_mem_size = new_mem_size - bytes;
3376 gclog_or_tty->print_cr("Expanding %s from %ldK by %ldK to %ldK",
3377 name(), old_mem_size/K, bytes/K, new_mem_size/K);
3378 }
3379 }
3380 return result;
3381 }
3383 bool ConcurrentMarkSweepGeneration::grow_to_reserved() {
3384 assert_locked_or_safepoint(Heap_lock);
3385 bool success = true;
3386 const size_t remaining_bytes = _virtual_space.uncommitted_size();
3387 if (remaining_bytes > 0) {
3388 success = grow_by(remaining_bytes);
3389 DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
3390 }
3391 return success;
3392 }
3394 void ConcurrentMarkSweepGeneration::shrink_by(size_t bytes) {
3395 assert_locked_or_safepoint(Heap_lock);
3396 assert_lock_strong(freelistLock());
3397 // XXX Fix when compaction is implemented.
3398 warning("Shrinking of CMS not yet implemented");
3399 return;
3400 }
3403 // Simple ctor/dtor wrapper for accounting & timer chores around concurrent
3404 // phases.
3405 class CMSPhaseAccounting: public StackObj {
3406 public:
3407 CMSPhaseAccounting(CMSCollector *collector,
3408 const char *phase,
3409 bool print_cr = true);
3410 ~CMSPhaseAccounting();
3412 private:
3413 CMSCollector *_collector;
3414 const char *_phase;
3415 elapsedTimer _wallclock;
3416 bool _print_cr;
3418 public:
3419 // Not MT-safe; so do not pass around these StackObj's
3420 // where they may be accessed by other threads.
3421 jlong wallclock_millis() {
3422 assert(_wallclock.is_active(), "Wall clock should not stop");
3423 _wallclock.stop(); // to record time
3424 jlong ret = _wallclock.milliseconds();
3425 _wallclock.start(); // restart
3426 return ret;
3427 }
3428 };
3430 CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
3431 const char *phase,
3432 bool print_cr) :
3433 _collector(collector), _phase(phase), _print_cr(print_cr) {
3435 if (PrintCMSStatistics != 0) {
3436 _collector->resetYields();
3437 }
3438 if (PrintGCDetails && PrintGCTimeStamps) {
3439 gclog_or_tty->date_stamp(PrintGCDateStamps);
3440 gclog_or_tty->stamp();
3441 gclog_or_tty->print_cr(": [%s-concurrent-%s-start]",
3442 _collector->cmsGen()->short_name(), _phase);
3443 }
3444 _collector->resetTimer();
3445 _wallclock.start();
3446 _collector->startTimer();
3447 }
3449 CMSPhaseAccounting::~CMSPhaseAccounting() {
3450 assert(_wallclock.is_active(), "Wall clock should not have stopped");
3451 _collector->stopTimer();
3452 _wallclock.stop();
3453 if (PrintGCDetails) {
3454 gclog_or_tty->date_stamp(PrintGCDateStamps);
3455 if (PrintGCTimeStamps) {
3456 gclog_or_tty->stamp();
3457 gclog_or_tty->print(": ");
3458 }
3459 gclog_or_tty->print("[%s-concurrent-%s: %3.3f/%3.3f secs]",
3460 _collector->cmsGen()->short_name(),
3461 _phase, _collector->timerValue(), _wallclock.seconds());
3462 if (_print_cr) {
3463 gclog_or_tty->print_cr("");
3464 }
3465 if (PrintCMSStatistics != 0) {
3466 gclog_or_tty->print_cr(" (CMS-concurrent-%s yielded %d times)", _phase,
3467 _collector->yields());
3468 }
3469 }
3470 }
3472 // CMS work
3474 // Checkpoint the roots into this generation from outside
3475 // this generation. [Note this initial checkpoint need only
3476 // be approximate -- we'll do a catch up phase subsequently.]
3477 void CMSCollector::checkpointRootsInitial(bool asynch) {
3478 assert(_collectorState == InitialMarking, "Wrong collector state");
3479 check_correct_thread_executing();
3480 TraceCMSMemoryManagerStats tms(_collectorState);
3481 ReferenceProcessor* rp = ref_processor();
3482 SpecializationStats::clear();
3483 assert(_restart_addr == NULL, "Control point invariant");
3484 if (asynch) {
3485 // acquire locks for subsequent manipulations
3486 MutexLockerEx x(bitMapLock(),
3487 Mutex::_no_safepoint_check_flag);
3488 checkpointRootsInitialWork(asynch);
3489 rp->verify_no_references_recorded();
3490 rp->enable_discovery(); // enable ("weak") refs discovery
3491 _collectorState = Marking;
3492 } else {
3493 // (Weak) Refs discovery: this is controlled from genCollectedHeap::do_collection
3494 // which recognizes if we are a CMS generation, and doesn't try to turn on
3495 // discovery; verify that they aren't meddling.
3496 assert(!rp->discovery_is_atomic(),
3497 "incorrect setting of discovery predicate");
3498 assert(!rp->discovery_enabled(), "genCollectedHeap shouldn't control "
3499 "ref discovery for this generation kind");
3500 // already have locks
3501 checkpointRootsInitialWork(asynch);
3502 rp->enable_discovery(); // now enable ("weak") refs discovery
3503 _collectorState = Marking;
3504 }
3505 SpecializationStats::print();
3506 }
3508 void CMSCollector::checkpointRootsInitialWork(bool asynch) {
3509 assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
3510 assert(_collectorState == InitialMarking, "just checking");
3512 // If there has not been a GC[n-1] since last GC[n] cycle completed,
3513 // precede our marking with a collection of all
3514 // younger generations to keep floating garbage to a minimum.
3515 // XXX: we won't do this for now -- it's an optimization to be done later.
3517 // already have locks
3518 assert_lock_strong(bitMapLock());
3519 assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");
3521 // Setup the verification and class unloading state for this
3522 // CMS collection cycle.
3523 setup_cms_unloading_and_verification_state();
3525 NOT_PRODUCT(TraceTime t("\ncheckpointRootsInitialWork",
3526 PrintGCDetails && Verbose, true, gclog_or_tty);)
3527 if (UseAdaptiveSizePolicy) {
3528 size_policy()->checkpoint_roots_initial_begin();
3529 }
3531 // Reset all the PLAB chunk arrays if necessary.
3532 if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
3533 reset_survivor_plab_arrays();
3534 }
3536 ResourceMark rm;
3537 HandleMark hm;
3539 FalseClosure falseClosure;
3540 // In the case of a synchronous collection, we will elide the
3541 // remark step, so it's important to catch all the nmethod oops
3542 // in this step.
3543 // The final 'true' flag to gen_process_strong_roots will ensure this.
3544 // If 'async' is true, we can relax the nmethod tracing.
3545 MarkRefsIntoClosure notOlder(_span, &_markBitMap);
3546 GenCollectedHeap* gch = GenCollectedHeap::heap();
3548 verify_work_stacks_empty();
3549 verify_overflow_empty();
3551 gch->ensure_parsability(false); // fill TLABs, but no need to retire them
3552 // Update the saved marks which may affect the root scans.
3553 gch->save_marks();
3555 // weak reference processing has not started yet.
3556 ref_processor()->set_enqueuing_is_done(false);
3558 {
3559 // This is not needed. DEBUG_ONLY(RememberKlassesChecker imx(true);)
3560 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
3561 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3562 gch->gen_process_strong_roots(_cmsGen->level(),
3563 true, // younger gens are roots
3564 true, // activate StrongRootsScope
3565 true, // collecting perm gen
3566 SharedHeap::ScanningOption(roots_scanning_options()),
3567 ¬Older,
3568 true, // walk all of code cache if (so & SO_CodeCache)
3569 NULL);
3570 }
3572 // Clear mod-union table; it will be dirtied in the prologue of
3573 // CMS generation per each younger generation collection.
3575 assert(_modUnionTable.isAllClear(),
3576 "Was cleared in most recent final checkpoint phase"
3577 " or no bits are set in the gc_prologue before the start of the next "
3578 "subsequent marking phase.");
3580 // Temporarily disabled, since pre/post-consumption closures don't
3581 // care about precleaned cards
3582 #if 0
3583 {
3584 MemRegion mr = MemRegion((HeapWord*)_virtual_space.low(),
3585 (HeapWord*)_virtual_space.high());
3586 _ct->ct_bs()->preclean_dirty_cards(mr);
3587 }
3588 #endif
3590 // Save the end of the used_region of the constituent generations
3591 // to be used to limit the extent of sweep in each generation.
3592 save_sweep_limits();
3593 if (UseAdaptiveSizePolicy) {
3594 size_policy()->checkpoint_roots_initial_end(gch->gc_cause());
3595 }
3596 verify_overflow_empty();
3597 }
3599 bool CMSCollector::markFromRoots(bool asynch) {
3600 // we might be tempted to assert that:
3601 // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
3602 // "inconsistent argument?");
3603 // However that wouldn't be right, because it's possible that
3604 // a safepoint is indeed in progress as a younger generation
3605 // stop-the-world GC happens even as we mark in this generation.
3606 assert(_collectorState == Marking, "inconsistent state?");
3607 check_correct_thread_executing();
3608 verify_overflow_empty();
3610 bool res;
3611 if (asynch) {
3613 // Start the timers for adaptive size policy for the concurrent phases
3614 // Do it here so that the foreground MS can use the concurrent
3615 // timer since a foreground MS might has the sweep done concurrently
3616 // or STW.
3617 if (UseAdaptiveSizePolicy) {
3618 size_policy()->concurrent_marking_begin();
3619 }
3621 // Weak ref discovery note: We may be discovering weak
3622 // refs in this generation concurrent (but interleaved) with
3623 // weak ref discovery by a younger generation collector.
3625 CMSTokenSyncWithLocks ts(true, bitMapLock());
3626 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
3627 CMSPhaseAccounting pa(this, "mark", !PrintGCDetails);
3628 res = markFromRootsWork(asynch);
3629 if (res) {
3630 _collectorState = Precleaning;
3631 } else { // We failed and a foreground collection wants to take over
3632 assert(_foregroundGCIsActive, "internal state inconsistency");
3633 assert(_restart_addr == NULL, "foreground will restart from scratch");
3634 if (PrintGCDetails) {
3635 gclog_or_tty->print_cr("bailing out to foreground collection");
3636 }
3637 }
3638 if (UseAdaptiveSizePolicy) {
3639 size_policy()->concurrent_marking_end();
3640 }
3641 } else {
3642 assert(SafepointSynchronize::is_at_safepoint(),
3643 "inconsistent with asynch == false");
3644 if (UseAdaptiveSizePolicy) {
3645 size_policy()->ms_collection_marking_begin();
3646 }
3647 // already have locks
3648 res = markFromRootsWork(asynch);
3649 _collectorState = FinalMarking;
3650 if (UseAdaptiveSizePolicy) {
3651 GenCollectedHeap* gch = GenCollectedHeap::heap();
3652 size_policy()->ms_collection_marking_end(gch->gc_cause());
3653 }
3654 }
3655 verify_overflow_empty();
3656 return res;
3657 }
3659 bool CMSCollector::markFromRootsWork(bool asynch) {
3660 // iterate over marked bits in bit map, doing a full scan and mark
3661 // from these roots using the following algorithm:
3662 // . if oop is to the right of the current scan pointer,
3663 // mark corresponding bit (we'll process it later)
3664 // . else (oop is to left of current scan pointer)
3665 // push oop on marking stack
3666 // . drain the marking stack
3668 // Note that when we do a marking step we need to hold the
3669 // bit map lock -- recall that direct allocation (by mutators)
3670 // and promotion (by younger generation collectors) is also
3671 // marking the bit map. [the so-called allocate live policy.]
3672 // Because the implementation of bit map marking is not
3673 // robust wrt simultaneous marking of bits in the same word,
3674 // we need to make sure that there is no such interference
3675 // between concurrent such updates.
3677 // already have locks
3678 assert_lock_strong(bitMapLock());
3680 // Clear the revisit stack, just in case there are any
3681 // obsolete contents from a short-circuited previous CMS cycle.
3682 _revisitStack.reset();
3683 verify_work_stacks_empty();
3684 verify_overflow_empty();
3685 assert(_revisitStack.isEmpty(), "tabula rasa");
3686 DEBUG_ONLY(RememberKlassesChecker cmx(should_unload_classes());)
3687 bool result = false;
3688 if (CMSConcurrentMTEnabled && ConcGCThreads > 0) {
3689 result = do_marking_mt(asynch);
3690 } else {
3691 result = do_marking_st(asynch);
3692 }
3693 return result;
3694 }
3696 // Forward decl
3697 class CMSConcMarkingTask;
3699 class CMSConcMarkingTerminator: public ParallelTaskTerminator {
3700 CMSCollector* _collector;
3701 CMSConcMarkingTask* _task;
3702 public:
3703 virtual void yield();
3705 // "n_threads" is the number of threads to be terminated.
3706 // "queue_set" is a set of work queues of other threads.
3707 // "collector" is the CMS collector associated with this task terminator.
3708 // "yield" indicates whether we need the gang as a whole to yield.
3709 CMSConcMarkingTerminator(int n_threads, TaskQueueSetSuper* queue_set, CMSCollector* collector) :
3710 ParallelTaskTerminator(n_threads, queue_set),
3711 _collector(collector) { }
3713 void set_task(CMSConcMarkingTask* task) {
3714 _task = task;
3715 }
3716 };
3718 class CMSConcMarkingTerminatorTerminator: public TerminatorTerminator {
3719 CMSConcMarkingTask* _task;
3720 public:
3721 bool should_exit_termination();
3722 void set_task(CMSConcMarkingTask* task) {
3723 _task = task;
3724 }
3725 };
3727 // MT Concurrent Marking Task
3728 class CMSConcMarkingTask: public YieldingFlexibleGangTask {
3729 CMSCollector* _collector;
3730 int _n_workers; // requested/desired # workers
3731 bool _asynch;
3732 bool _result;
3733 CompactibleFreeListSpace* _cms_space;
3734 CompactibleFreeListSpace* _perm_space;
3735 char _pad_front[64]; // padding to ...
3736 HeapWord* _global_finger; // ... avoid sharing cache line
3737 char _pad_back[64];
3738 HeapWord* _restart_addr;
3740 // Exposed here for yielding support
3741 Mutex* const _bit_map_lock;
3743 // The per thread work queues, available here for stealing
3744 OopTaskQueueSet* _task_queues;
3746 // Termination (and yielding) support
3747 CMSConcMarkingTerminator _term;
3748 CMSConcMarkingTerminatorTerminator _term_term;
3750 public:
3751 CMSConcMarkingTask(CMSCollector* collector,
3752 CompactibleFreeListSpace* cms_space,
3753 CompactibleFreeListSpace* perm_space,
3754 bool asynch,
3755 YieldingFlexibleWorkGang* workers,
3756 OopTaskQueueSet* task_queues):
3757 YieldingFlexibleGangTask("Concurrent marking done multi-threaded"),
3758 _collector(collector),
3759 _cms_space(cms_space),
3760 _perm_space(perm_space),
3761 _asynch(asynch), _n_workers(0), _result(true),
3762 _task_queues(task_queues),
3763 _term(_n_workers, task_queues, _collector),
3764 _bit_map_lock(collector->bitMapLock())
3765 {
3766 _requested_size = _n_workers;
3767 _term.set_task(this);
3768 _term_term.set_task(this);
3769 assert(_cms_space->bottom() < _perm_space->bottom(),
3770 "Finger incorrectly initialized below");
3771 _restart_addr = _global_finger = _cms_space->bottom();
3772 }
3775 OopTaskQueueSet* task_queues() { return _task_queues; }
3777 OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
3779 HeapWord** global_finger_addr() { return &_global_finger; }
3781 CMSConcMarkingTerminator* terminator() { return &_term; }
3783 virtual void set_for_termination(int active_workers) {
3784 terminator()->reset_for_reuse(active_workers);
3785 }
3787 void work(int i);
3788 bool should_yield() {
3789 return ConcurrentMarkSweepThread::should_yield()
3790 && !_collector->foregroundGCIsActive()
3791 && _asynch;
3792 }
3794 virtual void coordinator_yield(); // stuff done by coordinator
3795 bool result() { return _result; }
3797 void reset(HeapWord* ra) {
3798 assert(_global_finger >= _cms_space->end(), "Postcondition of ::work(i)");
3799 assert(_global_finger >= _perm_space->end(), "Postcondition of ::work(i)");
3800 assert(ra < _perm_space->end(), "ra too large");
3801 _restart_addr = _global_finger = ra;
3802 _term.reset_for_reuse();
3803 }
3805 static bool get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3806 OopTaskQueue* work_q);
3808 private:
3809 void do_scan_and_mark(int i, CompactibleFreeListSpace* sp);
3810 void do_work_steal(int i);
3811 void bump_global_finger(HeapWord* f);
3812 };
3814 bool CMSConcMarkingTerminatorTerminator::should_exit_termination() {
3815 assert(_task != NULL, "Error");
3816 return _task->yielding();
3817 // Note that we do not need the disjunct || _task->should_yield() above
3818 // because we want terminating threads to yield only if the task
3819 // is already in the midst of yielding, which happens only after at least one
3820 // thread has yielded.
3821 }
3823 void CMSConcMarkingTerminator::yield() {
3824 if (_task->should_yield()) {
3825 _task->yield();
3826 } else {
3827 ParallelTaskTerminator::yield();
3828 }
3829 }
3831 ////////////////////////////////////////////////////////////////
3832 // Concurrent Marking Algorithm Sketch
3833 ////////////////////////////////////////////////////////////////
3834 // Until all tasks exhausted (both spaces):
3835 // -- claim next available chunk
3836 // -- bump global finger via CAS
3837 // -- find first object that starts in this chunk
3838 // and start scanning bitmap from that position
3839 // -- scan marked objects for oops
3840 // -- CAS-mark target, and if successful:
3841 // . if target oop is above global finger (volatile read)
3842 // nothing to do
3843 // . if target oop is in chunk and above local finger
3844 // then nothing to do
3845 // . else push on work-queue
3846 // -- Deal with possible overflow issues:
3847 // . local work-queue overflow causes stuff to be pushed on
3848 // global (common) overflow queue
3849 // . always first empty local work queue
3850 // . then get a batch of oops from global work queue if any
3851 // . then do work stealing
3852 // -- When all tasks claimed (both spaces)
3853 // and local work queue empty,
3854 // then in a loop do:
3855 // . check global overflow stack; steal a batch of oops and trace
3856 // . try to steal from other threads oif GOS is empty
3857 // . if neither is available, offer termination
3858 // -- Terminate and return result
3859 //
3860 void CMSConcMarkingTask::work(int i) {
3861 elapsedTimer _timer;
3862 ResourceMark rm;
3863 HandleMark hm;
3865 DEBUG_ONLY(_collector->verify_overflow_empty();)
3867 // Before we begin work, our work queue should be empty
3868 assert(work_queue(i)->size() == 0, "Expected to be empty");
3869 // Scan the bitmap covering _cms_space, tracing through grey objects.
3870 _timer.start();
3871 do_scan_and_mark(i, _cms_space);
3872 _timer.stop();
3873 if (PrintCMSStatistics != 0) {
3874 gclog_or_tty->print_cr("Finished cms space scanning in %dth thread: %3.3f sec",
3875 i, _timer.seconds()); // XXX: need xxx/xxx type of notation, two timers
3876 }
3878 // ... do the same for the _perm_space
3879 _timer.reset();
3880 _timer.start();
3881 do_scan_and_mark(i, _perm_space);
3882 _timer.stop();
3883 if (PrintCMSStatistics != 0) {
3884 gclog_or_tty->print_cr("Finished perm space scanning in %dth thread: %3.3f sec",
3885 i, _timer.seconds()); // XXX: need xxx/xxx type of notation, two timers
3886 }
3888 // ... do work stealing
3889 _timer.reset();
3890 _timer.start();
3891 do_work_steal(i);
3892 _timer.stop();
3893 if (PrintCMSStatistics != 0) {
3894 gclog_or_tty->print_cr("Finished work stealing in %dth thread: %3.3f sec",
3895 i, _timer.seconds()); // XXX: need xxx/xxx type of notation, two timers
3896 }
3897 assert(_collector->_markStack.isEmpty(), "Should have been emptied");
3898 assert(work_queue(i)->size() == 0, "Should have been emptied");
3899 // Note that under the current task protocol, the
3900 // following assertion is true even of the spaces
3901 // expanded since the completion of the concurrent
3902 // marking. XXX This will likely change under a strict
3903 // ABORT semantics.
3904 assert(_global_finger > _cms_space->end() &&
3905 _global_finger >= _perm_space->end(),
3906 "All tasks have been completed");
3907 DEBUG_ONLY(_collector->verify_overflow_empty();)
3908 }
3910 void CMSConcMarkingTask::bump_global_finger(HeapWord* f) {
3911 HeapWord* read = _global_finger;
3912 HeapWord* cur = read;
3913 while (f > read) {
3914 cur = read;
3915 read = (HeapWord*) Atomic::cmpxchg_ptr(f, &_global_finger, cur);
3916 if (cur == read) {
3917 // our cas succeeded
3918 assert(_global_finger >= f, "protocol consistency");
3919 break;
3920 }
3921 }
3922 }
3924 // This is really inefficient, and should be redone by
3925 // using (not yet available) block-read and -write interfaces to the
3926 // stack and the work_queue. XXX FIX ME !!!
3927 bool CMSConcMarkingTask::get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3928 OopTaskQueue* work_q) {
3929 // Fast lock-free check
3930 if (ovflw_stk->length() == 0) {
3931 return false;
3932 }
3933 assert(work_q->size() == 0, "Shouldn't steal");
3934 MutexLockerEx ml(ovflw_stk->par_lock(),
3935 Mutex::_no_safepoint_check_flag);
3936 // Grab up to 1/4 the size of the work queue
3937 size_t num = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
3938 (size_t)ParGCDesiredObjsFromOverflowList);
3939 num = MIN2(num, ovflw_stk->length());
3940 for (int i = (int) num; i > 0; i--) {
3941 oop cur = ovflw_stk->pop();
3942 assert(cur != NULL, "Counted wrong?");
3943 work_q->push(cur);
3944 }
3945 return num > 0;
3946 }
3948 void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
3949 SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
3950 int n_tasks = pst->n_tasks();
3951 // We allow that there may be no tasks to do here because
3952 // we are restarting after a stack overflow.
3953 assert(pst->valid() || n_tasks == 0, "Uninitialized use?");
3954 int nth_task = 0;
3956 HeapWord* aligned_start = sp->bottom();
3957 if (sp->used_region().contains(_restart_addr)) {
3958 // Align down to a card boundary for the start of 0th task
3959 // for this space.
3960 aligned_start =
3961 (HeapWord*)align_size_down((uintptr_t)_restart_addr,
3962 CardTableModRefBS::card_size);
3963 }
3965 size_t chunk_size = sp->marking_task_size();
3966 while (!pst->is_task_claimed(/* reference */ nth_task)) {
3967 // Having claimed the nth task in this space,
3968 // compute the chunk that it corresponds to:
3969 MemRegion span = MemRegion(aligned_start + nth_task*chunk_size,
3970 aligned_start + (nth_task+1)*chunk_size);
3971 // Try and bump the global finger via a CAS;
3972 // note that we need to do the global finger bump
3973 // _before_ taking the intersection below, because
3974 // the task corresponding to that region will be
3975 // deemed done even if the used_region() expands
3976 // because of allocation -- as it almost certainly will
3977 // during start-up while the threads yield in the
3978 // closure below.
3979 HeapWord* finger = span.end();
3980 bump_global_finger(finger); // atomically
3981 // There are null tasks here corresponding to chunks
3982 // beyond the "top" address of the space.
3983 span = span.intersection(sp->used_region());
3984 if (!span.is_empty()) { // Non-null task
3985 HeapWord* prev_obj;
3986 assert(!span.contains(_restart_addr) || nth_task == 0,
3987 "Inconsistency");
3988 if (nth_task == 0) {
3989 // For the 0th task, we'll not need to compute a block_start.
3990 if (span.contains(_restart_addr)) {
3991 // In the case of a restart because of stack overflow,
3992 // we might additionally skip a chunk prefix.
3993 prev_obj = _restart_addr;
3994 } else {
3995 prev_obj = span.start();
3996 }
3997 } else {
3998 // We want to skip the first object because
3999 // the protocol is to scan any object in its entirety
4000 // that _starts_ in this span; a fortiori, any
4001 // object starting in an earlier span is scanned
4002 // as part of an earlier claimed task.
4003 // Below we use the "careful" version of block_start
4004 // so we do not try to navigate uninitialized objects.
4005 prev_obj = sp->block_start_careful(span.start());
4006 // Below we use a variant of block_size that uses the
4007 // Printezis bits to avoid waiting for allocated
4008 // objects to become initialized/parsable.
4009 while (prev_obj < span.start()) {
4010 size_t sz = sp->block_size_no_stall(prev_obj, _collector);
4011 if (sz > 0) {
4012 prev_obj += sz;
4013 } else {
4014 // In this case we may end up doing a bit of redundant
4015 // scanning, but that appears unavoidable, short of
4016 // locking the free list locks; see bug 6324141.
4017 break;
4018 }
4019 }
4020 }
4021 if (prev_obj < span.end()) {
4022 MemRegion my_span = MemRegion(prev_obj, span.end());
4023 // Do the marking work within a non-empty span --
4024 // the last argument to the constructor indicates whether the
4025 // iteration should be incremental with periodic yields.
4026 Par_MarkFromRootsClosure cl(this, _collector, my_span,
4027 &_collector->_markBitMap,
4028 work_queue(i),
4029 &_collector->_markStack,
4030 &_collector->_revisitStack,
4031 _asynch);
4032 _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end());
4033 } // else nothing to do for this task
4034 } // else nothing to do for this task
4035 }
4036 // We'd be tempted to assert here that since there are no
4037 // more tasks left to claim in this space, the global_finger
4038 // must exceed space->top() and a fortiori space->end(). However,
4039 // that would not quite be correct because the bumping of
4040 // global_finger occurs strictly after the claiming of a task,
4041 // so by the time we reach here the global finger may not yet
4042 // have been bumped up by the thread that claimed the last
4043 // task.
4044 pst->all_tasks_completed();
4045 }
4047 class Par_ConcMarkingClosure: public Par_KlassRememberingOopClosure {
4048 private:
4049 CMSConcMarkingTask* _task;
4050 MemRegion _span;
4051 CMSBitMap* _bit_map;
4052 CMSMarkStack* _overflow_stack;
4053 OopTaskQueue* _work_queue;
4054 protected:
4055 DO_OOP_WORK_DEFN
4056 public:
4057 Par_ConcMarkingClosure(CMSCollector* collector, CMSConcMarkingTask* task, OopTaskQueue* work_queue,
4058 CMSBitMap* bit_map, CMSMarkStack* overflow_stack,
4059 CMSMarkStack* revisit_stack):
4060 Par_KlassRememberingOopClosure(collector, NULL, revisit_stack),
4061 _task(task),
4062 _span(collector->_span),
4063 _work_queue(work_queue),
4064 _bit_map(bit_map),
4065 _overflow_stack(overflow_stack)
4066 { }
4067 virtual void do_oop(oop* p);
4068 virtual void do_oop(narrowOop* p);
4069 void trim_queue(size_t max);
4070 void handle_stack_overflow(HeapWord* lost);
4071 void do_yield_check() {
4072 if (_task->should_yield()) {
4073 _task->yield();
4074 }
4075 }
4076 };
4078 // Grey object scanning during work stealing phase --
4079 // the salient assumption here is that any references
4080 // that are in these stolen objects being scanned must
4081 // already have been initialized (else they would not have
4082 // been published), so we do not need to check for
4083 // uninitialized objects before pushing here.
4084 void Par_ConcMarkingClosure::do_oop(oop obj) {
4085 assert(obj->is_oop_or_null(true), "expected an oop or NULL");
4086 HeapWord* addr = (HeapWord*)obj;
4087 // Check if oop points into the CMS generation
4088 // and is not marked
4089 if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
4090 // a white object ...
4091 // If we manage to "claim" the object, by being the
4092 // first thread to mark it, then we push it on our
4093 // marking stack
4094 if (_bit_map->par_mark(addr)) { // ... now grey
4095 // push on work queue (grey set)
4096 bool simulate_overflow = false;
4097 NOT_PRODUCT(
4098 if (CMSMarkStackOverflowALot &&
4099 _collector->simulate_overflow()) {
4100 // simulate a stack overflow
4101 simulate_overflow = true;
4102 }
4103 )
4104 if (simulate_overflow ||
4105 !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
4106 // stack overflow
4107 if (PrintCMSStatistics != 0) {
4108 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
4109 SIZE_FORMAT, _overflow_stack->capacity());
4110 }
4111 // We cannot assert that the overflow stack is full because
4112 // it may have been emptied since.
4113 assert(simulate_overflow ||
4114 _work_queue->size() == _work_queue->max_elems(),
4115 "Else push should have succeeded");
4116 handle_stack_overflow(addr);
4117 }
4118 } // Else, some other thread got there first
4119 do_yield_check();
4120 }
4121 }
4123 void Par_ConcMarkingClosure::do_oop(oop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
4124 void Par_ConcMarkingClosure::do_oop(narrowOop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
4126 void Par_ConcMarkingClosure::trim_queue(size_t max) {
4127 while (_work_queue->size() > max) {
4128 oop new_oop;
4129 if (_work_queue->pop_local(new_oop)) {
4130 assert(new_oop->is_oop(), "Should be an oop");
4131 assert(_bit_map->isMarked((HeapWord*)new_oop), "Grey object");
4132 assert(_span.contains((HeapWord*)new_oop), "Not in span");
4133 assert(new_oop->is_parsable(), "Should be parsable");
4134 new_oop->oop_iterate(this); // do_oop() above
4135 do_yield_check();
4136 }
4137 }
4138 }
4140 // Upon stack overflow, we discard (part of) the stack,
4141 // remembering the least address amongst those discarded
4142 // in CMSCollector's _restart_address.
4143 void Par_ConcMarkingClosure::handle_stack_overflow(HeapWord* lost) {
4144 // We need to do this under a mutex to prevent other
4145 // workers from interfering with the work done below.
4146 MutexLockerEx ml(_overflow_stack->par_lock(),
4147 Mutex::_no_safepoint_check_flag);
4148 // Remember the least grey address discarded
4149 HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
4150 _collector->lower_restart_addr(ra);
4151 _overflow_stack->reset(); // discard stack contents
4152 _overflow_stack->expand(); // expand the stack if possible
4153 }
4156 void CMSConcMarkingTask::do_work_steal(int i) {
4157 OopTaskQueue* work_q = work_queue(i);
4158 oop obj_to_scan;
4159 CMSBitMap* bm = &(_collector->_markBitMap);
4160 CMSMarkStack* ovflw = &(_collector->_markStack);
4161 CMSMarkStack* revisit = &(_collector->_revisitStack);
4162 int* seed = _collector->hash_seed(i);
4163 Par_ConcMarkingClosure cl(_collector, this, work_q, bm, ovflw, revisit);
4164 while (true) {
4165 cl.trim_queue(0);
4166 assert(work_q->size() == 0, "Should have been emptied above");
4167 if (get_work_from_overflow_stack(ovflw, work_q)) {
4168 // Can't assert below because the work obtained from the
4169 // overflow stack may already have been stolen from us.
4170 // assert(work_q->size() > 0, "Work from overflow stack");
4171 continue;
4172 } else if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
4173 assert(obj_to_scan->is_oop(), "Should be an oop");
4174 assert(bm->isMarked((HeapWord*)obj_to_scan), "Grey object");
4175 obj_to_scan->oop_iterate(&cl);
4176 } else if (terminator()->offer_termination(&_term_term)) {
4177 assert(work_q->size() == 0, "Impossible!");
4178 break;
4179 } else if (yielding() || should_yield()) {
4180 yield();
4181 }
4182 }
4183 }
4185 // This is run by the CMS (coordinator) thread.
4186 void CMSConcMarkingTask::coordinator_yield() {
4187 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
4188 "CMS thread should hold CMS token");
4189 DEBUG_ONLY(RememberKlassesChecker mux(false);)
4190 // First give up the locks, then yield, then re-lock
4191 // We should probably use a constructor/destructor idiom to
4192 // do this unlock/lock or modify the MutexUnlocker class to
4193 // serve our purpose. XXX
4194 assert_lock_strong(_bit_map_lock);
4195 _bit_map_lock->unlock();
4196 ConcurrentMarkSweepThread::desynchronize(true);
4197 ConcurrentMarkSweepThread::acknowledge_yield_request();
4198 _collector->stopTimer();
4199 if (PrintCMSStatistics != 0) {
4200 _collector->incrementYields();
4201 }
4202 _collector->icms_wait();
4204 // It is possible for whichever thread initiated the yield request
4205 // not to get a chance to wake up and take the bitmap lock between
4206 // this thread releasing it and reacquiring it. So, while the
4207 // should_yield() flag is on, let's sleep for a bit to give the
4208 // other thread a chance to wake up. The limit imposed on the number
4209 // of iterations is defensive, to avoid any unforseen circumstances
4210 // putting us into an infinite loop. Since it's always been this
4211 // (coordinator_yield()) method that was observed to cause the
4212 // problem, we are using a parameter (CMSCoordinatorYieldSleepCount)
4213 // which is by default non-zero. For the other seven methods that
4214 // also perform the yield operation, as are using a different
4215 // parameter (CMSYieldSleepCount) which is by default zero. This way we
4216 // can enable the sleeping for those methods too, if necessary.
4217 // See 6442774.
4218 //
4219 // We really need to reconsider the synchronization between the GC
4220 // thread and the yield-requesting threads in the future and we
4221 // should really use wait/notify, which is the recommended
4222 // way of doing this type of interaction. Additionally, we should
4223 // consolidate the eight methods that do the yield operation and they
4224 // are almost identical into one for better maintenability and
4225 // readability. See 6445193.
4226 //
4227 // Tony 2006.06.29
4228 for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
4229 ConcurrentMarkSweepThread::should_yield() &&
4230 !CMSCollector::foregroundGCIsActive(); ++i) {
4231 os::sleep(Thread::current(), 1, false);
4232 ConcurrentMarkSweepThread::acknowledge_yield_request();
4233 }
4235 ConcurrentMarkSweepThread::synchronize(true);
4236 _bit_map_lock->lock_without_safepoint_check();
4237 _collector->startTimer();
4238 }
4240 bool CMSCollector::do_marking_mt(bool asynch) {
4241 assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition");
4242 // In the future this would be determined ergonomically, based
4243 // on #cpu's, # active mutator threads (and load), and mutation rate.
4244 int num_workers = ConcGCThreads;
4246 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
4247 CompactibleFreeListSpace* perm_space = _permGen->cmsSpace();
4249 CMSConcMarkingTask tsk(this,
4250 cms_space,
4251 perm_space,
4252 asynch,
4253 conc_workers(),
4254 task_queues());
4256 // Since the actual number of workers we get may be different
4257 // from the number we requested above, do we need to do anything different
4258 // below? In particular, may be we need to subclass the SequantialSubTasksDone
4259 // class?? XXX
4260 cms_space ->initialize_sequential_subtasks_for_marking(num_workers);
4261 perm_space->initialize_sequential_subtasks_for_marking(num_workers);
4263 // Refs discovery is already non-atomic.
4264 assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic");
4265 // Mutate the Refs discovery so it is MT during the
4266 // multi-threaded marking phase.
4267 ReferenceProcessorMTMutator mt(ref_processor(), num_workers > 1);
4268 DEBUG_ONLY(RememberKlassesChecker cmx(should_unload_classes());)
4269 conc_workers()->start_task(&tsk);
4270 while (tsk.yielded()) {
4271 tsk.coordinator_yield();
4272 conc_workers()->continue_task(&tsk);
4273 }
4274 // If the task was aborted, _restart_addr will be non-NULL
4275 assert(tsk.completed() || _restart_addr != NULL, "Inconsistency");
4276 while (_restart_addr != NULL) {
4277 // XXX For now we do not make use of ABORTED state and have not
4278 // yet implemented the right abort semantics (even in the original
4279 // single-threaded CMS case). That needs some more investigation
4280 // and is deferred for now; see CR# TBF. 07252005YSR. XXX
4281 assert(!CMSAbortSemantics || tsk.aborted(), "Inconsistency");
4282 // If _restart_addr is non-NULL, a marking stack overflow
4283 // occurred; we need to do a fresh marking iteration from the
4284 // indicated restart address.
4285 if (_foregroundGCIsActive && asynch) {
4286 // We may be running into repeated stack overflows, having
4287 // reached the limit of the stack size, while making very
4288 // slow forward progress. It may be best to bail out and
4289 // let the foreground collector do its job.
4290 // Clear _restart_addr, so that foreground GC
4291 // works from scratch. This avoids the headache of
4292 // a "rescan" which would otherwise be needed because
4293 // of the dirty mod union table & card table.
4294 _restart_addr = NULL;
4295 return false;
4296 }
4297 // Adjust the task to restart from _restart_addr
4298 tsk.reset(_restart_addr);
4299 cms_space ->initialize_sequential_subtasks_for_marking(num_workers,
4300 _restart_addr);
4301 perm_space->initialize_sequential_subtasks_for_marking(num_workers,
4302 _restart_addr);
4303 _restart_addr = NULL;
4304 // Get the workers going again
4305 conc_workers()->start_task(&tsk);
4306 while (tsk.yielded()) {
4307 tsk.coordinator_yield();
4308 conc_workers()->continue_task(&tsk);
4309 }
4310 }
4311 assert(tsk.completed(), "Inconsistency");
4312 assert(tsk.result() == true, "Inconsistency");
4313 return true;
4314 }
4316 bool CMSCollector::do_marking_st(bool asynch) {
4317 ResourceMark rm;
4318 HandleMark hm;
4320 MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap,
4321 &_markStack, &_revisitStack, CMSYield && asynch);
4322 // the last argument to iterate indicates whether the iteration
4323 // should be incremental with periodic yields.
4324 _markBitMap.iterate(&markFromRootsClosure);
4325 // If _restart_addr is non-NULL, a marking stack overflow
4326 // occurred; we need to do a fresh iteration from the
4327 // indicated restart address.
4328 while (_restart_addr != NULL) {
4329 if (_foregroundGCIsActive && asynch) {
4330 // We may be running into repeated stack overflows, having
4331 // reached the limit of the stack size, while making very
4332 // slow forward progress. It may be best to bail out and
4333 // let the foreground collector do its job.
4334 // Clear _restart_addr, so that foreground GC
4335 // works from scratch. This avoids the headache of
4336 // a "rescan" which would otherwise be needed because
4337 // of the dirty mod union table & card table.
4338 _restart_addr = NULL;
4339 return false; // indicating failure to complete marking
4340 }
4341 // Deal with stack overflow:
4342 // we restart marking from _restart_addr
4343 HeapWord* ra = _restart_addr;
4344 markFromRootsClosure.reset(ra);
4345 _restart_addr = NULL;
4346 _markBitMap.iterate(&markFromRootsClosure, ra, _span.end());
4347 }
4348 return true;
4349 }
4351 void CMSCollector::preclean() {
4352 check_correct_thread_executing();
4353 assert(Thread::current()->is_ConcurrentGC_thread(), "Wrong thread");
4354 verify_work_stacks_empty();
4355 verify_overflow_empty();
4356 _abort_preclean = false;
4357 if (CMSPrecleaningEnabled) {
4358 // Precleaning is currently not MT but the reference processor
4359 // may be set for MT. Disable it temporarily here.
4360 ReferenceProcessor* rp = ref_processor();
4361 ReferenceProcessorMTProcMutator z(rp, false);
4362 _eden_chunk_index = 0;
4363 size_t used = get_eden_used();
4364 size_t capacity = get_eden_capacity();
4365 // Don't start sampling unless we will get sufficiently
4366 // many samples.
4367 if (used < (capacity/(CMSScheduleRemarkSamplingRatio * 100)
4368 * CMSScheduleRemarkEdenPenetration)) {
4369 _start_sampling = true;
4370 } else {
4371 _start_sampling = false;
4372 }
4373 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
4374 CMSPhaseAccounting pa(this, "preclean", !PrintGCDetails);
4375 preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
4376 }
4377 CMSTokenSync x(true); // is cms thread
4378 if (CMSPrecleaningEnabled) {
4379 sample_eden();
4380 _collectorState = AbortablePreclean;
4381 } else {
4382 _collectorState = FinalMarking;
4383 }
4384 verify_work_stacks_empty();
4385 verify_overflow_empty();
4386 }
4388 // Try and schedule the remark such that young gen
4389 // occupancy is CMSScheduleRemarkEdenPenetration %.
4390 void CMSCollector::abortable_preclean() {
4391 check_correct_thread_executing();
4392 assert(CMSPrecleaningEnabled, "Inconsistent control state");
4393 assert(_collectorState == AbortablePreclean, "Inconsistent control state");
4395 // If Eden's current occupancy is below this threshold,
4396 // immediately schedule the remark; else preclean
4397 // past the next scavenge in an effort to
4398 // schedule the pause as described avove. By choosing
4399 // CMSScheduleRemarkEdenSizeThreshold >= max eden size
4400 // we will never do an actual abortable preclean cycle.
4401 if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
4402 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
4403 CMSPhaseAccounting pa(this, "abortable-preclean", !PrintGCDetails);
4404 // We need more smarts in the abortable preclean
4405 // loop below to deal with cases where allocation
4406 // in young gen is very very slow, and our precleaning
4407 // is running a losing race against a horde of
4408 // mutators intent on flooding us with CMS updates
4409 // (dirty cards).
4410 // One, admittedly dumb, strategy is to give up
4411 // after a certain number of abortable precleaning loops
4412 // or after a certain maximum time. We want to make
4413 // this smarter in the next iteration.
4414 // XXX FIX ME!!! YSR
4415 size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0;
4416 while (!(should_abort_preclean() ||
4417 ConcurrentMarkSweepThread::should_terminate())) {
4418 workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2);
4419 cumworkdone += workdone;
4420 loops++;
4421 // Voluntarily terminate abortable preclean phase if we have
4422 // been at it for too long.
4423 if ((CMSMaxAbortablePrecleanLoops != 0) &&
4424 loops >= CMSMaxAbortablePrecleanLoops) {
4425 if (PrintGCDetails) {
4426 gclog_or_tty->print(" CMS: abort preclean due to loops ");
4427 }
4428 break;
4429 }
4430 if (pa.wallclock_millis() > CMSMaxAbortablePrecleanTime) {
4431 if (PrintGCDetails) {
4432 gclog_or_tty->print(" CMS: abort preclean due to time ");
4433 }
4434 break;
4435 }
4436 // If we are doing little work each iteration, we should
4437 // take a short break.
4438 if (workdone < CMSAbortablePrecleanMinWorkPerIteration) {
4439 // Sleep for some time, waiting for work to accumulate
4440 stopTimer();
4441 cmsThread()->wait_on_cms_lock(CMSAbortablePrecleanWaitMillis);
4442 startTimer();
4443 waited++;
4444 }
4445 }
4446 if (PrintCMSStatistics > 0) {
4447 gclog_or_tty->print(" [%d iterations, %d waits, %d cards)] ",
4448 loops, waited, cumworkdone);
4449 }
4450 }
4451 CMSTokenSync x(true); // is cms thread
4452 if (_collectorState != Idling) {
4453 assert(_collectorState == AbortablePreclean,
4454 "Spontaneous state transition?");
4455 _collectorState = FinalMarking;
4456 } // Else, a foreground collection completed this CMS cycle.
4457 return;
4458 }
4460 // Respond to an Eden sampling opportunity
4461 void CMSCollector::sample_eden() {
4462 // Make sure a young gc cannot sneak in between our
4463 // reading and recording of a sample.
4464 assert(Thread::current()->is_ConcurrentGC_thread(),
4465 "Only the cms thread may collect Eden samples");
4466 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
4467 "Should collect samples while holding CMS token");
4468 if (!_start_sampling) {
4469 return;
4470 }
4471 if (_eden_chunk_array) {
4472 if (_eden_chunk_index < _eden_chunk_capacity) {
4473 _eden_chunk_array[_eden_chunk_index] = *_top_addr; // take sample
4474 assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
4475 "Unexpected state of Eden");
4476 // We'd like to check that what we just sampled is an oop-start address;
4477 // however, we cannot do that here since the object may not yet have been
4478 // initialized. So we'll instead do the check when we _use_ this sample
4479 // later.
4480 if (_eden_chunk_index == 0 ||
4481 (pointer_delta(_eden_chunk_array[_eden_chunk_index],
4482 _eden_chunk_array[_eden_chunk_index-1])
4483 >= CMSSamplingGrain)) {
4484 _eden_chunk_index++; // commit sample
4485 }
4486 }
4487 }
4488 if ((_collectorState == AbortablePreclean) && !_abort_preclean) {
4489 size_t used = get_eden_used();
4490 size_t capacity = get_eden_capacity();
4491 assert(used <= capacity, "Unexpected state of Eden");
4492 if (used > (capacity/100 * CMSScheduleRemarkEdenPenetration)) {
4493 _abort_preclean = true;
4494 }
4495 }
4496 }
4499 size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
4500 assert(_collectorState == Precleaning ||
4501 _collectorState == AbortablePreclean, "incorrect state");
4502 ResourceMark rm;
4503 HandleMark hm;
4504 // Do one pass of scrubbing the discovered reference lists
4505 // to remove any reference objects with strongly-reachable
4506 // referents.
4507 if (clean_refs) {
4508 ReferenceProcessor* rp = ref_processor();
4509 CMSPrecleanRefsYieldClosure yield_cl(this);
4510 assert(rp->span().equals(_span), "Spans should be equal");
4511 CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
4512 &_markStack, &_revisitStack,
4513 true /* preclean */);
4514 CMSDrainMarkingStackClosure complete_trace(this,
4515 _span, &_markBitMap, &_markStack,
4516 &keep_alive, true /* preclean */);
4518 // We don't want this step to interfere with a young
4519 // collection because we don't want to take CPU
4520 // or memory bandwidth away from the young GC threads
4521 // (which may be as many as there are CPUs).
4522 // Note that we don't need to protect ourselves from
4523 // interference with mutators because they can't
4524 // manipulate the discovered reference lists nor affect
4525 // the computed reachability of the referents, the
4526 // only properties manipulated by the precleaning
4527 // of these reference lists.
4528 stopTimer();
4529 CMSTokenSyncWithLocks x(true /* is cms thread */,
4530 bitMapLock());
4531 startTimer();
4532 sample_eden();
4534 // The following will yield to allow foreground
4535 // collection to proceed promptly. XXX YSR:
4536 // The code in this method may need further
4537 // tweaking for better performance and some restructuring
4538 // for cleaner interfaces.
4539 rp->preclean_discovered_references(
4540 rp->is_alive_non_header(), &keep_alive, &complete_trace,
4541 &yield_cl, should_unload_classes());
4542 }
4544 if (clean_survivor) { // preclean the active survivor space(s)
4545 assert(_young_gen->kind() == Generation::DefNew ||
4546 _young_gen->kind() == Generation::ParNew ||
4547 _young_gen->kind() == Generation::ASParNew,
4548 "incorrect type for cast");
4549 DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
4550 PushAndMarkClosure pam_cl(this, _span, ref_processor(),
4551 &_markBitMap, &_modUnionTable,
4552 &_markStack, &_revisitStack,
4553 true /* precleaning phase */);
4554 stopTimer();
4555 CMSTokenSyncWithLocks ts(true /* is cms thread */,
4556 bitMapLock());
4557 startTimer();
4558 unsigned int before_count =
4559 GenCollectedHeap::heap()->total_collections();
4560 SurvivorSpacePrecleanClosure
4561 sss_cl(this, _span, &_markBitMap, &_markStack,
4562 &pam_cl, before_count, CMSYield);
4563 DEBUG_ONLY(RememberKlassesChecker mx(should_unload_classes());)
4564 dng->from()->object_iterate_careful(&sss_cl);
4565 dng->to()->object_iterate_careful(&sss_cl);
4566 }
4567 MarkRefsIntoAndScanClosure
4568 mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
4569 &_markStack, &_revisitStack, this, CMSYield,
4570 true /* precleaning phase */);
4571 // CAUTION: The following closure has persistent state that may need to
4572 // be reset upon a decrease in the sequence of addresses it
4573 // processes.
4574 ScanMarkedObjectsAgainCarefullyClosure
4575 smoac_cl(this, _span,
4576 &_markBitMap, &_markStack, &_revisitStack, &mrias_cl, CMSYield);
4578 // Preclean dirty cards in ModUnionTable and CardTable using
4579 // appropriate convergence criterion;
4580 // repeat CMSPrecleanIter times unless we find that
4581 // we are losing.
4582 assert(CMSPrecleanIter < 10, "CMSPrecleanIter is too large");
4583 assert(CMSPrecleanNumerator < CMSPrecleanDenominator,
4584 "Bad convergence multiplier");
4585 assert(CMSPrecleanThreshold >= 100,
4586 "Unreasonably low CMSPrecleanThreshold");
4588 size_t numIter, cumNumCards, lastNumCards, curNumCards;
4589 for (numIter = 0, cumNumCards = lastNumCards = curNumCards = 0;
4590 numIter < CMSPrecleanIter;
4591 numIter++, lastNumCards = curNumCards, cumNumCards += curNumCards) {
4592 curNumCards = preclean_mod_union_table(_cmsGen, &smoac_cl);
4593 if (CMSPermGenPrecleaningEnabled) {
4594 curNumCards += preclean_mod_union_table(_permGen, &smoac_cl);
4595 }
4596 if (Verbose && PrintGCDetails) {
4597 gclog_or_tty->print(" (modUnionTable: %d cards)", curNumCards);
4598 }
4599 // Either there are very few dirty cards, so re-mark
4600 // pause will be small anyway, or our pre-cleaning isn't
4601 // that much faster than the rate at which cards are being
4602 // dirtied, so we might as well stop and re-mark since
4603 // precleaning won't improve our re-mark time by much.
4604 if (curNumCards <= CMSPrecleanThreshold ||
4605 (numIter > 0 &&
4606 (curNumCards * CMSPrecleanDenominator >
4607 lastNumCards * CMSPrecleanNumerator))) {
4608 numIter++;
4609 cumNumCards += curNumCards;
4610 break;
4611 }
4612 }
4613 curNumCards = preclean_card_table(_cmsGen, &smoac_cl);
4614 if (CMSPermGenPrecleaningEnabled) {
4615 curNumCards += preclean_card_table(_permGen, &smoac_cl);
4616 }
4617 cumNumCards += curNumCards;
4618 if (PrintGCDetails && PrintCMSStatistics != 0) {
4619 gclog_or_tty->print_cr(" (cardTable: %d cards, re-scanned %d cards, %d iterations)",
4620 curNumCards, cumNumCards, numIter);
4621 }
4622 return cumNumCards; // as a measure of useful work done
4623 }
4625 // PRECLEANING NOTES:
4626 // Precleaning involves:
4627 // . reading the bits of the modUnionTable and clearing the set bits.
4628 // . For the cards corresponding to the set bits, we scan the
4629 // objects on those cards. This means we need the free_list_lock
4630 // so that we can safely iterate over the CMS space when scanning
4631 // for oops.
4632 // . When we scan the objects, we'll be both reading and setting
4633 // marks in the marking bit map, so we'll need the marking bit map.
4634 // . For protecting _collector_state transitions, we take the CGC_lock.
4635 // Note that any races in the reading of of card table entries by the
4636 // CMS thread on the one hand and the clearing of those entries by the
4637 // VM thread or the setting of those entries by the mutator threads on the
4638 // other are quite benign. However, for efficiency it makes sense to keep
4639 // the VM thread from racing with the CMS thread while the latter is
4640 // dirty card info to the modUnionTable. We therefore also use the
4641 // CGC_lock to protect the reading of the card table and the mod union
4642 // table by the CM thread.
4643 // . We run concurrently with mutator updates, so scanning
4644 // needs to be done carefully -- we should not try to scan
4645 // potentially uninitialized objects.
4646 //
4647 // Locking strategy: While holding the CGC_lock, we scan over and
4648 // reset a maximal dirty range of the mod union / card tables, then lock
4649 // the free_list_lock and bitmap lock to do a full marking, then
4650 // release these locks; and repeat the cycle. This allows for a
4651 // certain amount of fairness in the sharing of these locks between
4652 // the CMS collector on the one hand, and the VM thread and the
4653 // mutators on the other.
4655 // NOTE: preclean_mod_union_table() and preclean_card_table()
4656 // further below are largely identical; if you need to modify
4657 // one of these methods, please check the other method too.
4659 size_t CMSCollector::preclean_mod_union_table(
4660 ConcurrentMarkSweepGeneration* gen,
4661 ScanMarkedObjectsAgainCarefullyClosure* cl) {
4662 verify_work_stacks_empty();
4663 verify_overflow_empty();
4665 // Turn off checking for this method but turn it back on
4666 // selectively. There are yield points in this method
4667 // but it is difficult to turn the checking off just around
4668 // the yield points. It is simpler to selectively turn
4669 // it on.
4670 DEBUG_ONLY(RememberKlassesChecker mux(false);)
4672 // strategy: starting with the first card, accumulate contiguous
4673 // ranges of dirty cards; clear these cards, then scan the region
4674 // covered by these cards.
4676 // Since all of the MUT is committed ahead, we can just use
4677 // that, in case the generations expand while we are precleaning.
4678 // It might also be fine to just use the committed part of the
4679 // generation, but we might potentially miss cards when the
4680 // generation is rapidly expanding while we are in the midst
4681 // of precleaning.
4682 HeapWord* startAddr = gen->reserved().start();
4683 HeapWord* endAddr = gen->reserved().end();
4685 cl->setFreelistLock(gen->freelistLock()); // needed for yielding
4687 size_t numDirtyCards, cumNumDirtyCards;
4688 HeapWord *nextAddr, *lastAddr;
4689 for (cumNumDirtyCards = numDirtyCards = 0,
4690 nextAddr = lastAddr = startAddr;
4691 nextAddr < endAddr;
4692 nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
4694 ResourceMark rm;
4695 HandleMark hm;
4697 MemRegion dirtyRegion;
4698 {
4699 stopTimer();
4700 // Potential yield point
4701 CMSTokenSync ts(true);
4702 startTimer();
4703 sample_eden();
4704 // Get dirty region starting at nextOffset (inclusive),
4705 // simultaneously clearing it.
4706 dirtyRegion =
4707 _modUnionTable.getAndClearMarkedRegion(nextAddr, endAddr);
4708 assert(dirtyRegion.start() >= nextAddr,
4709 "returned region inconsistent?");
4710 }
4711 // Remember where the next search should begin.
4712 // The returned region (if non-empty) is a right open interval,
4713 // so lastOffset is obtained from the right end of that
4714 // interval.
4715 lastAddr = dirtyRegion.end();
4716 // Should do something more transparent and less hacky XXX
4717 numDirtyCards =
4718 _modUnionTable.heapWordDiffToOffsetDiff(dirtyRegion.word_size());
4720 // We'll scan the cards in the dirty region (with periodic
4721 // yields for foreground GC as needed).
4722 if (!dirtyRegion.is_empty()) {
4723 assert(numDirtyCards > 0, "consistency check");
4724 HeapWord* stop_point = NULL;
4725 stopTimer();
4726 // Potential yield point
4727 CMSTokenSyncWithLocks ts(true, gen->freelistLock(),
4728 bitMapLock());
4729 startTimer();
4730 {
4731 verify_work_stacks_empty();
4732 verify_overflow_empty();
4733 sample_eden();
4734 DEBUG_ONLY(RememberKlassesChecker mx(should_unload_classes());)
4735 stop_point =
4736 gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4737 }
4738 if (stop_point != NULL) {
4739 // The careful iteration stopped early either because it found an
4740 // uninitialized object, or because we were in the midst of an
4741 // "abortable preclean", which should now be aborted. Redirty
4742 // the bits corresponding to the partially-scanned or unscanned
4743 // cards. We'll either restart at the next block boundary or
4744 // abort the preclean.
4745 assert((CMSPermGenPrecleaningEnabled && (gen == _permGen)) ||
4746 (_collectorState == AbortablePreclean && should_abort_preclean()),
4747 "Unparsable objects should only be in perm gen.");
4748 _modUnionTable.mark_range(MemRegion(stop_point, dirtyRegion.end()));
4749 if (should_abort_preclean()) {
4750 break; // out of preclean loop
4751 } else {
4752 // Compute the next address at which preclean should pick up;
4753 // might need bitMapLock in order to read P-bits.
4754 lastAddr = next_card_start_after_block(stop_point);
4755 }
4756 }
4757 } else {
4758 assert(lastAddr == endAddr, "consistency check");
4759 assert(numDirtyCards == 0, "consistency check");
4760 break;
4761 }
4762 }
4763 verify_work_stacks_empty();
4764 verify_overflow_empty();
4765 return cumNumDirtyCards;
4766 }
4768 // NOTE: preclean_mod_union_table() above and preclean_card_table()
4769 // below are largely identical; if you need to modify
4770 // one of these methods, please check the other method too.
4772 size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* gen,
4773 ScanMarkedObjectsAgainCarefullyClosure* cl) {
4774 // strategy: it's similar to precleamModUnionTable above, in that
4775 // we accumulate contiguous ranges of dirty cards, mark these cards
4776 // precleaned, then scan the region covered by these cards.
4777 HeapWord* endAddr = (HeapWord*)(gen->_virtual_space.high());
4778 HeapWord* startAddr = (HeapWord*)(gen->_virtual_space.low());
4780 cl->setFreelistLock(gen->freelistLock()); // needed for yielding
4782 size_t numDirtyCards, cumNumDirtyCards;
4783 HeapWord *lastAddr, *nextAddr;
4785 for (cumNumDirtyCards = numDirtyCards = 0,
4786 nextAddr = lastAddr = startAddr;
4787 nextAddr < endAddr;
4788 nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
4790 ResourceMark rm;
4791 HandleMark hm;
4793 MemRegion dirtyRegion;
4794 {
4795 // See comments in "Precleaning notes" above on why we
4796 // do this locking. XXX Could the locking overheads be
4797 // too high when dirty cards are sparse? [I don't think so.]
4798 stopTimer();
4799 CMSTokenSync x(true); // is cms thread
4800 startTimer();
4801 sample_eden();
4802 // Get and clear dirty region from card table
4803 dirtyRegion = _ct->ct_bs()->dirty_card_range_after_reset(
4804 MemRegion(nextAddr, endAddr),
4805 true,
4806 CardTableModRefBS::precleaned_card_val());
4808 assert(dirtyRegion.start() >= nextAddr,
4809 "returned region inconsistent?");
4810 }
4811 lastAddr = dirtyRegion.end();
4812 numDirtyCards =
4813 dirtyRegion.word_size()/CardTableModRefBS::card_size_in_words;
4815 if (!dirtyRegion.is_empty()) {
4816 stopTimer();
4817 CMSTokenSyncWithLocks ts(true, gen->freelistLock(), bitMapLock());
4818 startTimer();
4819 sample_eden();
4820 verify_work_stacks_empty();
4821 verify_overflow_empty();
4822 DEBUG_ONLY(RememberKlassesChecker mx(should_unload_classes());)
4823 HeapWord* stop_point =
4824 gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4825 if (stop_point != NULL) {
4826 // The careful iteration stopped early because it found an
4827 // uninitialized object. Redirty the bits corresponding to the
4828 // partially-scanned or unscanned cards, and start again at the
4829 // next block boundary.
4830 assert(CMSPermGenPrecleaningEnabled ||
4831 (_collectorState == AbortablePreclean && should_abort_preclean()),
4832 "Unparsable objects should only be in perm gen.");
4833 _ct->ct_bs()->invalidate(MemRegion(stop_point, dirtyRegion.end()));
4834 if (should_abort_preclean()) {
4835 break; // out of preclean loop
4836 } else {
4837 // Compute the next address at which preclean should pick up.
4838 lastAddr = next_card_start_after_block(stop_point);
4839 }
4840 }
4841 } else {
4842 break;
4843 }
4844 }
4845 verify_work_stacks_empty();
4846 verify_overflow_empty();
4847 return cumNumDirtyCards;
4848 }
4850 void CMSCollector::checkpointRootsFinal(bool asynch,
4851 bool clear_all_soft_refs, bool init_mark_was_synchronous) {
4852 assert(_collectorState == FinalMarking, "incorrect state transition?");
4853 check_correct_thread_executing();
4854 // world is stopped at this checkpoint
4855 assert(SafepointSynchronize::is_at_safepoint(),
4856 "world should be stopped");
4857 TraceCMSMemoryManagerStats tms(_collectorState);
4858 verify_work_stacks_empty();
4859 verify_overflow_empty();
4861 SpecializationStats::clear();
4862 if (PrintGCDetails) {
4863 gclog_or_tty->print("[YG occupancy: "SIZE_FORMAT" K ("SIZE_FORMAT" K)]",
4864 _young_gen->used() / K,
4865 _young_gen->capacity() / K);
4866 }
4867 if (asynch) {
4868 if (CMSScavengeBeforeRemark) {
4869 GenCollectedHeap* gch = GenCollectedHeap::heap();
4870 // Temporarily set flag to false, GCH->do_collection will
4871 // expect it to be false and set to true
4872 FlagSetting fl(gch->_is_gc_active, false);
4873 NOT_PRODUCT(TraceTime t("Scavenge-Before-Remark",
4874 PrintGCDetails && Verbose, true, gclog_or_tty);)
4875 int level = _cmsGen->level() - 1;
4876 if (level >= 0) {
4877 gch->do_collection(true, // full (i.e. force, see below)
4878 false, // !clear_all_soft_refs
4879 0, // size
4880 false, // is_tlab
4881 level // max_level
4882 );
4883 }
4884 }
4885 FreelistLocker x(this);
4886 MutexLockerEx y(bitMapLock(),
4887 Mutex::_no_safepoint_check_flag);
4888 assert(!init_mark_was_synchronous, "but that's impossible!");
4889 checkpointRootsFinalWork(asynch, clear_all_soft_refs, false);
4890 } else {
4891 // already have all the locks
4892 checkpointRootsFinalWork(asynch, clear_all_soft_refs,
4893 init_mark_was_synchronous);
4894 }
4895 verify_work_stacks_empty();
4896 verify_overflow_empty();
4897 SpecializationStats::print();
4898 }
4900 void CMSCollector::checkpointRootsFinalWork(bool asynch,
4901 bool clear_all_soft_refs, bool init_mark_was_synchronous) {
4903 NOT_PRODUCT(TraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, gclog_or_tty);)
4905 assert(haveFreelistLocks(), "must have free list locks");
4906 assert_lock_strong(bitMapLock());
4908 if (UseAdaptiveSizePolicy) {
4909 size_policy()->checkpoint_roots_final_begin();
4910 }
4912 ResourceMark rm;
4913 HandleMark hm;
4915 GenCollectedHeap* gch = GenCollectedHeap::heap();
4917 if (should_unload_classes()) {
4918 CodeCache::gc_prologue();
4919 }
4920 assert(haveFreelistLocks(), "must have free list locks");
4921 assert_lock_strong(bitMapLock());
4923 DEBUG_ONLY(RememberKlassesChecker fmx(should_unload_classes());)
4924 if (!init_mark_was_synchronous) {
4925 // We might assume that we need not fill TLAB's when
4926 // CMSScavengeBeforeRemark is set, because we may have just done
4927 // a scavenge which would have filled all TLAB's -- and besides
4928 // Eden would be empty. This however may not always be the case --
4929 // for instance although we asked for a scavenge, it may not have
4930 // happened because of a JNI critical section. We probably need
4931 // a policy for deciding whether we can in that case wait until
4932 // the critical section releases and then do the remark following
4933 // the scavenge, and skip it here. In the absence of that policy,
4934 // or of an indication of whether the scavenge did indeed occur,
4935 // we cannot rely on TLAB's having been filled and must do
4936 // so here just in case a scavenge did not happen.
4937 gch->ensure_parsability(false); // fill TLAB's, but no need to retire them
4938 // Update the saved marks which may affect the root scans.
4939 gch->save_marks();
4941 {
4942 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
4944 // Note on the role of the mod union table:
4945 // Since the marker in "markFromRoots" marks concurrently with
4946 // mutators, it is possible for some reachable objects not to have been
4947 // scanned. For instance, an only reference to an object A was
4948 // placed in object B after the marker scanned B. Unless B is rescanned,
4949 // A would be collected. Such updates to references in marked objects
4950 // are detected via the mod union table which is the set of all cards
4951 // dirtied since the first checkpoint in this GC cycle and prior to
4952 // the most recent young generation GC, minus those cleaned up by the
4953 // concurrent precleaning.
4954 if (CMSParallelRemarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
4955 TraceTime t("Rescan (parallel) ", PrintGCDetails, false, gclog_or_tty);
4956 do_remark_parallel();
4957 } else {
4958 TraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
4959 gclog_or_tty);
4960 do_remark_non_parallel();
4961 }
4962 }
4963 } else {
4964 assert(!asynch, "Can't have init_mark_was_synchronous in asynch mode");
4965 // The initial mark was stop-world, so there's no rescanning to
4966 // do; go straight on to the next step below.
4967 }
4968 verify_work_stacks_empty();
4969 verify_overflow_empty();
4971 {
4972 NOT_PRODUCT(TraceTime ts("refProcessingWork", PrintGCDetails, false, gclog_or_tty);)
4973 refProcessingWork(asynch, clear_all_soft_refs);
4974 }
4975 verify_work_stacks_empty();
4976 verify_overflow_empty();
4978 if (should_unload_classes()) {
4979 CodeCache::gc_epilogue();
4980 }
4982 // If we encountered any (marking stack / work queue) overflow
4983 // events during the current CMS cycle, take appropriate
4984 // remedial measures, where possible, so as to try and avoid
4985 // recurrence of that condition.
4986 assert(_markStack.isEmpty(), "No grey objects");
4987 size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
4988 _ser_kac_ovflw + _ser_kac_preclean_ovflw;
4989 if (ser_ovflw > 0) {
4990 if (PrintCMSStatistics != 0) {
4991 gclog_or_tty->print_cr("Marking stack overflow (benign) "
4992 "(pmc_pc="SIZE_FORMAT", pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT
4993 ", kac_preclean="SIZE_FORMAT")",
4994 _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw,
4995 _ser_kac_ovflw, _ser_kac_preclean_ovflw);
4996 }
4997 _markStack.expand();
4998 _ser_pmc_remark_ovflw = 0;
4999 _ser_pmc_preclean_ovflw = 0;
5000 _ser_kac_preclean_ovflw = 0;
5001 _ser_kac_ovflw = 0;
5002 }
5003 if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) {
5004 if (PrintCMSStatistics != 0) {
5005 gclog_or_tty->print_cr("Work queue overflow (benign) "
5006 "(pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT")",
5007 _par_pmc_remark_ovflw, _par_kac_ovflw);
5008 }
5009 _par_pmc_remark_ovflw = 0;
5010 _par_kac_ovflw = 0;
5011 }
5012 if (PrintCMSStatistics != 0) {
5013 if (_markStack._hit_limit > 0) {
5014 gclog_or_tty->print_cr(" (benign) Hit max stack size limit ("SIZE_FORMAT")",
5015 _markStack._hit_limit);
5016 }
5017 if (_markStack._failed_double > 0) {
5018 gclog_or_tty->print_cr(" (benign) Failed stack doubling ("SIZE_FORMAT"),"
5019 " current capacity "SIZE_FORMAT,
5020 _markStack._failed_double,
5021 _markStack.capacity());
5022 }
5023 }
5024 _markStack._hit_limit = 0;
5025 _markStack._failed_double = 0;
5027 // Check that all the klasses have been checked
5028 assert(_revisitStack.isEmpty(), "Not all klasses revisited");
5030 if ((VerifyAfterGC || VerifyDuringGC) &&
5031 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5032 verify_after_remark();
5033 }
5035 // Change under the freelistLocks.
5036 _collectorState = Sweeping;
5037 // Call isAllClear() under bitMapLock
5038 assert(_modUnionTable.isAllClear(), "Should be clear by end of the"
5039 " final marking");
5040 if (UseAdaptiveSizePolicy) {
5041 size_policy()->checkpoint_roots_final_end(gch->gc_cause());
5042 }
5043 }
5045 // Parallel remark task
5046 class CMSParRemarkTask: public AbstractGangTask {
5047 CMSCollector* _collector;
5048 int _n_workers;
5049 CompactibleFreeListSpace* _cms_space;
5050 CompactibleFreeListSpace* _perm_space;
5052 // The per-thread work queues, available here for stealing.
5053 OopTaskQueueSet* _task_queues;
5054 ParallelTaskTerminator _term;
5056 public:
5057 CMSParRemarkTask(CMSCollector* collector,
5058 CompactibleFreeListSpace* cms_space,
5059 CompactibleFreeListSpace* perm_space,
5060 int n_workers, FlexibleWorkGang* workers,
5061 OopTaskQueueSet* task_queues):
5062 AbstractGangTask("Rescan roots and grey objects in parallel"),
5063 _collector(collector),
5064 _cms_space(cms_space), _perm_space(perm_space),
5065 _n_workers(n_workers),
5066 _task_queues(task_queues),
5067 _term(n_workers, task_queues) { }
5069 OopTaskQueueSet* task_queues() { return _task_queues; }
5071 OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
5073 ParallelTaskTerminator* terminator() { return &_term; }
5074 int n_workers() { return _n_workers; }
5076 void work(int i);
5078 private:
5079 // Work method in support of parallel rescan ... of young gen spaces
5080 void do_young_space_rescan(int i, Par_MarkRefsIntoAndScanClosure* cl,
5081 ContiguousSpace* space,
5082 HeapWord** chunk_array, size_t chunk_top);
5084 // ... of dirty cards in old space
5085 void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i,
5086 Par_MarkRefsIntoAndScanClosure* cl);
5088 // ... work stealing for the above
5089 void do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl, int* seed);
5090 };
5092 // work_queue(i) is passed to the closure
5093 // Par_MarkRefsIntoAndScanClosure. The "i" parameter
5094 // also is passed to do_dirty_card_rescan_tasks() and to
5095 // do_work_steal() to select the i-th task_queue.
5097 void CMSParRemarkTask::work(int i) {
5098 elapsedTimer _timer;
5099 ResourceMark rm;
5100 HandleMark hm;
5102 // ---------- rescan from roots --------------
5103 _timer.start();
5104 GenCollectedHeap* gch = GenCollectedHeap::heap();
5105 Par_MarkRefsIntoAndScanClosure par_mrias_cl(_collector,
5106 _collector->_span, _collector->ref_processor(),
5107 &(_collector->_markBitMap),
5108 work_queue(i), &(_collector->_revisitStack));
5110 // Rescan young gen roots first since these are likely
5111 // coarsely partitioned and may, on that account, constitute
5112 // the critical path; thus, it's best to start off that
5113 // work first.
5114 // ---------- young gen roots --------------
5115 {
5116 DefNewGeneration* dng = _collector->_young_gen->as_DefNewGeneration();
5117 EdenSpace* eden_space = dng->eden();
5118 ContiguousSpace* from_space = dng->from();
5119 ContiguousSpace* to_space = dng->to();
5121 HeapWord** eca = _collector->_eden_chunk_array;
5122 size_t ect = _collector->_eden_chunk_index;
5123 HeapWord** sca = _collector->_survivor_chunk_array;
5124 size_t sct = _collector->_survivor_chunk_index;
5126 assert(ect <= _collector->_eden_chunk_capacity, "out of bounds");
5127 assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds");
5129 do_young_space_rescan(i, &par_mrias_cl, to_space, NULL, 0);
5130 do_young_space_rescan(i, &par_mrias_cl, from_space, sca, sct);
5131 do_young_space_rescan(i, &par_mrias_cl, eden_space, eca, ect);
5133 _timer.stop();
5134 if (PrintCMSStatistics != 0) {
5135 gclog_or_tty->print_cr(
5136 "Finished young gen rescan work in %dth thread: %3.3f sec",
5137 i, _timer.seconds());
5138 }
5139 }
5141 // ---------- remaining roots --------------
5142 _timer.reset();
5143 _timer.start();
5144 gch->gen_process_strong_roots(_collector->_cmsGen->level(),
5145 false, // yg was scanned above
5146 false, // this is parallel code
5147 true, // collecting perm gen
5148 SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
5149 &par_mrias_cl,
5150 true, // walk all of code cache if (so & SO_CodeCache)
5151 NULL);
5152 assert(_collector->should_unload_classes()
5153 || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_CodeCache),
5154 "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5155 _timer.stop();
5156 if (PrintCMSStatistics != 0) {
5157 gclog_or_tty->print_cr(
5158 "Finished remaining root rescan work in %dth thread: %3.3f sec",
5159 i, _timer.seconds());
5160 }
5162 // ---------- rescan dirty cards ------------
5163 _timer.reset();
5164 _timer.start();
5166 // Do the rescan tasks for each of the two spaces
5167 // (cms_space and perm_space) in turn.
5168 // "i" is passed to select the "i-th" task_queue
5169 do_dirty_card_rescan_tasks(_cms_space, i, &par_mrias_cl);
5170 do_dirty_card_rescan_tasks(_perm_space, i, &par_mrias_cl);
5171 _timer.stop();
5172 if (PrintCMSStatistics != 0) {
5173 gclog_or_tty->print_cr(
5174 "Finished dirty card rescan work in %dth thread: %3.3f sec",
5175 i, _timer.seconds());
5176 }
5178 // ---------- steal work from other threads ...
5179 // ---------- ... and drain overflow list.
5180 _timer.reset();
5181 _timer.start();
5182 do_work_steal(i, &par_mrias_cl, _collector->hash_seed(i));
5183 _timer.stop();
5184 if (PrintCMSStatistics != 0) {
5185 gclog_or_tty->print_cr(
5186 "Finished work stealing in %dth thread: %3.3f sec",
5187 i, _timer.seconds());
5188 }
5189 }
5191 // Note that parameter "i" is not used.
5192 void
5193 CMSParRemarkTask::do_young_space_rescan(int i,
5194 Par_MarkRefsIntoAndScanClosure* cl, ContiguousSpace* space,
5195 HeapWord** chunk_array, size_t chunk_top) {
5196 // Until all tasks completed:
5197 // . claim an unclaimed task
5198 // . compute region boundaries corresponding to task claimed
5199 // using chunk_array
5200 // . par_oop_iterate(cl) over that region
5202 ResourceMark rm;
5203 HandleMark hm;
5205 SequentialSubTasksDone* pst = space->par_seq_tasks();
5206 assert(pst->valid(), "Uninitialized use?");
5208 int nth_task = 0;
5209 int n_tasks = pst->n_tasks();
5211 HeapWord *start, *end;
5212 while (!pst->is_task_claimed(/* reference */ nth_task)) {
5213 // We claimed task # nth_task; compute its boundaries.
5214 if (chunk_top == 0) { // no samples were taken
5215 assert(nth_task == 0 && n_tasks == 1, "Can have only 1 EdenSpace task");
5216 start = space->bottom();
5217 end = space->top();
5218 } else if (nth_task == 0) {
5219 start = space->bottom();
5220 end = chunk_array[nth_task];
5221 } else if (nth_task < (jint)chunk_top) {
5222 assert(nth_task >= 1, "Control point invariant");
5223 start = chunk_array[nth_task - 1];
5224 end = chunk_array[nth_task];
5225 } else {
5226 assert(nth_task == (jint)chunk_top, "Control point invariant");
5227 start = chunk_array[chunk_top - 1];
5228 end = space->top();
5229 }
5230 MemRegion mr(start, end);
5231 // Verify that mr is in space
5232 assert(mr.is_empty() || space->used_region().contains(mr),
5233 "Should be in space");
5234 // Verify that "start" is an object boundary
5235 assert(mr.is_empty() || oop(mr.start())->is_oop(),
5236 "Should be an oop");
5237 space->par_oop_iterate(mr, cl);
5238 }
5239 pst->all_tasks_completed();
5240 }
5242 void
5243 CMSParRemarkTask::do_dirty_card_rescan_tasks(
5244 CompactibleFreeListSpace* sp, int i,
5245 Par_MarkRefsIntoAndScanClosure* cl) {
5246 // Until all tasks completed:
5247 // . claim an unclaimed task
5248 // . compute region boundaries corresponding to task claimed
5249 // . transfer dirty bits ct->mut for that region
5250 // . apply rescanclosure to dirty mut bits for that region
5252 ResourceMark rm;
5253 HandleMark hm;
5255 OopTaskQueue* work_q = work_queue(i);
5256 ModUnionClosure modUnionClosure(&(_collector->_modUnionTable));
5257 // CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION!
5258 // CAUTION: This closure has state that persists across calls to
5259 // the work method dirty_range_iterate_clear() in that it has
5260 // imbedded in it a (subtype of) UpwardsObjectClosure. The
5261 // use of that state in the imbedded UpwardsObjectClosure instance
5262 // assumes that the cards are always iterated (even if in parallel
5263 // by several threads) in monotonically increasing order per each
5264 // thread. This is true of the implementation below which picks
5265 // card ranges (chunks) in monotonically increasing order globally
5266 // and, a-fortiori, in monotonically increasing order per thread
5267 // (the latter order being a subsequence of the former).
5268 // If the work code below is ever reorganized into a more chaotic
5269 // work-partitioning form than the current "sequential tasks"
5270 // paradigm, the use of that persistent state will have to be
5271 // revisited and modified appropriately. See also related
5272 // bug 4756801 work on which should examine this code to make
5273 // sure that the changes there do not run counter to the
5274 // assumptions made here and necessary for correctness and
5275 // efficiency. Note also that this code might yield inefficient
5276 // behaviour in the case of very large objects that span one or
5277 // more work chunks. Such objects would potentially be scanned
5278 // several times redundantly. Work on 4756801 should try and
5279 // address that performance anomaly if at all possible. XXX
5280 MemRegion full_span = _collector->_span;
5281 CMSBitMap* bm = &(_collector->_markBitMap); // shared
5282 CMSMarkStack* rs = &(_collector->_revisitStack); // shared
5283 MarkFromDirtyCardsClosure
5284 greyRescanClosure(_collector, full_span, // entire span of interest
5285 sp, bm, work_q, rs, cl);
5287 SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
5288 assert(pst->valid(), "Uninitialized use?");
5289 int nth_task = 0;
5290 const int alignment = CardTableModRefBS::card_size * BitsPerWord;
5291 MemRegion span = sp->used_region();
5292 HeapWord* start_addr = span.start();
5293 HeapWord* end_addr = (HeapWord*)round_to((intptr_t)span.end(),
5294 alignment);
5295 const size_t chunk_size = sp->rescan_task_size(); // in HeapWord units
5296 assert((HeapWord*)round_to((intptr_t)start_addr, alignment) ==
5297 start_addr, "Check alignment");
5298 assert((size_t)round_to((intptr_t)chunk_size, alignment) ==
5299 chunk_size, "Check alignment");
5301 while (!pst->is_task_claimed(/* reference */ nth_task)) {
5302 // Having claimed the nth_task, compute corresponding mem-region,
5303 // which is a-fortiori aligned correctly (i.e. at a MUT bopundary).
5304 // The alignment restriction ensures that we do not need any
5305 // synchronization with other gang-workers while setting or
5306 // clearing bits in thus chunk of the MUT.
5307 MemRegion this_span = MemRegion(start_addr + nth_task*chunk_size,
5308 start_addr + (nth_task+1)*chunk_size);
5309 // The last chunk's end might be way beyond end of the
5310 // used region. In that case pull back appropriately.
5311 if (this_span.end() > end_addr) {
5312 this_span.set_end(end_addr);
5313 assert(!this_span.is_empty(), "Program logic (calculation of n_tasks)");
5314 }
5315 // Iterate over the dirty cards covering this chunk, marking them
5316 // precleaned, and setting the corresponding bits in the mod union
5317 // table. Since we have been careful to partition at Card and MUT-word
5318 // boundaries no synchronization is needed between parallel threads.
5319 _collector->_ct->ct_bs()->dirty_card_iterate(this_span,
5320 &modUnionClosure);
5322 // Having transferred these marks into the modUnionTable,
5323 // rescan the marked objects on the dirty cards in the modUnionTable.
5324 // Even if this is at a synchronous collection, the initial marking
5325 // may have been done during an asynchronous collection so there
5326 // may be dirty bits in the mod-union table.
5327 _collector->_modUnionTable.dirty_range_iterate_clear(
5328 this_span, &greyRescanClosure);
5329 _collector->_modUnionTable.verifyNoOneBitsInRange(
5330 this_span.start(),
5331 this_span.end());
5332 }
5333 pst->all_tasks_completed(); // declare that i am done
5334 }
5336 // . see if we can share work_queues with ParNew? XXX
5337 void
5338 CMSParRemarkTask::do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl,
5339 int* seed) {
5340 OopTaskQueue* work_q = work_queue(i);
5341 NOT_PRODUCT(int num_steals = 0;)
5342 oop obj_to_scan;
5343 CMSBitMap* bm = &(_collector->_markBitMap);
5345 while (true) {
5346 // Completely finish any left over work from (an) earlier round(s)
5347 cl->trim_queue(0);
5348 size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
5349 (size_t)ParGCDesiredObjsFromOverflowList);
5350 // Now check if there's any work in the overflow list
5351 // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
5352 // only affects the number of attempts made to get work from the
5353 // overflow list and does not affect the number of workers. Just
5354 // pass ParallelGCThreads so this behavior is unchanged.
5355 if (_collector->par_take_from_overflow_list(num_from_overflow_list,
5356 work_q,
5357 ParallelGCThreads)) {
5358 // found something in global overflow list;
5359 // not yet ready to go stealing work from others.
5360 // We'd like to assert(work_q->size() != 0, ...)
5361 // because we just took work from the overflow list,
5362 // but of course we can't since all of that could have
5363 // been already stolen from us.
5364 // "He giveth and He taketh away."
5365 continue;
5366 }
5367 // Verify that we have no work before we resort to stealing
5368 assert(work_q->size() == 0, "Have work, shouldn't steal");
5369 // Try to steal from other queues that have work
5370 if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
5371 NOT_PRODUCT(num_steals++;)
5372 assert(obj_to_scan->is_oop(), "Oops, not an oop!");
5373 assert(bm->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
5374 // Do scanning work
5375 obj_to_scan->oop_iterate(cl);
5376 // Loop around, finish this work, and try to steal some more
5377 } else if (terminator()->offer_termination()) {
5378 break; // nirvana from the infinite cycle
5379 }
5380 }
5381 NOT_PRODUCT(
5382 if (PrintCMSStatistics != 0) {
5383 gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
5384 }
5385 )
5386 assert(work_q->size() == 0 && _collector->overflow_list_is_empty(),
5387 "Else our work is not yet done");
5388 }
5390 // Return a thread-local PLAB recording array, as appropriate.
5391 void* CMSCollector::get_data_recorder(int thr_num) {
5392 if (_survivor_plab_array != NULL &&
5393 (CMSPLABRecordAlways ||
5394 (_collectorState > Marking && _collectorState < FinalMarking))) {
5395 assert(thr_num < (int)ParallelGCThreads, "thr_num is out of bounds");
5396 ChunkArray* ca = &_survivor_plab_array[thr_num];
5397 ca->reset(); // clear it so that fresh data is recorded
5398 return (void*) ca;
5399 } else {
5400 return NULL;
5401 }
5402 }
5404 // Reset all the thread-local PLAB recording arrays
5405 void CMSCollector::reset_survivor_plab_arrays() {
5406 for (uint i = 0; i < ParallelGCThreads; i++) {
5407 _survivor_plab_array[i].reset();
5408 }
5409 }
5411 // Merge the per-thread plab arrays into the global survivor chunk
5412 // array which will provide the partitioning of the survivor space
5413 // for CMS rescan.
5414 void CMSCollector::merge_survivor_plab_arrays(ContiguousSpace* surv,
5415 int no_of_gc_threads) {
5416 assert(_survivor_plab_array != NULL, "Error");
5417 assert(_survivor_chunk_array != NULL, "Error");
5418 assert(_collectorState == FinalMarking, "Error");
5419 for (int j = 0; j < no_of_gc_threads; j++) {
5420 _cursor[j] = 0;
5421 }
5422 HeapWord* top = surv->top();
5423 size_t i;
5424 for (i = 0; i < _survivor_chunk_capacity; i++) { // all sca entries
5425 HeapWord* min_val = top; // Higher than any PLAB address
5426 uint min_tid = 0; // position of min_val this round
5427 for (int j = 0; j < no_of_gc_threads; j++) {
5428 ChunkArray* cur_sca = &_survivor_plab_array[j];
5429 if (_cursor[j] == cur_sca->end()) {
5430 continue;
5431 }
5432 assert(_cursor[j] < cur_sca->end(), "ctl pt invariant");
5433 HeapWord* cur_val = cur_sca->nth(_cursor[j]);
5434 assert(surv->used_region().contains(cur_val), "Out of bounds value");
5435 if (cur_val < min_val) {
5436 min_tid = j;
5437 min_val = cur_val;
5438 } else {
5439 assert(cur_val < top, "All recorded addresses should be less");
5440 }
5441 }
5442 // At this point min_val and min_tid are respectively
5443 // the least address in _survivor_plab_array[j]->nth(_cursor[j])
5444 // and the thread (j) that witnesses that address.
5445 // We record this address in the _survivor_chunk_array[i]
5446 // and increment _cursor[min_tid] prior to the next round i.
5447 if (min_val == top) {
5448 break;
5449 }
5450 _survivor_chunk_array[i] = min_val;
5451 _cursor[min_tid]++;
5452 }
5453 // We are all done; record the size of the _survivor_chunk_array
5454 _survivor_chunk_index = i; // exclusive: [0, i)
5455 if (PrintCMSStatistics > 0) {
5456 gclog_or_tty->print(" (Survivor:" SIZE_FORMAT "chunks) ", i);
5457 }
5458 // Verify that we used up all the recorded entries
5459 #ifdef ASSERT
5460 size_t total = 0;
5461 for (int j = 0; j < no_of_gc_threads; j++) {
5462 assert(_cursor[j] == _survivor_plab_array[j].end(), "Ctl pt invariant");
5463 total += _cursor[j];
5464 }
5465 assert(total == _survivor_chunk_index, "Ctl Pt Invariant");
5466 // Check that the merged array is in sorted order
5467 if (total > 0) {
5468 for (size_t i = 0; i < total - 1; i++) {
5469 if (PrintCMSStatistics > 0) {
5470 gclog_or_tty->print(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ",
5471 i, _survivor_chunk_array[i]);
5472 }
5473 assert(_survivor_chunk_array[i] < _survivor_chunk_array[i+1],
5474 "Not sorted");
5475 }
5476 }
5477 #endif // ASSERT
5478 }
5480 // Set up the space's par_seq_tasks structure for work claiming
5481 // for parallel rescan of young gen.
5482 // See ParRescanTask where this is currently used.
5483 void
5484 CMSCollector::
5485 initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
5486 assert(n_threads > 0, "Unexpected n_threads argument");
5487 DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
5489 // Eden space
5490 {
5491 SequentialSubTasksDone* pst = dng->eden()->par_seq_tasks();
5492 assert(!pst->valid(), "Clobbering existing data?");
5493 // Each valid entry in [0, _eden_chunk_index) represents a task.
5494 size_t n_tasks = _eden_chunk_index + 1;
5495 assert(n_tasks == 1 || _eden_chunk_array != NULL, "Error");
5496 // Sets the condition for completion of the subtask (how many threads
5497 // need to finish in order to be done).
5498 pst->set_n_threads(n_threads);
5499 pst->set_n_tasks((int)n_tasks);
5500 }
5502 // Merge the survivor plab arrays into _survivor_chunk_array
5503 if (_survivor_plab_array != NULL) {
5504 merge_survivor_plab_arrays(dng->from(), n_threads);
5505 } else {
5506 assert(_survivor_chunk_index == 0, "Error");
5507 }
5509 // To space
5510 {
5511 SequentialSubTasksDone* pst = dng->to()->par_seq_tasks();
5512 assert(!pst->valid(), "Clobbering existing data?");
5513 // Sets the condition for completion of the subtask (how many threads
5514 // need to finish in order to be done).
5515 pst->set_n_threads(n_threads);
5516 pst->set_n_tasks(1);
5517 assert(pst->valid(), "Error");
5518 }
5520 // From space
5521 {
5522 SequentialSubTasksDone* pst = dng->from()->par_seq_tasks();
5523 assert(!pst->valid(), "Clobbering existing data?");
5524 size_t n_tasks = _survivor_chunk_index + 1;
5525 assert(n_tasks == 1 || _survivor_chunk_array != NULL, "Error");
5526 // Sets the condition for completion of the subtask (how many threads
5527 // need to finish in order to be done).
5528 pst->set_n_threads(n_threads);
5529 pst->set_n_tasks((int)n_tasks);
5530 assert(pst->valid(), "Error");
5531 }
5532 }
5534 // Parallel version of remark
5535 void CMSCollector::do_remark_parallel() {
5536 GenCollectedHeap* gch = GenCollectedHeap::heap();
5537 FlexibleWorkGang* workers = gch->workers();
5538 assert(workers != NULL, "Need parallel worker threads.");
5539 int n_workers = workers->total_workers();
5540 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
5541 CompactibleFreeListSpace* perm_space = _permGen->cmsSpace();
5543 CMSParRemarkTask tsk(this,
5544 cms_space, perm_space,
5545 n_workers, workers, task_queues());
5547 // Set up for parallel process_strong_roots work.
5548 gch->set_par_threads(n_workers);
5549 // We won't be iterating over the cards in the card table updating
5550 // the younger_gen cards, so we shouldn't call the following else
5551 // the verification code as well as subsequent younger_refs_iterate
5552 // code would get confused. XXX
5553 // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
5555 // The young gen rescan work will not be done as part of
5556 // process_strong_roots (which currently doesn't knw how to
5557 // parallelize such a scan), but rather will be broken up into
5558 // a set of parallel tasks (via the sampling that the [abortable]
5559 // preclean phase did of EdenSpace, plus the [two] tasks of
5560 // scanning the [two] survivor spaces. Further fine-grain
5561 // parallelization of the scanning of the survivor spaces
5562 // themselves, and of precleaning of the younger gen itself
5563 // is deferred to the future.
5564 initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
5566 // The dirty card rescan work is broken up into a "sequence"
5567 // of parallel tasks (per constituent space) that are dynamically
5568 // claimed by the parallel threads.
5569 cms_space->initialize_sequential_subtasks_for_rescan(n_workers);
5570 perm_space->initialize_sequential_subtasks_for_rescan(n_workers);
5572 // It turns out that even when we're using 1 thread, doing the work in a
5573 // separate thread causes wide variance in run times. We can't help this
5574 // in the multi-threaded case, but we special-case n=1 here to get
5575 // repeatable measurements of the 1-thread overhead of the parallel code.
5576 if (n_workers > 1) {
5577 // Make refs discovery MT-safe
5578 ReferenceProcessorMTMutator mt(ref_processor(), true);
5579 GenCollectedHeap::StrongRootsScope srs(gch);
5580 workers->run_task(&tsk);
5581 } else {
5582 GenCollectedHeap::StrongRootsScope srs(gch);
5583 tsk.work(0);
5584 }
5585 gch->set_par_threads(0); // 0 ==> non-parallel.
5586 // restore, single-threaded for now, any preserved marks
5587 // as a result of work_q overflow
5588 restore_preserved_marks_if_any();
5589 }
5591 // Non-parallel version of remark
5592 void CMSCollector::do_remark_non_parallel() {
5593 ResourceMark rm;
5594 HandleMark hm;
5595 GenCollectedHeap* gch = GenCollectedHeap::heap();
5596 MarkRefsIntoAndScanClosure
5597 mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
5598 &_markStack, &_revisitStack, this,
5599 false /* should_yield */, false /* not precleaning */);
5600 MarkFromDirtyCardsClosure
5601 markFromDirtyCardsClosure(this, _span,
5602 NULL, // space is set further below
5603 &_markBitMap, &_markStack, &_revisitStack,
5604 &mrias_cl);
5605 {
5606 TraceTime t("grey object rescan", PrintGCDetails, false, gclog_or_tty);
5607 // Iterate over the dirty cards, setting the corresponding bits in the
5608 // mod union table.
5609 {
5610 ModUnionClosure modUnionClosure(&_modUnionTable);
5611 _ct->ct_bs()->dirty_card_iterate(
5612 _cmsGen->used_region(),
5613 &modUnionClosure);
5614 _ct->ct_bs()->dirty_card_iterate(
5615 _permGen->used_region(),
5616 &modUnionClosure);
5617 }
5618 // Having transferred these marks into the modUnionTable, we just need
5619 // to rescan the marked objects on the dirty cards in the modUnionTable.
5620 // The initial marking may have been done during an asynchronous
5621 // collection so there may be dirty bits in the mod-union table.
5622 const int alignment =
5623 CardTableModRefBS::card_size * BitsPerWord;
5624 {
5625 // ... First handle dirty cards in CMS gen
5626 markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace());
5627 MemRegion ur = _cmsGen->used_region();
5628 HeapWord* lb = ur.start();
5629 HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
5630 MemRegion cms_span(lb, ub);
5631 _modUnionTable.dirty_range_iterate_clear(cms_span,
5632 &markFromDirtyCardsClosure);
5633 verify_work_stacks_empty();
5634 if (PrintCMSStatistics != 0) {
5635 gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in cms gen) ",
5636 markFromDirtyCardsClosure.num_dirty_cards());
5637 }
5638 }
5639 {
5640 // .. and then repeat for dirty cards in perm gen
5641 markFromDirtyCardsClosure.set_space(_permGen->cmsSpace());
5642 MemRegion ur = _permGen->used_region();
5643 HeapWord* lb = ur.start();
5644 HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
5645 MemRegion perm_span(lb, ub);
5646 _modUnionTable.dirty_range_iterate_clear(perm_span,
5647 &markFromDirtyCardsClosure);
5648 verify_work_stacks_empty();
5649 if (PrintCMSStatistics != 0) {
5650 gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in perm gen) ",
5651 markFromDirtyCardsClosure.num_dirty_cards());
5652 }
5653 }
5654 }
5655 if (VerifyDuringGC &&
5656 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5657 HandleMark hm; // Discard invalid handles created during verification
5658 Universe::verify(true);
5659 }
5660 {
5661 TraceTime t("root rescan", PrintGCDetails, false, gclog_or_tty);
5663 verify_work_stacks_empty();
5665 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
5666 GenCollectedHeap::StrongRootsScope srs(gch);
5667 gch->gen_process_strong_roots(_cmsGen->level(),
5668 true, // younger gens as roots
5669 false, // use the local StrongRootsScope
5670 true, // collecting perm gen
5671 SharedHeap::ScanningOption(roots_scanning_options()),
5672 &mrias_cl,
5673 true, // walk code active on stacks
5674 NULL);
5675 assert(should_unload_classes()
5676 || (roots_scanning_options() & SharedHeap::SO_CodeCache),
5677 "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5678 }
5679 verify_work_stacks_empty();
5680 // Restore evacuated mark words, if any, used for overflow list links
5681 if (!CMSOverflowEarlyRestoration) {
5682 restore_preserved_marks_if_any();
5683 }
5684 verify_overflow_empty();
5685 }
5687 ////////////////////////////////////////////////////////
5688 // Parallel Reference Processing Task Proxy Class
5689 ////////////////////////////////////////////////////////
5690 class CMSRefProcTaskProxy: public AbstractGangTaskWOopQueues {
5691 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
5692 CMSCollector* _collector;
5693 CMSBitMap* _mark_bit_map;
5694 const MemRegion _span;
5695 ProcessTask& _task;
5697 public:
5698 CMSRefProcTaskProxy(ProcessTask& task,
5699 CMSCollector* collector,
5700 const MemRegion& span,
5701 CMSBitMap* mark_bit_map,
5702 AbstractWorkGang* workers,
5703 OopTaskQueueSet* task_queues):
5704 AbstractGangTaskWOopQueues("Process referents by policy in parallel",
5705 task_queues),
5706 _task(task),
5707 _collector(collector), _span(span), _mark_bit_map(mark_bit_map)
5708 {
5709 assert(_collector->_span.equals(_span) && !_span.is_empty(),
5710 "Inconsistency in _span");
5711 }
5713 OopTaskQueueSet* task_queues() { return queues(); }
5715 OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
5717 void do_work_steal(int i,
5718 CMSParDrainMarkingStackClosure* drain,
5719 CMSParKeepAliveClosure* keep_alive,
5720 int* seed);
5722 virtual void work(int i);
5723 };
5725 void CMSRefProcTaskProxy::work(int i) {
5726 assert(_collector->_span.equals(_span), "Inconsistency in _span");
5727 CMSParKeepAliveClosure par_keep_alive(_collector, _span,
5728 _mark_bit_map,
5729 &_collector->_revisitStack,
5730 work_queue(i));
5731 CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span,
5732 _mark_bit_map,
5733 &_collector->_revisitStack,
5734 work_queue(i));
5735 CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map);
5736 _task.work(i, is_alive_closure, par_keep_alive, par_drain_stack);
5737 if (_task.marks_oops_alive()) {
5738 do_work_steal(i, &par_drain_stack, &par_keep_alive,
5739 _collector->hash_seed(i));
5740 }
5741 assert(work_queue(i)->size() == 0, "work_queue should be empty");
5742 assert(_collector->_overflow_list == NULL, "non-empty _overflow_list");
5743 }
5745 class CMSRefEnqueueTaskProxy: public AbstractGangTask {
5746 typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
5747 EnqueueTask& _task;
5749 public:
5750 CMSRefEnqueueTaskProxy(EnqueueTask& task)
5751 : AbstractGangTask("Enqueue reference objects in parallel"),
5752 _task(task)
5753 { }
5755 virtual void work(int i)
5756 {
5757 _task.work(i);
5758 }
5759 };
5761 CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector,
5762 MemRegion span, CMSBitMap* bit_map, CMSMarkStack* revisit_stack,
5763 OopTaskQueue* work_queue):
5764 Par_KlassRememberingOopClosure(collector, NULL, revisit_stack),
5765 _span(span),
5766 _bit_map(bit_map),
5767 _work_queue(work_queue),
5768 _mark_and_push(collector, span, bit_map, revisit_stack, work_queue),
5769 _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
5770 (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads)))
5771 { }
5773 // . see if we can share work_queues with ParNew? XXX
5774 void CMSRefProcTaskProxy::do_work_steal(int i,
5775 CMSParDrainMarkingStackClosure* drain,
5776 CMSParKeepAliveClosure* keep_alive,
5777 int* seed) {
5778 OopTaskQueue* work_q = work_queue(i);
5779 NOT_PRODUCT(int num_steals = 0;)
5780 oop obj_to_scan;
5782 while (true) {
5783 // Completely finish any left over work from (an) earlier round(s)
5784 drain->trim_queue(0);
5785 size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
5786 (size_t)ParGCDesiredObjsFromOverflowList);
5787 // Now check if there's any work in the overflow list
5788 // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
5789 // only affects the number of attempts made to get work from the
5790 // overflow list and does not affect the number of workers. Just
5791 // pass ParallelGCThreads so this behavior is unchanged.
5792 if (_collector->par_take_from_overflow_list(num_from_overflow_list,
5793 work_q,
5794 ParallelGCThreads)) {
5795 // Found something in global overflow list;
5796 // not yet ready to go stealing work from others.
5797 // We'd like to assert(work_q->size() != 0, ...)
5798 // because we just took work from the overflow list,
5799 // but of course we can't, since all of that might have
5800 // been already stolen from us.
5801 continue;
5802 }
5803 // Verify that we have no work before we resort to stealing
5804 assert(work_q->size() == 0, "Have work, shouldn't steal");
5805 // Try to steal from other queues that have work
5806 if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
5807 NOT_PRODUCT(num_steals++;)
5808 assert(obj_to_scan->is_oop(), "Oops, not an oop!");
5809 assert(_mark_bit_map->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
5810 // Do scanning work
5811 obj_to_scan->oop_iterate(keep_alive);
5812 // Loop around, finish this work, and try to steal some more
5813 } else if (terminator()->offer_termination()) {
5814 break; // nirvana from the infinite cycle
5815 }
5816 }
5817 NOT_PRODUCT(
5818 if (PrintCMSStatistics != 0) {
5819 gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
5820 }
5821 )
5822 }
5824 void CMSRefProcTaskExecutor::execute(ProcessTask& task)
5825 {
5826 GenCollectedHeap* gch = GenCollectedHeap::heap();
5827 FlexibleWorkGang* workers = gch->workers();
5828 assert(workers != NULL, "Need parallel worker threads.");
5829 CMSRefProcTaskProxy rp_task(task, &_collector,
5830 _collector.ref_processor()->span(),
5831 _collector.markBitMap(),
5832 workers, _collector.task_queues());
5833 workers->run_task(&rp_task);
5834 }
5836 void CMSRefProcTaskExecutor::execute(EnqueueTask& task)
5837 {
5839 GenCollectedHeap* gch = GenCollectedHeap::heap();
5840 FlexibleWorkGang* workers = gch->workers();
5841 assert(workers != NULL, "Need parallel worker threads.");
5842 CMSRefEnqueueTaskProxy enq_task(task);
5843 workers->run_task(&enq_task);
5844 }
5846 void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
5848 ResourceMark rm;
5849 HandleMark hm;
5851 ReferenceProcessor* rp = ref_processor();
5852 assert(rp->span().equals(_span), "Spans should be equal");
5853 assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
5854 // Process weak references.
5855 rp->setup_policy(clear_all_soft_refs);
5856 verify_work_stacks_empty();
5858 CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
5859 &_markStack, &_revisitStack,
5860 false /* !preclean */);
5861 CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
5862 _span, &_markBitMap, &_markStack,
5863 &cmsKeepAliveClosure, false /* !preclean */);
5864 {
5865 TraceTime t("weak refs processing", PrintGCDetails, false, gclog_or_tty);
5866 if (rp->processing_is_mt()) {
5867 // Set the degree of MT here. If the discovery is done MT, there
5868 // may have been a different number of threads doing the discovery
5869 // and a different number of discovered lists may have Ref objects.
5870 // That is OK as long as the Reference lists are balanced (see
5871 // balance_all_queues() and balance_queues()).
5874 rp->set_mt_degree(ParallelGCThreads);
5875 CMSRefProcTaskExecutor task_executor(*this);
5876 rp->process_discovered_references(&_is_alive_closure,
5877 &cmsKeepAliveClosure,
5878 &cmsDrainMarkingStackClosure,
5879 &task_executor);
5880 } else {
5881 rp->process_discovered_references(&_is_alive_closure,
5882 &cmsKeepAliveClosure,
5883 &cmsDrainMarkingStackClosure,
5884 NULL);
5885 }
5886 verify_work_stacks_empty();
5887 }
5889 if (should_unload_classes()) {
5890 {
5891 TraceTime t("class unloading", PrintGCDetails, false, gclog_or_tty);
5893 // Follow SystemDictionary roots and unload classes
5894 bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
5896 // Follow CodeCache roots and unload any methods marked for unloading
5897 CodeCache::do_unloading(&_is_alive_closure,
5898 &cmsKeepAliveClosure,
5899 purged_class);
5901 cmsDrainMarkingStackClosure.do_void();
5902 verify_work_stacks_empty();
5904 // Update subklass/sibling/implementor links in KlassKlass descendants
5905 assert(!_revisitStack.isEmpty(), "revisit stack should not be empty");
5906 oop k;
5907 while ((k = _revisitStack.pop()) != NULL) {
5908 ((Klass*)(oopDesc*)k)->follow_weak_klass_links(
5909 &_is_alive_closure,
5910 &cmsKeepAliveClosure);
5911 }
5912 assert(!ClassUnloading ||
5913 (_markStack.isEmpty() && overflow_list_is_empty()),
5914 "Should not have found new reachable objects");
5915 assert(_revisitStack.isEmpty(), "revisit stack should have been drained");
5916 cmsDrainMarkingStackClosure.do_void();
5917 verify_work_stacks_empty();
5918 }
5920 {
5921 TraceTime t("scrub symbol & string tables", PrintGCDetails, false, gclog_or_tty);
5922 // Now clean up stale oops in SymbolTable and StringTable
5923 SymbolTable::unlink(&_is_alive_closure);
5924 StringTable::unlink(&_is_alive_closure);
5925 }
5926 }
5928 verify_work_stacks_empty();
5929 // Restore any preserved marks as a result of mark stack or
5930 // work queue overflow
5931 restore_preserved_marks_if_any(); // done single-threaded for now
5933 rp->set_enqueuing_is_done(true);
5934 if (rp->processing_is_mt()) {
5935 rp->balance_all_queues();
5936 CMSRefProcTaskExecutor task_executor(*this);
5937 rp->enqueue_discovered_references(&task_executor);
5938 } else {
5939 rp->enqueue_discovered_references(NULL);
5940 }
5941 rp->verify_no_references_recorded();
5942 assert(!rp->discovery_enabled(), "should have been disabled");
5944 // JVMTI object tagging is based on JNI weak refs. If any of these
5945 // refs were cleared then JVMTI needs to update its maps and
5946 // maybe post ObjectFrees to agents.
5947 JvmtiExport::cms_ref_processing_epilogue();
5948 }
5950 #ifndef PRODUCT
5951 void CMSCollector::check_correct_thread_executing() {
5952 Thread* t = Thread::current();
5953 // Only the VM thread or the CMS thread should be here.
5954 assert(t->is_ConcurrentGC_thread() || t->is_VM_thread(),
5955 "Unexpected thread type");
5956 // If this is the vm thread, the foreground process
5957 // should not be waiting. Note that _foregroundGCIsActive is
5958 // true while the foreground collector is waiting.
5959 if (_foregroundGCShouldWait) {
5960 // We cannot be the VM thread
5961 assert(t->is_ConcurrentGC_thread(),
5962 "Should be CMS thread");
5963 } else {
5964 // We can be the CMS thread only if we are in a stop-world
5965 // phase of CMS collection.
5966 if (t->is_ConcurrentGC_thread()) {
5967 assert(_collectorState == InitialMarking ||
5968 _collectorState == FinalMarking,
5969 "Should be a stop-world phase");
5970 // The CMS thread should be holding the CMS_token.
5971 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5972 "Potential interference with concurrently "
5973 "executing VM thread");
5974 }
5975 }
5976 }
5977 #endif
5979 void CMSCollector::sweep(bool asynch) {
5980 assert(_collectorState == Sweeping, "just checking");
5981 check_correct_thread_executing();
5982 verify_work_stacks_empty();
5983 verify_overflow_empty();
5984 increment_sweep_count();
5985 TraceCMSMemoryManagerStats tms(_collectorState);
5987 _inter_sweep_timer.stop();
5988 _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
5989 size_policy()->avg_cms_free_at_sweep()->sample(_cmsGen->free());
5991 // PermGen verification support: If perm gen sweeping is disabled in
5992 // this cycle, we preserve the perm gen object "deadness" information
5993 // in the perm_gen_verify_bit_map. In order to do that we traverse
5994 // all blocks in perm gen and mark all dead objects.
5995 if (verifying() && !should_unload_classes()) {
5996 assert(perm_gen_verify_bit_map()->sizeInBits() != 0,
5997 "Should have already been allocated");
5998 MarkDeadObjectsClosure mdo(this, _permGen->cmsSpace(),
5999 markBitMap(), perm_gen_verify_bit_map());
6000 if (asynch) {
6001 CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
6002 bitMapLock());
6003 _permGen->cmsSpace()->blk_iterate(&mdo);
6004 } else {
6005 // In the case of synchronous sweep, we already have
6006 // the requisite locks/tokens.
6007 _permGen->cmsSpace()->blk_iterate(&mdo);
6008 }
6009 }
6011 assert(!_intra_sweep_timer.is_active(), "Should not be active");
6012 _intra_sweep_timer.reset();
6013 _intra_sweep_timer.start();
6014 if (asynch) {
6015 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
6016 CMSPhaseAccounting pa(this, "sweep", !PrintGCDetails);
6017 // First sweep the old gen then the perm gen
6018 {
6019 CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
6020 bitMapLock());
6021 sweepWork(_cmsGen, asynch);
6022 }
6024 // Now repeat for perm gen
6025 if (should_unload_classes()) {
6026 CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
6027 bitMapLock());
6028 sweepWork(_permGen, asynch);
6029 }
6031 // Update Universe::_heap_*_at_gc figures.
6032 // We need all the free list locks to make the abstract state
6033 // transition from Sweeping to Resetting. See detailed note
6034 // further below.
6035 {
6036 CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
6037 _permGen->freelistLock());
6038 // Update heap occupancy information which is used as
6039 // input to soft ref clearing policy at the next gc.
6040 Universe::update_heap_info_at_gc();
6041 _collectorState = Resizing;
6042 }
6043 } else {
6044 // already have needed locks
6045 sweepWork(_cmsGen, asynch);
6047 if (should_unload_classes()) {
6048 sweepWork(_permGen, asynch);
6049 }
6050 // Update heap occupancy information which is used as
6051 // input to soft ref clearing policy at the next gc.
6052 Universe::update_heap_info_at_gc();
6053 _collectorState = Resizing;
6054 }
6055 verify_work_stacks_empty();
6056 verify_overflow_empty();
6058 _intra_sweep_timer.stop();
6059 _intra_sweep_estimate.sample(_intra_sweep_timer.seconds());
6061 _inter_sweep_timer.reset();
6062 _inter_sweep_timer.start();
6064 update_time_of_last_gc(os::javaTimeMillis());
6066 // NOTE on abstract state transitions:
6067 // Mutators allocate-live and/or mark the mod-union table dirty
6068 // based on the state of the collection. The former is done in
6069 // the interval [Marking, Sweeping] and the latter in the interval
6070 // [Marking, Sweeping). Thus the transitions into the Marking state
6071 // and out of the Sweeping state must be synchronously visible
6072 // globally to the mutators.
6073 // The transition into the Marking state happens with the world
6074 // stopped so the mutators will globally see it. Sweeping is
6075 // done asynchronously by the background collector so the transition
6076 // from the Sweeping state to the Resizing state must be done
6077 // under the freelistLock (as is the check for whether to
6078 // allocate-live and whether to dirty the mod-union table).
6079 assert(_collectorState == Resizing, "Change of collector state to"
6080 " Resizing must be done under the freelistLocks (plural)");
6082 // Now that sweeping has been completed, we clear
6083 // the incremental_collection_failed flag,
6084 // thus inviting a younger gen collection to promote into
6085 // this generation. If such a promotion may still fail,
6086 // the flag will be set again when a young collection is
6087 // attempted.
6088 GenCollectedHeap* gch = GenCollectedHeap::heap();
6089 gch->clear_incremental_collection_failed(); // Worth retrying as fresh space may have been freed up
6090 gch->update_full_collections_completed(_collection_count_start);
6091 }
6093 // FIX ME!!! Looks like this belongs in CFLSpace, with
6094 // CMSGen merely delegating to it.
6095 void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
6096 double nearLargestPercent = FLSLargestBlockCoalesceProximity;
6097 HeapWord* minAddr = _cmsSpace->bottom();
6098 HeapWord* largestAddr =
6099 (HeapWord*) _cmsSpace->dictionary()->findLargestDict();
6100 if (largestAddr == NULL) {
6101 // The dictionary appears to be empty. In this case
6102 // try to coalesce at the end of the heap.
6103 largestAddr = _cmsSpace->end();
6104 }
6105 size_t largestOffset = pointer_delta(largestAddr, minAddr);
6106 size_t nearLargestOffset =
6107 (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
6108 if (PrintFLSStatistics != 0) {
6109 gclog_or_tty->print_cr(
6110 "CMS: Large Block: " PTR_FORMAT ";"
6111 " Proximity: " PTR_FORMAT " -> " PTR_FORMAT,
6112 largestAddr,
6113 _cmsSpace->nearLargestChunk(), minAddr + nearLargestOffset);
6114 }
6115 _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
6116 }
6118 bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) {
6119 return addr >= _cmsSpace->nearLargestChunk();
6120 }
6122 FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
6123 return _cmsSpace->find_chunk_at_end();
6124 }
6126 void ConcurrentMarkSweepGeneration::update_gc_stats(int current_level,
6127 bool full) {
6128 // The next lower level has been collected. Gather any statistics
6129 // that are of interest at this point.
6130 if (!full && (current_level + 1) == level()) {
6131 // Gather statistics on the young generation collection.
6132 collector()->stats().record_gc0_end(used());
6133 }
6134 }
6136 CMSAdaptiveSizePolicy* ConcurrentMarkSweepGeneration::size_policy() {
6137 GenCollectedHeap* gch = GenCollectedHeap::heap();
6138 assert(gch->kind() == CollectedHeap::GenCollectedHeap,
6139 "Wrong type of heap");
6140 CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
6141 gch->gen_policy()->size_policy();
6142 assert(sp->is_gc_cms_adaptive_size_policy(),
6143 "Wrong type of size policy");
6144 return sp;
6145 }
6147 void ConcurrentMarkSweepGeneration::rotate_debug_collection_type() {
6148 if (PrintGCDetails && Verbose) {
6149 gclog_or_tty->print("Rotate from %d ", _debug_collection_type);
6150 }
6151 _debug_collection_type = (CollectionTypes) (_debug_collection_type + 1);
6152 _debug_collection_type =
6153 (CollectionTypes) (_debug_collection_type % Unknown_collection_type);
6154 if (PrintGCDetails && Verbose) {
6155 gclog_or_tty->print_cr("to %d ", _debug_collection_type);
6156 }
6157 }
6159 void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen,
6160 bool asynch) {
6161 // We iterate over the space(s) underlying this generation,
6162 // checking the mark bit map to see if the bits corresponding
6163 // to specific blocks are marked or not. Blocks that are
6164 // marked are live and are not swept up. All remaining blocks
6165 // are swept up, with coalescing on-the-fly as we sweep up
6166 // contiguous free and/or garbage blocks:
6167 // We need to ensure that the sweeper synchronizes with allocators
6168 // and stop-the-world collectors. In particular, the following
6169 // locks are used:
6170 // . CMS token: if this is held, a stop the world collection cannot occur
6171 // . freelistLock: if this is held no allocation can occur from this
6172 // generation by another thread
6173 // . bitMapLock: if this is held, no other thread can access or update
6174 //
6176 // Note that we need to hold the freelistLock if we use
6177 // block iterate below; else the iterator might go awry if
6178 // a mutator (or promotion) causes block contents to change
6179 // (for instance if the allocator divvies up a block).
6180 // If we hold the free list lock, for all practical purposes
6181 // young generation GC's can't occur (they'll usually need to
6182 // promote), so we might as well prevent all young generation
6183 // GC's while we do a sweeping step. For the same reason, we might
6184 // as well take the bit map lock for the entire duration
6186 // check that we hold the requisite locks
6187 assert(have_cms_token(), "Should hold cms token");
6188 assert( (asynch && ConcurrentMarkSweepThread::cms_thread_has_cms_token())
6189 || (!asynch && ConcurrentMarkSweepThread::vm_thread_has_cms_token()),
6190 "Should possess CMS token to sweep");
6191 assert_lock_strong(gen->freelistLock());
6192 assert_lock_strong(bitMapLock());
6194 assert(!_inter_sweep_timer.is_active(), "Was switched off in an outer context");
6195 assert(_intra_sweep_timer.is_active(), "Was switched on in an outer context");
6196 gen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
6197 _inter_sweep_estimate.padded_average(),
6198 _intra_sweep_estimate.padded_average());
6199 gen->setNearLargestChunk();
6201 {
6202 SweepClosure sweepClosure(this, gen, &_markBitMap,
6203 CMSYield && asynch);
6204 gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
6205 // We need to free-up/coalesce garbage/blocks from a
6206 // co-terminal free run. This is done in the SweepClosure
6207 // destructor; so, do not remove this scope, else the
6208 // end-of-sweep-census below will be off by a little bit.
6209 }
6210 gen->cmsSpace()->sweep_completed();
6211 gen->cmsSpace()->endSweepFLCensus(sweep_count());
6212 if (should_unload_classes()) { // unloaded classes this cycle,
6213 _concurrent_cycles_since_last_unload = 0; // ... reset count
6214 } else { // did not unload classes,
6215 _concurrent_cycles_since_last_unload++; // ... increment count
6216 }
6217 }
6219 // Reset CMS data structures (for now just the marking bit map)
6220 // preparatory for the next cycle.
6221 void CMSCollector::reset(bool asynch) {
6222 GenCollectedHeap* gch = GenCollectedHeap::heap();
6223 CMSAdaptiveSizePolicy* sp = size_policy();
6224 AdaptiveSizePolicyOutput(sp, gch->total_collections());
6225 if (asynch) {
6226 CMSTokenSyncWithLocks ts(true, bitMapLock());
6228 // If the state is not "Resetting", the foreground thread
6229 // has done a collection and the resetting.
6230 if (_collectorState != Resetting) {
6231 assert(_collectorState == Idling, "The state should only change"
6232 " because the foreground collector has finished the collection");
6233 return;
6234 }
6236 // Clear the mark bitmap (no grey objects to start with)
6237 // for the next cycle.
6238 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
6239 CMSPhaseAccounting cmspa(this, "reset", !PrintGCDetails);
6241 HeapWord* curAddr = _markBitMap.startWord();
6242 while (curAddr < _markBitMap.endWord()) {
6243 size_t remaining = pointer_delta(_markBitMap.endWord(), curAddr);
6244 MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
6245 _markBitMap.clear_large_range(chunk);
6246 if (ConcurrentMarkSweepThread::should_yield() &&
6247 !foregroundGCIsActive() &&
6248 CMSYield) {
6249 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6250 "CMS thread should hold CMS token");
6251 assert_lock_strong(bitMapLock());
6252 bitMapLock()->unlock();
6253 ConcurrentMarkSweepThread::desynchronize(true);
6254 ConcurrentMarkSweepThread::acknowledge_yield_request();
6255 stopTimer();
6256 if (PrintCMSStatistics != 0) {
6257 incrementYields();
6258 }
6259 icms_wait();
6261 // See the comment in coordinator_yield()
6262 for (unsigned i = 0; i < CMSYieldSleepCount &&
6263 ConcurrentMarkSweepThread::should_yield() &&
6264 !CMSCollector::foregroundGCIsActive(); ++i) {
6265 os::sleep(Thread::current(), 1, false);
6266 ConcurrentMarkSweepThread::acknowledge_yield_request();
6267 }
6269 ConcurrentMarkSweepThread::synchronize(true);
6270 bitMapLock()->lock_without_safepoint_check();
6271 startTimer();
6272 }
6273 curAddr = chunk.end();
6274 }
6275 // A successful mostly concurrent collection has been done.
6276 // Because only the full (i.e., concurrent mode failure) collections
6277 // are being measured for gc overhead limits, clean the "near" flag
6278 // and count.
6279 sp->reset_gc_overhead_limit_count();
6280 _collectorState = Idling;
6281 } else {
6282 // already have the lock
6283 assert(_collectorState == Resetting, "just checking");
6284 assert_lock_strong(bitMapLock());
6285 _markBitMap.clear_all();
6286 _collectorState = Idling;
6287 }
6289 // Stop incremental mode after a cycle completes, so that any future cycles
6290 // are triggered by allocation.
6291 stop_icms();
6293 NOT_PRODUCT(
6294 if (RotateCMSCollectionTypes) {
6295 _cmsGen->rotate_debug_collection_type();
6296 }
6297 )
6298 }
6300 void CMSCollector::do_CMS_operation(CMS_op_type op) {
6301 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
6302 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
6303 TraceTime t("GC", PrintGC, !PrintGCDetails, gclog_or_tty);
6304 TraceCollectorStats tcs(counters());
6306 switch (op) {
6307 case CMS_op_checkpointRootsInitial: {
6308 checkpointRootsInitial(true); // asynch
6309 if (PrintGC) {
6310 _cmsGen->printOccupancy("initial-mark");
6311 }
6312 break;
6313 }
6314 case CMS_op_checkpointRootsFinal: {
6315 checkpointRootsFinal(true, // asynch
6316 false, // !clear_all_soft_refs
6317 false); // !init_mark_was_synchronous
6318 if (PrintGC) {
6319 _cmsGen->printOccupancy("remark");
6320 }
6321 break;
6322 }
6323 default:
6324 fatal("No such CMS_op");
6325 }
6326 }
6328 #ifndef PRODUCT
6329 size_t const CMSCollector::skip_header_HeapWords() {
6330 return FreeChunk::header_size();
6331 }
6333 // Try and collect here conditions that should hold when
6334 // CMS thread is exiting. The idea is that the foreground GC
6335 // thread should not be blocked if it wants to terminate
6336 // the CMS thread and yet continue to run the VM for a while
6337 // after that.
6338 void CMSCollector::verify_ok_to_terminate() const {
6339 assert(Thread::current()->is_ConcurrentGC_thread(),
6340 "should be called by CMS thread");
6341 assert(!_foregroundGCShouldWait, "should be false");
6342 // We could check here that all the various low-level locks
6343 // are not held by the CMS thread, but that is overkill; see
6344 // also CMSThread::verify_ok_to_terminate() where the CGC_lock
6345 // is checked.
6346 }
6347 #endif
6349 size_t CMSCollector::block_size_using_printezis_bits(HeapWord* addr) const {
6350 assert(_markBitMap.isMarked(addr) && _markBitMap.isMarked(addr + 1),
6351 "missing Printezis mark?");
6352 HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
6353 size_t size = pointer_delta(nextOneAddr + 1, addr);
6354 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6355 "alignment problem");
6356 assert(size >= 3, "Necessary for Printezis marks to work");
6357 return size;
6358 }
6360 // A variant of the above (block_size_using_printezis_bits()) except
6361 // that we return 0 if the P-bits are not yet set.
6362 size_t CMSCollector::block_size_if_printezis_bits(HeapWord* addr) const {
6363 if (_markBitMap.isMarked(addr)) {
6364 assert(_markBitMap.isMarked(addr + 1), "Missing Printezis bit?");
6365 HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
6366 size_t size = pointer_delta(nextOneAddr + 1, addr);
6367 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6368 "alignment problem");
6369 assert(size >= 3, "Necessary for Printezis marks to work");
6370 return size;
6371 } else {
6372 assert(!_markBitMap.isMarked(addr + 1), "Bit map inconsistency?");
6373 return 0;
6374 }
6375 }
6377 HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const {
6378 size_t sz = 0;
6379 oop p = (oop)addr;
6380 if (p->klass_or_null() != NULL && p->is_parsable()) {
6381 sz = CompactibleFreeListSpace::adjustObjectSize(p->size());
6382 } else {
6383 sz = block_size_using_printezis_bits(addr);
6384 }
6385 assert(sz > 0, "size must be nonzero");
6386 HeapWord* next_block = addr + sz;
6387 HeapWord* next_card = (HeapWord*)round_to((uintptr_t)next_block,
6388 CardTableModRefBS::card_size);
6389 assert(round_down((uintptr_t)addr, CardTableModRefBS::card_size) <
6390 round_down((uintptr_t)next_card, CardTableModRefBS::card_size),
6391 "must be different cards");
6392 return next_card;
6393 }
6396 // CMS Bit Map Wrapper /////////////////////////////////////////
6398 // Construct a CMS bit map infrastructure, but don't create the
6399 // bit vector itself. That is done by a separate call CMSBitMap::allocate()
6400 // further below.
6401 CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name):
6402 _bm(),
6403 _shifter(shifter),
6404 _lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true) : NULL)
6405 {
6406 _bmStartWord = 0;
6407 _bmWordSize = 0;
6408 }
6410 bool CMSBitMap::allocate(MemRegion mr) {
6411 _bmStartWord = mr.start();
6412 _bmWordSize = mr.word_size();
6413 ReservedSpace brs(ReservedSpace::allocation_align_size_up(
6414 (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
6415 if (!brs.is_reserved()) {
6416 warning("CMS bit map allocation failure");
6417 return false;
6418 }
6419 // For now we'll just commit all of the bit map up fromt.
6420 // Later on we'll try to be more parsimonious with swap.
6421 if (!_virtual_space.initialize(brs, brs.size())) {
6422 warning("CMS bit map backing store failure");
6423 return false;
6424 }
6425 assert(_virtual_space.committed_size() == brs.size(),
6426 "didn't reserve backing store for all of CMS bit map?");
6427 _bm.set_map((BitMap::bm_word_t*)_virtual_space.low());
6428 assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
6429 _bmWordSize, "inconsistency in bit map sizing");
6430 _bm.set_size(_bmWordSize >> _shifter);
6432 // bm.clear(); // can we rely on getting zero'd memory? verify below
6433 assert(isAllClear(),
6434 "Expected zero'd memory from ReservedSpace constructor");
6435 assert(_bm.size() == heapWordDiffToOffsetDiff(sizeInWords()),
6436 "consistency check");
6437 return true;
6438 }
6440 void CMSBitMap::dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl) {
6441 HeapWord *next_addr, *end_addr, *last_addr;
6442 assert_locked();
6443 assert(covers(mr), "out-of-range error");
6444 // XXX assert that start and end are appropriately aligned
6445 for (next_addr = mr.start(), end_addr = mr.end();
6446 next_addr < end_addr; next_addr = last_addr) {
6447 MemRegion dirty_region = getAndClearMarkedRegion(next_addr, end_addr);
6448 last_addr = dirty_region.end();
6449 if (!dirty_region.is_empty()) {
6450 cl->do_MemRegion(dirty_region);
6451 } else {
6452 assert(last_addr == end_addr, "program logic");
6453 return;
6454 }
6455 }
6456 }
6458 #ifndef PRODUCT
6459 void CMSBitMap::assert_locked() const {
6460 CMSLockVerifier::assert_locked(lock());
6461 }
6463 bool CMSBitMap::covers(MemRegion mr) const {
6464 // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
6465 assert((size_t)_bm.size() == (_bmWordSize >> _shifter),
6466 "size inconsistency");
6467 return (mr.start() >= _bmStartWord) &&
6468 (mr.end() <= endWord());
6469 }
6471 bool CMSBitMap::covers(HeapWord* start, size_t size) const {
6472 return (start >= _bmStartWord && (start + size) <= endWord());
6473 }
6475 void CMSBitMap::verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) {
6476 // verify that there are no 1 bits in the interval [left, right)
6477 FalseBitMapClosure falseBitMapClosure;
6478 iterate(&falseBitMapClosure, left, right);
6479 }
6481 void CMSBitMap::region_invariant(MemRegion mr)
6482 {
6483 assert_locked();
6484 // mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
6485 assert(!mr.is_empty(), "unexpected empty region");
6486 assert(covers(mr), "mr should be covered by bit map");
6487 // convert address range into offset range
6488 size_t start_ofs = heapWordToOffset(mr.start());
6489 // Make sure that end() is appropriately aligned
6490 assert(mr.end() == (HeapWord*)round_to((intptr_t)mr.end(),
6491 (1 << (_shifter+LogHeapWordSize))),
6492 "Misaligned mr.end()");
6493 size_t end_ofs = heapWordToOffset(mr.end());
6494 assert(end_ofs > start_ofs, "Should mark at least one bit");
6495 }
6497 #endif
6499 bool CMSMarkStack::allocate(size_t size) {
6500 // allocate a stack of the requisite depth
6501 ReservedSpace rs(ReservedSpace::allocation_align_size_up(
6502 size * sizeof(oop)));
6503 if (!rs.is_reserved()) {
6504 warning("CMSMarkStack allocation failure");
6505 return false;
6506 }
6507 if (!_virtual_space.initialize(rs, rs.size())) {
6508 warning("CMSMarkStack backing store failure");
6509 return false;
6510 }
6511 assert(_virtual_space.committed_size() == rs.size(),
6512 "didn't reserve backing store for all of CMS stack?");
6513 _base = (oop*)(_virtual_space.low());
6514 _index = 0;
6515 _capacity = size;
6516 NOT_PRODUCT(_max_depth = 0);
6517 return true;
6518 }
6520 // XXX FIX ME !!! In the MT case we come in here holding a
6521 // leaf lock. For printing we need to take a further lock
6522 // which has lower rank. We need to recallibrate the two
6523 // lock-ranks involved in order to be able to rpint the
6524 // messages below. (Or defer the printing to the caller.
6525 // For now we take the expedient path of just disabling the
6526 // messages for the problematic case.)
6527 void CMSMarkStack::expand() {
6528 assert(_capacity <= MarkStackSizeMax, "stack bigger than permitted");
6529 if (_capacity == MarkStackSizeMax) {
6530 if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
6531 // We print a warning message only once per CMS cycle.
6532 gclog_or_tty->print_cr(" (benign) Hit CMSMarkStack max size limit");
6533 }
6534 return;
6535 }
6536 // Double capacity if possible
6537 size_t new_capacity = MIN2(_capacity*2, MarkStackSizeMax);
6538 // Do not give up existing stack until we have managed to
6539 // get the double capacity that we desired.
6540 ReservedSpace rs(ReservedSpace::allocation_align_size_up(
6541 new_capacity * sizeof(oop)));
6542 if (rs.is_reserved()) {
6543 // Release the backing store associated with old stack
6544 _virtual_space.release();
6545 // Reinitialize virtual space for new stack
6546 if (!_virtual_space.initialize(rs, rs.size())) {
6547 fatal("Not enough swap for expanded marking stack");
6548 }
6549 _base = (oop*)(_virtual_space.low());
6550 _index = 0;
6551 _capacity = new_capacity;
6552 } else if (_failed_double++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
6553 // Failed to double capacity, continue;
6554 // we print a detail message only once per CMS cycle.
6555 gclog_or_tty->print(" (benign) Failed to expand marking stack from "SIZE_FORMAT"K to "
6556 SIZE_FORMAT"K",
6557 _capacity / K, new_capacity / K);
6558 }
6559 }
6562 // Closures
6563 // XXX: there seems to be a lot of code duplication here;
6564 // should refactor and consolidate common code.
6566 // This closure is used to mark refs into the CMS generation in
6567 // the CMS bit map. Called at the first checkpoint. This closure
6568 // assumes that we do not need to re-mark dirty cards; if the CMS
6569 // generation on which this is used is not an oldest (modulo perm gen)
6570 // generation then this will lose younger_gen cards!
6572 MarkRefsIntoClosure::MarkRefsIntoClosure(
6573 MemRegion span, CMSBitMap* bitMap):
6574 _span(span),
6575 _bitMap(bitMap)
6576 {
6577 assert(_ref_processor == NULL, "deliberately left NULL");
6578 assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
6579 }
6581 void MarkRefsIntoClosure::do_oop(oop obj) {
6582 // if p points into _span, then mark corresponding bit in _markBitMap
6583 assert(obj->is_oop(), "expected an oop");
6584 HeapWord* addr = (HeapWord*)obj;
6585 if (_span.contains(addr)) {
6586 // this should be made more efficient
6587 _bitMap->mark(addr);
6588 }
6589 }
6591 void MarkRefsIntoClosure::do_oop(oop* p) { MarkRefsIntoClosure::do_oop_work(p); }
6592 void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
6594 // A variant of the above, used for CMS marking verification.
6595 MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
6596 MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm):
6597 _span(span),
6598 _verification_bm(verification_bm),
6599 _cms_bm(cms_bm)
6600 {
6601 assert(_ref_processor == NULL, "deliberately left NULL");
6602 assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
6603 }
6605 void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
6606 // if p points into _span, then mark corresponding bit in _markBitMap
6607 assert(obj->is_oop(), "expected an oop");
6608 HeapWord* addr = (HeapWord*)obj;
6609 if (_span.contains(addr)) {
6610 _verification_bm->mark(addr);
6611 if (!_cms_bm->isMarked(addr)) {
6612 oop(addr)->print();
6613 gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", addr);
6614 fatal("... aborting");
6615 }
6616 }
6617 }
6619 void MarkRefsIntoVerifyClosure::do_oop(oop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
6620 void MarkRefsIntoVerifyClosure::do_oop(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
6622 //////////////////////////////////////////////////
6623 // MarkRefsIntoAndScanClosure
6624 //////////////////////////////////////////////////
6626 MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
6627 ReferenceProcessor* rp,
6628 CMSBitMap* bit_map,
6629 CMSBitMap* mod_union_table,
6630 CMSMarkStack* mark_stack,
6631 CMSMarkStack* revisit_stack,
6632 CMSCollector* collector,
6633 bool should_yield,
6634 bool concurrent_precleaning):
6635 _collector(collector),
6636 _span(span),
6637 _bit_map(bit_map),
6638 _mark_stack(mark_stack),
6639 _pushAndMarkClosure(collector, span, rp, bit_map, mod_union_table,
6640 mark_stack, revisit_stack, concurrent_precleaning),
6641 _yield(should_yield),
6642 _concurrent_precleaning(concurrent_precleaning),
6643 _freelistLock(NULL)
6644 {
6645 _ref_processor = rp;
6646 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
6647 }
6649 // This closure is used to mark refs into the CMS generation at the
6650 // second (final) checkpoint, and to scan and transitively follow
6651 // the unmarked oops. It is also used during the concurrent precleaning
6652 // phase while scanning objects on dirty cards in the CMS generation.
6653 // The marks are made in the marking bit map and the marking stack is
6654 // used for keeping the (newly) grey objects during the scan.
6655 // The parallel version (Par_...) appears further below.
6656 void MarkRefsIntoAndScanClosure::do_oop(oop obj) {
6657 if (obj != NULL) {
6658 assert(obj->is_oop(), "expected an oop");
6659 HeapWord* addr = (HeapWord*)obj;
6660 assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
6661 assert(_collector->overflow_list_is_empty(),
6662 "overflow list should be empty");
6663 if (_span.contains(addr) &&
6664 !_bit_map->isMarked(addr)) {
6665 // mark bit map (object is now grey)
6666 _bit_map->mark(addr);
6667 // push on marking stack (stack should be empty), and drain the
6668 // stack by applying this closure to the oops in the oops popped
6669 // from the stack (i.e. blacken the grey objects)
6670 bool res = _mark_stack->push(obj);
6671 assert(res, "Should have space to push on empty stack");
6672 do {
6673 oop new_oop = _mark_stack->pop();
6674 assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
6675 assert(new_oop->is_parsable(), "Found unparsable oop");
6676 assert(_bit_map->isMarked((HeapWord*)new_oop),
6677 "only grey objects on this stack");
6678 // iterate over the oops in this oop, marking and pushing
6679 // the ones in CMS heap (i.e. in _span).
6680 new_oop->oop_iterate(&_pushAndMarkClosure);
6681 // check if it's time to yield
6682 do_yield_check();
6683 } while (!_mark_stack->isEmpty() ||
6684 (!_concurrent_precleaning && take_from_overflow_list()));
6685 // if marking stack is empty, and we are not doing this
6686 // during precleaning, then check the overflow list
6687 }
6688 assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
6689 assert(_collector->overflow_list_is_empty(),
6690 "overflow list was drained above");
6691 // We could restore evacuated mark words, if any, used for
6692 // overflow list links here because the overflow list is
6693 // provably empty here. That would reduce the maximum
6694 // size requirements for preserved_{oop,mark}_stack.
6695 // But we'll just postpone it until we are all done
6696 // so we can just stream through.
6697 if (!_concurrent_precleaning && CMSOverflowEarlyRestoration) {
6698 _collector->restore_preserved_marks_if_any();
6699 assert(_collector->no_preserved_marks(), "No preserved marks");
6700 }
6701 assert(!CMSOverflowEarlyRestoration || _collector->no_preserved_marks(),
6702 "All preserved marks should have been restored above");
6703 }
6704 }
6706 void MarkRefsIntoAndScanClosure::do_oop(oop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
6707 void MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
6709 void MarkRefsIntoAndScanClosure::do_yield_work() {
6710 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6711 "CMS thread should hold CMS token");
6712 assert_lock_strong(_freelistLock);
6713 assert_lock_strong(_bit_map->lock());
6714 // relinquish the free_list_lock and bitMaplock()
6715 DEBUG_ONLY(RememberKlassesChecker mux(false);)
6716 _bit_map->lock()->unlock();
6717 _freelistLock->unlock();
6718 ConcurrentMarkSweepThread::desynchronize(true);
6719 ConcurrentMarkSweepThread::acknowledge_yield_request();
6720 _collector->stopTimer();
6721 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
6722 if (PrintCMSStatistics != 0) {
6723 _collector->incrementYields();
6724 }
6725 _collector->icms_wait();
6727 // See the comment in coordinator_yield()
6728 for (unsigned i = 0;
6729 i < CMSYieldSleepCount &&
6730 ConcurrentMarkSweepThread::should_yield() &&
6731 !CMSCollector::foregroundGCIsActive();
6732 ++i) {
6733 os::sleep(Thread::current(), 1, false);
6734 ConcurrentMarkSweepThread::acknowledge_yield_request();
6735 }
6737 ConcurrentMarkSweepThread::synchronize(true);
6738 _freelistLock->lock_without_safepoint_check();
6739 _bit_map->lock()->lock_without_safepoint_check();
6740 _collector->startTimer();
6741 }
6743 ///////////////////////////////////////////////////////////
6744 // Par_MarkRefsIntoAndScanClosure: a parallel version of
6745 // MarkRefsIntoAndScanClosure
6746 ///////////////////////////////////////////////////////////
6747 Par_MarkRefsIntoAndScanClosure::Par_MarkRefsIntoAndScanClosure(
6748 CMSCollector* collector, MemRegion span, ReferenceProcessor* rp,
6749 CMSBitMap* bit_map, OopTaskQueue* work_queue, CMSMarkStack* revisit_stack):
6750 _span(span),
6751 _bit_map(bit_map),
6752 _work_queue(work_queue),
6753 _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
6754 (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads))),
6755 _par_pushAndMarkClosure(collector, span, rp, bit_map, work_queue,
6756 revisit_stack)
6757 {
6758 _ref_processor = rp;
6759 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
6760 }
6762 // This closure is used to mark refs into the CMS generation at the
6763 // second (final) checkpoint, and to scan and transitively follow
6764 // the unmarked oops. The marks are made in the marking bit map and
6765 // the work_queue is used for keeping the (newly) grey objects during
6766 // the scan phase whence they are also available for stealing by parallel
6767 // threads. Since the marking bit map is shared, updates are
6768 // synchronized (via CAS).
6769 void Par_MarkRefsIntoAndScanClosure::do_oop(oop obj) {
6770 if (obj != NULL) {
6771 // Ignore mark word because this could be an already marked oop
6772 // that may be chained at the end of the overflow list.
6773 assert(obj->is_oop(true), "expected an oop");
6774 HeapWord* addr = (HeapWord*)obj;
6775 if (_span.contains(addr) &&
6776 !_bit_map->isMarked(addr)) {
6777 // mark bit map (object will become grey):
6778 // It is possible for several threads to be
6779 // trying to "claim" this object concurrently;
6780 // the unique thread that succeeds in marking the
6781 // object first will do the subsequent push on
6782 // to the work queue (or overflow list).
6783 if (_bit_map->par_mark(addr)) {
6784 // push on work_queue (which may not be empty), and trim the
6785 // queue to an appropriate length by applying this closure to
6786 // the oops in the oops popped from the stack (i.e. blacken the
6787 // grey objects)
6788 bool res = _work_queue->push(obj);
6789 assert(res, "Low water mark should be less than capacity?");
6790 trim_queue(_low_water_mark);
6791 } // Else, another thread claimed the object
6792 }
6793 }
6794 }
6796 void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
6797 void Par_MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
6799 // This closure is used to rescan the marked objects on the dirty cards
6800 // in the mod union table and the card table proper.
6801 size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
6802 oop p, MemRegion mr) {
6804 size_t size = 0;
6805 HeapWord* addr = (HeapWord*)p;
6806 DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6807 assert(_span.contains(addr), "we are scanning the CMS generation");
6808 // check if it's time to yield
6809 if (do_yield_check()) {
6810 // We yielded for some foreground stop-world work,
6811 // and we have been asked to abort this ongoing preclean cycle.
6812 return 0;
6813 }
6814 if (_bitMap->isMarked(addr)) {
6815 // it's marked; is it potentially uninitialized?
6816 if (p->klass_or_null() != NULL) {
6817 // If is_conc_safe is false, the object may be undergoing
6818 // change by the VM outside a safepoint. Don't try to
6819 // scan it, but rather leave it for the remark phase.
6820 if (CMSPermGenPrecleaningEnabled &&
6821 (!p->is_conc_safe() || !p->is_parsable())) {
6822 // Signal precleaning to redirty the card since
6823 // the klass pointer is already installed.
6824 assert(size == 0, "Initial value");
6825 } else {
6826 assert(p->is_parsable(), "must be parsable.");
6827 // an initialized object; ignore mark word in verification below
6828 // since we are running concurrent with mutators
6829 assert(p->is_oop(true), "should be an oop");
6830 if (p->is_objArray()) {
6831 // objArrays are precisely marked; restrict scanning
6832 // to dirty cards only.
6833 size = CompactibleFreeListSpace::adjustObjectSize(
6834 p->oop_iterate(_scanningClosure, mr));
6835 } else {
6836 // A non-array may have been imprecisely marked; we need
6837 // to scan object in its entirety.
6838 size = CompactibleFreeListSpace::adjustObjectSize(
6839 p->oop_iterate(_scanningClosure));
6840 }
6841 #ifdef DEBUG
6842 size_t direct_size =
6843 CompactibleFreeListSpace::adjustObjectSize(p->size());
6844 assert(size == direct_size, "Inconsistency in size");
6845 assert(size >= 3, "Necessary for Printezis marks to work");
6846 if (!_bitMap->isMarked(addr+1)) {
6847 _bitMap->verifyNoOneBitsInRange(addr+2, addr+size);
6848 } else {
6849 _bitMap->verifyNoOneBitsInRange(addr+2, addr+size-1);
6850 assert(_bitMap->isMarked(addr+size-1),
6851 "inconsistent Printezis mark");
6852 }
6853 #endif // DEBUG
6854 }
6855 } else {
6856 // an unitialized object
6857 assert(_bitMap->isMarked(addr+1), "missing Printezis mark?");
6858 HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
6859 size = pointer_delta(nextOneAddr + 1, addr);
6860 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6861 "alignment problem");
6862 // Note that pre-cleaning needn't redirty the card. OopDesc::set_klass()
6863 // will dirty the card when the klass pointer is installed in the
6864 // object (signalling the completion of initialization).
6865 }
6866 } else {
6867 // Either a not yet marked object or an uninitialized object
6868 if (p->klass_or_null() == NULL || !p->is_parsable()) {
6869 // An uninitialized object, skip to the next card, since
6870 // we may not be able to read its P-bits yet.
6871 assert(size == 0, "Initial value");
6872 } else {
6873 // An object not (yet) reached by marking: we merely need to
6874 // compute its size so as to go look at the next block.
6875 assert(p->is_oop(true), "should be an oop");
6876 size = CompactibleFreeListSpace::adjustObjectSize(p->size());
6877 }
6878 }
6879 DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6880 return size;
6881 }
6883 void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
6884 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6885 "CMS thread should hold CMS token");
6886 assert_lock_strong(_freelistLock);
6887 assert_lock_strong(_bitMap->lock());
6888 DEBUG_ONLY(RememberKlassesChecker mux(false);)
6889 // relinquish the free_list_lock and bitMaplock()
6890 _bitMap->lock()->unlock();
6891 _freelistLock->unlock();
6892 ConcurrentMarkSweepThread::desynchronize(true);
6893 ConcurrentMarkSweepThread::acknowledge_yield_request();
6894 _collector->stopTimer();
6895 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
6896 if (PrintCMSStatistics != 0) {
6897 _collector->incrementYields();
6898 }
6899 _collector->icms_wait();
6901 // See the comment in coordinator_yield()
6902 for (unsigned i = 0; i < CMSYieldSleepCount &&
6903 ConcurrentMarkSweepThread::should_yield() &&
6904 !CMSCollector::foregroundGCIsActive(); ++i) {
6905 os::sleep(Thread::current(), 1, false);
6906 ConcurrentMarkSweepThread::acknowledge_yield_request();
6907 }
6909 ConcurrentMarkSweepThread::synchronize(true);
6910 _freelistLock->lock_without_safepoint_check();
6911 _bitMap->lock()->lock_without_safepoint_check();
6912 _collector->startTimer();
6913 }
6916 //////////////////////////////////////////////////////////////////
6917 // SurvivorSpacePrecleanClosure
6918 //////////////////////////////////////////////////////////////////
6919 // This (single-threaded) closure is used to preclean the oops in
6920 // the survivor spaces.
6921 size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {
6923 HeapWord* addr = (HeapWord*)p;
6924 DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6925 assert(!_span.contains(addr), "we are scanning the survivor spaces");
6926 assert(p->klass_or_null() != NULL, "object should be initializd");
6927 assert(p->is_parsable(), "must be parsable.");
6928 // an initialized object; ignore mark word in verification below
6929 // since we are running concurrent with mutators
6930 assert(p->is_oop(true), "should be an oop");
6931 // Note that we do not yield while we iterate over
6932 // the interior oops of p, pushing the relevant ones
6933 // on our marking stack.
6934 size_t size = p->oop_iterate(_scanning_closure);
6935 do_yield_check();
6936 // Observe that below, we do not abandon the preclean
6937 // phase as soon as we should; rather we empty the
6938 // marking stack before returning. This is to satisfy
6939 // some existing assertions. In general, it may be a
6940 // good idea to abort immediately and complete the marking
6941 // from the grey objects at a later time.
6942 while (!_mark_stack->isEmpty()) {
6943 oop new_oop = _mark_stack->pop();
6944 assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
6945 assert(new_oop->is_parsable(), "Found unparsable oop");
6946 assert(_bit_map->isMarked((HeapWord*)new_oop),
6947 "only grey objects on this stack");
6948 // iterate over the oops in this oop, marking and pushing
6949 // the ones in CMS heap (i.e. in _span).
6950 new_oop->oop_iterate(_scanning_closure);
6951 // check if it's time to yield
6952 do_yield_check();
6953 }
6954 unsigned int after_count =
6955 GenCollectedHeap::heap()->total_collections();
6956 bool abort = (_before_count != after_count) ||
6957 _collector->should_abort_preclean();
6958 return abort ? 0 : size;
6959 }
6961 void SurvivorSpacePrecleanClosure::do_yield_work() {
6962 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6963 "CMS thread should hold CMS token");
6964 assert_lock_strong(_bit_map->lock());
6965 DEBUG_ONLY(RememberKlassesChecker smx(false);)
6966 // Relinquish the bit map lock
6967 _bit_map->lock()->unlock();
6968 ConcurrentMarkSweepThread::desynchronize(true);
6969 ConcurrentMarkSweepThread::acknowledge_yield_request();
6970 _collector->stopTimer();
6971 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
6972 if (PrintCMSStatistics != 0) {
6973 _collector->incrementYields();
6974 }
6975 _collector->icms_wait();
6977 // See the comment in coordinator_yield()
6978 for (unsigned i = 0; i < CMSYieldSleepCount &&
6979 ConcurrentMarkSweepThread::should_yield() &&
6980 !CMSCollector::foregroundGCIsActive(); ++i) {
6981 os::sleep(Thread::current(), 1, false);
6982 ConcurrentMarkSweepThread::acknowledge_yield_request();
6983 }
6985 ConcurrentMarkSweepThread::synchronize(true);
6986 _bit_map->lock()->lock_without_safepoint_check();
6987 _collector->startTimer();
6988 }
6990 // This closure is used to rescan the marked objects on the dirty cards
6991 // in the mod union table and the card table proper. In the parallel
6992 // case, although the bitMap is shared, we do a single read so the
6993 // isMarked() query is "safe".
6994 bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
6995 // Ignore mark word because we are running concurrent with mutators
6996 assert(p->is_oop_or_null(true), "expected an oop or null");
6997 HeapWord* addr = (HeapWord*)p;
6998 assert(_span.contains(addr), "we are scanning the CMS generation");
6999 bool is_obj_array = false;
7000 #ifdef DEBUG
7001 if (!_parallel) {
7002 assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
7003 assert(_collector->overflow_list_is_empty(),
7004 "overflow list should be empty");
7006 }
7007 #endif // DEBUG
7008 if (_bit_map->isMarked(addr)) {
7009 // Obj arrays are precisely marked, non-arrays are not;
7010 // so we scan objArrays precisely and non-arrays in their
7011 // entirety.
7012 if (p->is_objArray()) {
7013 is_obj_array = true;
7014 if (_parallel) {
7015 p->oop_iterate(_par_scan_closure, mr);
7016 } else {
7017 p->oop_iterate(_scan_closure, mr);
7018 }
7019 } else {
7020 if (_parallel) {
7021 p->oop_iterate(_par_scan_closure);
7022 } else {
7023 p->oop_iterate(_scan_closure);
7024 }
7025 }
7026 }
7027 #ifdef DEBUG
7028 if (!_parallel) {
7029 assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
7030 assert(_collector->overflow_list_is_empty(),
7031 "overflow list should be empty");
7033 }
7034 #endif // DEBUG
7035 return is_obj_array;
7036 }
7038 MarkFromRootsClosure::MarkFromRootsClosure(CMSCollector* collector,
7039 MemRegion span,
7040 CMSBitMap* bitMap, CMSMarkStack* markStack,
7041 CMSMarkStack* revisitStack,
7042 bool should_yield, bool verifying):
7043 _collector(collector),
7044 _span(span),
7045 _bitMap(bitMap),
7046 _mut(&collector->_modUnionTable),
7047 _markStack(markStack),
7048 _revisitStack(revisitStack),
7049 _yield(should_yield),
7050 _skipBits(0)
7051 {
7052 assert(_markStack->isEmpty(), "stack should be empty");
7053 _finger = _bitMap->startWord();
7054 _threshold = _finger;
7055 assert(_collector->_restart_addr == NULL, "Sanity check");
7056 assert(_span.contains(_finger), "Out of bounds _finger?");
7057 DEBUG_ONLY(_verifying = verifying;)
7058 }
7060 void MarkFromRootsClosure::reset(HeapWord* addr) {
7061 assert(_markStack->isEmpty(), "would cause duplicates on stack");
7062 assert(_span.contains(addr), "Out of bounds _finger?");
7063 _finger = addr;
7064 _threshold = (HeapWord*)round_to(
7065 (intptr_t)_finger, CardTableModRefBS::card_size);
7066 }
7068 // Should revisit to see if this should be restructured for
7069 // greater efficiency.
7070 bool MarkFromRootsClosure::do_bit(size_t offset) {
7071 if (_skipBits > 0) {
7072 _skipBits--;
7073 return true;
7074 }
7075 // convert offset into a HeapWord*
7076 HeapWord* addr = _bitMap->startWord() + offset;
7077 assert(_bitMap->endWord() && addr < _bitMap->endWord(),
7078 "address out of range");
7079 assert(_bitMap->isMarked(addr), "tautology");
7080 if (_bitMap->isMarked(addr+1)) {
7081 // this is an allocated but not yet initialized object
7082 assert(_skipBits == 0, "tautology");
7083 _skipBits = 2; // skip next two marked bits ("Printezis-marks")
7084 oop p = oop(addr);
7085 if (p->klass_or_null() == NULL || !p->is_parsable()) {
7086 DEBUG_ONLY(if (!_verifying) {)
7087 // We re-dirty the cards on which this object lies and increase
7088 // the _threshold so that we'll come back to scan this object
7089 // during the preclean or remark phase. (CMSCleanOnEnter)
7090 if (CMSCleanOnEnter) {
7091 size_t sz = _collector->block_size_using_printezis_bits(addr);
7092 HeapWord* end_card_addr = (HeapWord*)round_to(
7093 (intptr_t)(addr+sz), CardTableModRefBS::card_size);
7094 MemRegion redirty_range = MemRegion(addr, end_card_addr);
7095 assert(!redirty_range.is_empty(), "Arithmetical tautology");
7096 // Bump _threshold to end_card_addr; note that
7097 // _threshold cannot possibly exceed end_card_addr, anyhow.
7098 // This prevents future clearing of the card as the scan proceeds
7099 // to the right.
7100 assert(_threshold <= end_card_addr,
7101 "Because we are just scanning into this object");
7102 if (_threshold < end_card_addr) {
7103 _threshold = end_card_addr;
7104 }
7105 if (p->klass_or_null() != NULL) {
7106 // Redirty the range of cards...
7107 _mut->mark_range(redirty_range);
7108 } // ...else the setting of klass will dirty the card anyway.
7109 }
7110 DEBUG_ONLY(})
7111 return true;
7112 }
7113 }
7114 scanOopsInOop(addr);
7115 return true;
7116 }
7118 // We take a break if we've been at this for a while,
7119 // so as to avoid monopolizing the locks involved.
7120 void MarkFromRootsClosure::do_yield_work() {
7121 // First give up the locks, then yield, then re-lock
7122 // We should probably use a constructor/destructor idiom to
7123 // do this unlock/lock or modify the MutexUnlocker class to
7124 // serve our purpose. XXX
7125 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7126 "CMS thread should hold CMS token");
7127 assert_lock_strong(_bitMap->lock());
7128 DEBUG_ONLY(RememberKlassesChecker mux(false);)
7129 _bitMap->lock()->unlock();
7130 ConcurrentMarkSweepThread::desynchronize(true);
7131 ConcurrentMarkSweepThread::acknowledge_yield_request();
7132 _collector->stopTimer();
7133 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
7134 if (PrintCMSStatistics != 0) {
7135 _collector->incrementYields();
7136 }
7137 _collector->icms_wait();
7139 // See the comment in coordinator_yield()
7140 for (unsigned i = 0; i < CMSYieldSleepCount &&
7141 ConcurrentMarkSweepThread::should_yield() &&
7142 !CMSCollector::foregroundGCIsActive(); ++i) {
7143 os::sleep(Thread::current(), 1, false);
7144 ConcurrentMarkSweepThread::acknowledge_yield_request();
7145 }
7147 ConcurrentMarkSweepThread::synchronize(true);
7148 _bitMap->lock()->lock_without_safepoint_check();
7149 _collector->startTimer();
7150 }
7152 void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
7153 assert(_bitMap->isMarked(ptr), "expected bit to be set");
7154 assert(_markStack->isEmpty(),
7155 "should drain stack to limit stack usage");
7156 // convert ptr to an oop preparatory to scanning
7157 oop obj = oop(ptr);
7158 // Ignore mark word in verification below, since we
7159 // may be running concurrent with mutators.
7160 assert(obj->is_oop(true), "should be an oop");
7161 assert(_finger <= ptr, "_finger runneth ahead");
7162 // advance the finger to right end of this object
7163 _finger = ptr + obj->size();
7164 assert(_finger > ptr, "we just incremented it above");
7165 // On large heaps, it may take us some time to get through
7166 // the marking phase (especially if running iCMS). During
7167 // this time it's possible that a lot of mutations have
7168 // accumulated in the card table and the mod union table --
7169 // these mutation records are redundant until we have
7170 // actually traced into the corresponding card.
7171 // Here, we check whether advancing the finger would make
7172 // us cross into a new card, and if so clear corresponding
7173 // cards in the MUT (preclean them in the card-table in the
7174 // future).
7176 DEBUG_ONLY(if (!_verifying) {)
7177 // The clean-on-enter optimization is disabled by default,
7178 // until we fix 6178663.
7179 if (CMSCleanOnEnter && (_finger > _threshold)) {
7180 // [_threshold, _finger) represents the interval
7181 // of cards to be cleared in MUT (or precleaned in card table).
7182 // The set of cards to be cleared is all those that overlap
7183 // with the interval [_threshold, _finger); note that
7184 // _threshold is always kept card-aligned but _finger isn't
7185 // always card-aligned.
7186 HeapWord* old_threshold = _threshold;
7187 assert(old_threshold == (HeapWord*)round_to(
7188 (intptr_t)old_threshold, CardTableModRefBS::card_size),
7189 "_threshold should always be card-aligned");
7190 _threshold = (HeapWord*)round_to(
7191 (intptr_t)_finger, CardTableModRefBS::card_size);
7192 MemRegion mr(old_threshold, _threshold);
7193 assert(!mr.is_empty(), "Control point invariant");
7194 assert(_span.contains(mr), "Should clear within span");
7195 // XXX When _finger crosses from old gen into perm gen
7196 // we may be doing unnecessary cleaning; do better in the
7197 // future by detecting that condition and clearing fewer
7198 // MUT/CT entries.
7199 _mut->clear_range(mr);
7200 }
7201 DEBUG_ONLY(})
7202 // Note: the finger doesn't advance while we drain
7203 // the stack below.
7204 PushOrMarkClosure pushOrMarkClosure(_collector,
7205 _span, _bitMap, _markStack,
7206 _revisitStack,
7207 _finger, this);
7208 bool res = _markStack->push(obj);
7209 assert(res, "Empty non-zero size stack should have space for single push");
7210 while (!_markStack->isEmpty()) {
7211 oop new_oop = _markStack->pop();
7212 // Skip verifying header mark word below because we are
7213 // running concurrent with mutators.
7214 assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
7215 // now scan this oop's oops
7216 new_oop->oop_iterate(&pushOrMarkClosure);
7217 do_yield_check();
7218 }
7219 assert(_markStack->isEmpty(), "tautology, emphasizing post-condition");
7220 }
7222 Par_MarkFromRootsClosure::Par_MarkFromRootsClosure(CMSConcMarkingTask* task,
7223 CMSCollector* collector, MemRegion span,
7224 CMSBitMap* bit_map,
7225 OopTaskQueue* work_queue,
7226 CMSMarkStack* overflow_stack,
7227 CMSMarkStack* revisit_stack,
7228 bool should_yield):
7229 _collector(collector),
7230 _whole_span(collector->_span),
7231 _span(span),
7232 _bit_map(bit_map),
7233 _mut(&collector->_modUnionTable),
7234 _work_queue(work_queue),
7235 _overflow_stack(overflow_stack),
7236 _revisit_stack(revisit_stack),
7237 _yield(should_yield),
7238 _skip_bits(0),
7239 _task(task)
7240 {
7241 assert(_work_queue->size() == 0, "work_queue should be empty");
7242 _finger = span.start();
7243 _threshold = _finger; // XXX Defer clear-on-enter optimization for now
7244 assert(_span.contains(_finger), "Out of bounds _finger?");
7245 }
7247 // Should revisit to see if this should be restructured for
7248 // greater efficiency.
7249 bool Par_MarkFromRootsClosure::do_bit(size_t offset) {
7250 if (_skip_bits > 0) {
7251 _skip_bits--;
7252 return true;
7253 }
7254 // convert offset into a HeapWord*
7255 HeapWord* addr = _bit_map->startWord() + offset;
7256 assert(_bit_map->endWord() && addr < _bit_map->endWord(),
7257 "address out of range");
7258 assert(_bit_map->isMarked(addr), "tautology");
7259 if (_bit_map->isMarked(addr+1)) {
7260 // this is an allocated object that might not yet be initialized
7261 assert(_skip_bits == 0, "tautology");
7262 _skip_bits = 2; // skip next two marked bits ("Printezis-marks")
7263 oop p = oop(addr);
7264 if (p->klass_or_null() == NULL || !p->is_parsable()) {
7265 // in the case of Clean-on-Enter optimization, redirty card
7266 // and avoid clearing card by increasing the threshold.
7267 return true;
7268 }
7269 }
7270 scan_oops_in_oop(addr);
7271 return true;
7272 }
7274 void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
7275 assert(_bit_map->isMarked(ptr), "expected bit to be set");
7276 // Should we assert that our work queue is empty or
7277 // below some drain limit?
7278 assert(_work_queue->size() == 0,
7279 "should drain stack to limit stack usage");
7280 // convert ptr to an oop preparatory to scanning
7281 oop obj = oop(ptr);
7282 // Ignore mark word in verification below, since we
7283 // may be running concurrent with mutators.
7284 assert(obj->is_oop(true), "should be an oop");
7285 assert(_finger <= ptr, "_finger runneth ahead");
7286 // advance the finger to right end of this object
7287 _finger = ptr + obj->size();
7288 assert(_finger > ptr, "we just incremented it above");
7289 // On large heaps, it may take us some time to get through
7290 // the marking phase (especially if running iCMS). During
7291 // this time it's possible that a lot of mutations have
7292 // accumulated in the card table and the mod union table --
7293 // these mutation records are redundant until we have
7294 // actually traced into the corresponding card.
7295 // Here, we check whether advancing the finger would make
7296 // us cross into a new card, and if so clear corresponding
7297 // cards in the MUT (preclean them in the card-table in the
7298 // future).
7300 // The clean-on-enter optimization is disabled by default,
7301 // until we fix 6178663.
7302 if (CMSCleanOnEnter && (_finger > _threshold)) {
7303 // [_threshold, _finger) represents the interval
7304 // of cards to be cleared in MUT (or precleaned in card table).
7305 // The set of cards to be cleared is all those that overlap
7306 // with the interval [_threshold, _finger); note that
7307 // _threshold is always kept card-aligned but _finger isn't
7308 // always card-aligned.
7309 HeapWord* old_threshold = _threshold;
7310 assert(old_threshold == (HeapWord*)round_to(
7311 (intptr_t)old_threshold, CardTableModRefBS::card_size),
7312 "_threshold should always be card-aligned");
7313 _threshold = (HeapWord*)round_to(
7314 (intptr_t)_finger, CardTableModRefBS::card_size);
7315 MemRegion mr(old_threshold, _threshold);
7316 assert(!mr.is_empty(), "Control point invariant");
7317 assert(_span.contains(mr), "Should clear within span"); // _whole_span ??
7318 // XXX When _finger crosses from old gen into perm gen
7319 // we may be doing unnecessary cleaning; do better in the
7320 // future by detecting that condition and clearing fewer
7321 // MUT/CT entries.
7322 _mut->clear_range(mr);
7323 }
7325 // Note: the local finger doesn't advance while we drain
7326 // the stack below, but the global finger sure can and will.
7327 HeapWord** gfa = _task->global_finger_addr();
7328 Par_PushOrMarkClosure pushOrMarkClosure(_collector,
7329 _span, _bit_map,
7330 _work_queue,
7331 _overflow_stack,
7332 _revisit_stack,
7333 _finger,
7334 gfa, this);
7335 bool res = _work_queue->push(obj); // overflow could occur here
7336 assert(res, "Will hold once we use workqueues");
7337 while (true) {
7338 oop new_oop;
7339 if (!_work_queue->pop_local(new_oop)) {
7340 // We emptied our work_queue; check if there's stuff that can
7341 // be gotten from the overflow stack.
7342 if (CMSConcMarkingTask::get_work_from_overflow_stack(
7343 _overflow_stack, _work_queue)) {
7344 do_yield_check();
7345 continue;
7346 } else { // done
7347 break;
7348 }
7349 }
7350 // Skip verifying header mark word below because we are
7351 // running concurrent with mutators.
7352 assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
7353 // now scan this oop's oops
7354 new_oop->oop_iterate(&pushOrMarkClosure);
7355 do_yield_check();
7356 }
7357 assert(_work_queue->size() == 0, "tautology, emphasizing post-condition");
7358 }
7360 // Yield in response to a request from VM Thread or
7361 // from mutators.
7362 void Par_MarkFromRootsClosure::do_yield_work() {
7363 assert(_task != NULL, "sanity");
7364 _task->yield();
7365 }
7367 // A variant of the above used for verifying CMS marking work.
7368 MarkFromRootsVerifyClosure::MarkFromRootsVerifyClosure(CMSCollector* collector,
7369 MemRegion span,
7370 CMSBitMap* verification_bm, CMSBitMap* cms_bm,
7371 CMSMarkStack* mark_stack):
7372 _collector(collector),
7373 _span(span),
7374 _verification_bm(verification_bm),
7375 _cms_bm(cms_bm),
7376 _mark_stack(mark_stack),
7377 _pam_verify_closure(collector, span, verification_bm, cms_bm,
7378 mark_stack)
7379 {
7380 assert(_mark_stack->isEmpty(), "stack should be empty");
7381 _finger = _verification_bm->startWord();
7382 assert(_collector->_restart_addr == NULL, "Sanity check");
7383 assert(_span.contains(_finger), "Out of bounds _finger?");
7384 }
7386 void MarkFromRootsVerifyClosure::reset(HeapWord* addr) {
7387 assert(_mark_stack->isEmpty(), "would cause duplicates on stack");
7388 assert(_span.contains(addr), "Out of bounds _finger?");
7389 _finger = addr;
7390 }
7392 // Should revisit to see if this should be restructured for
7393 // greater efficiency.
7394 bool MarkFromRootsVerifyClosure::do_bit(size_t offset) {
7395 // convert offset into a HeapWord*
7396 HeapWord* addr = _verification_bm->startWord() + offset;
7397 assert(_verification_bm->endWord() && addr < _verification_bm->endWord(),
7398 "address out of range");
7399 assert(_verification_bm->isMarked(addr), "tautology");
7400 assert(_cms_bm->isMarked(addr), "tautology");
7402 assert(_mark_stack->isEmpty(),
7403 "should drain stack to limit stack usage");
7404 // convert addr to an oop preparatory to scanning
7405 oop obj = oop(addr);
7406 assert(obj->is_oop(), "should be an oop");
7407 assert(_finger <= addr, "_finger runneth ahead");
7408 // advance the finger to right end of this object
7409 _finger = addr + obj->size();
7410 assert(_finger > addr, "we just incremented it above");
7411 // Note: the finger doesn't advance while we drain
7412 // the stack below.
7413 bool res = _mark_stack->push(obj);
7414 assert(res, "Empty non-zero size stack should have space for single push");
7415 while (!_mark_stack->isEmpty()) {
7416 oop new_oop = _mark_stack->pop();
7417 assert(new_oop->is_oop(), "Oops! expected to pop an oop");
7418 // now scan this oop's oops
7419 new_oop->oop_iterate(&_pam_verify_closure);
7420 }
7421 assert(_mark_stack->isEmpty(), "tautology, emphasizing post-condition");
7422 return true;
7423 }
7425 PushAndMarkVerifyClosure::PushAndMarkVerifyClosure(
7426 CMSCollector* collector, MemRegion span,
7427 CMSBitMap* verification_bm, CMSBitMap* cms_bm,
7428 CMSMarkStack* mark_stack):
7429 OopClosure(collector->ref_processor()),
7430 _collector(collector),
7431 _span(span),
7432 _verification_bm(verification_bm),
7433 _cms_bm(cms_bm),
7434 _mark_stack(mark_stack)
7435 { }
7437 void PushAndMarkVerifyClosure::do_oop(oop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
7438 void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
7440 // Upon stack overflow, we discard (part of) the stack,
7441 // remembering the least address amongst those discarded
7442 // in CMSCollector's _restart_address.
7443 void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
7444 // Remember the least grey address discarded
7445 HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost);
7446 _collector->lower_restart_addr(ra);
7447 _mark_stack->reset(); // discard stack contents
7448 _mark_stack->expand(); // expand the stack if possible
7449 }
7451 void PushAndMarkVerifyClosure::do_oop(oop obj) {
7452 assert(obj->is_oop_or_null(), "expected an oop or NULL");
7453 HeapWord* addr = (HeapWord*)obj;
7454 if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
7455 // Oop lies in _span and isn't yet grey or black
7456 _verification_bm->mark(addr); // now grey
7457 if (!_cms_bm->isMarked(addr)) {
7458 oop(addr)->print();
7459 gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)",
7460 addr);
7461 fatal("... aborting");
7462 }
7464 if (!_mark_stack->push(obj)) { // stack overflow
7465 if (PrintCMSStatistics != 0) {
7466 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7467 SIZE_FORMAT, _mark_stack->capacity());
7468 }
7469 assert(_mark_stack->isFull(), "Else push should have succeeded");
7470 handle_stack_overflow(addr);
7471 }
7472 // anything including and to the right of _finger
7473 // will be scanned as we iterate over the remainder of the
7474 // bit map
7475 }
7476 }
7478 PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
7479 MemRegion span,
7480 CMSBitMap* bitMap, CMSMarkStack* markStack,
7481 CMSMarkStack* revisitStack,
7482 HeapWord* finger, MarkFromRootsClosure* parent) :
7483 KlassRememberingOopClosure(collector, collector->ref_processor(), revisitStack),
7484 _span(span),
7485 _bitMap(bitMap),
7486 _markStack(markStack),
7487 _finger(finger),
7488 _parent(parent)
7489 { }
7491 Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
7492 MemRegion span,
7493 CMSBitMap* bit_map,
7494 OopTaskQueue* work_queue,
7495 CMSMarkStack* overflow_stack,
7496 CMSMarkStack* revisit_stack,
7497 HeapWord* finger,
7498 HeapWord** global_finger_addr,
7499 Par_MarkFromRootsClosure* parent) :
7500 Par_KlassRememberingOopClosure(collector,
7501 collector->ref_processor(),
7502 revisit_stack),
7503 _whole_span(collector->_span),
7504 _span(span),
7505 _bit_map(bit_map),
7506 _work_queue(work_queue),
7507 _overflow_stack(overflow_stack),
7508 _finger(finger),
7509 _global_finger_addr(global_finger_addr),
7510 _parent(parent)
7511 { }
7513 // Assumes thread-safe access by callers, who are
7514 // responsible for mutual exclusion.
7515 void CMSCollector::lower_restart_addr(HeapWord* low) {
7516 assert(_span.contains(low), "Out of bounds addr");
7517 if (_restart_addr == NULL) {
7518 _restart_addr = low;
7519 } else {
7520 _restart_addr = MIN2(_restart_addr, low);
7521 }
7522 }
7524 // Upon stack overflow, we discard (part of) the stack,
7525 // remembering the least address amongst those discarded
7526 // in CMSCollector's _restart_address.
7527 void PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
7528 // Remember the least grey address discarded
7529 HeapWord* ra = (HeapWord*)_markStack->least_value(lost);
7530 _collector->lower_restart_addr(ra);
7531 _markStack->reset(); // discard stack contents
7532 _markStack->expand(); // expand the stack if possible
7533 }
7535 // Upon stack overflow, we discard (part of) the stack,
7536 // remembering the least address amongst those discarded
7537 // in CMSCollector's _restart_address.
7538 void Par_PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
7539 // We need to do this under a mutex to prevent other
7540 // workers from interfering with the work done below.
7541 MutexLockerEx ml(_overflow_stack->par_lock(),
7542 Mutex::_no_safepoint_check_flag);
7543 // Remember the least grey address discarded
7544 HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
7545 _collector->lower_restart_addr(ra);
7546 _overflow_stack->reset(); // discard stack contents
7547 _overflow_stack->expand(); // expand the stack if possible
7548 }
7550 void PushOrMarkClosure::do_oop(oop obj) {
7551 // Ignore mark word because we are running concurrent with mutators.
7552 assert(obj->is_oop_or_null(true), "expected an oop or NULL");
7553 HeapWord* addr = (HeapWord*)obj;
7554 if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
7555 // Oop lies in _span and isn't yet grey or black
7556 _bitMap->mark(addr); // now grey
7557 if (addr < _finger) {
7558 // the bit map iteration has already either passed, or
7559 // sampled, this bit in the bit map; we'll need to
7560 // use the marking stack to scan this oop's oops.
7561 bool simulate_overflow = false;
7562 NOT_PRODUCT(
7563 if (CMSMarkStackOverflowALot &&
7564 _collector->simulate_overflow()) {
7565 // simulate a stack overflow
7566 simulate_overflow = true;
7567 }
7568 )
7569 if (simulate_overflow || !_markStack->push(obj)) { // stack overflow
7570 if (PrintCMSStatistics != 0) {
7571 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7572 SIZE_FORMAT, _markStack->capacity());
7573 }
7574 assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded");
7575 handle_stack_overflow(addr);
7576 }
7577 }
7578 // anything including and to the right of _finger
7579 // will be scanned as we iterate over the remainder of the
7580 // bit map
7581 do_yield_check();
7582 }
7583 }
7585 void PushOrMarkClosure::do_oop(oop* p) { PushOrMarkClosure::do_oop_work(p); }
7586 void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
7588 void Par_PushOrMarkClosure::do_oop(oop obj) {
7589 // Ignore mark word because we are running concurrent with mutators.
7590 assert(obj->is_oop_or_null(true), "expected an oop or NULL");
7591 HeapWord* addr = (HeapWord*)obj;
7592 if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
7593 // Oop lies in _span and isn't yet grey or black
7594 // We read the global_finger (volatile read) strictly after marking oop
7595 bool res = _bit_map->par_mark(addr); // now grey
7596 volatile HeapWord** gfa = (volatile HeapWord**)_global_finger_addr;
7597 // Should we push this marked oop on our stack?
7598 // -- if someone else marked it, nothing to do
7599 // -- if target oop is above global finger nothing to do
7600 // -- if target oop is in chunk and above local finger
7601 // then nothing to do
7602 // -- else push on work queue
7603 if ( !res // someone else marked it, they will deal with it
7604 || (addr >= *gfa) // will be scanned in a later task
7605 || (_span.contains(addr) && addr >= _finger)) { // later in this chunk
7606 return;
7607 }
7608 // the bit map iteration has already either passed, or
7609 // sampled, this bit in the bit map; we'll need to
7610 // use the marking stack to scan this oop's oops.
7611 bool simulate_overflow = false;
7612 NOT_PRODUCT(
7613 if (CMSMarkStackOverflowALot &&
7614 _collector->simulate_overflow()) {
7615 // simulate a stack overflow
7616 simulate_overflow = true;
7617 }
7618 )
7619 if (simulate_overflow ||
7620 !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
7621 // stack overflow
7622 if (PrintCMSStatistics != 0) {
7623 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7624 SIZE_FORMAT, _overflow_stack->capacity());
7625 }
7626 // We cannot assert that the overflow stack is full because
7627 // it may have been emptied since.
7628 assert(simulate_overflow ||
7629 _work_queue->size() == _work_queue->max_elems(),
7630 "Else push should have succeeded");
7631 handle_stack_overflow(addr);
7632 }
7633 do_yield_check();
7634 }
7635 }
7637 void Par_PushOrMarkClosure::do_oop(oop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
7638 void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
7640 KlassRememberingOopClosure::KlassRememberingOopClosure(CMSCollector* collector,
7641 ReferenceProcessor* rp,
7642 CMSMarkStack* revisit_stack) :
7643 OopClosure(rp),
7644 _collector(collector),
7645 _revisit_stack(revisit_stack),
7646 _should_remember_klasses(collector->should_unload_classes()) {}
7648 PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
7649 MemRegion span,
7650 ReferenceProcessor* rp,
7651 CMSBitMap* bit_map,
7652 CMSBitMap* mod_union_table,
7653 CMSMarkStack* mark_stack,
7654 CMSMarkStack* revisit_stack,
7655 bool concurrent_precleaning):
7656 KlassRememberingOopClosure(collector, rp, revisit_stack),
7657 _span(span),
7658 _bit_map(bit_map),
7659 _mod_union_table(mod_union_table),
7660 _mark_stack(mark_stack),
7661 _concurrent_precleaning(concurrent_precleaning)
7662 {
7663 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7664 }
7666 // Grey object rescan during pre-cleaning and second checkpoint phases --
7667 // the non-parallel version (the parallel version appears further below.)
7668 void PushAndMarkClosure::do_oop(oop obj) {
7669 // Ignore mark word verification. If during concurrent precleaning,
7670 // the object monitor may be locked. If during the checkpoint
7671 // phases, the object may already have been reached by a different
7672 // path and may be at the end of the global overflow list (so
7673 // the mark word may be NULL).
7674 assert(obj->is_oop_or_null(true /* ignore mark word */),
7675 "expected an oop or NULL");
7676 HeapWord* addr = (HeapWord*)obj;
7677 // Check if oop points into the CMS generation
7678 // and is not marked
7679 if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
7680 // a white object ...
7681 _bit_map->mark(addr); // ... now grey
7682 // push on the marking stack (grey set)
7683 bool simulate_overflow = false;
7684 NOT_PRODUCT(
7685 if (CMSMarkStackOverflowALot &&
7686 _collector->simulate_overflow()) {
7687 // simulate a stack overflow
7688 simulate_overflow = true;
7689 }
7690 )
7691 if (simulate_overflow || !_mark_stack->push(obj)) {
7692 if (_concurrent_precleaning) {
7693 // During precleaning we can just dirty the appropriate card(s)
7694 // in the mod union table, thus ensuring that the object remains
7695 // in the grey set and continue. In the case of object arrays
7696 // we need to dirty all of the cards that the object spans,
7697 // since the rescan of object arrays will be limited to the
7698 // dirty cards.
7699 // Note that no one can be intefering with us in this action
7700 // of dirtying the mod union table, so no locking or atomics
7701 // are required.
7702 if (obj->is_objArray()) {
7703 size_t sz = obj->size();
7704 HeapWord* end_card_addr = (HeapWord*)round_to(
7705 (intptr_t)(addr+sz), CardTableModRefBS::card_size);
7706 MemRegion redirty_range = MemRegion(addr, end_card_addr);
7707 assert(!redirty_range.is_empty(), "Arithmetical tautology");
7708 _mod_union_table->mark_range(redirty_range);
7709 } else {
7710 _mod_union_table->mark(addr);
7711 }
7712 _collector->_ser_pmc_preclean_ovflw++;
7713 } else {
7714 // During the remark phase, we need to remember this oop
7715 // in the overflow list.
7716 _collector->push_on_overflow_list(obj);
7717 _collector->_ser_pmc_remark_ovflw++;
7718 }
7719 }
7720 }
7721 }
7723 Par_PushAndMarkClosure::Par_PushAndMarkClosure(CMSCollector* collector,
7724 MemRegion span,
7725 ReferenceProcessor* rp,
7726 CMSBitMap* bit_map,
7727 OopTaskQueue* work_queue,
7728 CMSMarkStack* revisit_stack):
7729 Par_KlassRememberingOopClosure(collector, rp, revisit_stack),
7730 _span(span),
7731 _bit_map(bit_map),
7732 _work_queue(work_queue)
7733 {
7734 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7735 }
7737 void PushAndMarkClosure::do_oop(oop* p) { PushAndMarkClosure::do_oop_work(p); }
7738 void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
7740 // Grey object rescan during second checkpoint phase --
7741 // the parallel version.
7742 void Par_PushAndMarkClosure::do_oop(oop obj) {
7743 // In the assert below, we ignore the mark word because
7744 // this oop may point to an already visited object that is
7745 // on the overflow stack (in which case the mark word has
7746 // been hijacked for chaining into the overflow stack --
7747 // if this is the last object in the overflow stack then
7748 // its mark word will be NULL). Because this object may
7749 // have been subsequently popped off the global overflow
7750 // stack, and the mark word possibly restored to the prototypical
7751 // value, by the time we get to examined this failing assert in
7752 // the debugger, is_oop_or_null(false) may subsequently start
7753 // to hold.
7754 assert(obj->is_oop_or_null(true),
7755 "expected an oop or NULL");
7756 HeapWord* addr = (HeapWord*)obj;
7757 // Check if oop points into the CMS generation
7758 // and is not marked
7759 if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
7760 // a white object ...
7761 // If we manage to "claim" the object, by being the
7762 // first thread to mark it, then we push it on our
7763 // marking stack
7764 if (_bit_map->par_mark(addr)) { // ... now grey
7765 // push on work queue (grey set)
7766 bool simulate_overflow = false;
7767 NOT_PRODUCT(
7768 if (CMSMarkStackOverflowALot &&
7769 _collector->par_simulate_overflow()) {
7770 // simulate a stack overflow
7771 simulate_overflow = true;
7772 }
7773 )
7774 if (simulate_overflow || !_work_queue->push(obj)) {
7775 _collector->par_push_on_overflow_list(obj);
7776 _collector->_par_pmc_remark_ovflw++; // imprecise OK: no need to CAS
7777 }
7778 } // Else, some other thread got there first
7779 }
7780 }
7782 void Par_PushAndMarkClosure::do_oop(oop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
7783 void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
7785 void PushAndMarkClosure::remember_mdo(DataLayout* v) {
7786 // TBD
7787 }
7789 void Par_PushAndMarkClosure::remember_mdo(DataLayout* v) {
7790 // TBD
7791 }
7793 void CMSPrecleanRefsYieldClosure::do_yield_work() {
7794 DEBUG_ONLY(RememberKlassesChecker mux(false);)
7795 Mutex* bml = _collector->bitMapLock();
7796 assert_lock_strong(bml);
7797 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7798 "CMS thread should hold CMS token");
7800 bml->unlock();
7801 ConcurrentMarkSweepThread::desynchronize(true);
7803 ConcurrentMarkSweepThread::acknowledge_yield_request();
7805 _collector->stopTimer();
7806 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
7807 if (PrintCMSStatistics != 0) {
7808 _collector->incrementYields();
7809 }
7810 _collector->icms_wait();
7812 // See the comment in coordinator_yield()
7813 for (unsigned i = 0; i < CMSYieldSleepCount &&
7814 ConcurrentMarkSweepThread::should_yield() &&
7815 !CMSCollector::foregroundGCIsActive(); ++i) {
7816 os::sleep(Thread::current(), 1, false);
7817 ConcurrentMarkSweepThread::acknowledge_yield_request();
7818 }
7820 ConcurrentMarkSweepThread::synchronize(true);
7821 bml->lock();
7823 _collector->startTimer();
7824 }
7826 bool CMSPrecleanRefsYieldClosure::should_return() {
7827 if (ConcurrentMarkSweepThread::should_yield()) {
7828 do_yield_work();
7829 }
7830 return _collector->foregroundGCIsActive();
7831 }
7833 void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) {
7834 assert(((size_t)mr.start())%CardTableModRefBS::card_size_in_words == 0,
7835 "mr should be aligned to start at a card boundary");
7836 // We'd like to assert:
7837 // assert(mr.word_size()%CardTableModRefBS::card_size_in_words == 0,
7838 // "mr should be a range of cards");
7839 // However, that would be too strong in one case -- the last
7840 // partition ends at _unallocated_block which, in general, can be
7841 // an arbitrary boundary, not necessarily card aligned.
7842 if (PrintCMSStatistics != 0) {
7843 _num_dirty_cards +=
7844 mr.word_size()/CardTableModRefBS::card_size_in_words;
7845 }
7846 _space->object_iterate_mem(mr, &_scan_cl);
7847 }
7849 SweepClosure::SweepClosure(CMSCollector* collector,
7850 ConcurrentMarkSweepGeneration* g,
7851 CMSBitMap* bitMap, bool should_yield) :
7852 _collector(collector),
7853 _g(g),
7854 _sp(g->cmsSpace()),
7855 _limit(_sp->sweep_limit()),
7856 _freelistLock(_sp->freelistLock()),
7857 _bitMap(bitMap),
7858 _yield(should_yield),
7859 _inFreeRange(false), // No free range at beginning of sweep
7860 _freeRangeInFreeLists(false), // No free range at beginning of sweep
7861 _lastFreeRangeCoalesced(false),
7862 _freeFinger(g->used_region().start())
7863 {
7864 NOT_PRODUCT(
7865 _numObjectsFreed = 0;
7866 _numWordsFreed = 0;
7867 _numObjectsLive = 0;
7868 _numWordsLive = 0;
7869 _numObjectsAlreadyFree = 0;
7870 _numWordsAlreadyFree = 0;
7871 _last_fc = NULL;
7873 _sp->initializeIndexedFreeListArrayReturnedBytes();
7874 _sp->dictionary()->initializeDictReturnedBytes();
7875 )
7876 assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7877 "sweep _limit out of bounds");
7878 if (CMSTraceSweeper) {
7879 gclog_or_tty->print("\n====================\nStarting new sweep\n");
7880 }
7881 }
7883 // We need this destructor to reclaim any space at the end
7884 // of the space, which do_blk below may not have added back to
7885 // the free lists. [basically dealing with the "fringe effect"]
7886 SweepClosure::~SweepClosure() {
7887 assert_lock_strong(_freelistLock);
7888 // this should be treated as the end of a free run if any
7889 // The current free range should be returned to the free lists
7890 // as one coalesced chunk.
7891 if (inFreeRange()) {
7892 flushCurFreeChunk(freeFinger(),
7893 pointer_delta(_limit, freeFinger()));
7894 assert(freeFinger() < _limit, "the finger pointeth off base");
7895 if (CMSTraceSweeper) {
7896 gclog_or_tty->print("destructor:");
7897 gclog_or_tty->print("Sweep:put_free_blk 0x%x ("SIZE_FORMAT") "
7898 "[coalesced:"SIZE_FORMAT"]\n",
7899 freeFinger(), pointer_delta(_limit, freeFinger()),
7900 lastFreeRangeCoalesced());
7901 }
7902 }
7903 NOT_PRODUCT(
7904 if (Verbose && PrintGC) {
7905 gclog_or_tty->print("Collected "SIZE_FORMAT" objects, "
7906 SIZE_FORMAT " bytes",
7907 _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
7908 gclog_or_tty->print_cr("\nLive "SIZE_FORMAT" objects, "
7909 SIZE_FORMAT" bytes "
7910 "Already free "SIZE_FORMAT" objects, "SIZE_FORMAT" bytes",
7911 _numObjectsLive, _numWordsLive*sizeof(HeapWord),
7912 _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
7913 size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree) *
7914 sizeof(HeapWord);
7915 gclog_or_tty->print_cr("Total sweep: "SIZE_FORMAT" bytes", totalBytes);
7917 if (PrintCMSStatistics && CMSVerifyReturnedBytes) {
7918 size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
7919 size_t dictReturnedBytes = _sp->dictionary()->sumDictReturnedBytes();
7920 size_t returnedBytes = indexListReturnedBytes + dictReturnedBytes;
7921 gclog_or_tty->print("Returned "SIZE_FORMAT" bytes", returnedBytes);
7922 gclog_or_tty->print(" Indexed List Returned "SIZE_FORMAT" bytes",
7923 indexListReturnedBytes);
7924 gclog_or_tty->print_cr(" Dictionary Returned "SIZE_FORMAT" bytes",
7925 dictReturnedBytes);
7926 }
7927 }
7928 )
7929 // Now, in debug mode, just null out the sweep_limit
7930 NOT_PRODUCT(_sp->clear_sweep_limit();)
7931 if (CMSTraceSweeper) {
7932 gclog_or_tty->print("end of sweep\n================\n");
7933 }
7934 }
7936 void SweepClosure::initialize_free_range(HeapWord* freeFinger,
7937 bool freeRangeInFreeLists) {
7938 if (CMSTraceSweeper) {
7939 gclog_or_tty->print("---- Start free range 0x%x with free block [%d] (%d)\n",
7940 freeFinger, _sp->block_size(freeFinger),
7941 freeRangeInFreeLists);
7942 }
7943 assert(!inFreeRange(), "Trampling existing free range");
7944 set_inFreeRange(true);
7945 set_lastFreeRangeCoalesced(false);
7947 set_freeFinger(freeFinger);
7948 set_freeRangeInFreeLists(freeRangeInFreeLists);
7949 if (CMSTestInFreeList) {
7950 if (freeRangeInFreeLists) {
7951 FreeChunk* fc = (FreeChunk*) freeFinger;
7952 assert(fc->isFree(), "A chunk on the free list should be free.");
7953 assert(fc->size() > 0, "Free range should have a size");
7954 assert(_sp->verifyChunkInFreeLists(fc), "Chunk is not in free lists");
7955 }
7956 }
7957 }
7959 // Note that the sweeper runs concurrently with mutators. Thus,
7960 // it is possible for direct allocation in this generation to happen
7961 // in the middle of the sweep. Note that the sweeper also coalesces
7962 // contiguous free blocks. Thus, unless the sweeper and the allocator
7963 // synchronize appropriately freshly allocated blocks may get swept up.
7964 // This is accomplished by the sweeper locking the free lists while
7965 // it is sweeping. Thus blocks that are determined to be free are
7966 // indeed free. There is however one additional complication:
7967 // blocks that have been allocated since the final checkpoint and
7968 // mark, will not have been marked and so would be treated as
7969 // unreachable and swept up. To prevent this, the allocator marks
7970 // the bit map when allocating during the sweep phase. This leads,
7971 // however, to a further complication -- objects may have been allocated
7972 // but not yet initialized -- in the sense that the header isn't yet
7973 // installed. The sweeper can not then determine the size of the block
7974 // in order to skip over it. To deal with this case, we use a technique
7975 // (due to Printezis) to encode such uninitialized block sizes in the
7976 // bit map. Since the bit map uses a bit per every HeapWord, but the
7977 // CMS generation has a minimum object size of 3 HeapWords, it follows
7978 // that "normal marks" won't be adjacent in the bit map (there will
7979 // always be at least two 0 bits between successive 1 bits). We make use
7980 // of these "unused" bits to represent uninitialized blocks -- the bit
7981 // corresponding to the start of the uninitialized object and the next
7982 // bit are both set. Finally, a 1 bit marks the end of the object that
7983 // started with the two consecutive 1 bits to indicate its potentially
7984 // uninitialized state.
7986 size_t SweepClosure::do_blk_careful(HeapWord* addr) {
7987 FreeChunk* fc = (FreeChunk*)addr;
7988 size_t res;
7990 // Check if we are done sweeping. Below we check "addr >= _limit" rather
7991 // than "addr == _limit" because although _limit was a block boundary when
7992 // we started the sweep, it may no longer be one because heap expansion
7993 // may have caused us to coalesce the block ending at the address _limit
7994 // with a newly expanded chunk (this happens when _limit was set to the
7995 // previous _end of the space), so we may have stepped past _limit; see CR 6977970.
7996 if (addr >= _limit) { // we have swept up to or past the limit, do nothing more
7997 assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7998 "sweep _limit out of bounds");
7999 assert(addr < _sp->end(), "addr out of bounds");
8000 // help the closure application finish
8001 return pointer_delta(_sp->end(), addr);
8002 }
8003 assert(addr < _limit, "sweep invariant");
8005 // check if we should yield
8006 do_yield_check(addr);
8007 if (fc->isFree()) {
8008 // Chunk that is already free
8009 res = fc->size();
8010 doAlreadyFreeChunk(fc);
8011 debug_only(_sp->verifyFreeLists());
8012 assert(res == fc->size(), "Don't expect the size to change");
8013 NOT_PRODUCT(
8014 _numObjectsAlreadyFree++;
8015 _numWordsAlreadyFree += res;
8016 )
8017 NOT_PRODUCT(_last_fc = fc;)
8018 } else if (!_bitMap->isMarked(addr)) {
8019 // Chunk is fresh garbage
8020 res = doGarbageChunk(fc);
8021 debug_only(_sp->verifyFreeLists());
8022 NOT_PRODUCT(
8023 _numObjectsFreed++;
8024 _numWordsFreed += res;
8025 )
8026 } else {
8027 // Chunk that is alive.
8028 res = doLiveChunk(fc);
8029 debug_only(_sp->verifyFreeLists());
8030 NOT_PRODUCT(
8031 _numObjectsLive++;
8032 _numWordsLive += res;
8033 )
8034 }
8035 return res;
8036 }
8038 // For the smart allocation, record following
8039 // split deaths - a free chunk is removed from its free list because
8040 // it is being split into two or more chunks.
8041 // split birth - a free chunk is being added to its free list because
8042 // a larger free chunk has been split and resulted in this free chunk.
8043 // coal death - a free chunk is being removed from its free list because
8044 // it is being coalesced into a large free chunk.
8045 // coal birth - a free chunk is being added to its free list because
8046 // it was created when two or more free chunks where coalesced into
8047 // this free chunk.
8048 //
8049 // These statistics are used to determine the desired number of free
8050 // chunks of a given size. The desired number is chosen to be relative
8051 // to the end of a CMS sweep. The desired number at the end of a sweep
8052 // is the
8053 // count-at-end-of-previous-sweep (an amount that was enough)
8054 // - count-at-beginning-of-current-sweep (the excess)
8055 // + split-births (gains in this size during interval)
8056 // - split-deaths (demands on this size during interval)
8057 // where the interval is from the end of one sweep to the end of the
8058 // next.
8059 //
8060 // When sweeping the sweeper maintains an accumulated chunk which is
8061 // the chunk that is made up of chunks that have been coalesced. That
8062 // will be termed the left-hand chunk. A new chunk of garbage that
8063 // is being considered for coalescing will be referred to as the
8064 // right-hand chunk.
8065 //
8066 // When making a decision on whether to coalesce a right-hand chunk with
8067 // the current left-hand chunk, the current count vs. the desired count
8068 // of the left-hand chunk is considered. Also if the right-hand chunk
8069 // is near the large chunk at the end of the heap (see
8070 // ConcurrentMarkSweepGeneration::isNearLargestChunk()), then the
8071 // left-hand chunk is coalesced.
8072 //
8073 // When making a decision about whether to split a chunk, the desired count
8074 // vs. the current count of the candidate to be split is also considered.
8075 // If the candidate is underpopulated (currently fewer chunks than desired)
8076 // a chunk of an overpopulated (currently more chunks than desired) size may
8077 // be chosen. The "hint" associated with a free list, if non-null, points
8078 // to a free list which may be overpopulated.
8079 //
8081 void SweepClosure::doAlreadyFreeChunk(FreeChunk* fc) {
8082 size_t size = fc->size();
8083 // Chunks that cannot be coalesced are not in the
8084 // free lists.
8085 if (CMSTestInFreeList && !fc->cantCoalesce()) {
8086 assert(_sp->verifyChunkInFreeLists(fc),
8087 "free chunk should be in free lists");
8088 }
8089 // a chunk that is already free, should not have been
8090 // marked in the bit map
8091 HeapWord* addr = (HeapWord*) fc;
8092 assert(!_bitMap->isMarked(addr), "free chunk should be unmarked");
8093 // Verify that the bit map has no bits marked between
8094 // addr and purported end of this block.
8095 _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
8097 // Some chunks cannot be coalesced in under any circumstances.
8098 // See the definition of cantCoalesce().
8099 if (!fc->cantCoalesce()) {
8100 // This chunk can potentially be coalesced.
8101 if (_sp->adaptive_freelists()) {
8102 // All the work is done in
8103 doPostIsFreeOrGarbageChunk(fc, size);
8104 } else { // Not adaptive free lists
8105 // this is a free chunk that can potentially be coalesced by the sweeper;
8106 if (!inFreeRange()) {
8107 // if the next chunk is a free block that can't be coalesced
8108 // it doesn't make sense to remove this chunk from the free lists
8109 FreeChunk* nextChunk = (FreeChunk*)(addr + size);
8110 assert((HeapWord*)nextChunk <= _limit, "sweep invariant");
8111 if ((HeapWord*)nextChunk < _limit && // there's a next chunk...
8112 nextChunk->isFree() && // which is free...
8113 nextChunk->cantCoalesce()) { // ... but cant be coalesced
8114 // nothing to do
8115 } else {
8116 // Potentially the start of a new free range:
8117 // Don't eagerly remove it from the free lists.
8118 // No need to remove it if it will just be put
8119 // back again. (Also from a pragmatic point of view
8120 // if it is a free block in a region that is beyond
8121 // any allocated blocks, an assertion will fail)
8122 // Remember the start of a free run.
8123 initialize_free_range(addr, true);
8124 // end - can coalesce with next chunk
8125 }
8126 } else {
8127 // the midst of a free range, we are coalescing
8128 debug_only(record_free_block_coalesced(fc);)
8129 if (CMSTraceSweeper) {
8130 gclog_or_tty->print(" -- pick up free block 0x%x (%d)\n", fc, size);
8131 }
8132 // remove it from the free lists
8133 _sp->removeFreeChunkFromFreeLists(fc);
8134 set_lastFreeRangeCoalesced(true);
8135 // If the chunk is being coalesced and the current free range is
8136 // in the free lists, remove the current free range so that it
8137 // will be returned to the free lists in its entirety - all
8138 // the coalesced pieces included.
8139 if (freeRangeInFreeLists()) {
8140 FreeChunk* ffc = (FreeChunk*) freeFinger();
8141 assert(ffc->size() == pointer_delta(addr, freeFinger()),
8142 "Size of free range is inconsistent with chunk size.");
8143 if (CMSTestInFreeList) {
8144 assert(_sp->verifyChunkInFreeLists(ffc),
8145 "free range is not in free lists");
8146 }
8147 _sp->removeFreeChunkFromFreeLists(ffc);
8148 set_freeRangeInFreeLists(false);
8149 }
8150 }
8151 }
8152 } else {
8153 // Code path common to both original and adaptive free lists.
8155 // cant coalesce with previous block; this should be treated
8156 // as the end of a free run if any
8157 if (inFreeRange()) {
8158 // we kicked some butt; time to pick up the garbage
8159 assert(freeFinger() < addr, "the finger pointeth off base");
8160 flushCurFreeChunk(freeFinger(), pointer_delta(addr, freeFinger()));
8161 }
8162 // else, nothing to do, just continue
8163 }
8164 }
8166 size_t SweepClosure::doGarbageChunk(FreeChunk* fc) {
8167 // This is a chunk of garbage. It is not in any free list.
8168 // Add it to a free list or let it possibly be coalesced into
8169 // a larger chunk.
8170 HeapWord* addr = (HeapWord*) fc;
8171 size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
8173 if (_sp->adaptive_freelists()) {
8174 // Verify that the bit map has no bits marked between
8175 // addr and purported end of just dead object.
8176 _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
8178 doPostIsFreeOrGarbageChunk(fc, size);
8179 } else {
8180 if (!inFreeRange()) {
8181 // start of a new free range
8182 assert(size > 0, "A free range should have a size");
8183 initialize_free_range(addr, false);
8185 } else {
8186 // this will be swept up when we hit the end of the
8187 // free range
8188 if (CMSTraceSweeper) {
8189 gclog_or_tty->print(" -- pick up garbage 0x%x (%d) \n", fc, size);
8190 }
8191 // If the chunk is being coalesced and the current free range is
8192 // in the free lists, remove the current free range so that it
8193 // will be returned to the free lists in its entirety - all
8194 // the coalesced pieces included.
8195 if (freeRangeInFreeLists()) {
8196 FreeChunk* ffc = (FreeChunk*)freeFinger();
8197 assert(ffc->size() == pointer_delta(addr, freeFinger()),
8198 "Size of free range is inconsistent with chunk size.");
8199 if (CMSTestInFreeList) {
8200 assert(_sp->verifyChunkInFreeLists(ffc),
8201 "free range is not in free lists");
8202 }
8203 _sp->removeFreeChunkFromFreeLists(ffc);
8204 set_freeRangeInFreeLists(false);
8205 }
8206 set_lastFreeRangeCoalesced(true);
8207 }
8208 // this will be swept up when we hit the end of the free range
8210 // Verify that the bit map has no bits marked between
8211 // addr and purported end of just dead object.
8212 _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
8213 }
8214 return size;
8215 }
8217 size_t SweepClosure::doLiveChunk(FreeChunk* fc) {
8218 HeapWord* addr = (HeapWord*) fc;
8219 // The sweeper has just found a live object. Return any accumulated
8220 // left hand chunk to the free lists.
8221 if (inFreeRange()) {
8222 if (_sp->adaptive_freelists()) {
8223 flushCurFreeChunk(freeFinger(),
8224 pointer_delta(addr, freeFinger()));
8225 } else { // not adaptive freelists
8226 set_inFreeRange(false);
8227 // Add the free range back to the free list if it is not already
8228 // there.
8229 if (!freeRangeInFreeLists()) {
8230 assert(freeFinger() < addr, "the finger pointeth off base");
8231 if (CMSTraceSweeper) {
8232 gclog_or_tty->print("Sweep:put_free_blk 0x%x (%d) "
8233 "[coalesced:%d]\n",
8234 freeFinger(), pointer_delta(addr, freeFinger()),
8235 lastFreeRangeCoalesced());
8236 }
8237 _sp->addChunkAndRepairOffsetTable(freeFinger(),
8238 pointer_delta(addr, freeFinger()), lastFreeRangeCoalesced());
8239 }
8240 }
8241 }
8243 // Common code path for original and adaptive free lists.
8245 // this object is live: we'd normally expect this to be
8246 // an oop, and like to assert the following:
8247 // assert(oop(addr)->is_oop(), "live block should be an oop");
8248 // However, as we commented above, this may be an object whose
8249 // header hasn't yet been initialized.
8250 size_t size;
8251 assert(_bitMap->isMarked(addr), "Tautology for this control point");
8252 if (_bitMap->isMarked(addr + 1)) {
8253 // Determine the size from the bit map, rather than trying to
8254 // compute it from the object header.
8255 HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
8256 size = pointer_delta(nextOneAddr + 1, addr);
8257 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
8258 "alignment problem");
8260 #ifdef DEBUG
8261 if (oop(addr)->klass_or_null() != NULL &&
8262 ( !_collector->should_unload_classes()
8263 || (oop(addr)->is_parsable()) &&
8264 oop(addr)->is_conc_safe())) {
8265 // Ignore mark word because we are running concurrent with mutators
8266 assert(oop(addr)->is_oop(true), "live block should be an oop");
8267 // is_conc_safe is checked before performing this assertion
8268 // because an object that is not is_conc_safe may yet have
8269 // the return from size() correct.
8270 assert(size ==
8271 CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
8272 "P-mark and computed size do not agree");
8273 }
8274 #endif
8276 } else {
8277 // This should be an initialized object that's alive.
8278 assert(oop(addr)->klass_or_null() != NULL &&
8279 (!_collector->should_unload_classes()
8280 || oop(addr)->is_parsable()),
8281 "Should be an initialized object");
8282 // Note that there are objects used during class redefinition
8283 // (e.g., merge_cp in VM_RedefineClasses::merge_cp_and_rewrite()
8284 // which are discarded with their is_conc_safe state still
8285 // false. These object may be floating garbage so may be
8286 // seen here. If they are floating garbage their size
8287 // should be attainable from their klass. Do not that
8288 // is_conc_safe() is true for oop(addr).
8289 // Ignore mark word because we are running concurrent with mutators
8290 assert(oop(addr)->is_oop(true), "live block should be an oop");
8291 // Verify that the bit map has no bits marked between
8292 // addr and purported end of this block.
8293 size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
8294 assert(size >= 3, "Necessary for Printezis marks to work");
8295 assert(!_bitMap->isMarked(addr+1), "Tautology for this control point");
8296 DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);)
8297 }
8298 return size;
8299 }
8301 void SweepClosure::doPostIsFreeOrGarbageChunk(FreeChunk* fc,
8302 size_t chunkSize) {
8303 // doPostIsFreeOrGarbageChunk() should only be called in the smart allocation
8304 // scheme.
8305 bool fcInFreeLists = fc->isFree();
8306 assert(_sp->adaptive_freelists(), "Should only be used in this case.");
8307 assert((HeapWord*)fc <= _limit, "sweep invariant");
8308 if (CMSTestInFreeList && fcInFreeLists) {
8309 assert(_sp->verifyChunkInFreeLists(fc),
8310 "free chunk is not in free lists");
8311 }
8314 if (CMSTraceSweeper) {
8315 gclog_or_tty->print_cr(" -- pick up another chunk at 0x%x (%d)", fc, chunkSize);
8316 }
8318 HeapWord* addr = (HeapWord*) fc;
8320 bool coalesce;
8321 size_t left = pointer_delta(addr, freeFinger());
8322 size_t right = chunkSize;
8323 switch (FLSCoalescePolicy) {
8324 // numeric value forms a coalition aggressiveness metric
8325 case 0: { // never coalesce
8326 coalesce = false;
8327 break;
8328 }
8329 case 1: { // coalesce if left & right chunks on overpopulated lists
8330 coalesce = _sp->coalOverPopulated(left) &&
8331 _sp->coalOverPopulated(right);
8332 break;
8333 }
8334 case 2: { // coalesce if left chunk on overpopulated list (default)
8335 coalesce = _sp->coalOverPopulated(left);
8336 break;
8337 }
8338 case 3: { // coalesce if left OR right chunk on overpopulated list
8339 coalesce = _sp->coalOverPopulated(left) ||
8340 _sp->coalOverPopulated(right);
8341 break;
8342 }
8343 case 4: { // always coalesce
8344 coalesce = true;
8345 break;
8346 }
8347 default:
8348 ShouldNotReachHere();
8349 }
8351 // Should the current free range be coalesced?
8352 // If the chunk is in a free range and either we decided to coalesce above
8353 // or the chunk is near the large block at the end of the heap
8354 // (isNearLargestChunk() returns true), then coalesce this chunk.
8355 bool doCoalesce = inFreeRange() &&
8356 (coalesce || _g->isNearLargestChunk((HeapWord*)fc));
8357 if (doCoalesce) {
8358 // Coalesce the current free range on the left with the new
8359 // chunk on the right. If either is on a free list,
8360 // it must be removed from the list and stashed in the closure.
8361 if (freeRangeInFreeLists()) {
8362 FreeChunk* ffc = (FreeChunk*)freeFinger();
8363 assert(ffc->size() == pointer_delta(addr, freeFinger()),
8364 "Size of free range is inconsistent with chunk size.");
8365 if (CMSTestInFreeList) {
8366 assert(_sp->verifyChunkInFreeLists(ffc),
8367 "Chunk is not in free lists");
8368 }
8369 _sp->coalDeath(ffc->size());
8370 _sp->removeFreeChunkFromFreeLists(ffc);
8371 set_freeRangeInFreeLists(false);
8372 }
8373 if (fcInFreeLists) {
8374 _sp->coalDeath(chunkSize);
8375 assert(fc->size() == chunkSize,
8376 "The chunk has the wrong size or is not in the free lists");
8377 _sp->removeFreeChunkFromFreeLists(fc);
8378 }
8379 set_lastFreeRangeCoalesced(true);
8380 } else { // not in a free range and/or should not coalesce
8381 // Return the current free range and start a new one.
8382 if (inFreeRange()) {
8383 // In a free range but cannot coalesce with the right hand chunk.
8384 // Put the current free range into the free lists.
8385 flushCurFreeChunk(freeFinger(),
8386 pointer_delta(addr, freeFinger()));
8387 }
8388 // Set up for new free range. Pass along whether the right hand
8389 // chunk is in the free lists.
8390 initialize_free_range((HeapWord*)fc, fcInFreeLists);
8391 }
8392 }
8393 void SweepClosure::flushCurFreeChunk(HeapWord* chunk, size_t size) {
8394 assert(inFreeRange(), "Should only be called if currently in a free range.");
8395 assert(size > 0,
8396 "A zero sized chunk cannot be added to the free lists.");
8397 if (!freeRangeInFreeLists()) {
8398 if(CMSTestInFreeList) {
8399 FreeChunk* fc = (FreeChunk*) chunk;
8400 fc->setSize(size);
8401 assert(!_sp->verifyChunkInFreeLists(fc),
8402 "chunk should not be in free lists yet");
8403 }
8404 if (CMSTraceSweeper) {
8405 gclog_or_tty->print_cr(" -- add free block 0x%x (%d) to free lists",
8406 chunk, size);
8407 }
8408 // A new free range is going to be starting. The current
8409 // free range has not been added to the free lists yet or
8410 // was removed so add it back.
8411 // If the current free range was coalesced, then the death
8412 // of the free range was recorded. Record a birth now.
8413 if (lastFreeRangeCoalesced()) {
8414 _sp->coalBirth(size);
8415 }
8416 _sp->addChunkAndRepairOffsetTable(chunk, size,
8417 lastFreeRangeCoalesced());
8418 }
8419 set_inFreeRange(false);
8420 set_freeRangeInFreeLists(false);
8421 }
8423 // We take a break if we've been at this for a while,
8424 // so as to avoid monopolizing the locks involved.
8425 void SweepClosure::do_yield_work(HeapWord* addr) {
8426 // Return current free chunk being used for coalescing (if any)
8427 // to the appropriate freelist. After yielding, the next
8428 // free block encountered will start a coalescing range of
8429 // free blocks. If the next free block is adjacent to the
8430 // chunk just flushed, they will need to wait for the next
8431 // sweep to be coalesced.
8432 if (inFreeRange()) {
8433 flushCurFreeChunk(freeFinger(), pointer_delta(addr, freeFinger()));
8434 }
8436 // First give up the locks, then yield, then re-lock.
8437 // We should probably use a constructor/destructor idiom to
8438 // do this unlock/lock or modify the MutexUnlocker class to
8439 // serve our purpose. XXX
8440 assert_lock_strong(_bitMap->lock());
8441 assert_lock_strong(_freelistLock);
8442 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
8443 "CMS thread should hold CMS token");
8444 _bitMap->lock()->unlock();
8445 _freelistLock->unlock();
8446 ConcurrentMarkSweepThread::desynchronize(true);
8447 ConcurrentMarkSweepThread::acknowledge_yield_request();
8448 _collector->stopTimer();
8449 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
8450 if (PrintCMSStatistics != 0) {
8451 _collector->incrementYields();
8452 }
8453 _collector->icms_wait();
8455 // See the comment in coordinator_yield()
8456 for (unsigned i = 0; i < CMSYieldSleepCount &&
8457 ConcurrentMarkSweepThread::should_yield() &&
8458 !CMSCollector::foregroundGCIsActive(); ++i) {
8459 os::sleep(Thread::current(), 1, false);
8460 ConcurrentMarkSweepThread::acknowledge_yield_request();
8461 }
8463 ConcurrentMarkSweepThread::synchronize(true);
8464 _freelistLock->lock();
8465 _bitMap->lock()->lock_without_safepoint_check();
8466 _collector->startTimer();
8467 }
8469 #ifndef PRODUCT
8470 // This is actually very useful in a product build if it can
8471 // be called from the debugger. Compile it into the product
8472 // as needed.
8473 bool debug_verifyChunkInFreeLists(FreeChunk* fc) {
8474 return debug_cms_space->verifyChunkInFreeLists(fc);
8475 }
8477 void SweepClosure::record_free_block_coalesced(FreeChunk* fc) const {
8478 if (CMSTraceSweeper) {
8479 gclog_or_tty->print("Sweep:coal_free_blk 0x%x (%d)\n", fc, fc->size());
8480 }
8481 }
8482 #endif
8484 // CMSIsAliveClosure
8485 bool CMSIsAliveClosure::do_object_b(oop obj) {
8486 HeapWord* addr = (HeapWord*)obj;
8487 return addr != NULL &&
8488 (!_span.contains(addr) || _bit_map->isMarked(addr));
8489 }
8491 CMSKeepAliveClosure::CMSKeepAliveClosure( CMSCollector* collector,
8492 MemRegion span,
8493 CMSBitMap* bit_map, CMSMarkStack* mark_stack,
8494 CMSMarkStack* revisit_stack, bool cpc):
8495 KlassRememberingOopClosure(collector, NULL, revisit_stack),
8496 _span(span),
8497 _bit_map(bit_map),
8498 _mark_stack(mark_stack),
8499 _concurrent_precleaning(cpc) {
8500 assert(!_span.is_empty(), "Empty span could spell trouble");
8501 }
8504 // CMSKeepAliveClosure: the serial version
8505 void CMSKeepAliveClosure::do_oop(oop obj) {
8506 HeapWord* addr = (HeapWord*)obj;
8507 if (_span.contains(addr) &&
8508 !_bit_map->isMarked(addr)) {
8509 _bit_map->mark(addr);
8510 bool simulate_overflow = false;
8511 NOT_PRODUCT(
8512 if (CMSMarkStackOverflowALot &&
8513 _collector->simulate_overflow()) {
8514 // simulate a stack overflow
8515 simulate_overflow = true;
8516 }
8517 )
8518 if (simulate_overflow || !_mark_stack->push(obj)) {
8519 if (_concurrent_precleaning) {
8520 // We dirty the overflown object and let the remark
8521 // phase deal with it.
8522 assert(_collector->overflow_list_is_empty(), "Error");
8523 // In the case of object arrays, we need to dirty all of
8524 // the cards that the object spans. No locking or atomics
8525 // are needed since no one else can be mutating the mod union
8526 // table.
8527 if (obj->is_objArray()) {
8528 size_t sz = obj->size();
8529 HeapWord* end_card_addr =
8530 (HeapWord*)round_to((intptr_t)(addr+sz), CardTableModRefBS::card_size);
8531 MemRegion redirty_range = MemRegion(addr, end_card_addr);
8532 assert(!redirty_range.is_empty(), "Arithmetical tautology");
8533 _collector->_modUnionTable.mark_range(redirty_range);
8534 } else {
8535 _collector->_modUnionTable.mark(addr);
8536 }
8537 _collector->_ser_kac_preclean_ovflw++;
8538 } else {
8539 _collector->push_on_overflow_list(obj);
8540 _collector->_ser_kac_ovflw++;
8541 }
8542 }
8543 }
8544 }
8546 void CMSKeepAliveClosure::do_oop(oop* p) { CMSKeepAliveClosure::do_oop_work(p); }
8547 void CMSKeepAliveClosure::do_oop(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
8549 // CMSParKeepAliveClosure: a parallel version of the above.
8550 // The work queues are private to each closure (thread),
8551 // but (may be) available for stealing by other threads.
8552 void CMSParKeepAliveClosure::do_oop(oop obj) {
8553 HeapWord* addr = (HeapWord*)obj;
8554 if (_span.contains(addr) &&
8555 !_bit_map->isMarked(addr)) {
8556 // In general, during recursive tracing, several threads
8557 // may be concurrently getting here; the first one to
8558 // "tag" it, claims it.
8559 if (_bit_map->par_mark(addr)) {
8560 bool res = _work_queue->push(obj);
8561 assert(res, "Low water mark should be much less than capacity");
8562 // Do a recursive trim in the hope that this will keep
8563 // stack usage lower, but leave some oops for potential stealers
8564 trim_queue(_low_water_mark);
8565 } // Else, another thread got there first
8566 }
8567 }
8569 void CMSParKeepAliveClosure::do_oop(oop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
8570 void CMSParKeepAliveClosure::do_oop(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
8572 void CMSParKeepAliveClosure::trim_queue(uint max) {
8573 while (_work_queue->size() > max) {
8574 oop new_oop;
8575 if (_work_queue->pop_local(new_oop)) {
8576 assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
8577 assert(_bit_map->isMarked((HeapWord*)new_oop),
8578 "no white objects on this stack!");
8579 assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
8580 // iterate over the oops in this oop, marking and pushing
8581 // the ones in CMS heap (i.e. in _span).
8582 new_oop->oop_iterate(&_mark_and_push);
8583 }
8584 }
8585 }
8587 CMSInnerParMarkAndPushClosure::CMSInnerParMarkAndPushClosure(
8588 CMSCollector* collector,
8589 MemRegion span, CMSBitMap* bit_map,
8590 CMSMarkStack* revisit_stack,
8591 OopTaskQueue* work_queue):
8592 Par_KlassRememberingOopClosure(collector, NULL, revisit_stack),
8593 _span(span),
8594 _bit_map(bit_map),
8595 _work_queue(work_queue) { }
8597 void CMSInnerParMarkAndPushClosure::do_oop(oop obj) {
8598 HeapWord* addr = (HeapWord*)obj;
8599 if (_span.contains(addr) &&
8600 !_bit_map->isMarked(addr)) {
8601 if (_bit_map->par_mark(addr)) {
8602 bool simulate_overflow = false;
8603 NOT_PRODUCT(
8604 if (CMSMarkStackOverflowALot &&
8605 _collector->par_simulate_overflow()) {
8606 // simulate a stack overflow
8607 simulate_overflow = true;
8608 }
8609 )
8610 if (simulate_overflow || !_work_queue->push(obj)) {
8611 _collector->par_push_on_overflow_list(obj);
8612 _collector->_par_kac_ovflw++;
8613 }
8614 } // Else another thread got there already
8615 }
8616 }
8618 void CMSInnerParMarkAndPushClosure::do_oop(oop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
8619 void CMSInnerParMarkAndPushClosure::do_oop(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
8621 //////////////////////////////////////////////////////////////////
8622 // CMSExpansionCause /////////////////////////////
8623 //////////////////////////////////////////////////////////////////
8624 const char* CMSExpansionCause::to_string(CMSExpansionCause::Cause cause) {
8625 switch (cause) {
8626 case _no_expansion:
8627 return "No expansion";
8628 case _satisfy_free_ratio:
8629 return "Free ratio";
8630 case _satisfy_promotion:
8631 return "Satisfy promotion";
8632 case _satisfy_allocation:
8633 return "allocation";
8634 case _allocate_par_lab:
8635 return "Par LAB";
8636 case _allocate_par_spooling_space:
8637 return "Par Spooling Space";
8638 case _adaptive_size_policy:
8639 return "Ergonomics";
8640 default:
8641 return "unknown";
8642 }
8643 }
8645 void CMSDrainMarkingStackClosure::do_void() {
8646 // the max number to take from overflow list at a time
8647 const size_t num = _mark_stack->capacity()/4;
8648 assert(!_concurrent_precleaning || _collector->overflow_list_is_empty(),
8649 "Overflow list should be NULL during concurrent phases");
8650 while (!_mark_stack->isEmpty() ||
8651 // if stack is empty, check the overflow list
8652 _collector->take_from_overflow_list(num, _mark_stack)) {
8653 oop obj = _mark_stack->pop();
8654 HeapWord* addr = (HeapWord*)obj;
8655 assert(_span.contains(addr), "Should be within span");
8656 assert(_bit_map->isMarked(addr), "Should be marked");
8657 assert(obj->is_oop(), "Should be an oop");
8658 obj->oop_iterate(_keep_alive);
8659 }
8660 }
8662 void CMSParDrainMarkingStackClosure::do_void() {
8663 // drain queue
8664 trim_queue(0);
8665 }
8667 // Trim our work_queue so its length is below max at return
8668 void CMSParDrainMarkingStackClosure::trim_queue(uint max) {
8669 while (_work_queue->size() > max) {
8670 oop new_oop;
8671 if (_work_queue->pop_local(new_oop)) {
8672 assert(new_oop->is_oop(), "Expected an oop");
8673 assert(_bit_map->isMarked((HeapWord*)new_oop),
8674 "no white objects on this stack!");
8675 assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
8676 // iterate over the oops in this oop, marking and pushing
8677 // the ones in CMS heap (i.e. in _span).
8678 new_oop->oop_iterate(&_mark_and_push);
8679 }
8680 }
8681 }
8683 ////////////////////////////////////////////////////////////////////
8684 // Support for Marking Stack Overflow list handling and related code
8685 ////////////////////////////////////////////////////////////////////
8686 // Much of the following code is similar in shape and spirit to the
8687 // code used in ParNewGC. We should try and share that code
8688 // as much as possible in the future.
8690 #ifndef PRODUCT
8691 // Debugging support for CMSStackOverflowALot
8693 // It's OK to call this multi-threaded; the worst thing
8694 // that can happen is that we'll get a bunch of closely
8695 // spaced simulated oveflows, but that's OK, in fact
8696 // probably good as it would exercise the overflow code
8697 // under contention.
8698 bool CMSCollector::simulate_overflow() {
8699 if (_overflow_counter-- <= 0) { // just being defensive
8700 _overflow_counter = CMSMarkStackOverflowInterval;
8701 return true;
8702 } else {
8703 return false;
8704 }
8705 }
8707 bool CMSCollector::par_simulate_overflow() {
8708 return simulate_overflow();
8709 }
8710 #endif
8712 // Single-threaded
8713 bool CMSCollector::take_from_overflow_list(size_t num, CMSMarkStack* stack) {
8714 assert(stack->isEmpty(), "Expected precondition");
8715 assert(stack->capacity() > num, "Shouldn't bite more than can chew");
8716 size_t i = num;
8717 oop cur = _overflow_list;
8718 const markOop proto = markOopDesc::prototype();
8719 NOT_PRODUCT(ssize_t n = 0;)
8720 for (oop next; i > 0 && cur != NULL; cur = next, i--) {
8721 next = oop(cur->mark());
8722 cur->set_mark(proto); // until proven otherwise
8723 assert(cur->is_oop(), "Should be an oop");
8724 bool res = stack->push(cur);
8725 assert(res, "Bit off more than can chew?");
8726 NOT_PRODUCT(n++;)
8727 }
8728 _overflow_list = cur;
8729 #ifndef PRODUCT
8730 assert(_num_par_pushes >= n, "Too many pops?");
8731 _num_par_pushes -=n;
8732 #endif
8733 return !stack->isEmpty();
8734 }
8736 #define BUSY (oop(0x1aff1aff))
8737 // (MT-safe) Get a prefix of at most "num" from the list.
8738 // The overflow list is chained through the mark word of
8739 // each object in the list. We fetch the entire list,
8740 // break off a prefix of the right size and return the
8741 // remainder. If other threads try to take objects from
8742 // the overflow list at that time, they will wait for
8743 // some time to see if data becomes available. If (and
8744 // only if) another thread places one or more object(s)
8745 // on the global list before we have returned the suffix
8746 // to the global list, we will walk down our local list
8747 // to find its end and append the global list to
8748 // our suffix before returning it. This suffix walk can
8749 // prove to be expensive (quadratic in the amount of traffic)
8750 // when there are many objects in the overflow list and
8751 // there is much producer-consumer contention on the list.
8752 // *NOTE*: The overflow list manipulation code here and
8753 // in ParNewGeneration:: are very similar in shape,
8754 // except that in the ParNew case we use the old (from/eden)
8755 // copy of the object to thread the list via its klass word.
8756 // Because of the common code, if you make any changes in
8757 // the code below, please check the ParNew version to see if
8758 // similar changes might be needed.
8759 // CR 6797058 has been filed to consolidate the common code.
8760 bool CMSCollector::par_take_from_overflow_list(size_t num,
8761 OopTaskQueue* work_q,
8762 int no_of_gc_threads) {
8763 assert(work_q->size() == 0, "First empty local work queue");
8764 assert(num < work_q->max_elems(), "Can't bite more than we can chew");
8765 if (_overflow_list == NULL) {
8766 return false;
8767 }
8768 // Grab the entire list; we'll put back a suffix
8769 oop prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
8770 Thread* tid = Thread::current();
8771 // Before "no_of_gc_threads" was introduced CMSOverflowSpinCount was
8772 // set to ParallelGCThreads.
8773 size_t CMSOverflowSpinCount = (size_t) no_of_gc_threads; // was ParallelGCThreads;
8774 size_t sleep_time_millis = MAX2((size_t)1, num/100);
8775 // If the list is busy, we spin for a short while,
8776 // sleeping between attempts to get the list.
8777 for (size_t spin = 0; prefix == BUSY && spin < CMSOverflowSpinCount; spin++) {
8778 os::sleep(tid, sleep_time_millis, false);
8779 if (_overflow_list == NULL) {
8780 // Nothing left to take
8781 return false;
8782 } else if (_overflow_list != BUSY) {
8783 // Try and grab the prefix
8784 prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
8785 }
8786 }
8787 // If the list was found to be empty, or we spun long
8788 // enough, we give up and return empty-handed. If we leave
8789 // the list in the BUSY state below, it must be the case that
8790 // some other thread holds the overflow list and will set it
8791 // to a non-BUSY state in the future.
8792 if (prefix == NULL || prefix == BUSY) {
8793 // Nothing to take or waited long enough
8794 if (prefix == NULL) {
8795 // Write back the NULL in case we overwrote it with BUSY above
8796 // and it is still the same value.
8797 (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
8798 }
8799 return false;
8800 }
8801 assert(prefix != NULL && prefix != BUSY, "Error");
8802 size_t i = num;
8803 oop cur = prefix;
8804 // Walk down the first "num" objects, unless we reach the end.
8805 for (; i > 1 && cur->mark() != NULL; cur = oop(cur->mark()), i--);
8806 if (cur->mark() == NULL) {
8807 // We have "num" or fewer elements in the list, so there
8808 // is nothing to return to the global list.
8809 // Write back the NULL in lieu of the BUSY we wrote
8810 // above, if it is still the same value.
8811 if (_overflow_list == BUSY) {
8812 (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
8813 }
8814 } else {
8815 // Chop off the suffix and rerturn it to the global list.
8816 assert(cur->mark() != BUSY, "Error");
8817 oop suffix_head = cur->mark(); // suffix will be put back on global list
8818 cur->set_mark(NULL); // break off suffix
8819 // It's possible that the list is still in the empty(busy) state
8820 // we left it in a short while ago; in that case we may be
8821 // able to place back the suffix without incurring the cost
8822 // of a walk down the list.
8823 oop observed_overflow_list = _overflow_list;
8824 oop cur_overflow_list = observed_overflow_list;
8825 bool attached = false;
8826 while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
8827 observed_overflow_list =
8828 (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
8829 if (cur_overflow_list == observed_overflow_list) {
8830 attached = true;
8831 break;
8832 } else cur_overflow_list = observed_overflow_list;
8833 }
8834 if (!attached) {
8835 // Too bad, someone else sneaked in (at least) an element; we'll need
8836 // to do a splice. Find tail of suffix so we can prepend suffix to global
8837 // list.
8838 for (cur = suffix_head; cur->mark() != NULL; cur = (oop)(cur->mark()));
8839 oop suffix_tail = cur;
8840 assert(suffix_tail != NULL && suffix_tail->mark() == NULL,
8841 "Tautology");
8842 observed_overflow_list = _overflow_list;
8843 do {
8844 cur_overflow_list = observed_overflow_list;
8845 if (cur_overflow_list != BUSY) {
8846 // Do the splice ...
8847 suffix_tail->set_mark(markOop(cur_overflow_list));
8848 } else { // cur_overflow_list == BUSY
8849 suffix_tail->set_mark(NULL);
8850 }
8851 // ... and try to place spliced list back on overflow_list ...
8852 observed_overflow_list =
8853 (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
8854 } while (cur_overflow_list != observed_overflow_list);
8855 // ... until we have succeeded in doing so.
8856 }
8857 }
8859 // Push the prefix elements on work_q
8860 assert(prefix != NULL, "control point invariant");
8861 const markOop proto = markOopDesc::prototype();
8862 oop next;
8863 NOT_PRODUCT(ssize_t n = 0;)
8864 for (cur = prefix; cur != NULL; cur = next) {
8865 next = oop(cur->mark());
8866 cur->set_mark(proto); // until proven otherwise
8867 assert(cur->is_oop(), "Should be an oop");
8868 bool res = work_q->push(cur);
8869 assert(res, "Bit off more than we can chew?");
8870 NOT_PRODUCT(n++;)
8871 }
8872 #ifndef PRODUCT
8873 assert(_num_par_pushes >= n, "Too many pops?");
8874 Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
8875 #endif
8876 return true;
8877 }
8879 // Single-threaded
8880 void CMSCollector::push_on_overflow_list(oop p) {
8881 NOT_PRODUCT(_num_par_pushes++;)
8882 assert(p->is_oop(), "Not an oop");
8883 preserve_mark_if_necessary(p);
8884 p->set_mark((markOop)_overflow_list);
8885 _overflow_list = p;
8886 }
8888 // Multi-threaded; use CAS to prepend to overflow list
8889 void CMSCollector::par_push_on_overflow_list(oop p) {
8890 NOT_PRODUCT(Atomic::inc_ptr(&_num_par_pushes);)
8891 assert(p->is_oop(), "Not an oop");
8892 par_preserve_mark_if_necessary(p);
8893 oop observed_overflow_list = _overflow_list;
8894 oop cur_overflow_list;
8895 do {
8896 cur_overflow_list = observed_overflow_list;
8897 if (cur_overflow_list != BUSY) {
8898 p->set_mark(markOop(cur_overflow_list));
8899 } else {
8900 p->set_mark(NULL);
8901 }
8902 observed_overflow_list =
8903 (oop) Atomic::cmpxchg_ptr(p, &_overflow_list, cur_overflow_list);
8904 } while (cur_overflow_list != observed_overflow_list);
8905 }
8906 #undef BUSY
8908 // Single threaded
8909 // General Note on GrowableArray: pushes may silently fail
8910 // because we are (temporarily) out of C-heap for expanding
8911 // the stack. The problem is quite ubiquitous and affects
8912 // a lot of code in the JVM. The prudent thing for GrowableArray
8913 // to do (for now) is to exit with an error. However, that may
8914 // be too draconian in some cases because the caller may be
8915 // able to recover without much harm. For such cases, we
8916 // should probably introduce a "soft_push" method which returns
8917 // an indication of success or failure with the assumption that
8918 // the caller may be able to recover from a failure; code in
8919 // the VM can then be changed, incrementally, to deal with such
8920 // failures where possible, thus, incrementally hardening the VM
8921 // in such low resource situations.
8922 void CMSCollector::preserve_mark_work(oop p, markOop m) {
8923 _preserved_oop_stack.push(p);
8924 _preserved_mark_stack.push(m);
8925 assert(m == p->mark(), "Mark word changed");
8926 assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
8927 "bijection");
8928 }
8930 // Single threaded
8931 void CMSCollector::preserve_mark_if_necessary(oop p) {
8932 markOop m = p->mark();
8933 if (m->must_be_preserved(p)) {
8934 preserve_mark_work(p, m);
8935 }
8936 }
8938 void CMSCollector::par_preserve_mark_if_necessary(oop p) {
8939 markOop m = p->mark();
8940 if (m->must_be_preserved(p)) {
8941 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
8942 // Even though we read the mark word without holding
8943 // the lock, we are assured that it will not change
8944 // because we "own" this oop, so no other thread can
8945 // be trying to push it on the overflow list; see
8946 // the assertion in preserve_mark_work() that checks
8947 // that m == p->mark().
8948 preserve_mark_work(p, m);
8949 }
8950 }
8952 // We should be able to do this multi-threaded,
8953 // a chunk of stack being a task (this is
8954 // correct because each oop only ever appears
8955 // once in the overflow list. However, it's
8956 // not very easy to completely overlap this with
8957 // other operations, so will generally not be done
8958 // until all work's been completed. Because we
8959 // expect the preserved oop stack (set) to be small,
8960 // it's probably fine to do this single-threaded.
8961 // We can explore cleverer concurrent/overlapped/parallel
8962 // processing of preserved marks if we feel the
8963 // need for this in the future. Stack overflow should
8964 // be so rare in practice and, when it happens, its
8965 // effect on performance so great that this will
8966 // likely just be in the noise anyway.
8967 void CMSCollector::restore_preserved_marks_if_any() {
8968 assert(SafepointSynchronize::is_at_safepoint(),
8969 "world should be stopped");
8970 assert(Thread::current()->is_ConcurrentGC_thread() ||
8971 Thread::current()->is_VM_thread(),
8972 "should be single-threaded");
8973 assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
8974 "bijection");
8976 while (!_preserved_oop_stack.is_empty()) {
8977 oop p = _preserved_oop_stack.pop();
8978 assert(p->is_oop(), "Should be an oop");
8979 assert(_span.contains(p), "oop should be in _span");
8980 assert(p->mark() == markOopDesc::prototype(),
8981 "Set when taken from overflow list");
8982 markOop m = _preserved_mark_stack.pop();
8983 p->set_mark(m);
8984 }
8985 assert(_preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty(),
8986 "stacks were cleared above");
8987 }
8989 #ifndef PRODUCT
8990 bool CMSCollector::no_preserved_marks() const {
8991 return _preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty();
8992 }
8993 #endif
8995 CMSAdaptiveSizePolicy* ASConcurrentMarkSweepGeneration::cms_size_policy() const
8996 {
8997 GenCollectedHeap* gch = (GenCollectedHeap*) GenCollectedHeap::heap();
8998 CMSAdaptiveSizePolicy* size_policy =
8999 (CMSAdaptiveSizePolicy*) gch->gen_policy()->size_policy();
9000 assert(size_policy->is_gc_cms_adaptive_size_policy(),
9001 "Wrong type for size policy");
9002 return size_policy;
9003 }
9005 void ASConcurrentMarkSweepGeneration::resize(size_t cur_promo_size,
9006 size_t desired_promo_size) {
9007 if (cur_promo_size < desired_promo_size) {
9008 size_t expand_bytes = desired_promo_size - cur_promo_size;
9009 if (PrintAdaptiveSizePolicy && Verbose) {
9010 gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
9011 "Expanding tenured generation by " SIZE_FORMAT " (bytes)",
9012 expand_bytes);
9013 }
9014 expand(expand_bytes,
9015 MinHeapDeltaBytes,
9016 CMSExpansionCause::_adaptive_size_policy);
9017 } else if (desired_promo_size < cur_promo_size) {
9018 size_t shrink_bytes = cur_promo_size - desired_promo_size;
9019 if (PrintAdaptiveSizePolicy && Verbose) {
9020 gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
9021 "Shrinking tenured generation by " SIZE_FORMAT " (bytes)",
9022 shrink_bytes);
9023 }
9024 shrink(shrink_bytes);
9025 }
9026 }
9028 CMSGCAdaptivePolicyCounters* ASConcurrentMarkSweepGeneration::gc_adaptive_policy_counters() {
9029 GenCollectedHeap* gch = GenCollectedHeap::heap();
9030 CMSGCAdaptivePolicyCounters* counters =
9031 (CMSGCAdaptivePolicyCounters*) gch->collector_policy()->counters();
9032 assert(counters->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
9033 "Wrong kind of counters");
9034 return counters;
9035 }
9038 void ASConcurrentMarkSweepGeneration::update_counters() {
9039 if (UsePerfData) {
9040 _space_counters->update_all();
9041 _gen_counters->update_all();
9042 CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
9043 GenCollectedHeap* gch = GenCollectedHeap::heap();
9044 CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
9045 assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
9046 "Wrong gc statistics type");
9047 counters->update_counters(gc_stats_l);
9048 }
9049 }
9051 void ASConcurrentMarkSweepGeneration::update_counters(size_t used) {
9052 if (UsePerfData) {
9053 _space_counters->update_used(used);
9054 _space_counters->update_capacity();
9055 _gen_counters->update_all();
9057 CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
9058 GenCollectedHeap* gch = GenCollectedHeap::heap();
9059 CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
9060 assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
9061 "Wrong gc statistics type");
9062 counters->update_counters(gc_stats_l);
9063 }
9064 }
9066 // The desired expansion delta is computed so that:
9067 // . desired free percentage or greater is used
9068 void ASConcurrentMarkSweepGeneration::compute_new_size() {
9069 assert_locked_or_safepoint(Heap_lock);
9071 GenCollectedHeap* gch = (GenCollectedHeap*) GenCollectedHeap::heap();
9073 // If incremental collection failed, we just want to expand
9074 // to the limit.
9075 if (incremental_collection_failed()) {
9076 clear_incremental_collection_failed();
9077 grow_to_reserved();
9078 return;
9079 }
9081 assert(UseAdaptiveSizePolicy, "Should be using adaptive sizing");
9083 assert(gch->kind() == CollectedHeap::GenCollectedHeap,
9084 "Wrong type of heap");
9085 int prev_level = level() - 1;
9086 assert(prev_level >= 0, "The cms generation is the lowest generation");
9087 Generation* prev_gen = gch->get_gen(prev_level);
9088 assert(prev_gen->kind() == Generation::ASParNew,
9089 "Wrong type of young generation");
9090 ParNewGeneration* younger_gen = (ParNewGeneration*) prev_gen;
9091 size_t cur_eden = younger_gen->eden()->capacity();
9092 CMSAdaptiveSizePolicy* size_policy = cms_size_policy();
9093 size_t cur_promo = free();
9094 size_policy->compute_tenured_generation_free_space(cur_promo,
9095 max_available(),
9096 cur_eden);
9097 resize(cur_promo, size_policy->promo_size());
9099 // Record the new size of the space in the cms generation
9100 // that is available for promotions. This is temporary.
9101 // It should be the desired promo size.
9102 size_policy->avg_cms_promo()->sample(free());
9103 size_policy->avg_old_live()->sample(used());
9105 if (UsePerfData) {
9106 CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
9107 counters->update_cms_capacity_counter(capacity());
9108 }
9109 }
9111 void ASConcurrentMarkSweepGeneration::shrink_by(size_t desired_bytes) {
9112 assert_locked_or_safepoint(Heap_lock);
9113 assert_lock_strong(freelistLock());
9114 HeapWord* old_end = _cmsSpace->end();
9115 HeapWord* unallocated_start = _cmsSpace->unallocated_block();
9116 assert(old_end >= unallocated_start, "Miscalculation of unallocated_start");
9117 FreeChunk* chunk_at_end = find_chunk_at_end();
9118 if (chunk_at_end == NULL) {
9119 // No room to shrink
9120 if (PrintGCDetails && Verbose) {
9121 gclog_or_tty->print_cr("No room to shrink: old_end "
9122 PTR_FORMAT " unallocated_start " PTR_FORMAT
9123 " chunk_at_end " PTR_FORMAT,
9124 old_end, unallocated_start, chunk_at_end);
9125 }
9126 return;
9127 } else {
9129 // Find the chunk at the end of the space and determine
9130 // how much it can be shrunk.
9131 size_t shrinkable_size_in_bytes = chunk_at_end->size();
9132 size_t aligned_shrinkable_size_in_bytes =
9133 align_size_down(shrinkable_size_in_bytes, os::vm_page_size());
9134 assert(unallocated_start <= chunk_at_end->end(),
9135 "Inconsistent chunk at end of space");
9136 size_t bytes = MIN2(desired_bytes, aligned_shrinkable_size_in_bytes);
9137 size_t word_size_before = heap_word_size(_virtual_space.committed_size());
9139 // Shrink the underlying space
9140 _virtual_space.shrink_by(bytes);
9141 if (PrintGCDetails && Verbose) {
9142 gclog_or_tty->print_cr("ConcurrentMarkSweepGeneration::shrink_by:"
9143 " desired_bytes " SIZE_FORMAT
9144 " shrinkable_size_in_bytes " SIZE_FORMAT
9145 " aligned_shrinkable_size_in_bytes " SIZE_FORMAT
9146 " bytes " SIZE_FORMAT,
9147 desired_bytes, shrinkable_size_in_bytes,
9148 aligned_shrinkable_size_in_bytes, bytes);
9149 gclog_or_tty->print_cr(" old_end " SIZE_FORMAT
9150 " unallocated_start " SIZE_FORMAT,
9151 old_end, unallocated_start);
9152 }
9154 // If the space did shrink (shrinking is not guaranteed),
9155 // shrink the chunk at the end by the appropriate amount.
9156 if (((HeapWord*)_virtual_space.high()) < old_end) {
9157 size_t new_word_size =
9158 heap_word_size(_virtual_space.committed_size());
9160 // Have to remove the chunk from the dictionary because it is changing
9161 // size and might be someplace elsewhere in the dictionary.
9163 // Get the chunk at end, shrink it, and put it
9164 // back.
9165 _cmsSpace->removeChunkFromDictionary(chunk_at_end);
9166 size_t word_size_change = word_size_before - new_word_size;
9167 size_t chunk_at_end_old_size = chunk_at_end->size();
9168 assert(chunk_at_end_old_size >= word_size_change,
9169 "Shrink is too large");
9170 chunk_at_end->setSize(chunk_at_end_old_size -
9171 word_size_change);
9172 _cmsSpace->freed((HeapWord*) chunk_at_end->end(),
9173 word_size_change);
9175 _cmsSpace->returnChunkToDictionary(chunk_at_end);
9177 MemRegion mr(_cmsSpace->bottom(), new_word_size);
9178 _bts->resize(new_word_size); // resize the block offset shared array
9179 Universe::heap()->barrier_set()->resize_covered_region(mr);
9180 _cmsSpace->assert_locked();
9181 _cmsSpace->set_end((HeapWord*)_virtual_space.high());
9183 NOT_PRODUCT(_cmsSpace->dictionary()->verify());
9185 // update the space and generation capacity counters
9186 if (UsePerfData) {
9187 _space_counters->update_capacity();
9188 _gen_counters->update_all();
9189 }
9191 if (Verbose && PrintGCDetails) {
9192 size_t new_mem_size = _virtual_space.committed_size();
9193 size_t old_mem_size = new_mem_size + bytes;
9194 gclog_or_tty->print_cr("Shrinking %s from %ldK by %ldK to %ldK",
9195 name(), old_mem_size/K, bytes/K, new_mem_size/K);
9196 }
9197 }
9199 assert(_cmsSpace->unallocated_block() <= _cmsSpace->end(),
9200 "Inconsistency at end of space");
9201 assert(chunk_at_end->end() == _cmsSpace->end(),
9202 "Shrinking is inconsistent");
9203 return;
9204 }
9205 }
9207 // Transfer some number of overflown objects to usual marking
9208 // stack. Return true if some objects were transferred.
9209 bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
9210 size_t num = MIN2((size_t)(_mark_stack->capacity() - _mark_stack->length())/4,
9211 (size_t)ParGCDesiredObjsFromOverflowList);
9213 bool res = _collector->take_from_overflow_list(num, _mark_stack);
9214 assert(_collector->overflow_list_is_empty() || res,
9215 "If list is not empty, we should have taken something");
9216 assert(!res || !_mark_stack->isEmpty(),
9217 "If we took something, it should now be on our stack");
9218 return res;
9219 }
9221 size_t MarkDeadObjectsClosure::do_blk(HeapWord* addr) {
9222 size_t res = _sp->block_size_no_stall(addr, _collector);
9223 assert(res != 0, "Should always be able to compute a size");
9224 if (_sp->block_is_obj(addr)) {
9225 if (_live_bit_map->isMarked(addr)) {
9226 // It can't have been dead in a previous cycle
9227 guarantee(!_dead_bit_map->isMarked(addr), "No resurrection!");
9228 } else {
9229 _dead_bit_map->mark(addr); // mark the dead object
9230 }
9231 }
9232 return res;
9233 }
9235 TraceCMSMemoryManagerStats::TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase): TraceMemoryManagerStats() {
9237 switch (phase) {
9238 case CMSCollector::InitialMarking:
9239 initialize(true /* fullGC */ ,
9240 true /* recordGCBeginTime */,
9241 true /* recordPreGCUsage */,
9242 false /* recordPeakUsage */,
9243 false /* recordPostGCusage */,
9244 true /* recordAccumulatedGCTime */,
9245 false /* recordGCEndTime */,
9246 false /* countCollection */ );
9247 break;
9249 case CMSCollector::FinalMarking:
9250 initialize(true /* fullGC */ ,
9251 false /* recordGCBeginTime */,
9252 false /* recordPreGCUsage */,
9253 false /* recordPeakUsage */,
9254 false /* recordPostGCusage */,
9255 true /* recordAccumulatedGCTime */,
9256 false /* recordGCEndTime */,
9257 false /* countCollection */ );
9258 break;
9260 case CMSCollector::Sweeping:
9261 initialize(true /* fullGC */ ,
9262 false /* recordGCBeginTime */,
9263 false /* recordPreGCUsage */,
9264 true /* recordPeakUsage */,
9265 true /* recordPostGCusage */,
9266 false /* recordAccumulatedGCTime */,
9267 true /* recordGCEndTime */,
9268 true /* countCollection */ );
9269 break;
9271 default:
9272 ShouldNotReachHere();
9273 }
9274 }
9276 // when bailing out of cms in concurrent mode failure
9277 TraceCMSMemoryManagerStats::TraceCMSMemoryManagerStats(): TraceMemoryManagerStats() {
9278 initialize(true /* fullGC */ ,
9279 true /* recordGCBeginTime */,
9280 true /* recordPreGCUsage */,
9281 true /* recordPeakUsage */,
9282 true /* recordPostGCusage */,
9283 true /* recordAccumulatedGCTime */,
9284 true /* recordGCEndTime */,
9285 true /* countCollection */ );
9286 }