Thu, 21 Aug 2014 16:44:41 +0200
8055098: WB API should be extended to provide information about size and age of object.
Summary: Extend the WhiteBox API to provide information about the size and age of objects, and add a mechanism to trigger a young GC.
Reviewed-by: tschatzl, sjohanss
Contributed-by: Leonid Mesnik <leonid.mesnik@oracle.com>
1 /*
2 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "classfile/symbolTable.hpp"
27 #include "code/codeCache.hpp"
28 #include "gc_implementation/g1/concurrentMark.inline.hpp"
29 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
30 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
31 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
32 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
33 #include "gc_implementation/g1/g1Log.hpp"
34 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
35 #include "gc_implementation/g1/g1RemSet.hpp"
36 #include "gc_implementation/g1/heapRegion.inline.hpp"
37 #include "gc_implementation/g1/heapRegionRemSet.hpp"
38 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
39 #include "gc_implementation/g1/heapRegionSet.inline.hpp"
40 #include "gc_implementation/shared/vmGCOperations.hpp"
41 #include "gc_implementation/shared/gcTimer.hpp"
42 #include "gc_implementation/shared/gcTrace.hpp"
43 #include "gc_implementation/shared/gcTraceTime.hpp"
44 #include "memory/allocation.hpp"
45 #include "memory/genOopClosures.inline.hpp"
46 #include "memory/referencePolicy.hpp"
47 #include "memory/resourceArea.hpp"
48 #include "oops/oop.inline.hpp"
49 #include "runtime/handles.inline.hpp"
50 #include "runtime/java.hpp"
51 #include "runtime/prefetch.inline.hpp"
52 #include "services/memTracker.hpp"
54 // Concurrent marking bit map wrapper
56 CMBitMapRO::CMBitMapRO(int shifter) :
57 _bm(),
58 _shifter(shifter) {
59 _bmStartWord = 0;
60 _bmWordSize = 0;
61 }
63 HeapWord* CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
64 const HeapWord* limit) const {
65 // First we must round addr *up* to a possible object boundary.
66 addr = (HeapWord*)align_size_up((intptr_t)addr,
67 HeapWordSize << _shifter);
68 size_t addrOffset = heapWordToOffset(addr);
69 if (limit == NULL) {
70 limit = _bmStartWord + _bmWordSize;
71 }
72 size_t limitOffset = heapWordToOffset(limit);
73 size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
74 HeapWord* nextAddr = offsetToHeapWord(nextOffset);
75 assert(nextAddr >= addr, "get_next_one postcondition");
76 assert(nextAddr == limit || isMarked(nextAddr),
77 "get_next_one postcondition");
78 return nextAddr;
79 }
81 HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(const HeapWord* addr,
82 const HeapWord* limit) const {
83 size_t addrOffset = heapWordToOffset(addr);
84 if (limit == NULL) {
85 limit = _bmStartWord + _bmWordSize;
86 }
87 size_t limitOffset = heapWordToOffset(limit);
88 size_t nextOffset = _bm.get_next_zero_offset(addrOffset, limitOffset);
89 HeapWord* nextAddr = offsetToHeapWord(nextOffset);
90 assert(nextAddr >= addr, "get_next_one postcondition");
91 assert(nextAddr == limit || !isMarked(nextAddr),
92 "get_next_one postcondition");
93 return nextAddr;
94 }
96 int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const {
97 assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
98 return (int) (diff >> _shifter);
99 }
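// Editor's note: a minimal standalone sketch of the address <-> bit-offset
// mapping used by the two lookups above, assuming one bitmap bit covers
// (HeapWordSize << _shifter) bytes. ToyBitMapRO and its members are
// hypothetical illustrations, not the HotSpot API.
#include <cstddef>
#include <cstdint>

typedef uintptr_t* ToyHeapWord;

struct ToyBitMapRO {
  ToyHeapWord _bmStartWord; // first heap word covered by the map
  int         _shifter;     // log2 of heap words per bitmap bit

  size_t heapWordToOffset(ToyHeapWord addr) const {
    return (size_t)(addr - _bmStartWord) >> _shifter;
  }
  ToyHeapWord offsetToHeapWord(size_t offset) const {
    return _bmStartWord + (offset << _shifter);
  }
};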
101 #ifndef PRODUCT
102 bool CMBitMapRO::covers(MemRegion heap_rs) const {
103 // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
104 assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
105 "size inconsistency");
106 return _bmStartWord == (HeapWord*)(heap_rs.start()) &&
107 _bmWordSize == heap_rs.word_size();
108 }
109 #endif
111 void CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
112 _bm.print_on_error(st, prefix);
113 }
115 size_t CMBitMap::compute_size(size_t heap_size) {
116 return heap_size / mark_distance();
117 }
119 size_t CMBitMap::mark_distance() {
120 return MinObjAlignmentInBytes * BitsPerByte;
121 }
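// Editor's note: a worked sketch of the sizing arithmetic above, under the
// assumption MinObjAlignmentInBytes == 8 (64-bit heap words). One bitmap bit
// then covers 8 heap bytes, one bitmap byte covers 64 heap bytes, and
// compute_size(heap_size) == heap_size / 64.
#include <cassert>
#include <cstddef>

static const size_t kMinObjAlignmentInBytes = 8; // assumed value
static const size_t kBitsPerByte            = 8;

static size_t toy_mark_distance() {
  return kMinObjAlignmentInBytes * kBitsPerByte;  // heap bytes per bitmap byte
}

static size_t toy_compute_size(size_t heap_size) {
  return heap_size / toy_mark_distance();
}

static void toy_bitmap_sizing_check() {
  assert(toy_compute_size(64 * 1024 * 1024) == 1024 * 1024); // 64M heap -> 1M bitmap
}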
123 void CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) {
124 _bmStartWord = heap.start();
125 _bmWordSize = heap.word_size();
127 _bm.set_map((BitMap::bm_word_t*) storage->reserved().start());
128 _bm.set_size(_bmWordSize >> _shifter);
130 storage->set_mapping_changed_listener(&_listener);
131 }
133 void CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions) {
134 // We need to clear the bitmap on commit, removing any existing information.
135 MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords);
136 _bm->clearRange(mr);
137 }
139 // Closure used for clearing the given mark bitmap.
140 class ClearBitmapHRClosure : public HeapRegionClosure {
141 private:
142 ConcurrentMark* _cm;
143 CMBitMap* _bitmap;
144 bool _may_yield; // The closure may yield during iteration. If yielded, abort the iteration.
145 public:
146 ClearBitmapHRClosure(ConcurrentMark* cm, CMBitMap* bitmap, bool may_yield) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap), _may_yield(may_yield) {
147 assert(!may_yield || cm != NULL, "CM must be non-NULL if this closure is expected to yield.");
148 }
150 virtual bool doHeapRegion(HeapRegion* r) {
151 size_t const chunk_size_in_words = M / HeapWordSize;
153 HeapWord* cur = r->bottom();
154 HeapWord* const end = r->end();
156 while (cur < end) {
157 MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
158 _bitmap->clearRange(mr);
160 cur += chunk_size_in_words;
162 // Abort iteration if after yielding the marking has been aborted.
163 if (_may_yield && _cm->do_yield_check() && _cm->has_aborted()) {
164 return true;
165 }
166 // Repeat the asserts from before the start of the closure. We will do them
167 // as asserts here to minimize their overhead on the product. However, we
168 // will have them as guarantees at the beginning / end of the bitmap
169 // clearing to get some checking in the product.
170 assert(!_may_yield || _cm->cmThread()->during_cycle(), "invariant");
171 assert(!_may_yield || !G1CollectedHeap::heap()->mark_in_progress(), "invariant");
172 }
174 return false;
175 }
176 };
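// Editor's note: a generic sketch of the chunking pattern ClearBitmapHRClosure
// uses: process a large range in fixed-size chunks and poll an abort condition
// between chunks so a concurrent phase can yield. should_abort is a
// hypothetical stand-in for the do_yield_check()/has_aborted() pair above.
#include <algorithm>
#include <cstddef>

template <typename ClearFn, typename AbortFn>
static bool toy_clear_chunked(size_t begin, size_t end, size_t chunk,
                              ClearFn clear_range, AbortFn should_abort) {
  for (size_t cur = begin; cur < end; cur += chunk) {
    clear_range(cur, std::min(cur + chunk, end));
    if (should_abort()) {
      return false; // iteration aborted; caller treats the clear as incomplete
    }
  }
  return true;
}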
178 void CMBitMap::clearAll() {
179 ClearBitmapHRClosure cl(NULL, this, false /* may_yield */);
180 G1CollectedHeap::heap()->heap_region_iterate(&cl);
181 guarantee(cl.complete(), "Must have completed iteration.");
182 return;
183 }
185 void CMBitMap::markRange(MemRegion mr) {
186 mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize)); // assign back: intersection() is const and returns the clipped region
187 assert(!mr.is_empty(), "unexpected empty region");
188 assert((offsetToHeapWord(heapWordToOffset(mr.end())) ==
189 ((HeapWord *) mr.end())),
190 "markRange memory region end is not card aligned");
191 // convert address range into offset range
192 _bm.at_put_range(heapWordToOffset(mr.start()),
193 heapWordToOffset(mr.end()), true);
194 }
196 void CMBitMap::clearRange(MemRegion mr) {
197 mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize)); // assign back: intersection() is const and returns the clipped region
198 assert(!mr.is_empty(), "unexpected empty region");
199 // convert address range into offset range
200 _bm.at_put_range(heapWordToOffset(mr.start()),
201 heapWordToOffset(mr.end()), false);
202 }
204 MemRegion CMBitMap::getAndClearMarkedRegion(HeapWord* addr,
205 HeapWord* end_addr) {
206 HeapWord* start = getNextMarkedWordAddress(addr);
207 start = MIN2(start, end_addr);
208 HeapWord* end = getNextUnmarkedWordAddress(start);
209 end = MIN2(end, end_addr);
210 assert(start <= end, "Consistency check");
211 MemRegion mr(start, end);
212 if (!mr.is_empty()) {
213 clearRange(mr);
214 }
215 return mr;
216 }
218 CMMarkStack::CMMarkStack(ConcurrentMark* cm) :
219 _base(NULL), _cm(cm)
220 #ifdef ASSERT
221 , _drain_in_progress(false)
222 , _drain_in_progress_yields(false)
223 #endif
224 {}
226 bool CMMarkStack::allocate(size_t capacity) {
227 // allocate a stack of the requisite depth
228 ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop)));
229 if (!rs.is_reserved()) {
230 warning("ConcurrentMark MarkStack allocation failure");
231 return false;
232 }
233 MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
234 if (!_virtual_space.initialize(rs, rs.size())) {
235 warning("ConcurrentMark MarkStack backing store failure");
236 // Release the virtual memory reserved for the marking stack
237 rs.release();
238 return false;
239 }
240 assert(_virtual_space.committed_size() == rs.size(),
241 "Didn't reserve backing store for all of ConcurrentMark stack?");
242 _base = (oop*) _virtual_space.low();
243 setEmpty();
244 _capacity = (jint) capacity;
245 _saved_index = -1;
246 _should_expand = false;
247 NOT_PRODUCT(_max_depth = 0);
248 return true;
249 }
251 void CMMarkStack::expand() {
252 // Called during remark if we've overflowed the marking stack during marking.
253 assert(isEmpty(), "stack should have been emptied while handling overflow");
254 assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted");
255 // Clear expansion flag
256 _should_expand = false;
257 if (_capacity == (jint) MarkStackSizeMax) {
258 if (PrintGCDetails && Verbose) {
259 gclog_or_tty->print_cr(" (benign) Can't expand marking stack capacity, at max size limit");
260 }
261 return;
262 }
263 // Double capacity if possible
264 jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax);
265 // Do not give up the existing stack until we have managed to
266 // allocate the doubled capacity that we want.
267 ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity *
268 sizeof(oop)));
269 if (rs.is_reserved()) {
270 // Release the backing store associated with old stack
271 _virtual_space.release();
272 // Reinitialize virtual space for new stack
273 if (!_virtual_space.initialize(rs, rs.size())) {
274 fatal("Not enough swap for expanded marking stack capacity");
275 }
276 _base = (oop*)(_virtual_space.low());
277 _index = 0;
278 _capacity = new_capacity;
279 } else {
280 if (PrintGCDetails && Verbose) {
281 // Failed to double capacity, continue;
282 gclog_or_tty->print(" (benign) Failed to expand marking stack capacity from "
283 SIZE_FORMAT"K to " SIZE_FORMAT"K",
284 _capacity / K, new_capacity / K);
285 }
286 }
287 }
289 void CMMarkStack::set_should_expand() {
290 // If we're resetting the marking state because of a
291 // marking stack overflow, record that we should, if
292 // possible, expand the stack.
293 _should_expand = _cm->has_overflown();
294 }
296 CMMarkStack::~CMMarkStack() {
297 if (_base != NULL) {
298 _base = NULL;
299 _virtual_space.release();
300 }
301 }
303 void CMMarkStack::par_push(oop ptr) {
304 while (true) {
305 if (isFull()) {
306 _overflow = true;
307 return;
308 }
309 // Otherwise...
310 jint index = _index;
311 jint next_index = index+1;
312 jint res = Atomic::cmpxchg(next_index, &_index, index);
313 if (res == index) {
314 _base[index] = ptr;
315 // Note that we don't maintain this atomically. We could, but it
316 // doesn't seem necessary.
317 NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
318 return;
319 }
320 // Otherwise, we need to try again.
321 }
322 }
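// Editor's note: a standalone sketch of the claim-by-cmpxchg push above, with
// std::atomic standing in for HotSpot's Atomic::cmpxchg. Only the thread that
// successfully advances _index writes the claimed slot, which is why the
// element store can safely happen after the CAS.
#include <atomic>
#include <cstdint>

struct ToyMarkStack {
  void**               _base;     // backing array with _capacity slots
  int32_t              _capacity;
  std::atomic<int32_t> _index;
  std::atomic<bool>    _overflow;

  void par_push(void* ptr) {
    while (true) {
      int32_t index = _index.load();
      if (index >= _capacity) {
        _overflow.store(true);
        return;
      }
      // Try to claim slot 'index'; on failure another pusher won, so retry.
      if (_index.compare_exchange_weak(index, index + 1)) {
        _base[index] = ptr;
        return;
      }
    }
  }
};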
324 void CMMarkStack::par_adjoin_arr(oop* ptr_arr, int n) {
325 while (true) {
326 if (isFull()) {
327 _overflow = true;
328 return;
329 }
330 // Otherwise...
331 jint index = _index;
332 jint next_index = index + n;
333 if (next_index > _capacity) {
334 _overflow = true;
335 return;
336 }
337 jint res = Atomic::cmpxchg(next_index, &_index, index);
338 if (res == index) {
339 for (int i = 0; i < n; i++) {
340 int ind = index + i;
341 assert(ind < _capacity, "By overflow test above.");
342 _base[ind] = ptr_arr[i];
343 }
344 NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
345 return;
346 }
347 // Otherwise, we need to try again.
348 }
349 }
351 void CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
352 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
353 jint start = _index;
354 jint next_index = start + n;
355 if (next_index > _capacity) {
356 _overflow = true;
357 return;
358 }
359 // Otherwise.
360 _index = next_index;
361 for (int i = 0; i < n; i++) {
362 int ind = start + i;
363 assert(ind < _capacity, "By overflow test above.");
364 _base[ind] = ptr_arr[i];
365 }
366 NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
367 }
369 bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
370 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
371 jint index = _index;
372 if (index == 0) {
373 *n = 0;
374 return false;
375 } else {
376 int k = MIN2(max, index);
377 jint new_ind = index - k;
378 for (int j = 0; j < k; j++) {
379 ptr_arr[j] = _base[new_ind + j];
380 }
381 _index = new_ind;
382 *n = k;
383 return true;
384 }
385 }
387 template<class OopClosureClass>
388 bool CMMarkStack::drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after) {
389 assert(!_drain_in_progress || !_drain_in_progress_yields || yield_after
390 || SafepointSynchronize::is_at_safepoint(),
391 "Drain recursion must be yield-safe.");
392 bool res = true;
393 debug_only(_drain_in_progress = true);
394 debug_only(_drain_in_progress_yields = yield_after);
395 while (!isEmpty()) {
396 oop newOop = pop();
397 assert(G1CollectedHeap::heap()->is_in_reserved(newOop), "Bad pop");
398 assert(newOop->is_oop(), "Expected an oop");
399 assert(bm == NULL || bm->isMarked((HeapWord*)newOop),
400 "only grey objects on this stack");
401 newOop->oop_iterate(cl);
402 if (yield_after && _cm->do_yield_check()) {
403 res = false;
404 break;
405 }
406 }
407 debug_only(_drain_in_progress = false);
408 return res;
409 }
411 void CMMarkStack::note_start_of_gc() {
412 assert(_saved_index == -1,
413 "note_start_of_gc()/end_of_gc() bracketed incorrectly");
414 _saved_index = _index;
415 }
417 void CMMarkStack::note_end_of_gc() {
418 // This is intentionally a guarantee, instead of an assert. If we
419 // accidentally add something to the mark stack during GC, it
420 // will be a correctness issue so it's better if we crash. We'll
421 // only check this once per GC anyway, so it won't be a performance
422 // issue in any way.
423 guarantee(_saved_index == _index,
424 err_msg("saved index: %d index: %d", _saved_index, _index));
425 _saved_index = -1;
426 }
428 void CMMarkStack::oops_do(OopClosure* f) {
429 assert(_saved_index == _index,
430 err_msg("saved index: %d index: %d", _saved_index, _index));
431 for (int i = 0; i < _index; i += 1) {
432 f->do_oop(&_base[i]);
433 }
434 }
436 bool ConcurrentMark::not_yet_marked(oop obj) const {
437 return _g1h->is_obj_ill(obj);
438 }
440 CMRootRegions::CMRootRegions() :
441 _young_list(NULL), _cm(NULL), _scan_in_progress(false),
442 _should_abort(false), _next_survivor(NULL) { }
444 void CMRootRegions::init(G1CollectedHeap* g1h, ConcurrentMark* cm) {
445 _young_list = g1h->young_list();
446 _cm = cm;
447 }
449 void CMRootRegions::prepare_for_scan() {
450 assert(!scan_in_progress(), "pre-condition");
452 // Currently, only survivors can be root regions.
453 assert(_next_survivor == NULL, "pre-condition");
454 _next_survivor = _young_list->first_survivor_region();
455 _scan_in_progress = (_next_survivor != NULL);
456 _should_abort = false;
457 }
459 HeapRegion* CMRootRegions::claim_next() {
460 if (_should_abort) {
461 // If someone has set the should_abort flag, we return NULL to
462 // force the caller to bail out of their loop.
463 return NULL;
464 }
466 // Currently, only survivors can be root regions.
467 HeapRegion* res = _next_survivor;
468 if (res != NULL) {
469 MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
470 // Read it again in case it changed while we were waiting for the lock.
471 res = _next_survivor;
472 if (res != NULL) {
473 if (res == _young_list->last_survivor_region()) {
474 // We just claimed the last survivor so store NULL to indicate
475 // that we're done.
476 _next_survivor = NULL;
477 } else {
478 _next_survivor = res->get_next_young_region();
479 }
480 } else {
481 // Someone else claimed the last survivor while we were trying
482 // to take the lock so nothing else to do.
483 }
484 }
485 assert(res == NULL || res->is_survivor(), "post-condition");
487 return res;
488 }
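// Editor's note: a compact sketch of the claiming protocol in claim_next():
// a racy fast-path read of the shared cursor, then a re-read under the lock
// before advancing it. ToyRegion and its fields are hypothetical stand-ins
// for HeapRegion and the young list links.
#include <mutex>

struct ToyRegion { ToyRegion* _next; };

struct ToyRootRegions {
  std::mutex _lock;
  ToyRegion* _next_survivor; // shared claim cursor
  ToyRegion* _last_survivor;

  ToyRegion* claim_next() {
    ToyRegion* res = _next_survivor;     // racy read: cheap NULL fast path
    if (res != nullptr) {
      std::lock_guard<std::mutex> x(_lock);
      res = _next_survivor;              // re-read now that we hold the lock
      if (res != nullptr) {
        _next_survivor = (res == _last_survivor) ? nullptr : res->_next;
      }
    }
    return res;
  }
};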
490 void CMRootRegions::scan_finished() {
491 assert(scan_in_progress(), "pre-condition");
493 // Currently, only survivors can be root regions.
494 if (!_should_abort) {
495 assert(_next_survivor == NULL, "we should have claimed all survivors");
496 }
497 _next_survivor = NULL;
499 {
500 MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
501 _scan_in_progress = false;
502 RootRegionScan_lock->notify_all();
503 }
504 }
506 bool CMRootRegions::wait_until_scan_finished() {
507 if (!scan_in_progress()) return false;
509 {
510 MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
511 while (scan_in_progress()) {
512 RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
513 }
514 }
515 return true;
516 }
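// Editor's note: a sketch of the scan_finished()/wait_until_scan_finished()
// handshake using std::condition_variable in place of the RootRegionScan_lock
// monitor. The finisher clears the flag and notifies; waiters loop until the
// flag drops, mirroring the wait loop above.
#include <condition_variable>
#include <mutex>

struct ToyScanGate {
  std::mutex              _lock;
  std::condition_variable _cv;
  bool                    _scan_in_progress;

  void scan_finished() {
    std::lock_guard<std::mutex> x(_lock);
    _scan_in_progress = false;
    _cv.notify_all();
  }

  bool wait_until_scan_finished() {
    if (!_scan_in_progress) return false; // racy check, as in the code above
    std::unique_lock<std::mutex> x(_lock);
    _cv.wait(x, [this] { return !_scan_in_progress; });
    return true;
  }
};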
518 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
519 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
520 #endif // _MSC_VER
522 uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
523 return MAX2((n_par_threads + 2) / 4, 1U);
524 }
526 ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage) :
527 _g1h(g1h),
528 _markBitMap1(),
529 _markBitMap2(),
530 _parallel_marking_threads(0),
531 _max_parallel_marking_threads(0),
532 _sleep_factor(0.0),
533 _marking_task_overhead(1.0),
534 _cleanup_sleep_factor(0.0),
535 _cleanup_task_overhead(1.0),
536 _cleanup_list("Cleanup List"),
537 _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/),
538 _card_bm((g1h->reserved_region().byte_size() + CardTableModRefBS::card_size - 1) >>
539 CardTableModRefBS::card_shift,
540 false /* in_resource_area*/),
542 _prevMarkBitMap(&_markBitMap1),
543 _nextMarkBitMap(&_markBitMap2),
545 _markStack(this),
546 // _finger set in set_non_marking_state
548 _max_worker_id(MAX2((uint)ParallelGCThreads, 1U)),
549 // _active_tasks set in set_non_marking_state
550 // _tasks set inside the constructor
551 _task_queues(new CMTaskQueueSet((int) _max_worker_id)),
552 _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),
554 _has_overflown(false),
555 _concurrent(false),
556 _has_aborted(false),
557 _aborted_gc_id(GCId::undefined()),
558 _restart_for_overflow(false),
559 _concurrent_marking_in_progress(false),
561 // _verbose_level set below
563 _init_times(),
564 _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
565 _cleanup_times(),
566 _total_counting_time(0.0),
567 _total_rs_scrub_time(0.0),
569 _parallel_workers(NULL),
571 _count_card_bitmaps(NULL),
572 _count_marked_bytes(NULL),
573 _completed_initialization(false) {
574 CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
575 if (verbose_level < no_verbose) {
576 verbose_level = no_verbose;
577 }
578 if (verbose_level > high_verbose) {
579 verbose_level = high_verbose;
580 }
581 _verbose_level = verbose_level;
583 if (verbose_low()) {
584 gclog_or_tty->print_cr("[global] init, heap start = "PTR_FORMAT", "
585 "heap end = " INTPTR_FORMAT, p2i(_heap_start), p2i(_heap_end));
586 }
588 _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage);
589 _markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage);
591 // Create & start a ConcurrentMark thread.
592 _cmThread = new ConcurrentMarkThread(this);
593 assert(cmThread() != NULL, "CM Thread should have been created");
594 assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
595 if (_cmThread->osthread() == NULL) {
596 vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
597 }
599 assert(CGC_lock != NULL, "Where's the CGC_lock?");
600 assert(_markBitMap1.covers(g1h->reserved_region()), "_markBitMap1 inconsistency");
601 assert(_markBitMap2.covers(g1h->reserved_region()), "_markBitMap2 inconsistency");
603 SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
604 satb_qs.set_buffer_size(G1SATBBufferSize);
606 _root_regions.init(_g1h, this);
608 if (ConcGCThreads > ParallelGCThreads) {
609 warning("Can't have more ConcGCThreads (" UINTX_FORMAT ") "
610 "than ParallelGCThreads (" UINTX_FORMAT ").",
611 ConcGCThreads, ParallelGCThreads);
612 return;
613 }
614 if (ParallelGCThreads == 0) {
615 // if we are not running with any parallel GC threads we will not
616 // spawn any marking threads either
617 _parallel_marking_threads = 0;
618 _max_parallel_marking_threads = 0;
619 _sleep_factor = 0.0;
620 _marking_task_overhead = 1.0;
621 } else {
622 if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) {
623 // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent
624 // if both are set
625 _sleep_factor = 0.0;
626 _marking_task_overhead = 1.0;
627 } else if (G1MarkingOverheadPercent > 0) {
628 // We will calculate the number of parallel marking threads based
629 // on a target overhead with respect to the soft real-time goal
630 double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
631 double overall_cm_overhead =
632 (double) MaxGCPauseMillis * marking_overhead /
633 (double) GCPauseIntervalMillis;
634 double cpu_ratio = 1.0 / (double) os::processor_count();
635 double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
636 double marking_task_overhead =
637 overall_cm_overhead / marking_thread_num *
638 (double) os::processor_count();
639 double sleep_factor =
640 (1.0 - marking_task_overhead) / marking_task_overhead;
642 FLAG_SET_ERGO(uintx, ConcGCThreads, (uint) marking_thread_num);
643 _sleep_factor = sleep_factor;
644 _marking_task_overhead = marking_task_overhead;
645 } else {
646 // Calculate the number of parallel marking threads by scaling
647 // the number of parallel GC threads.
648 uint marking_thread_num = scale_parallel_threads((uint) ParallelGCThreads);
649 FLAG_SET_ERGO(uintx, ConcGCThreads, marking_thread_num);
650 _sleep_factor = 0.0;
651 _marking_task_overhead = 1.0;
652 }
654 assert(ConcGCThreads > 0, "Should have been set");
655 _parallel_marking_threads = (uint) ConcGCThreads;
656 _max_parallel_marking_threads = _parallel_marking_threads;
658 if (parallel_marking_threads() > 1) {
659 _cleanup_task_overhead = 1.0;
660 } else {
661 _cleanup_task_overhead = marking_task_overhead();
662 }
663 _cleanup_sleep_factor =
664 (1.0 - cleanup_task_overhead()) / cleanup_task_overhead();
666 #if 0
667 gclog_or_tty->print_cr("Marking Threads %d", parallel_marking_threads());
668 gclog_or_tty->print_cr("CM Marking Task Overhead %1.4lf", marking_task_overhead());
669 gclog_or_tty->print_cr("CM Sleep Factor %1.4lf", sleep_factor());
670 gclog_or_tty->print_cr("CL Marking Task Overhead %1.4lf", cleanup_task_overhead());
671 gclog_or_tty->print_cr("CL Sleep Factor %1.4lf", cleanup_sleep_factor());
672 #endif
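// Editor's note: a standalone rerun of the G1MarkingOverheadPercent branch
// above with assumed inputs: a 10% overhead target, a 200ms pause goal over a
// 1000ms interval, and 8 processors. These values are illustrative only.
#include <cmath>

static void toy_marking_ergonomics() {
  double marking_overhead    = 10 / 100.0;                        // G1MarkingOverheadPercent = 10
  double overall_cm_overhead = 200.0 * marking_overhead / 1000.0; // = 0.02 of the whole machine
  double cpu_ratio           = 1.0 / 8.0;                         // 8 CPUs assumed
  double threads             = std::ceil(overall_cm_overhead / cpu_ratio); // ceil(0.16) = 1
  double task_overhead       = overall_cm_overhead / threads * 8.0;        // = 0.16 per thread
  double sleep_factor        = (1.0 - task_overhead) / task_overhead;      // = 5.25
  // One marking thread, sleeping 5.25x as long as it runs, keeps concurrent
  // marking near the 2% whole-machine CPU budget implied by these flags.
  (void)sleep_factor;
}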
674 guarantee(parallel_marking_threads() > 0, "peace of mind");
675 _parallel_workers = new FlexibleWorkGang("G1 Parallel Marking Threads",
676 _max_parallel_marking_threads, false, true);
677 if (_parallel_workers == NULL) {
678 vm_exit_during_initialization("Failed necessary allocation.");
679 } else {
680 _parallel_workers->initialize_workers();
681 }
682 }
684 if (FLAG_IS_DEFAULT(MarkStackSize)) {
685 uintx mark_stack_size =
686 MIN2(MarkStackSizeMax,
687 MAX2(MarkStackSize, (uintx) (parallel_marking_threads() * TASKQUEUE_SIZE)));
688 // Verify that the calculated value for MarkStackSize is in range.
689 // It would be nice to use the private utility routine from Arguments.
690 if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
691 warning("Invalid value calculated for MarkStackSize (" UINTX_FORMAT "): "
692 "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
693 mark_stack_size, (uintx) 1, MarkStackSizeMax);
694 return;
695 }
696 FLAG_SET_ERGO(uintx, MarkStackSize, mark_stack_size);
697 } else {
698 // Verify MarkStackSize is in range.
699 if (FLAG_IS_CMDLINE(MarkStackSize)) {
700 if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
701 if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
702 warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT "): "
703 "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
704 MarkStackSize, (uintx) 1, MarkStackSizeMax);
705 return;
706 }
707 } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
708 if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
709 warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT ")"
710 " or for MarkStackSizeMax (" UINTX_FORMAT ")",
711 MarkStackSize, MarkStackSizeMax);
712 return;
713 }
714 }
715 }
716 }
718 if (!_markStack.allocate(MarkStackSize)) {
719 warning("Failed to allocate CM marking stack");
720 return;
721 }
723 _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_worker_id, mtGC);
724 _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);
726 _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap, _max_worker_id, mtGC);
727 _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_worker_id, mtGC);
729 BitMap::idx_t card_bm_size = _card_bm.size();
731 // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
732 _active_tasks = _max_worker_id;
734 size_t max_regions = (size_t) _g1h->max_regions();
735 for (uint i = 0; i < _max_worker_id; ++i) {
736 CMTaskQueue* task_queue = new CMTaskQueue();
737 task_queue->initialize();
738 _task_queues->register_queue(i, task_queue);
740 _count_card_bitmaps[i] = BitMap(card_bm_size, false);
741 _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC);
743 _tasks[i] = new CMTask(i, this,
744 _count_marked_bytes[i],
745 &_count_card_bitmaps[i],
746 task_queue, _task_queues);
748 _accum_task_vtime[i] = 0.0;
749 }
751 // Calculate the card number for the bottom of the heap. Used
752 // in biasing indexes into the accounting card bitmaps.
753 _heap_bottom_card_num =
754 intptr_t(uintptr_t(_g1h->reserved_region().start()) >>
755 CardTableModRefBS::card_shift);
757 // Clear all the liveness counting data
758 clear_all_count_data();
760 // so that the call below can read a sensible value
761 _heap_start = g1h->reserved_region().start();
762 set_non_marking_state();
763 _completed_initialization = true;
764 }
766 void ConcurrentMark::reset() {
767 // Starting values for these two. This should be called in a STW
768 // phase.
769 MemRegion reserved = _g1h->g1_reserved();
770 _heap_start = reserved.start();
771 _heap_end = reserved.end();
773 // Separated the asserts so that we know which one fires.
774 assert(_heap_start != NULL, "heap bounds should look ok");
775 assert(_heap_end != NULL, "heap bounds should look ok");
776 assert(_heap_start < _heap_end, "heap bounds should look ok");
778 // Reset all the marking data structures and any necessary flags
779 reset_marking_state();
781 if (verbose_low()) {
782 gclog_or_tty->print_cr("[global] resetting");
783 }
785 // We do reset all of them, since different phases will use a
786 // different number of active threads. So, it's easiest to have all
787 // of them ready.
788 for (uint i = 0; i < _max_worker_id; ++i) {
789 _tasks[i]->reset(_nextMarkBitMap);
790 }
792 // we need this to make sure that the flag is on during the evac
793 // pause with initial mark piggy-backed
794 set_concurrent_marking_in_progress();
795 }
798 void ConcurrentMark::reset_marking_state(bool clear_overflow) {
799 _markStack.set_should_expand();
800 _markStack.setEmpty(); // Also clears the _markStack overflow flag
801 if (clear_overflow) {
802 clear_has_overflown();
803 } else {
804 assert(has_overflown(), "pre-condition");
805 }
806 _finger = _heap_start;
808 for (uint i = 0; i < _max_worker_id; ++i) {
809 CMTaskQueue* queue = _task_queues->queue(i);
810 queue->set_empty();
811 }
812 }
814 void ConcurrentMark::set_concurrency(uint active_tasks) {
815 assert(active_tasks <= _max_worker_id, "we should not have more");
817 _active_tasks = active_tasks;
818 // Need to update the three data structures below according to the
819 // number of active threads for this phase.
820 _terminator = ParallelTaskTerminator((int) active_tasks, _task_queues);
821 _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
822 _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
823 }
825 void ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
826 set_concurrency(active_tasks);
828 _concurrent = concurrent;
829 // We propagate this to all tasks, not just the active ones.
830 for (uint i = 0; i < _max_worker_id; ++i)
831 _tasks[i]->set_concurrent(concurrent);
833 if (concurrent) {
834 set_concurrent_marking_in_progress();
835 } else {
836 // We currently assume that the concurrent flag has been set to
837 // false before we start remark. At this point we should also be
838 // in a STW phase.
839 assert(!concurrent_marking_in_progress(), "invariant");
840 assert(out_of_regions(),
841 err_msg("only way to get here: _finger: "PTR_FORMAT", _heap_end: "PTR_FORMAT,
842 p2i(_finger), p2i(_heap_end)));
843 }
844 }
846 void ConcurrentMark::set_non_marking_state() {
847 // We set the global marking state to some default values when we're
848 // not doing marking.
849 reset_marking_state();
850 _active_tasks = 0;
851 clear_concurrent_marking_in_progress();
852 }
854 ConcurrentMark::~ConcurrentMark() {
855 // The ConcurrentMark instance is never freed.
856 ShouldNotReachHere();
857 }
859 void ConcurrentMark::clearNextBitmap() {
860 G1CollectedHeap* g1h = G1CollectedHeap::heap();
862 // Make sure that the concurrent mark thread appears to still be in
863 // the current cycle.
864 guarantee(cmThread()->during_cycle(), "invariant");
866 // We are finishing up the current cycle by clearing the next
867 // marking bitmap and getting it ready for the next cycle. During
868 // this time no other cycle can start. So, let's make sure that this
869 // is the case.
870 guarantee(!g1h->mark_in_progress(), "invariant");
872 ClearBitmapHRClosure cl(this, _nextMarkBitMap, true /* may_yield */);
873 g1h->heap_region_iterate(&cl);
875 // Clear the liveness counting data. If the marking has been aborted, the abort()
876 // call already did that.
877 if (cl.complete()) {
878 clear_all_count_data();
879 }
881 // Repeat the asserts from above.
882 guarantee(cmThread()->during_cycle(), "invariant");
883 guarantee(!g1h->mark_in_progress(), "invariant");
884 }
886 class CheckBitmapClearHRClosure : public HeapRegionClosure {
887 CMBitMap* _bitmap;
888 bool _error;
889 public:
890 CheckBitmapClearHRClosure(CMBitMap* bitmap) : _bitmap(bitmap) {
891 }
893 virtual bool doHeapRegion(HeapRegion* r) {
894 return _bitmap->getNextMarkedWordAddress(r->bottom(), r->end()) != r->end();
895 }
896 };
898 bool ConcurrentMark::nextMarkBitmapIsClear() {
899 CheckBitmapClearHRClosure cl(_nextMarkBitMap);
900 _g1h->heap_region_iterate(&cl);
901 return cl.complete();
902 }
904 class NoteStartOfMarkHRClosure: public HeapRegionClosure {
905 public:
906 bool doHeapRegion(HeapRegion* r) {
907 if (!r->continuesHumongous()) {
908 r->note_start_of_marking();
909 }
910 return false;
911 }
912 };
914 void ConcurrentMark::checkpointRootsInitialPre() {
915 G1CollectedHeap* g1h = G1CollectedHeap::heap();
916 G1CollectorPolicy* g1p = g1h->g1_policy();
918 _has_aborted = false;
920 #ifndef PRODUCT
921 if (G1PrintReachableAtInitialMark) {
922 print_reachable("at-cycle-start",
923 VerifyOption_G1UsePrevMarking, true /* all */);
924 }
925 #endif
927 // Initialise marking structures. This has to be done in a STW phase.
928 reset();
930 // For each region note start of marking.
931 NoteStartOfMarkHRClosure startcl;
932 g1h->heap_region_iterate(&startcl);
933 }
936 void ConcurrentMark::checkpointRootsInitialPost() {
937 G1CollectedHeap* g1h = G1CollectedHeap::heap();
939 // If we force an overflow during remark, the remark operation will
940 // actually abort and we'll restart concurrent marking. If we always
941 // force an overflow during remark we'll never actually complete the
942 // marking phase. So, we initialize this here, at the start of the
943 // cycle, so that the remaining overflow number will decrease at
944 // every remark and we'll eventually not need to cause one.
945 force_overflow_stw()->init();
947 // Start Concurrent Marking weak-reference discovery.
948 ReferenceProcessor* rp = g1h->ref_processor_cm();
949 // enable ("weak") refs discovery
950 rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
951 rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
953 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
954 // This is the start of the marking cycle. We expect all
955 // threads to have SATB queues with active set to false.
956 satb_mq_set.set_active_all_threads(true, /* new active value */
957 false /* expected_active */);
959 _root_regions.prepare_for_scan();
961 // update_g1_committed() will be called at the end of an evac pause
962 // when marking is on. So, it's also called at the end of the
963 // initial-mark pause to update the heap end, if the heap expands
964 // during it. No need to call it here.
965 }
967 /*
968 * Notice that in the next two methods, we actually leave the STS
969 * during the barrier sync and join it immediately afterwards. If we
970 * do not do this, the following deadlock can occur: one thread could
971 * be in the barrier sync code, waiting for the other thread to also
972 * sync up, whereas another one could be trying to yield, while also
973 * waiting for the other threads to sync up too.
974 *
975 * Note, however, that this code is also used during remark and in
976 * this case we should not attempt to leave / enter the STS, otherwise
977 * we'll either hit an assert (debug / fastdebug) or deadlock
978 * (product). So we should only leave / enter the STS if we are
979 * operating concurrently.
980 *
981 * Because the thread that does the sync barrier has left the STS, it
982 * is possible for it to be suspended by a Full GC, or for an evacuation
983 * pause to occur. This is actually safe, since entering the sync
984 * barrier is one of the last things do_marking_step() does, and it
985 * doesn't manipulate any data structures afterwards.
986 */
988 void ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
989 if (verbose_low()) {
990 gclog_or_tty->print_cr("[%u] entering first barrier", worker_id);
991 }
993 if (concurrent()) {
994 SuspendibleThreadSet::leave();
995 }
997 bool barrier_aborted = !_first_overflow_barrier_sync.enter();
999 if (concurrent()) {
1000 SuspendibleThreadSet::join();
1001 }
1002 // at this point everyone should have synced up and not be doing any
1003 // more work
1005 if (verbose_low()) {
1006 if (barrier_aborted) {
1007 gclog_or_tty->print_cr("[%u] aborted first barrier", worker_id);
1008 } else {
1009 gclog_or_tty->print_cr("[%u] leaving first barrier", worker_id);
1010 }
1011 }
1013 if (barrier_aborted) {
1014 // If the barrier aborted we ignore the overflow condition and
1015 // just abort the whole marking phase as quickly as possible.
1016 return;
1017 }
1019 // If we're executing the concurrent phase of marking, reset the marking
1020 // state; otherwise the marking state is reset after reference processing,
1021 // during the remark pause.
1022 // If we reset here as a result of an overflow during the remark we will
1023 // see assertion failures from any subsequent set_concurrency_and_phase()
1024 // calls.
1025 if (concurrent()) {
1026 // let the task associated with worker 0 do this
1027 if (worker_id == 0) {
1028 // task 0 is responsible for clearing the global data structures
1029 // We should be here because of an overflow. During STW we should
1030 // not clear the overflow flag since we rely on it being true when
1031 // we exit this method to abort the pause and restart concurrent
1032 // marking.
1033 reset_marking_state(true /* clear_overflow */);
1034 force_overflow()->update();
1036 if (G1Log::fine()) {
1037 gclog_or_tty->gclog_stamp(concurrent_gc_id());
1038 gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
1039 }
1040 }
1041 }
1043 // after this, each task should reset its own data structures and
1044 // then go into the second barrier
1045 }
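// Editor's note: a minimal sketch of the leave/enter discipline around the
// barrier sync described in the block comment before these two methods.
// sts_leave(), sts_join() and toy_barrier_enter() are hypothetical stubs for
// SuspendibleThreadSet and the overflow barrier; the point is the ordering:
// drop out of the suspendible set before blocking, rejoin afterwards, so a
// thread parked at the barrier can never hold up a safepoint.
static void sts_leave() { /* stub: leave the suspendible thread set */ }
static void sts_join()  { /* stub: rejoin the suspendible thread set */ }
static bool toy_barrier_enter() { return true; /* stub: may be aborted */ }

static bool toy_sync_barrier(bool concurrent) {
  if (concurrent) {
    sts_leave();                      // don't block a safepoint while parked
  }
  bool entered = toy_barrier_enter(); // false would mean the barrier aborted
  if (concurrent) {
    sts_join();                       // safe: nothing left to do afterwards
  }
  return entered;
}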
1047 void ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
1048 if (verbose_low()) {
1049 gclog_or_tty->print_cr("[%u] entering second barrier", worker_id);
1050 }
1052 if (concurrent()) {
1053 SuspendibleThreadSet::leave();
1054 }
1056 bool barrier_aborted = !_second_overflow_barrier_sync.enter();
1058 if (concurrent()) {
1059 SuspendibleThreadSet::join();
1060 }
1061 // at this point everything should be re-initialized and ready to go
1063 if (verbose_low()) {
1064 if (barrier_aborted) {
1065 gclog_or_tty->print_cr("[%u] aborted second barrier", worker_id);
1066 } else {
1067 gclog_or_tty->print_cr("[%u] leaving second barrier", worker_id);
1068 }
1069 }
1070 }
1072 #ifndef PRODUCT
1073 void ForceOverflowSettings::init() {
1074 _num_remaining = G1ConcMarkForceOverflow;
1075 _force = false;
1076 update();
1077 }
1079 void ForceOverflowSettings::update() {
1080 if (_num_remaining > 0) {
1081 _num_remaining -= 1;
1082 _force = true;
1083 } else {
1084 _force = false;
1085 }
1086 }
1088 bool ForceOverflowSettings::should_force() {
1089 if (_force) {
1090 _force = false;
1091 return true;
1092 } else {
1093 return false;
1094 }
1095 }
1096 #endif // !PRODUCT
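// Editor's note: a tiny standalone rendering of the ForceOverflowSettings
// test hook above. Arming it with n > 0 (the G1ConcMarkForceOverflow debug
// flag) makes should_force() answer true once per update() for the first n
// updates, injecting one artificial overflow per remark while testing.
struct ToyForceOverflow {
  int  _num_remaining;
  bool _force;

  void init(int n) { _num_remaining = n; _force = false; update(); }

  void update() {
    if (_num_remaining > 0) { _num_remaining -= 1; _force = true; }
    else                    { _force = false; }
  }

  bool should_force() {
    if (_force) { _force = false; return true; }
    return false;
  }
};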
1098 class CMConcurrentMarkingTask: public AbstractGangTask {
1099 private:
1100 ConcurrentMark* _cm;
1101 ConcurrentMarkThread* _cmt;
1103 public:
1104 void work(uint worker_id) {
1105 assert(Thread::current()->is_ConcurrentGC_thread(),
1106 "this should only be done by a conc GC thread");
1107 ResourceMark rm;
1109 double start_vtime = os::elapsedVTime();
1111 SuspendibleThreadSet::join();
1113 assert(worker_id < _cm->active_tasks(), "invariant");
1114 CMTask* the_task = _cm->task(worker_id);
1115 the_task->record_start_time();
1116 if (!_cm->has_aborted()) {
1117 do {
1118 double start_vtime_sec = os::elapsedVTime();
1119 double start_time_sec = os::elapsedTime();
1120 double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
1122 the_task->do_marking_step(mark_step_duration_ms,
1123 true /* do_termination */,
1124 false /* is_serial*/);
1126 double end_time_sec = os::elapsedTime();
1127 double end_vtime_sec = os::elapsedVTime();
1128 double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
1129 double elapsed_time_sec = end_time_sec - start_time_sec;
1130 _cm->clear_has_overflown();
1132 bool ret = _cm->do_yield_check(worker_id);
1134 jlong sleep_time_ms;
1135 if (!_cm->has_aborted() && the_task->has_aborted()) {
1136 sleep_time_ms =
1137 (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
1138 SuspendibleThreadSet::leave();
1139 os::sleep(Thread::current(), sleep_time_ms, false);
1140 SuspendibleThreadSet::join();
1141 }
1142 double end_time2_sec = os::elapsedTime();
1143 double elapsed_time2_sec = end_time2_sec - start_time_sec;
1145 #if 0
1146 gclog_or_tty->print_cr("CM: elapsed %1.4lf ms, sleep %1.4lf ms, "
1147 "overhead %1.4lf",
1148 elapsed_vtime_sec * 1000.0, (double) sleep_time_ms,
1149 the_task->conc_overhead(os::elapsedTime()) * 8.0);
1150 gclog_or_tty->print_cr("elapsed time %1.4lf ms, time 2: %1.4lf ms",
1151 elapsed_time_sec * 1000.0, elapsed_time2_sec * 1000.0);
1152 #endif
1153 } while (!_cm->has_aborted() && the_task->has_aborted());
1154 }
1155 the_task->record_end_time();
1156 guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");
1158 SuspendibleThreadSet::leave();
1160 double end_vtime = os::elapsedVTime();
1161 _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
1162 }
1164 CMConcurrentMarkingTask(ConcurrentMark* cm,
1165 ConcurrentMarkThread* cmt) :
1166 AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }
1168 ~CMConcurrentMarkingTask() { }
1169 };
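// Editor's note: a small sketch of the throttling step in the task above:
// after a marking step that consumed elapsed_vtime_sec seconds of CPU, the
// worker sleeps elapsed_vtime * sleep_factor milliseconds. With the sleep
// factor of 5.25 from the ergonomics example earlier, a 10ms step is
// followed by a ~52ms sleep.
#include <cstdint>

static int64_t toy_sleep_time_ms(double elapsed_vtime_sec, double sleep_factor) {
  return (int64_t)(elapsed_vtime_sec * sleep_factor * 1000.0);
}
// toy_sleep_time_ms(0.010, 5.25) == 52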
1171 // Calculates the number of active workers for a concurrent
1172 // phase.
1173 uint ConcurrentMark::calc_parallel_marking_threads() {
1174 if (G1CollectedHeap::use_parallel_gc_threads()) {
1175 uint n_conc_workers = 0;
1176 if (!UseDynamicNumberOfGCThreads ||
1177 (!FLAG_IS_DEFAULT(ConcGCThreads) &&
1178 !ForceDynamicNumberOfGCThreads)) {
1179 n_conc_workers = max_parallel_marking_threads();
1180 } else {
1181 n_conc_workers =
1182 AdaptiveSizePolicy::calc_default_active_workers(
1183 max_parallel_marking_threads(),
1184 1, /* Minimum workers */
1185 parallel_marking_threads(),
1186 Threads::number_of_non_daemon_threads());
1187 // Don't scale down "n_conc_workers" by scale_parallel_threads() because
1188 // that scaling has already gone into "_max_parallel_marking_threads".
1189 }
1190 assert(n_conc_workers > 0, "Always need at least 1");
1191 return n_conc_workers;
1192 }
1193 // If we are not running with any parallel GC threads we will not
1194 // have spawned any marking threads either. Hence the number of
1195 // concurrent workers should be 0.
1196 return 0;
1197 }
1199 void ConcurrentMark::scanRootRegion(HeapRegion* hr, uint worker_id) {
1200 // Currently, only survivors can be root regions.
1201 assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
1202 G1RootRegionScanClosure cl(_g1h, this, worker_id);
1204 const uintx interval = PrefetchScanIntervalInBytes;
1205 HeapWord* curr = hr->bottom();
1206 const HeapWord* end = hr->top();
1207 while (curr < end) {
1208 Prefetch::read(curr, interval);
1209 oop obj = oop(curr);
1210 int size = obj->oop_iterate(&cl);
1211 assert(size == obj->size(), "sanity");
1212 curr += size;
1213 }
1214 }
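// Editor's note: a generic sketch of the region walk in scanRootRegion():
// visit each object in [bottom, top) and advance by the size the visitor
// reports. The visit callback is a hypothetical stand-in; in HotSpot,
// oop_iterate() both applies the closure and returns the object's size.
#include <cstddef>

template <typename VisitFn>
static void toy_walk_region(char* bottom, char* top, VisitFn visit) {
  for (char* curr = bottom; curr < top; ) {
    size_t size_in_bytes = visit(curr); // returns the size of the object at curr
    curr += size_in_bytes;
  }
}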
1216 class CMRootRegionScanTask : public AbstractGangTask {
1217 private:
1218 ConcurrentMark* _cm;
1220 public:
1221 CMRootRegionScanTask(ConcurrentMark* cm) :
1222 AbstractGangTask("Root Region Scan"), _cm(cm) { }
1224 void work(uint worker_id) {
1225 assert(Thread::current()->is_ConcurrentGC_thread(),
1226 "this should only be done by a conc GC thread");
1228 CMRootRegions* root_regions = _cm->root_regions();
1229 HeapRegion* hr = root_regions->claim_next();
1230 while (hr != NULL) {
1231 _cm->scanRootRegion(hr, worker_id);
1232 hr = root_regions->claim_next();
1233 }
1234 }
1235 };
1237 void ConcurrentMark::scanRootRegions() {
1238 // Start of concurrent marking.
1239 ClassLoaderDataGraph::clear_claimed_marks();
1241 // scan_in_progress() will have been set to true only if there was
1242 // at least one root region to scan. So, if it's false, we
1243 // should not attempt to do any further work.
1244 if (root_regions()->scan_in_progress()) {
1245 _parallel_marking_threads = calc_parallel_marking_threads();
1246 assert(parallel_marking_threads() <= max_parallel_marking_threads(),
1247 "Maximum number of marking threads exceeded");
1248 uint active_workers = MAX2(1U, parallel_marking_threads());
1250 CMRootRegionScanTask task(this);
1251 if (use_parallel_marking_threads()) {
1252 _parallel_workers->set_active_workers((int) active_workers);
1253 _parallel_workers->run_task(&task);
1254 } else {
1255 task.work(0);
1256 }
1258 // It's possible that has_aborted() is true here without actually
1259 // aborting the survivor scan earlier. This is OK as it's
1260 // mainly used for sanity checking.
1261 root_regions()->scan_finished();
1262 }
1263 }
1265 void ConcurrentMark::markFromRoots() {
1266 // we might be tempted to assert that:
1267 // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
1268 // "inconsistent argument?");
1269 // However that wouldn't be right, because it's possible that
1270 // a safepoint is indeed in progress as a younger generation
1271 // stop-the-world GC happens even as we mark in this generation.
1273 _restart_for_overflow = false;
1274 force_overflow_conc()->init();
1276 // _g1h has _n_par_threads
1277 _parallel_marking_threads = calc_parallel_marking_threads();
1278 assert(parallel_marking_threads() <= max_parallel_marking_threads(),
1279 "Maximum number of marking threads exceeded");
1281 uint active_workers = MAX2(1U, parallel_marking_threads());
1283 // Parallel task terminator is set in "set_concurrency_and_phase()"
1284 set_concurrency_and_phase(active_workers, true /* concurrent */);
1286 CMConcurrentMarkingTask markingTask(this, cmThread());
1287 if (use_parallel_marking_threads()) {
1288 _parallel_workers->set_active_workers((int)active_workers);
1289 // Don't set _n_par_threads because it affects MT in process_roots()
1290 // and the decisions on that MT processing are made elsewhere.
1291 assert(_parallel_workers->active_workers() > 0, "Should have been set");
1292 _parallel_workers->run_task(&markingTask);
1293 } else {
1294 markingTask.work(0);
1295 }
1296 print_stats();
1297 }
1299 void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
1300 // world is stopped at this checkpoint
1301 assert(SafepointSynchronize::is_at_safepoint(),
1302 "world should be stopped");
1304 G1CollectedHeap* g1h = G1CollectedHeap::heap();
1306 // If a full collection has happened, we shouldn't do this.
1307 if (has_aborted()) {
1308 g1h->set_marking_complete(); // So bitmap clearing isn't confused
1309 return;
1310 }
1312 SvcGCMarker sgcm(SvcGCMarker::OTHER);
1314 if (VerifyDuringGC) {
1315 HandleMark hm; // handle scope
1316 Universe::heap()->prepare_for_verify();
1317 Universe::verify(VerifyOption_G1UsePrevMarking,
1318 " VerifyDuringGC:(before)");
1319 }
1320 g1h->check_bitmaps("Remark Start");
1322 G1CollectorPolicy* g1p = g1h->g1_policy();
1323 g1p->record_concurrent_mark_remark_start();
1325 double start = os::elapsedTime();
1327 checkpointRootsFinalWork();
1329 double mark_work_end = os::elapsedTime();
1331 weakRefsWork(clear_all_soft_refs);
1333 if (has_overflown()) {
1334 // Oops. We overflowed. Restart concurrent marking.
1335 _restart_for_overflow = true;
1336 if (G1TraceMarkStackOverflow) {
1337 gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
1338 }
1340 // Verify the heap w.r.t. the previous marking bitmap.
1341 if (VerifyDuringGC) {
1342 HandleMark hm; // handle scope
1343 Universe::heap()->prepare_for_verify();
1344 Universe::verify(VerifyOption_G1UsePrevMarking,
1345 " VerifyDuringGC:(overflow)");
1346 }
1348 // Clear the marking state because we will be restarting
1349 // marking due to overflowing the global mark stack.
1350 reset_marking_state();
1351 } else {
1352 // Aggregate the per-task counting data that we have accumulated
1353 // while marking.
1354 aggregate_count_data();
1356 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
1357 // We're done with marking.
1358 // This is the end of the marking cycle. We expect all
1359 // threads to have SATB queues with active set to true.
1360 satb_mq_set.set_active_all_threads(false, /* new active value */
1361 true /* expected_active */);
1363 if (VerifyDuringGC) {
1364 HandleMark hm; // handle scope
1365 Universe::heap()->prepare_for_verify();
1366 Universe::verify(VerifyOption_G1UseNextMarking,
1367 " VerifyDuringGC:(after)");
1368 }
1369 g1h->check_bitmaps("Remark End");
1370 assert(!restart_for_overflow(), "sanity");
1371 // Completely reset the marking state since marking completed
1372 set_non_marking_state();
1373 }
1375 // Expand the marking stack, if we have to and if we can.
1376 if (_markStack.should_expand()) {
1377 _markStack.expand();
1378 }
1380 // Statistics
1381 double now = os::elapsedTime();
1382 _remark_mark_times.add((mark_work_end - start) * 1000.0);
1383 _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
1384 _remark_times.add((now - start) * 1000.0);
1386 g1p->record_concurrent_mark_remark_end();
1388 G1CMIsAliveClosure is_alive(g1h);
1389 g1h->gc_tracer_cm()->report_object_count_after_gc(&is_alive);
1390 }
1392 // Base class of the closures that finalize and verify the
1393 // liveness counting data.
1394 class CMCountDataClosureBase: public HeapRegionClosure {
1395 protected:
1396 G1CollectedHeap* _g1h;
1397 ConcurrentMark* _cm;
1398 CardTableModRefBS* _ct_bs;
1400 BitMap* _region_bm;
1401 BitMap* _card_bm;
1403 // Takes a region that's not empty (i.e., it has at least one
1404 // live object in it) and sets its corresponding bit on the region
1405 // bitmap to 1. If the region is "starts humongous" it will also set
1406 // to 1 the bits on the region bitmap that correspond to its
1407 // associated "continues humongous" regions.
1408 void set_bit_for_region(HeapRegion* hr) {
1409 assert(!hr->continuesHumongous(), "should have filtered those out");
1411 BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();
1412 if (!hr->startsHumongous()) {
1413 // Normal (non-humongous) case: just set the bit.
1414 _region_bm->par_at_put(index, true);
1415 } else {
1416 // Starts humongous case: calculate how many regions are part of
1417 // this humongous region and then set the bit range.
1418 BitMap::idx_t end_index = (BitMap::idx_t) hr->last_hc_index();
1419 _region_bm->par_at_put_range(index, end_index, true);
1420 }
1421 }
1423 public:
1424 CMCountDataClosureBase(G1CollectedHeap* g1h,
1425 BitMap* region_bm, BitMap* card_bm):
1426 _g1h(g1h), _cm(g1h->concurrent_mark()),
1427 _ct_bs((CardTableModRefBS*) (g1h->barrier_set())),
1428 _region_bm(region_bm), _card_bm(card_bm) { }
1429 };
1431 // Closure that calculates the # live objects per region. Used
1432 // for verification purposes during the cleanup pause.
1433 class CalcLiveObjectsClosure: public CMCountDataClosureBase {
1434 CMBitMapRO* _bm;
1435 size_t _region_marked_bytes;
1437 public:
1438 CalcLiveObjectsClosure(CMBitMapRO *bm, G1CollectedHeap* g1h,
1439 BitMap* region_bm, BitMap* card_bm) :
1440 CMCountDataClosureBase(g1h, region_bm, card_bm),
1441 _bm(bm), _region_marked_bytes(0) { }
1443 bool doHeapRegion(HeapRegion* hr) {
1445 if (hr->continuesHumongous()) {
1446 // We will ignore these here and process them when their
1447 // associated "starts humongous" region is processed (see
1448 // set_bit_for_heap_region()). Note that we cannot rely on their
1449 // associated "starts humongous" region to have their bit set to
1450 // 1 since, due to the region chunking in the parallel region
1451 // iteration, a "continues humongous" region might be visited
1452 // before its associated "starts humongous".
1453 return false;
1454 }
1456 HeapWord* ntams = hr->next_top_at_mark_start();
1457 HeapWord* start = hr->bottom();
1459 assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
1460 err_msg("Preconditions not met - "
1461 "start: "PTR_FORMAT", ntams: "PTR_FORMAT", end: "PTR_FORMAT,
1462 p2i(start), p2i(ntams), p2i(hr->end())));
1464 // Find the first marked object at or after "start".
1465 start = _bm->getNextMarkedWordAddress(start, ntams);
1467 size_t marked_bytes = 0;
1469 while (start < ntams) {
1470 oop obj = oop(start);
1471 int obj_sz = obj->size();
1472 HeapWord* obj_end = start + obj_sz;
1474 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
1475 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end);
1477 // Note: if we're looking at the last region in heap - obj_end
1478 // could be actually just beyond the end of the heap; end_idx
1479 // will then correspond to a (non-existent) card that is also
1480 // just beyond the heap.
1481 if (_g1h->is_in_g1_reserved(obj_end) && !_ct_bs->is_card_aligned(obj_end)) {
1482 // end of object is not card aligned - increment to cover
1483 // all the cards spanned by the object
1484 end_idx += 1;
1485 }
1487 // Set the bits in the card BM for the cards spanned by this object.
1488 _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1490 // Add the size of this object to the number of marked bytes.
1491 marked_bytes += (size_t)obj_sz * HeapWordSize;
1493 // Find the next marked object after this one.
1494 start = _bm->getNextMarkedWordAddress(obj_end, ntams);
1495 }
1497 // Mark the allocated-since-marking portion...
1498 HeapWord* top = hr->top();
1499 if (ntams < top) {
1500 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
1501 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
1503 // Note: if we're looking at the last region in heap - top
1504 // could be actually just beyond the end of the heap; end_idx
1505 // will then correspond to a (non-existent) card that is also
1506 // just beyond the heap.
1507 if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
1508 // end of object is not card aligned - increment to cover
1509 // all the cards spanned by the object
1510 end_idx += 1;
1511 }
1512 _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1514 // This definitely means the region has live objects.
1515 set_bit_for_region(hr);
1516 }
1518 // Update the live region bitmap.
1519 if (marked_bytes > 0) {
1520 set_bit_for_region(hr);
1521 }
1523 // Set the marked bytes for the current region so that
1524 // it can be queried by a calling verification routine
1525 _region_marked_bytes = marked_bytes;
1527 return false;
1528 }
1530 size_t region_marked_bytes() const { return _region_marked_bytes; }
1531 };
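// Editor's note: a standalone sketch of the card-index arithmetic used in
// CalcLiveObjectsClosure above, assuming 512-byte cards (the usual
// CardTableModRefBS::card_size). The end index is exclusive, so it is bumped
// by one when the object's end is not card aligned, exactly as in the
// closure's end_idx adjustment.
#include <cstddef>
#include <cstdint>

static const uintptr_t kCardSize  = 512; // assumed card size in bytes
static const int       kCardShift = 9;   // log2(kCardSize)

static void toy_card_range(uintptr_t obj_start, uintptr_t obj_end,
                           size_t* start_idx, size_t* end_idx) {
  *start_idx = (size_t)(obj_start >> kCardShift);
  *end_idx   = (size_t)(obj_end   >> kCardShift);
  if ((obj_end & (kCardSize - 1)) != 0) {
    *end_idx += 1; // cover the partially-filled last card
  }
}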
1533 // Heap region closure used for verifying the counting data
1534 // that was accumulated concurrently and aggregated during
1535 // the remark pause. This closure is applied to the heap
1536 // regions during the STW cleanup pause.
1538 class VerifyLiveObjectDataHRClosure: public HeapRegionClosure {
1539 G1CollectedHeap* _g1h;
1540 ConcurrentMark* _cm;
1541 CalcLiveObjectsClosure _calc_cl;
1542 BitMap* _region_bm; // Region BM to be verified
1543 BitMap* _card_bm; // Card BM to be verified
1544 bool _verbose; // verbose output?
1546 BitMap* _exp_region_bm; // Expected Region BM values
1547 BitMap* _exp_card_bm; // Expected card BM values
1549 int _failures;
1551 public:
1552 VerifyLiveObjectDataHRClosure(G1CollectedHeap* g1h,
1553 BitMap* region_bm,
1554 BitMap* card_bm,
1555 BitMap* exp_region_bm,
1556 BitMap* exp_card_bm,
1557 bool verbose) :
1558 _g1h(g1h), _cm(g1h->concurrent_mark()),
1559 _calc_cl(_cm->nextMarkBitMap(), g1h, exp_region_bm, exp_card_bm),
1560 _region_bm(region_bm), _card_bm(card_bm), _verbose(verbose),
1561 _exp_region_bm(exp_region_bm), _exp_card_bm(exp_card_bm),
1562 _failures(0) { }
1564 int failures() const { return _failures; }
1566 bool doHeapRegion(HeapRegion* hr) {
1567 if (hr->continuesHumongous()) {
1568 // We will ignore these here and process them when their
1569 // associated "starts humongous" region is processed (see
1570 // set_bit_for_heap_region()). Note that we cannot rely on their
1571 // associated "starts humongous" region to have their bit set to
1572 // 1 since, due to the region chunking in the parallel region
1573 // iteration, a "continues humongous" region might be visited
1574 // before its associated "starts humongous".
1575 return false;
1576 }
1578 int failures = 0;
1580 // Call the CalcLiveObjectsClosure to walk the marking bitmap for
1581 // this region and set the corresponding bits in the expected region
1582 // and card bitmaps.
1583 bool res = _calc_cl.doHeapRegion(hr);
1584 assert(res == false, "should be continuing");
1586 MutexLockerEx x((_verbose ? ParGCRareEvent_lock : NULL),
1587 Mutex::_no_safepoint_check_flag);
1589 // Verify the marked bytes for this region.
1590 size_t exp_marked_bytes = _calc_cl.region_marked_bytes();
1591 size_t act_marked_bytes = hr->next_marked_bytes();
1593 // We're not OK if expected marked bytes > actual marked bytes. It means
1594 // we have missed accounting for some objects during the actual marking.
1595 if (exp_marked_bytes > act_marked_bytes) {
1596 if (_verbose) {
1597 gclog_or_tty->print_cr("Region %u: marked bytes mismatch: "
1598 "expected: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
1599 hr->hrs_index(), exp_marked_bytes, act_marked_bytes);
1600 }
1601 failures += 1;
1602 }
1604 // Verify the bit, for this region, in the actual and expected
1605 // (which was just calculated) region bit maps.
1606 // We're not OK if the bit in the calculated expected region
1607 // bitmap is set and the bit in the actual region bitmap is not.
1608 BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();
1610 bool expected = _exp_region_bm->at(index);
1611 bool actual = _region_bm->at(index);
1612 if (expected && !actual) {
1613 if (_verbose) {
1614 gclog_or_tty->print_cr("Region %u: region bitmap mismatch: "
1615 "expected: %s, actual: %s",
1616 hr->hrs_index(),
1617 BOOL_TO_STR(expected), BOOL_TO_STR(actual));
1618 }
1619 failures += 1;
1620 }
1622 // Verify that the card bit maps for the cards spanned by the current
1623 // region match. We have an error if we have a set bit in the expected
1624 // bit map and the corresponding bit in the actual bitmap is not set.
1626 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(hr->bottom());
1627 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(hr->top());
1629 for (BitMap::idx_t i = start_idx; i < end_idx; i+=1) {
1630 expected = _exp_card_bm->at(i);
1631 actual = _card_bm->at(i);
1633 if (expected && !actual) {
1634 if (_verbose) {
1635 gclog_or_tty->print_cr("Region %u: card bitmap mismatch at " SIZE_FORMAT ": "
1636 "expected: %s, actual: %s",
1637 hr->hrs_index(), i,
1638 BOOL_TO_STR(expected), BOOL_TO_STR(actual));
1639 }
1640 failures += 1;
1641 }
1642 }
1644 if (failures > 0 && _verbose) {
1645 gclog_or_tty->print_cr("Region " HR_FORMAT ", ntams: " PTR_FORMAT ", "
1646 "marked_bytes: calc/actual " SIZE_FORMAT "/" SIZE_FORMAT,
1647 HR_FORMAT_PARAMS(hr), p2i(hr->next_top_at_mark_start()),
1648 _calc_cl.region_marked_bytes(), hr->next_marked_bytes());
1649 }
1651 _failures += failures;
1653 // We could stop iteration over the heap when we
1654 // find the first violating region by returning true.
1655 return false;
1656 }
1657 };
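// Illustrative note (an inference from the checks above, not part of the
// original change): the expected bitmaps are treated as a lower bound, so a
// mismatch is reported only for the (expected && !actual) case:
//
//   expected | actual | verdict
//   ---------+--------+--------------------------------------------
//      0     |   0    | OK
//      0     |   1    | OK (conservative over-counting is tolerated)
//      1     |   0    | failure (the counting data under-counted)
//      1     |   1    | OK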
1659 class G1ParVerifyFinalCountTask: public AbstractGangTask {
1660 protected:
1661 G1CollectedHeap* _g1h;
1662 ConcurrentMark* _cm;
1663 BitMap* _actual_region_bm;
1664 BitMap* _actual_card_bm;
1666 uint _n_workers;
1668 BitMap* _expected_region_bm;
1669 BitMap* _expected_card_bm;
1671 int _failures;
1672 bool _verbose;
1674 public:
1675 G1ParVerifyFinalCountTask(G1CollectedHeap* g1h,
1676 BitMap* region_bm, BitMap* card_bm,
1677 BitMap* expected_region_bm, BitMap* expected_card_bm)
1678 : AbstractGangTask("G1 verify final counting"),
1679 _g1h(g1h), _cm(_g1h->concurrent_mark()),
1680 _actual_region_bm(region_bm), _actual_card_bm(card_bm),
1681 _expected_region_bm(expected_region_bm), _expected_card_bm(expected_card_bm),
1682 _failures(0), _verbose(false),
1683 _n_workers(0) {
1684 assert(VerifyDuringGC, "don't call this otherwise");
1686 // Use the value already set as the number of active threads
1687 // in the call to run_task().
1688 if (G1CollectedHeap::use_parallel_gc_threads()) {
1689 assert( _g1h->workers()->active_workers() > 0,
1690 "Should have been previously set");
1691 _n_workers = _g1h->workers()->active_workers();
1692 } else {
1693 _n_workers = 1;
1694 }
1696 assert(_expected_card_bm->size() == _actual_card_bm->size(), "sanity");
1697 assert(_expected_region_bm->size() == _actual_region_bm->size(), "sanity");
1699 _verbose = _cm->verbose_medium();
1700 }
1702 void work(uint worker_id) {
1703 assert(worker_id < _n_workers, "invariant");
1705 VerifyLiveObjectDataHRClosure verify_cl(_g1h,
1706 _actual_region_bm, _actual_card_bm,
1707 _expected_region_bm,
1708 _expected_card_bm,
1709 _verbose);
1711 if (G1CollectedHeap::use_parallel_gc_threads()) {
1712 _g1h->heap_region_par_iterate_chunked(&verify_cl,
1713 worker_id,
1714 _n_workers,
1715 HeapRegion::VerifyCountClaimValue);
1716 } else {
1717 _g1h->heap_region_iterate(&verify_cl);
1718 }
1720 Atomic::add(verify_cl.failures(), &_failures);
1721 }
1723 int failures() const { return _failures; }
1724 };
1726 // Closure that finalizes the liveness counting data.
1727 // Used during the cleanup pause.
1728 // Sets the bits corresponding to the interval [NTAMS, top]
1729 // (which contains the implicitly live objects) in the
1730 // card liveness bitmap. Also sets the bit for each region,
1731 // containing live data, in the region liveness bitmap.
1733 class FinalCountDataUpdateClosure: public CMCountDataClosureBase {
1734 public:
1735 FinalCountDataUpdateClosure(G1CollectedHeap* g1h,
1736 BitMap* region_bm,
1737 BitMap* card_bm) :
1738 CMCountDataClosureBase(g1h, region_bm, card_bm) { }
1740 bool doHeapRegion(HeapRegion* hr) {
1742 if (hr->continuesHumongous()) {
1743 // We will ignore these here and process them when their
1744 // associated "starts humongous" region is processed (see
1745 // set_bit_for_heap_region()). Note that we cannot rely on their
1746 // associated "starts humongous" region to have its bit set to
1747 // 1 since, due to the region chunking in the parallel region
1748 // iteration, a "continues humongous" region might be visited
1749 // before its associated "starts humongous".
1750 return false;
1751 }
1753 HeapWord* ntams = hr->next_top_at_mark_start();
1754 HeapWord* top = hr->top();
1756 assert(hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions.");
1758 // Mark the allocated-since-marking portion...
1759 if (ntams < top) {
1760 // This definitely means the region has live objects.
1761 set_bit_for_region(hr);
1763 // Now set the bits in the card bitmap for [ntams, top)
1764 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
1765 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
1767 // Note: if we're looking at the last region in the heap, top
1768 // could actually be just beyond the end of the heap; end_idx
1769 // will then correspond to a (non-existent) card that is also
1770 // just beyond the heap.
1771 if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
1772 // end of object is not card aligned - increment to cover
1773 // all the cards spanned by the object
1774 end_idx += 1;
1775 }
1777 assert(end_idx <= _card_bm->size(),
1778 err_msg("oob: end_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT,
1779 end_idx, _card_bm->size()));
1780 assert(start_idx < _card_bm->size(),
1781 err_msg("oob: start_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT,
1782 start_idx, _card_bm->size()));
1784 _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1785 }
1787 // Set the bit for the region if it contains live data
1788 if (hr->next_marked_bytes() > 0) {
1789 set_bit_for_region(hr);
1790 }
1792 return false;
1793 }
1794 };
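// A minimal worked example of the end_idx bump above (assuming the default
// 512-byte G1 card size; illustrative only, not part of the original change):
// card indices are computed by truncation, so a half-open card range must be
// widened by one when its upper bound is not card aligned.
//
//   // ntams = heap_base + 0x000, top = heap_base + 0x310 (784 bytes)
//   // start_idx = 0x000 >> 9 = 0, end_idx = 0x310 >> 9 = 1
//   // top is not card aligned, so end_idx += 1 and cards [0, 2) get set,
//   // covering the live bytes that spill into the second card.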
1796 class G1ParFinalCountTask: public AbstractGangTask {
1797 protected:
1798 G1CollectedHeap* _g1h;
1799 ConcurrentMark* _cm;
1800 BitMap* _actual_region_bm;
1801 BitMap* _actual_card_bm;
1803 uint _n_workers;
1805 public:
1806 G1ParFinalCountTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm)
1807 : AbstractGangTask("G1 final counting"),
1808 _g1h(g1h), _cm(_g1h->concurrent_mark()),
1809 _actual_region_bm(region_bm), _actual_card_bm(card_bm),
1810 _n_workers(0) {
1811 // Use the value already set as the number of active threads
1812 // in the call to run_task().
1813 if (G1CollectedHeap::use_parallel_gc_threads()) {
1814 assert( _g1h->workers()->active_workers() > 0,
1815 "Should have been previously set");
1816 _n_workers = _g1h->workers()->active_workers();
1817 } else {
1818 _n_workers = 1;
1819 }
1820 }
1822 void work(uint worker_id) {
1823 assert(worker_id < _n_workers, "invariant");
1825 FinalCountDataUpdateClosure final_update_cl(_g1h,
1826 _actual_region_bm,
1827 _actual_card_bm);
1829 if (G1CollectedHeap::use_parallel_gc_threads()) {
1830 _g1h->heap_region_par_iterate_chunked(&final_update_cl,
1831 worker_id,
1832 _n_workers,
1833 HeapRegion::FinalCountClaimValue);
1834 } else {
1835 _g1h->heap_region_iterate(&final_update_cl);
1836 }
1837 }
1838 };
1840 class G1ParNoteEndTask;
1842 class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
1843 G1CollectedHeap* _g1;
1844 size_t _max_live_bytes;
1845 uint _regions_claimed;
1846 size_t _freed_bytes;
1847 FreeRegionList* _local_cleanup_list;
1848 HeapRegionSetCount _old_regions_removed;
1849 HeapRegionSetCount _humongous_regions_removed;
1850 HRRSCleanupTask* _hrrs_cleanup_task;
1851 double _claimed_region_time;
1852 double _max_region_time;
1854 public:
1855 G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
1856 FreeRegionList* local_cleanup_list,
1857 HRRSCleanupTask* hrrs_cleanup_task) :
1858 _g1(g1),
1859 _max_live_bytes(0), _regions_claimed(0),
1860 _freed_bytes(0),
1861 _claimed_region_time(0.0), _max_region_time(0.0),
1862 _local_cleanup_list(local_cleanup_list),
1863 _old_regions_removed(),
1864 _humongous_regions_removed(),
1865 _hrrs_cleanup_task(hrrs_cleanup_task) { }
1867 size_t freed_bytes() { return _freed_bytes; }
1868 const HeapRegionSetCount& old_regions_removed() { return _old_regions_removed; }
1869 const HeapRegionSetCount& humongous_regions_removed() { return _humongous_regions_removed; }
1871 bool doHeapRegion(HeapRegion *hr) {
1872 if (hr->continuesHumongous()) {
1873 return false;
1874 }
1875 // We use a claim value of zero here because all regions
1876 // were claimed with value 1 in the FinalCount task.
1877 _g1->reset_gc_time_stamps(hr);
1878 double start = os::elapsedTime();
1879 _regions_claimed++;
1880 hr->note_end_of_marking();
1881 _max_live_bytes += hr->max_live_bytes();
1883 if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
1884 _freed_bytes += hr->used();
1885 hr->set_containing_set(NULL);
1886 if (hr->isHumongous()) {
1887 assert(hr->startsHumongous(), "we should only see starts humongous");
1888 _humongous_regions_removed.increment(1u, hr->capacity());
1889 _g1->free_humongous_region(hr, _local_cleanup_list, true);
1890 } else {
1891 _old_regions_removed.increment(1u, hr->capacity());
1892 _g1->free_region(hr, _local_cleanup_list, true);
1893 }
1894 } else {
1895 hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
1896 }
1898 double region_time = (os::elapsedTime() - start);
1899 _claimed_region_time += region_time;
1900 if (region_time > _max_region_time) {
1901 _max_region_time = region_time;
1902 }
1903 return false;
1904 }
1906 size_t max_live_bytes() { return _max_live_bytes; }
1907 uint regions_claimed() { return _regions_claimed; }
1908 double claimed_region_time_sec() { return _claimed_region_time; }
1909 double max_region_time_sec() { return _max_region_time; }
1910 };
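// Restating the reclamation predicate above as a sketch (no new logic): a
// region is freed at cleanup time only when it is provably all garbage.
//
//   // bool is_all_garbage = hr->used() > 0            // has allocated data
//   //                    && hr->max_live_bytes() == 0 // nothing marked live
//   //                    && !hr->is_young();          // young regions are
//   //                                                 // reclaimed by evacuation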
1912 class G1ParNoteEndTask: public AbstractGangTask {
1913 friend class G1NoteEndOfConcMarkClosure;
1915 protected:
1916 G1CollectedHeap* _g1h;
1917 size_t _max_live_bytes;
1918 size_t _freed_bytes;
1919 FreeRegionList* _cleanup_list;
1921 public:
1922 G1ParNoteEndTask(G1CollectedHeap* g1h,
1923 FreeRegionList* cleanup_list) :
1924 AbstractGangTask("G1 note end"), _g1h(g1h),
1925 _max_live_bytes(0), _freed_bytes(0), _cleanup_list(cleanup_list) { }
1927 void work(uint worker_id) {
1928 double start = os::elapsedTime();
1929 FreeRegionList local_cleanup_list("Local Cleanup List");
1930 HRRSCleanupTask hrrs_cleanup_task;
1931 G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list,
1932 &hrrs_cleanup_task);
1933 if (G1CollectedHeap::use_parallel_gc_threads()) {
1934 _g1h->heap_region_par_iterate_chunked(&g1_note_end, worker_id,
1935 _g1h->workers()->active_workers(),
1936 HeapRegion::NoteEndClaimValue);
1937 } else {
1938 _g1h->heap_region_iterate(&g1_note_end);
1939 }
1940 assert(g1_note_end.complete(), "Shouldn't have yielded!");
1942 // Now update the lists
1943 _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed());
1944 {
1945 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
1946 _g1h->decrement_summary_bytes(g1_note_end.freed_bytes());
1947 _max_live_bytes += g1_note_end.max_live_bytes();
1948 _freed_bytes += g1_note_end.freed_bytes();
1950 // If we iterate over the global cleanup list at the end of
1951 // cleanup to do this printing we will not guarantee to only
1952 // generate output for the newly-reclaimed regions (the list
1953 // might not be empty at the beginning of cleanup; we might
1954 // still be working on its previous contents). So we do the
1955 // printing here, before we append the new regions to the global
1956 // cleanup list.
1958 G1HRPrinter* hr_printer = _g1h->hr_printer();
1959 if (hr_printer->is_active()) {
1960 FreeRegionListIterator iter(&local_cleanup_list);
1961 while (iter.more_available()) {
1962 HeapRegion* hr = iter.get_next();
1963 hr_printer->cleanup(hr);
1964 }
1965 }
1967 _cleanup_list->add_ordered(&local_cleanup_list);
1968 assert(local_cleanup_list.is_empty(), "post-condition");
1970 HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
1971 }
1972 }
1973 size_t max_live_bytes() { return _max_live_bytes; }
1974 size_t freed_bytes() { return _freed_bytes; }
1975 };
1977 class G1ParScrubRemSetTask: public AbstractGangTask {
1978 protected:
1979 G1RemSet* _g1rs;
1980 BitMap* _region_bm;
1981 BitMap* _card_bm;
1982 public:
1983 G1ParScrubRemSetTask(G1CollectedHeap* g1h,
1984 BitMap* region_bm, BitMap* card_bm) :
1985 AbstractGangTask("G1 ScrubRS"), _g1rs(g1h->g1_rem_set()),
1986 _region_bm(region_bm), _card_bm(card_bm) { }
1988 void work(uint worker_id) {
1989 if (G1CollectedHeap::use_parallel_gc_threads()) {
1990 _g1rs->scrub_par(_region_bm, _card_bm, worker_id,
1991 HeapRegion::ScrubRemSetClaimValue);
1992 } else {
1993 _g1rs->scrub(_region_bm, _card_bm);
1994 }
1995 }
1997 };
1999 void ConcurrentMark::cleanup() {
2000 // world is stopped at this checkpoint
2001 assert(SafepointSynchronize::is_at_safepoint(),
2002 "world should be stopped");
2003 G1CollectedHeap* g1h = G1CollectedHeap::heap();
2005 // If a full collection has happened, we shouldn't do this.
2006 if (has_aborted()) {
2007 g1h->set_marking_complete(); // So bitmap clearing isn't confused
2008 return;
2009 }
2011 g1h->verify_region_sets_optional();
2013 if (VerifyDuringGC) {
2014 HandleMark hm; // handle scope
2015 Universe::heap()->prepare_for_verify();
2016 Universe::verify(VerifyOption_G1UsePrevMarking,
2017 " VerifyDuringGC:(before)");
2018 }
2019 g1h->check_bitmaps("Cleanup Start");
2021 G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
2022 g1p->record_concurrent_mark_cleanup_start();
2024 double start = os::elapsedTime();
2026 HeapRegionRemSet::reset_for_cleanup_tasks();
2028 uint n_workers;
2030 // Do counting once more with the world stopped for good measure.
2031 G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm);
2033 if (G1CollectedHeap::use_parallel_gc_threads()) {
2034 assert(g1h->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
2035 "sanity check");
2037 g1h->set_par_threads();
2038 n_workers = g1h->n_par_threads();
2039 assert(g1h->n_par_threads() == n_workers,
2040 "Should not have been reset");
2041 g1h->workers()->run_task(&g1_par_count_task);
2042 // Done with the parallel phase so reset to 0.
2043 g1h->set_par_threads(0);
2045 assert(g1h->check_heap_region_claim_values(HeapRegion::FinalCountClaimValue),
2046 "sanity check");
2047 } else {
2048 n_workers = 1;
2049 g1_par_count_task.work(0);
2050 }
2052 if (VerifyDuringGC) {
2053 // Verify that the counting data accumulated during marking matches
2054 // that calculated by walking the marking bitmap.
2056 // Bitmaps to hold expected values
2057 BitMap expected_region_bm(_region_bm.size(), true);
2058 BitMap expected_card_bm(_card_bm.size(), true);
2060 G1ParVerifyFinalCountTask g1_par_verify_task(g1h,
2061 &_region_bm,
2062 &_card_bm,
2063 &expected_region_bm,
2064 &expected_card_bm);
2066 if (G1CollectedHeap::use_parallel_gc_threads()) {
2067 g1h->set_par_threads((int)n_workers);
2068 g1h->workers()->run_task(&g1_par_verify_task);
2069 // Done with the parallel phase so reset to 0.
2070 g1h->set_par_threads(0);
2072 assert(g1h->check_heap_region_claim_values(HeapRegion::VerifyCountClaimValue),
2073 "sanity check");
2074 } else {
2075 g1_par_verify_task.work(0);
2076 }
2078 guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures");
2079 }
2081 size_t start_used_bytes = g1h->used();
2082 g1h->set_marking_complete();
2084 double count_end = os::elapsedTime();
2085 double this_final_counting_time = (count_end - start);
2086 _total_counting_time += this_final_counting_time;
2088 if (G1PrintRegionLivenessInfo) {
2089 G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking");
2090 _g1h->heap_region_iterate(&cl);
2091 }
2093 // Install newly created mark bitMap as "prev".
2094 swapMarkBitMaps();
2096 g1h->reset_gc_time_stamp();
2098 // Note end of marking in all heap regions.
2099 G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list);
2100 if (G1CollectedHeap::use_parallel_gc_threads()) {
2101 g1h->set_par_threads((int)n_workers);
2102 g1h->workers()->run_task(&g1_par_note_end_task);
2103 g1h->set_par_threads(0);
2105 assert(g1h->check_heap_region_claim_values(HeapRegion::NoteEndClaimValue),
2106 "sanity check");
2107 } else {
2108 g1_par_note_end_task.work(0);
2109 }
2110 g1h->check_gc_time_stamps();
2112 if (!cleanup_list_is_empty()) {
2113 // The cleanup list is not empty, so we'll have to process it
2114 // concurrently. Notify anyone else that might be wanting free
2115 // regions that there will be more free regions coming soon.
2116 g1h->set_free_regions_coming();
2117 }
2119 // Scrub the rem sets before the record_concurrent_mark_cleanup_end() call
2120 // below, since the scrubbing affects the metric by which we sort the heap regions.
2121 if (G1ScrubRemSets) {
2122 double rs_scrub_start = os::elapsedTime();
2123 G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm);
2124 if (G1CollectedHeap::use_parallel_gc_threads()) {
2125 g1h->set_par_threads((int)n_workers);
2126 g1h->workers()->run_task(&g1_par_scrub_rs_task);
2127 g1h->set_par_threads(0);
2129 assert(g1h->check_heap_region_claim_values(
2130 HeapRegion::ScrubRemSetClaimValue),
2131 "sanity check");
2132 } else {
2133 g1_par_scrub_rs_task.work(0);
2134 }
2136 double rs_scrub_end = os::elapsedTime();
2137 double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start);
2138 _total_rs_scrub_time += this_rs_scrub_time;
2139 }
2141 // this will also free any regions totally full of garbage objects,
2142 // and sort the regions.
2143 g1h->g1_policy()->record_concurrent_mark_cleanup_end((int)n_workers);
2145 // Statistics.
2146 double end = os::elapsedTime();
2147 _cleanup_times.add((end - start) * 1000.0);
2149 if (G1Log::fine()) {
2150 g1h->print_size_transition(gclog_or_tty,
2151 start_used_bytes,
2152 g1h->used(),
2153 g1h->capacity());
2154 }
2156 // Clean up will have freed any regions completely full of garbage.
2157 // Update the soft reference policy with the new heap occupancy.
2158 Universe::update_heap_info_at_gc();
2160 if (VerifyDuringGC) {
2161 HandleMark hm; // handle scope
2162 Universe::heap()->prepare_for_verify();
2163 Universe::verify(VerifyOption_G1UsePrevMarking,
2164 " VerifyDuringGC:(after)");
2165 }
2166 g1h->check_bitmaps("Cleanup End");
2168 g1h->verify_region_sets_optional();
2170 // We need to make this be a "collection" so any collection pause that
2171 // races with it goes around and waits for completeCleanup to finish.
2172 g1h->increment_total_collections();
2174 // Clean out dead classes and update Metaspace sizes.
2175 if (ClassUnloadingWithConcurrentMark) {
2176 ClassLoaderDataGraph::purge();
2177 }
2178 MetaspaceGC::compute_new_size();
2180 // We reclaimed old regions so we should calculate the sizes to make
2181 // sure we update the old gen/space data.
2182 g1h->g1mm()->update_sizes();
2184 g1h->trace_heap_after_concurrent_cycle();
2185 }
2187 void ConcurrentMark::completeCleanup() {
2188 if (has_aborted()) return;
2190 G1CollectedHeap* g1h = G1CollectedHeap::heap();
2192 _cleanup_list.verify_optional();
2193 FreeRegionList tmp_free_list("Tmp Free List");
2195 if (G1ConcRegionFreeingVerbose) {
2196 gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
2197 "cleanup list has %u entries",
2198 _cleanup_list.length());
2199 }
2201 // No one else should be accessing the _cleanup_list at this point,
2202 // so it is not necessary to take any locks
2203 while (!_cleanup_list.is_empty()) {
2204 HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
2205 assert(hr != NULL, "Got NULL from a non-empty list");
2206 hr->par_clear();
2207 tmp_free_list.add_ordered(hr);
2209 // Instead of adding one region at a time to the secondary_free_list,
2210 // we accumulate them in the local list and move them a few at a
2211 // time. This also cuts down on the number of notify_all() calls
2212 // we do during this process. We'll also append the local list when
2213 // _cleanup_list is empty (which means we just removed the last
2214 // region from the _cleanup_list).
2215 if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
2216 _cleanup_list.is_empty()) {
2217 if (G1ConcRegionFreeingVerbose) {
2218 gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
2219 "appending %u entries to the secondary_free_list, "
2220 "cleanup list still has %u entries",
2221 tmp_free_list.length(),
2222 _cleanup_list.length());
2223 }
2225 {
2226 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
2227 g1h->secondary_free_list_add(&tmp_free_list);
2228 SecondaryFreeList_lock->notify_all();
2229 }
2231 if (G1StressConcRegionFreeing) {
2232 for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
2233 os::sleep(Thread::current(), (jlong) 1, false);
2234 }
2235 }
2236 }
2237 }
2238 assert(tmp_free_list.is_empty(), "post-condition");
2239 }
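// The loop above follows a general batched-publication pattern (sketched here
// with a hypothetical publish() helper and batch size N; not code from this
// change): accumulate locally, publish every N items while holding the lock,
// and notify waiters once per batch rather than once per region.
//
//   // while (work_remains()) {
//   //   local_list.add_ordered(next_region());
//   //   if (local_list.length() % N == 0 || !work_remains()) {
//   //     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
//   //     publish(&local_list);              // hypothetical helper
//   //     SecondaryFreeList_lock->notify_all();
//   //   }
//   // }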
2241 // Supporting Object and Oop closures for reference discovery
2242 // and processing during marking.
2244 bool G1CMIsAliveClosure::do_object_b(oop obj) {
2245 HeapWord* addr = (HeapWord*)obj;
2246 return addr != NULL &&
2247 (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
2248 }
2250 // 'Keep Alive' oop closure used by both serial and parallel reference processing.
2251 // Uses the CMTask associated with a worker thread (for serial reference
2252 // processing the CMTask for worker 0 is used) to preserve (mark) and
2253 // trace referent objects.
2254 //
2255 // Using the CMTask and embedded local queues avoids having the worker
2256 // threads operating on the global mark stack. This reduces the risk
2257 // of overflowing the stack - which we would rather avoid at this late
2258 // stage. Also using the tasks' local queues removes the potential
2259 // of the workers interfering with each other that could occur if
2260 // operating on the global stack.
2262 class G1CMKeepAliveAndDrainClosure: public OopClosure {
2263 ConcurrentMark* _cm;
2264 CMTask* _task;
2265 int _ref_counter_limit;
2266 int _ref_counter;
2267 bool _is_serial;
2268 public:
2269 G1CMKeepAliveAndDrainClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
2270 _cm(cm), _task(task), _is_serial(is_serial),
2271 _ref_counter_limit(G1RefProcDrainInterval) {
2272 assert(_ref_counter_limit > 0, "sanity");
2273 assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
2274 _ref_counter = _ref_counter_limit;
2275 }
2277 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
2278 virtual void do_oop( oop* p) { do_oop_work(p); }
2280 template <class T> void do_oop_work(T* p) {
2281 if (!_cm->has_overflown()) {
2282 oop obj = oopDesc::load_decode_heap_oop(p);
2283 if (_cm->verbose_high()) {
2284 gclog_or_tty->print_cr("\t[%u] we're looking at location "
2285 "*"PTR_FORMAT" = "PTR_FORMAT,
2286 _task->worker_id(), p2i(p), p2i((void*) obj));
2287 }
2289 _task->deal_with_reference(obj);
2290 _ref_counter--;
2292 if (_ref_counter == 0) {
2293 // We have dealt with _ref_counter_limit references, pushing them
2294 // and objects reachable from them on to the local stack (and
2295 // possibly the global stack). Call CMTask::do_marking_step() to
2296 // process these entries.
2297 //
2298 // We call CMTask::do_marking_step() in a loop, which we'll exit if
2299 // there's nothing more to do (i.e. we're done with the entries that
2300 // were pushed as a result of the CMTask::deal_with_reference() calls
2301 // above) or we overflow.
2302 //
2303 // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
2304 // flag while there may still be some work to do. (See the comment at
2305 // the beginning of CMTask::do_marking_step() for those conditions -
2306 // one of which is reaching the specified time target.) It is only
2307 // when CMTask::do_marking_step() returns without setting the
2308 // has_aborted() flag that the marking step has completed.
2309 do {
2310 double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
2311 _task->do_marking_step(mark_step_duration_ms,
2312 false /* do_termination */,
2313 _is_serial);
2314 } while (_task->has_aborted() && !_cm->has_overflown());
2315 _ref_counter = _ref_counter_limit;
2316 }
2317 } else {
2318 if (_cm->verbose_high()) {
2319 gclog_or_tty->print_cr("\t[%u] CM Overflow", _task->worker_id());
2320 }
2321 }
2322 }
2323 };
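// The _ref_counter logic above implements a periodic-drain pattern, restated
// here as a sketch (not new code): after every G1RefProcDrainInterval calls
// to deal_with_reference(), the closure pauses and drains what those calls
// pushed, bounding how much the local and global mark stacks can grow.
//
//   // _ref_counter = G1RefProcDrainInterval;
//   // for each discovered reference p:
//   //   _task->deal_with_reference(p);            // push p's referent for tracing
//   //   if (--_ref_counter == 0) {
//   //     call do_marking_step() in a loop until done or overflown;
//   //     _ref_counter = G1RefProcDrainInterval;  // begin the next interval
//   //   }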
2325 // 'Drain' oop closure used by both serial and parallel reference processing.
2326 // Uses the CMTask associated with a given worker thread (for serial
2327 // reference processing the CMTask for worker 0 is used). Calls the
2328 // do_marking_step routine, with an unbelievably large timeout value,
2329 // to drain the marking data structures of the remaining entries
2330 // added by the 'keep alive' oop closure above.
2332 class G1CMDrainMarkingStackClosure: public VoidClosure {
2333 ConcurrentMark* _cm;
2334 CMTask* _task;
2335 bool _is_serial;
2336 public:
2337 G1CMDrainMarkingStackClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
2338 _cm(cm), _task(task), _is_serial(is_serial) {
2339 assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
2340 }
2342 void do_void() {
2343 do {
2344 if (_cm->verbose_high()) {
2345 gclog_or_tty->print_cr("\t[%u] Drain: Calling do_marking_step - serial: %s",
2346 _task->worker_id(), BOOL_TO_STR(_is_serial));
2347 }
2349 // We call CMTask::do_marking_step() to completely drain the local
2350 // and global marking stacks of entries pushed by the 'keep alive'
2351 // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
2352 //
2353 // CMTask::do_marking_step() is called in a loop, which we'll exit
2354 // if there's nothing more to do (i.e. we've completely drained the
2355 // entries that were pushed as a result of applying the 'keep alive'
2356 // closure to the entries on the discovered ref lists) or we overflow
2357 // the global marking stack.
2358 //
2359 // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
2360 // flag while there may still be some work to do. (See the comment at
2361 // the beginning of CMTask::do_marking_step() for those conditions -
2362 // one of which is reaching the specified time target.) It is only
2363 // when CMTask::do_marking_step() returns without setting the
2364 // has_aborted() flag that the marking step has completed.
2366 _task->do_marking_step(1000000000.0 /* something very large */,
2367 true /* do_termination */,
2368 _is_serial);
2369 } while (_task->has_aborted() && !_cm->has_overflown());
2370 }
2371 };
2373 // Implementation of AbstractRefProcTaskExecutor for parallel
2374 // reference processing at the end of G1 concurrent marking
2376 class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
2377 private:
2378 G1CollectedHeap* _g1h;
2379 ConcurrentMark* _cm;
2380 WorkGang* _workers;
2381 int _active_workers;
2383 public:
2384 G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
2385 ConcurrentMark* cm,
2386 WorkGang* workers,
2387 int n_workers) :
2388 _g1h(g1h), _cm(cm),
2389 _workers(workers), _active_workers(n_workers) { }
2391 // Executes the given task using concurrent marking worker threads.
2392 virtual void execute(ProcessTask& task);
2393 virtual void execute(EnqueueTask& task);
2394 };
2396 class G1CMRefProcTaskProxy: public AbstractGangTask {
2397 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
2398 ProcessTask& _proc_task;
2399 G1CollectedHeap* _g1h;
2400 ConcurrentMark* _cm;
2402 public:
2403 G1CMRefProcTaskProxy(ProcessTask& proc_task,
2404 G1CollectedHeap* g1h,
2405 ConcurrentMark* cm) :
2406 AbstractGangTask("Process reference objects in parallel"),
2407 _proc_task(proc_task), _g1h(g1h), _cm(cm) {
2408 ReferenceProcessor* rp = _g1h->ref_processor_cm();
2409 assert(rp->processing_is_mt(), "shouldn't be here otherwise");
2410 }
2412 virtual void work(uint worker_id) {
2413 ResourceMark rm;
2414 HandleMark hm;
2415 CMTask* task = _cm->task(worker_id);
2416 G1CMIsAliveClosure g1_is_alive(_g1h);
2417 G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
2418 G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */);
2420 _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
2421 }
2422 };
2424 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
2425 assert(_workers != NULL, "Need parallel worker threads.");
2426 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
2428 G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);
2430 // We need to reset the concurrency level before each
2431 // proxy task execution, so that the termination protocol
2432 // and overflow handling in CMTask::do_marking_step() knows
2433 // how many workers to wait for.
2434 _cm->set_concurrency(_active_workers);
2435 _g1h->set_par_threads(_active_workers);
2436 _workers->run_task(&proc_task_proxy);
2437 _g1h->set_par_threads(0);
2438 }
2440 class G1CMRefEnqueueTaskProxy: public AbstractGangTask {
2441 typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
2442 EnqueueTask& _enq_task;
2444 public:
2445 G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) :
2446 AbstractGangTask("Enqueue reference objects in parallel"),
2447 _enq_task(enq_task) { }
2449 virtual void work(uint worker_id) {
2450 _enq_task.work(worker_id);
2451 }
2452 };
2454 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
2455 assert(_workers != NULL, "Need parallel worker threads.");
2456 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
2458 G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);
2460 // Not strictly necessary but...
2461 //
2462 // We need to reset the concurrency level before each
2463 // proxy task execution, so that the termination protocol
2464 // and overflow handling in CMTask::do_marking_step() knows
2465 // how many workers to wait for.
2466 _cm->set_concurrency(_active_workers);
2467 _g1h->set_par_threads(_active_workers);
2468 _workers->run_task(&enq_task_proxy);
2469 _g1h->set_par_threads(0);
2470 }
2472 void ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) {
2473 G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes);
2474 }
2476 // Helper class to get rid of some boilerplate code.
2477 class G1RemarkGCTraceTime : public GCTraceTime {
2478 static bool doit_and_prepend(bool doit) {
2479 if (doit) {
2480 gclog_or_tty->put(' ');
2481 }
2482 return doit;
2483 }
2485 public:
2486 G1RemarkGCTraceTime(const char* title, bool doit)
2487 : GCTraceTime(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm(),
2488 G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id()) {
2489 }
2490 };
2492 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
2493 if (has_overflown()) {
2494 // Skip processing the discovered references if we have
2495 // overflown the global marking stack. Reference objects
2496 // only get discovered once so it is OK to not
2497 // de-populate the discovered reference lists. We could have,
2498 // but the only benefit would be that, when marking restarts,
2499 // fewer reference objects are discovered.
2500 return;
2501 }
2503 ResourceMark rm;
2504 HandleMark hm;
2506 G1CollectedHeap* g1h = G1CollectedHeap::heap();
2508 // Is alive closure.
2509 G1CMIsAliveClosure g1_is_alive(g1h);
2511 // Inner scope to exclude the cleaning of the string and symbol
2512 // tables from the displayed time.
2513 {
2514 if (G1Log::finer()) {
2515 gclog_or_tty->put(' ');
2516 }
2517 GCTraceTime t("GC ref-proc", G1Log::finer(), false, g1h->gc_timer_cm(), concurrent_gc_id());
2519 ReferenceProcessor* rp = g1h->ref_processor_cm();
2521 // See the comment in G1CollectedHeap::ref_processing_init()
2522 // about how reference processing currently works in G1.
2524 // Set the soft reference policy
2525 rp->setup_policy(clear_all_soft_refs);
2526 assert(_markStack.isEmpty(), "mark stack should be empty");
2528 // Instances of the 'Keep Alive' and 'Complete GC' closures used
2529 // in serial reference processing. Note these closures are also
2530 // used for serially processing (by the current thread) the
2531 // JNI references during parallel reference processing.
2532 //
2533 // These closures do not need to synchronize with the worker
2534 // threads involved in parallel reference processing as these
2535 // instances are executed serially by the current thread (e.g.
2536 // reference processing is not multi-threaded and is thus
2537 // performed by the current thread instead of a gang worker).
2538 //
2539 // The gang tasks involved in parallel reference processing create
2540 // their own instances of these closures, which do their own
2541 // synchronization among themselves.
2542 G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
2543 G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);
2545 // We need at least one active thread. If reference processing
2546 // is not multi-threaded we use the current (VMThread) thread,
2547 // otherwise we use the work gang from the G1CollectedHeap and
2548 // we utilize all the worker threads we can.
2549 bool processing_is_mt = rp->processing_is_mt() && g1h->workers() != NULL;
2550 uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U);
2551 active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);
2553 // Parallel processing task executor.
2554 G1CMRefProcTaskExecutor par_task_executor(g1h, this,
2555 g1h->workers(), active_workers);
2556 AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
2558 // Set the concurrency level. The phase was already set prior to
2559 // executing the remark task.
2560 set_concurrency(active_workers);
2562 // Set the degree of MT processing here. If the discovery was done MT,
2563 // the number of threads involved during discovery could differ from
2564 // the number of active workers. This is OK as long as the discovered
2565 // Reference lists are balanced (see balance_all_queues() and balance_queues()).
2566 rp->set_active_mt_degree(active_workers);
2568 // Process the weak references.
2569 const ReferenceProcessorStats& stats =
2570 rp->process_discovered_references(&g1_is_alive,
2571 &g1_keep_alive,
2572 &g1_drain_mark_stack,
2573 executor,
2574 g1h->gc_timer_cm(),
2575 concurrent_gc_id());
2576 g1h->gc_tracer_cm()->report_gc_reference_stats(stats);
2578 // The do_oop work routines of the keep_alive and drain_marking_stack
2579 // oop closures will set the has_overflown flag if we overflow the
2580 // global marking stack.
2582 assert(_markStack.overflow() || _markStack.isEmpty(),
2583 "mark stack should be empty (unless it overflowed)");
2585 if (_markStack.overflow()) {
2586 // This should have been done already when we tried to push an
2587 // entry on to the global mark stack. But let's do it again.
2588 set_has_overflown();
2589 }
2591 assert(rp->num_q() == active_workers, "why not");
2593 rp->enqueue_discovered_references(executor);
2595 rp->verify_no_references_recorded();
2596 assert(!rp->discovery_enabled(), "Post condition");
2597 }
2599 if (has_overflown()) {
2600 // We cannot trust g1_is_alive if the marking stack overflowed.
2601 return;
2602 }
2604 assert(_markStack.isEmpty(), "Marking should have completed");
2606 // Unload Klasses, String, Symbols, Code Cache, etc.
2607 {
2608 G1RemarkGCTraceTime trace("Unloading", G1Log::finer());
2610 if (ClassUnloadingWithConcurrentMark) {
2611 bool purged_classes;
2613 {
2614 G1RemarkGCTraceTime trace("System Dictionary Unloading", G1Log::finest());
2615 purged_classes = SystemDictionary::do_unloading(&g1_is_alive);
2616 }
2618 {
2619 G1RemarkGCTraceTime trace("Parallel Unloading", G1Log::finest());
2620 weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
2621 }
2622 }
2624 if (G1StringDedup::is_enabled()) {
2625 G1RemarkGCTraceTime trace("String Deduplication Unlink", G1Log::finest());
2626 G1StringDedup::unlink(&g1_is_alive);
2627 }
2628 }
2629 }
2631 void ConcurrentMark::swapMarkBitMaps() {
2632 CMBitMapRO* temp = _prevMarkBitMap;
2633 _prevMarkBitMap = (CMBitMapRO*)_nextMarkBitMap;
2634 _nextMarkBitMap = (CMBitMap*) temp;
2635 }
2637 class CMObjectClosure;
2639 // Closure for iterating over objects, currently only used for
2640 // processing SATB buffers.
2641 class CMObjectClosure : public ObjectClosure {
2642 private:
2643 CMTask* _task;
2645 public:
2646 void do_object(oop obj) {
2647 _task->deal_with_reference(obj);
2648 }
2650 CMObjectClosure(CMTask* task) : _task(task) { }
2651 };
2653 class G1RemarkThreadsClosure : public ThreadClosure {
2654 CMObjectClosure _cm_obj;
2655 G1CMOopClosure _cm_cl;
2656 MarkingCodeBlobClosure _code_cl;
2657 int _thread_parity;
2658 bool _is_par;
2660 public:
2661 G1RemarkThreadsClosure(G1CollectedHeap* g1h, CMTask* task, bool is_par) :
2662 _cm_obj(task), _cm_cl(g1h, g1h->concurrent_mark(), task), _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
2663 _thread_parity(SharedHeap::heap()->strong_roots_parity()), _is_par(is_par) {}
2665 void do_thread(Thread* thread) {
2666 if (thread->is_Java_thread()) {
2667 if (thread->claim_oops_do(_is_par, _thread_parity)) {
2668 JavaThread* jt = (JavaThread*)thread;
2670 // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking;
2671 // however, oops reachable from nmethods have very complex lifecycles:
2672 // * Alive if on the stack of an executing method
2673 // * Weakly reachable otherwise
2674 // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver, should be
2675 // live by the SATB invariant, but other oops recorded in nmethods may behave differently.
2676 jt->nmethods_do(&_code_cl);
2678 jt->satb_mark_queue().apply_closure_and_empty(&_cm_obj);
2679 }
2680 } else if (thread->is_VM_thread()) {
2681 if (thread->claim_oops_do(_is_par, _thread_parity)) {
2682 JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_obj);
2683 }
2684 }
2685 }
2686 };
2688 class CMRemarkTask: public AbstractGangTask {
2689 private:
2690 ConcurrentMark* _cm;
2691 bool _is_serial;
2692 public:
2693 void work(uint worker_id) {
2694 // Since all available tasks are actually started, we should
2695 // only proceed if we're supposed to be active.
2696 if (worker_id < _cm->active_tasks()) {
2697 CMTask* task = _cm->task(worker_id);
2698 task->record_start_time();
2699 {
2700 ResourceMark rm;
2701 HandleMark hm;
2703 G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task, !_is_serial);
2704 Threads::threads_do(&threads_f);
2705 }
2707 do {
2708 task->do_marking_step(1000000000.0 /* something very large */,
2709 true /* do_termination */,
2710 _is_serial);
2711 } while (task->has_aborted() && !_cm->has_overflown());
2712 // If we overflow, then we do not want to restart. We instead
2713 // want to abort remark and do concurrent marking again.
2714 task->record_end_time();
2715 }
2716 }
2718 CMRemarkTask(ConcurrentMark* cm, int active_workers, bool is_serial) :
2719 AbstractGangTask("Par Remark"), _cm(cm), _is_serial(is_serial) {
2720 _cm->terminator()->reset_for_reuse(active_workers);
2721 }
2722 };
2724 void ConcurrentMark::checkpointRootsFinalWork() {
2725 ResourceMark rm;
2726 HandleMark hm;
2727 G1CollectedHeap* g1h = G1CollectedHeap::heap();
2729 G1RemarkGCTraceTime trace("Finalize Marking", G1Log::finer());
2731 g1h->ensure_parsability(false);
2733 if (G1CollectedHeap::use_parallel_gc_threads()) {
2734 G1CollectedHeap::StrongRootsScope srs(g1h);
2735 // this is remark, so we'll use up all active threads
2736 uint active_workers = g1h->workers()->active_workers();
2737 if (active_workers == 0) {
2738 assert(active_workers > 0, "Should have been set earlier");
2739 active_workers = (uint) ParallelGCThreads;
2740 g1h->workers()->set_active_workers(active_workers);
2741 }
2742 set_concurrency_and_phase(active_workers, false /* concurrent */);
2743 // Leave _parallel_marking_threads at its
2744 // value originally calculated in the ConcurrentMark
2745 // constructor and pass values of the active workers
2746 // through the gang in the task.
2748 CMRemarkTask remarkTask(this, active_workers, false /* is_serial */);
2749 // We will start all available threads, even if we decide that the
2750 // active_workers will be fewer. The extra ones will just bail out
2751 // immediately.
2752 g1h->set_par_threads(active_workers);
2753 g1h->workers()->run_task(&remarkTask);
2754 g1h->set_par_threads(0);
2755 } else {
2756 G1CollectedHeap::StrongRootsScope srs(g1h);
2757 uint active_workers = 1;
2758 set_concurrency_and_phase(active_workers, false /* concurrent */);
2760 // Note - if there's no work gang then the VMThread will be
2761 // the thread to execute the remark - serially. We have
2762 // to pass true for the is_serial parameter so that
2763 // CMTask::do_marking_step() doesn't enter the sync
2764 // barriers in the event of an overflow. Doing so will
2765 // cause an assert that the current thread is not a
2766 // concurrent GC thread.
2767 CMRemarkTask remarkTask(this, active_workers, true /* is_serial*/);
2768 remarkTask.work(0);
2769 }
2770 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2771 guarantee(has_overflown() ||
2772 satb_mq_set.completed_buffers_num() == 0,
2773 err_msg("Invariant: has_overflown = %s, num buffers = %d",
2774 BOOL_TO_STR(has_overflown()),
2775 satb_mq_set.completed_buffers_num()));
2777 print_stats();
2778 }
2780 #ifndef PRODUCT
2782 class PrintReachableOopClosure: public OopClosure {
2783 private:
2784 G1CollectedHeap* _g1h;
2785 outputStream* _out;
2786 VerifyOption _vo;
2787 bool _all;
2789 public:
2790 PrintReachableOopClosure(outputStream* out,
2791 VerifyOption vo,
2792 bool all) :
2793 _g1h(G1CollectedHeap::heap()),
2794 _out(out), _vo(vo), _all(all) { }
2796 void do_oop(narrowOop* p) { do_oop_work(p); }
2797 void do_oop( oop* p) { do_oop_work(p); }
2799 template <class T> void do_oop_work(T* p) {
2800 oop obj = oopDesc::load_decode_heap_oop(p);
2801 const char* str = NULL;
2802 const char* str2 = "";
2804 if (obj == NULL) {
2805 str = "";
2806 } else if (!_g1h->is_in_g1_reserved(obj)) {
2807 str = " O";
2808 } else {
2809 HeapRegion* hr = _g1h->heap_region_containing(obj);
2810 bool over_tams = _g1h->allocated_since_marking(obj, hr, _vo);
2811 bool marked = _g1h->is_marked(obj, _vo);
2813 if (over_tams) {
2814 str = " >";
2815 if (marked) {
2816 str2 = " AND MARKED";
2817 }
2818 } else if (marked) {
2819 str = " M";
2820 } else {
2821 str = " NOT";
2822 }
2823 }
2825 _out->print_cr(" "PTR_FORMAT": "PTR_FORMAT"%s%s",
2826 p2i(p), p2i((void*) obj), str, str2);
2827 }
2828 };
2830 class PrintReachableObjectClosure : public ObjectClosure {
2831 private:
2832 G1CollectedHeap* _g1h;
2833 outputStream* _out;
2834 VerifyOption _vo;
2835 bool _all;
2836 HeapRegion* _hr;
2838 public:
2839 PrintReachableObjectClosure(outputStream* out,
2840 VerifyOption vo,
2841 bool all,
2842 HeapRegion* hr) :
2843 _g1h(G1CollectedHeap::heap()),
2844 _out(out), _vo(vo), _all(all), _hr(hr) { }
2846 void do_object(oop o) {
2847 bool over_tams = _g1h->allocated_since_marking(o, _hr, _vo);
2848 bool marked = _g1h->is_marked(o, _vo);
2849 bool print_it = _all || over_tams || marked;
2851 if (print_it) {
2852 _out->print_cr(" "PTR_FORMAT"%s",
2853 p2i((void *)o), (over_tams) ? " >" : (marked) ? " M" : "");
2854 PrintReachableOopClosure oopCl(_out, _vo, _all);
2855 o->oop_iterate_no_header(&oopCl);
2856 }
2857 }
2858 };
2860 class PrintReachableRegionClosure : public HeapRegionClosure {
2861 private:
2862 G1CollectedHeap* _g1h;
2863 outputStream* _out;
2864 VerifyOption _vo;
2865 bool _all;
2867 public:
2868 bool doHeapRegion(HeapRegion* hr) {
2869 HeapWord* b = hr->bottom();
2870 HeapWord* e = hr->end();
2871 HeapWord* t = hr->top();
2872 HeapWord* p = _g1h->top_at_mark_start(hr, _vo);
2873 _out->print_cr("** ["PTR_FORMAT", "PTR_FORMAT"] top: "PTR_FORMAT" "
2874 "TAMS: " PTR_FORMAT, p2i(b), p2i(e), p2i(t), p2i(p));
2875 _out->cr();
2877 HeapWord* from = b;
2878 HeapWord* to = t;
2880 if (to > from) {
2881 _out->print_cr("Objects in [" PTR_FORMAT ", " PTR_FORMAT "]", p2i(from), p2i(to));
2882 _out->cr();
2883 PrintReachableObjectClosure ocl(_out, _vo, _all, hr);
2884 hr->object_iterate_mem_careful(MemRegion(from, to), &ocl);
2885 _out->cr();
2886 }
2888 return false;
2889 }
2891 PrintReachableRegionClosure(outputStream* out,
2892 VerifyOption vo,
2893 bool all) :
2894 _g1h(G1CollectedHeap::heap()), _out(out), _vo(vo), _all(all) { }
2895 };
2897 void ConcurrentMark::print_reachable(const char* str,
2898 VerifyOption vo,
2899 bool all) {
2900 gclog_or_tty->cr();
2901 gclog_or_tty->print_cr("== Doing heap dump... ");
2903 if (G1PrintReachableBaseFile == NULL) {
2904 gclog_or_tty->print_cr(" #### error: no base file defined");
2905 return;
2906 }
2908 if (strlen(G1PrintReachableBaseFile) + 1 + strlen(str) >
2909 (JVM_MAXPATHLEN - 1)) {
2910 gclog_or_tty->print_cr(" #### error: file name too long");
2911 return;
2912 }
2914 char file_name[JVM_MAXPATHLEN];
2915 sprintf(file_name, "%s.%s", G1PrintReachableBaseFile, str);
2916 gclog_or_tty->print_cr(" dumping to file %s", file_name);
2918 fileStream fout(file_name);
2919 if (!fout.is_open()) {
2920 gclog_or_tty->print_cr(" #### error: could not open file");
2921 return;
2922 }
2924 outputStream* out = &fout;
2925 out->print_cr("-- USING %s", _g1h->top_at_mark_start_str(vo));
2926 out->cr();
2928 out->print_cr("--- ITERATING OVER REGIONS");
2929 out->cr();
2930 PrintReachableRegionClosure rcl(out, vo, all);
2931 _g1h->heap_region_iterate(&rcl);
2932 out->cr();
2934 gclog_or_tty->print_cr(" done");
2935 gclog_or_tty->flush();
2936 }
2938 #endif // PRODUCT
2940 void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
2941 // Note we are overriding the read-only view of the prev map here, via
2942 // the cast.
2943 ((CMBitMap*)_prevMarkBitMap)->clearRange(mr);
2944 }
2946 void ConcurrentMark::clearRangeNextBitmap(MemRegion mr) {
2947 _nextMarkBitMap->clearRange(mr);
2948 }
2950 void ConcurrentMark::clearRangeBothBitmaps(MemRegion mr) {
2951 clearRangePrevBitmap(mr);
2952 clearRangeNextBitmap(mr);
2953 }
2955 HeapRegion*
2956 ConcurrentMark::claim_region(uint worker_id) {
2957 // "checkpoint" the finger
2958 HeapWord* finger = _finger;
2960 // _heap_end will not change underneath our feet; it only changes at
2961 // yield points.
2962 while (finger < _heap_end) {
2963 assert(_g1h->is_in_g1_reserved(finger), "invariant");
2965 // Note on how this code handles humongous regions. In the
2966 // normal case the finger will reach the start of a "starts
2967 // humongous" (SH) region. Its end will either be the end of the
2968 // last "continues humongous" (CH) region in the sequence, or the
2969 // standard end of the SH region (if the SH is the only region in
2970 // the sequence). That way claim_region() will skip over the CH
2971 // regions. However, there is a subtle race between a CM thread
2972 // executing this method and a mutator thread doing a humongous
2973 // object allocation. The two are not mutually exclusive as the CM
2974 // thread does not need to hold the Heap_lock when it gets
2975 // here. So there is a chance that claim_region() will come across
2976 // a free region that's in the process of becoming a SH or a CH
2977 // region. In the former case, it will either
2978 // a) Miss the update to the region's end, in which case it will
2979 // visit every subsequent CH region, will find their bitmaps
2980 // empty, and do nothing, or
2981 // b) Will observe the update of the region's end (in which case
2982 // it will skip the subsequent CH regions).
2983 // If it comes across a region that suddenly becomes CH, the
2984 // scenario will be similar to b). So, the race between
2985 // claim_region() and a humongous object allocation might force us
2986 // to do a bit of unnecessary work (due to some unnecessary bitmap
2987 // iterations) but it should not introduce any correctness issues.
2988 HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger);
2990 // The above heap_region_containing_raw may return NULL as we always claim
2991 // up to the end of the heap. In this case, just jump to the next region.
2992 HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
2994 // Is the gap between reading the finger and doing the CAS too long?
2995 HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
2996 if (res == finger && curr_region != NULL) {
2997 // we succeeded
2998 HeapWord* bottom = curr_region->bottom();
2999 HeapWord* limit = curr_region->next_top_at_mark_start();
3001 if (verbose_low()) {
3002 gclog_or_tty->print_cr("[%u] curr_region = "PTR_FORMAT" "
3003 "["PTR_FORMAT", "PTR_FORMAT"), "
3004 "limit = "PTR_FORMAT,
3005 worker_id, p2i(curr_region), p2i(bottom), p2i(end), p2i(limit));
3006 }
3008 // Notice that _finger == end cannot be guaranteed here, since
3009 // someone else might have moved the finger even further.
3010 assert(_finger >= end, "the finger should have moved forward");
3012 if (verbose_low()) {
3013 gclog_or_tty->print_cr("[%u] we were successful with region = "
3014 PTR_FORMAT, worker_id, p2i(curr_region));
3015 }
3017 if (limit > bottom) {
3018 if (verbose_low()) {
3019 gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is not empty, "
3020 "returning it ", worker_id, p2i(curr_region));
3021 }
3022 return curr_region;
3023 } else {
3024 assert(limit == bottom,
3025 "the region limit should be at bottom");
3026 if (verbose_low()) {
3027 gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is empty, "
3028 "returning NULL", worker_id, p2i(curr_region));
3029 }
3030 // we return NULL and the caller should try calling
3031 // claim_region() again.
3032 return NULL;
3033 }
3034 } else {
3035 assert(_finger > finger, "the finger should have moved forward");
3036 if (verbose_low()) {
3037 if (curr_region == NULL) {
3038 gclog_or_tty->print_cr("[%u] found uncommitted region, moving finger, "
3039 "global finger = "PTR_FORMAT", "
3040 "our finger = "PTR_FORMAT,
3041 worker_id, p2i(_finger), p2i(finger));
3042 } else {
3043 gclog_or_tty->print_cr("[%u] somebody else moved the finger, "
3044 "global finger = "PTR_FORMAT", "
3045 "our finger = "PTR_FORMAT,
3046 worker_id, p2i(_finger), p2i(finger));
3047 }
3048 }
3050 // read it again
3051 finger = _finger;
3052 }
3053 }
3055 return NULL;
3056 }
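// The claim protocol above is the standard CAS-advance idiom, shown in
// isolation as a sketch (not code from this change): snapshot the shared
// finger, try to swing it forward atomically, and treat a failed exchange as
// "someone else claimed this range".
//
//   // HeapWord* finger = _finger;                              // snapshot
//   // HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
//   // if (res == finger) { /* success: we own [finger, end) */ }
//   // else               { finger = _finger; /* lost the race; retry */ }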
3058 #ifndef PRODUCT
3059 enum VerifyNoCSetOopsPhase {
3060 VerifyNoCSetOopsStack,
3061 VerifyNoCSetOopsQueues,
3062 VerifyNoCSetOopsSATBCompleted,
3063 VerifyNoCSetOopsSATBThread
3064 };
3066 class VerifyNoCSetOopsClosure : public OopClosure, public ObjectClosure {
3067 private:
3068 G1CollectedHeap* _g1h;
3069 VerifyNoCSetOopsPhase _phase;
3070 int _info;
3072 const char* phase_str() {
3073 switch (_phase) {
3074 case VerifyNoCSetOopsStack: return "Stack";
3075 case VerifyNoCSetOopsQueues: return "Queue";
3076 case VerifyNoCSetOopsSATBCompleted: return "Completed SATB Buffers";
3077 case VerifyNoCSetOopsSATBThread: return "Thread SATB Buffers";
3078 default: ShouldNotReachHere();
3079 }
3080 return NULL;
3081 }
3083 void do_object_work(oop obj) {
3084 guarantee(!_g1h->obj_in_cs(obj),
3085 err_msg("obj: "PTR_FORMAT" in CSet, phase: %s, info: %d",
3086 p2i((void*) obj), phase_str(), _info));
3087 }
3089 public:
3090 VerifyNoCSetOopsClosure() : _g1h(G1CollectedHeap::heap()) { }
3092 void set_phase(VerifyNoCSetOopsPhase phase, int info = -1) {
3093 _phase = phase;
3094 _info = info;
3095 }
3097 virtual void do_oop(oop* p) {
3098 oop obj = oopDesc::load_decode_heap_oop(p);
3099 do_object_work(obj);
3100 }
3102 virtual void do_oop(narrowOop* p) {
3103 // We should not come across narrow oops while scanning marking
3104 // stacks and SATB buffers.
3105 ShouldNotReachHere();
3106 }
3108 virtual void do_object(oop obj) {
3109 do_object_work(obj);
3110 }
3111 };
3113 void ConcurrentMark::verify_no_cset_oops(bool verify_stacks,
3114 bool verify_enqueued_buffers,
3115 bool verify_thread_buffers,
3116 bool verify_fingers) {
3117 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
3118 if (!G1CollectedHeap::heap()->mark_in_progress()) {
3119 return;
3120 }
3122 VerifyNoCSetOopsClosure cl;
3124 if (verify_stacks) {
3125 // Verify entries on the global mark stack
3126 cl.set_phase(VerifyNoCSetOopsStack);
3127 _markStack.oops_do(&cl);
3129 // Verify entries on the task queues
3130 for (uint i = 0; i < _max_worker_id; i += 1) {
3131 cl.set_phase(VerifyNoCSetOopsQueues, i);
3132 CMTaskQueue* queue = _task_queues->queue(i);
3133 queue->oops_do(&cl);
3134 }
3135 }
3137 SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
3139 // Verify entries on the enqueued SATB buffers
3140 if (verify_enqueued_buffers) {
3141 cl.set_phase(VerifyNoCSetOopsSATBCompleted);
3142 satb_qs.iterate_completed_buffers_read_only(&cl);
3143 }
3145 // Verify entries on the per-thread SATB buffers
3146 if (verify_thread_buffers) {
3147 cl.set_phase(VerifyNoCSetOopsSATBThread);
3148 satb_qs.iterate_thread_buffers_read_only(&cl);
3149 }
3151 if (verify_fingers) {
3152 // Verify the global finger
3153 HeapWord* global_finger = finger();
3154 if (global_finger != NULL && global_finger < _heap_end) {
3155 // The global finger always points to a heap region boundary. We
3156 // use heap_region_containing_raw() to get the containing region
3157 // given that the global finger could be pointing to a free region
3158 // which subsequently becomes continues humongous. If that
3159 // happens, heap_region_containing() will return the bottom of the
3160 // corresponding starts humongous region and the check below will
3161 // not hold any more.
3162 // Since we always iterate over all regions, we might get a NULL HeapRegion
3163 // here.
3164 HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger);
3165 guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
3166 err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT,
3167 p2i(global_finger), HR_FORMAT_PARAMS(global_hr)));
3168 }
3170 // Verify the task fingers
3171 assert(parallel_marking_threads() <= _max_worker_id, "sanity");
3172 for (int i = 0; i < (int) parallel_marking_threads(); i += 1) {
3173 CMTask* task = _tasks[i];
3174 HeapWord* task_finger = task->finger();
3175 if (task_finger != NULL && task_finger < _heap_end) {
3176 // See above note on the global finger verification.
3177 HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger);
3178 guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
3179 !task_hr->in_collection_set(),
3180 err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT,
3181 p2i(task_finger), HR_FORMAT_PARAMS(task_hr)));
3182 }
3183 }
3184 }
3185 }
3186 #endif // PRODUCT
3188 // Aggregate the counting data that was constructed concurrently
3189 // with marking.
3190 class AggregateCountDataHRClosure: public HeapRegionClosure {
3191 G1CollectedHeap* _g1h;
3192 ConcurrentMark* _cm;
3193 CardTableModRefBS* _ct_bs;
3194 BitMap* _cm_card_bm;
3195 uint _max_worker_id;
3197 public:
3198 AggregateCountDataHRClosure(G1CollectedHeap* g1h,
3199 BitMap* cm_card_bm,
3200 uint max_worker_id) :
3201 _g1h(g1h), _cm(g1h->concurrent_mark()),
3202 _ct_bs((CardTableModRefBS*) (g1h->barrier_set())),
3203 _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { }
3205 bool doHeapRegion(HeapRegion* hr) {
3206 if (hr->continuesHumongous()) {
3207 // We will ignore these here and process them when their
3208 // associated "starts humongous" region is processed.
3209 // Note that we cannot rely on their associated
3210 // "starts humongous" region to have their bit set to 1
3211 // since, due to the region chunking in the parallel region
3212 // iteration, a "continues humongous" region might be visited
3213 // before its associated "starts humongous".
3214 return false;
3215 }
3217 HeapWord* start = hr->bottom();
3218 HeapWord* limit = hr->next_top_at_mark_start();
3219 HeapWord* end = hr->end();
3221 assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(),
3222 err_msg("Preconditions not met - "
3223 "start: "PTR_FORMAT", limit: "PTR_FORMAT", "
3224 "top: "PTR_FORMAT", end: "PTR_FORMAT,
3225 p2i(start), p2i(limit), p2i(hr->top()), p2i(hr->end())));
3227 assert(hr->next_marked_bytes() == 0, "Precondition");
3229 if (start == limit) {
3230 // NTAMS of this region has not been set so nothing to do.
3231 return false;
3232 }
3234 // 'start' should be in the heap.
3235 assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity");
3236 // 'end' *may* be just beyond the end of the heap (if hr is the last region)
3237 assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity");
3239 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
3240 BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit);
3241 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(end);
3243 // If ntams is not card aligned then we bump the card bitmap index
3244 // for limit so that we get all the cards spanned by
3245 // the object ending at ntams.
3246 // Note: if this is the last region in the heap then ntams
3247 // could actually be just beyond the end of the heap;
3248 // limit_idx will then correspond to a (non-existent) card
3249 // that is also outside the heap.
3250 if (_g1h->is_in_g1_reserved(limit) && !_ct_bs->is_card_aligned(limit)) {
3251 limit_idx += 1;
3252 }
3254 assert(limit_idx <= end_idx, "or else use atomics");
3256 // Aggregate the "stripe" in the count data associated with hr.
3257 uint hrs_index = hr->hrs_index();
3258 size_t marked_bytes = 0;
3260 for (uint i = 0; i < _max_worker_id; i += 1) {
3261 size_t* marked_bytes_array = _cm->count_marked_bytes_array_for(i);
3262 BitMap* task_card_bm = _cm->count_card_bitmap_for(i);
3264 // Fetch the marked_bytes in this region for task i and
3265 // add it to the running total for this region.
3266 marked_bytes += marked_bytes_array[hrs_index];
3268 // Now union task i's card bitmap bits [start_idx..limit_idx)
3269 // into the global card bitmap.
3270 BitMap::idx_t scan_idx = task_card_bm->get_next_one_offset(start_idx, limit_idx);
3272 while (scan_idx < limit_idx) {
3273 assert(task_card_bm->at(scan_idx) == true, "should be");
3274 _cm_card_bm->set_bit(scan_idx);
3275 assert(_cm_card_bm->at(scan_idx) == true, "should be");
3277 // BitMap::get_next_one_offset() can handle the case when
3278 // its left_offset parameter is greater than its right_offset
3279 // parameter. It does, however, have an early exit if
3280 // left_offset == right_offset. So let's limit the value
3281 // passed in for left offset here.
3282 BitMap::idx_t next_idx = MIN2(scan_idx + 1, limit_idx);
3283 scan_idx = task_card_bm->get_next_one_offset(next_idx, limit_idx);
3284 }
3285 }
3287 // Update the marked bytes for this region.
3288 hr->add_to_marked_bytes(marked_bytes);
3290 // Next heap region
3291 return false;
3292 }
3293 };
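// Worked example for the limit_idx bump in doHeapRegion() above
// (illustrative only, assuming the usual 512-byte cards, i.e.
// CardTableModRefBS::card_shift == 9): if start is card aligned and
// limit == start + 1000 bytes, then limit_idx - start_idx is
// initially 1000 >> 9 == 1; since limit is not card aligned,
// limit_idx is bumped once more, so both cards [start, start + 512)
// and [start + 512, start + 1024) get aggregated into _cm_card_bm.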
3295 class G1AggregateCountDataTask: public AbstractGangTask {
3296 protected:
3297 G1CollectedHeap* _g1h;
3298 ConcurrentMark* _cm;
3299 BitMap* _cm_card_bm;
3300 uint _max_worker_id;
3301 int _active_workers;
3303 public:
3304 G1AggregateCountDataTask(G1CollectedHeap* g1h,
3305 ConcurrentMark* cm,
3306 BitMap* cm_card_bm,
3307 uint max_worker_id,
3308 int n_workers) :
3309 AbstractGangTask("Count Aggregation"),
3310 _g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm),
3311 _max_worker_id(max_worker_id),
3312 _active_workers(n_workers) { }
3314 void work(uint worker_id) {
3315 AggregateCountDataHRClosure cl(_g1h, _cm_card_bm, _max_worker_id);
3317 if (G1CollectedHeap::use_parallel_gc_threads()) {
3318 _g1h->heap_region_par_iterate_chunked(&cl, worker_id,
3319 _active_workers,
3320 HeapRegion::AggregateCountClaimValue);
3321 } else {
3322 _g1h->heap_region_iterate(&cl);
3323 }
3324 }
3325 };
3328 void ConcurrentMark::aggregate_count_data() {
3329 int n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
3330 _g1h->workers()->active_workers() :
3331 1);
3333 G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm,
3334 _max_worker_id, n_workers);
3336 if (G1CollectedHeap::use_parallel_gc_threads()) {
3337 assert(_g1h->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
3338 "sanity check");
3339 _g1h->set_par_threads(n_workers);
3340 _g1h->workers()->run_task(&g1_par_agg_task);
3341 _g1h->set_par_threads(0);
3343 assert(_g1h->check_heap_region_claim_values(HeapRegion::AggregateCountClaimValue),
3344 "sanity check");
3345 _g1h->reset_heap_region_claim_values();
3346 } else {
3347 g1_par_agg_task.work(0);
3348 }
3349 }
3351 // Clear the per-worker arrays used to store the per-region counting data
3352 void ConcurrentMark::clear_all_count_data() {
3353 // Clear the global card bitmap - it will be filled during
3354 // liveness count aggregation (during remark) and the
3355 // final counting task.
3356 _card_bm.clear();
3358 // Clear the global region bitmap - it will be filled as part
3359 // of the final counting task.
3360 _region_bm.clear();
3362 uint max_regions = _g1h->max_regions();
3363 assert(_max_worker_id > 0, "uninitialized");
3365 for (uint i = 0; i < _max_worker_id; i += 1) {
3366 BitMap* task_card_bm = count_card_bitmap_for(i);
3367 size_t* marked_bytes_array = count_marked_bytes_array_for(i);
3369 assert(task_card_bm->size() == _card_bm.size(), "size mismatch");
3370 assert(marked_bytes_array != NULL, "uninitialized");
3372 memset(marked_bytes_array, 0, (size_t) max_regions * sizeof(size_t));
3373 task_card_bm->clear();
3374 }
3375 }
3377 void ConcurrentMark::print_stats() {
3378 if (verbose_stats()) {
3379 gclog_or_tty->print_cr("---------------------------------------------------------------------");
3380 for (size_t i = 0; i < _active_tasks; ++i) {
3381 _tasks[i]->print_stats();
3382 gclog_or_tty->print_cr("---------------------------------------------------------------------");
3383 }
3384 }
3385 }
3387 // abandon current marking iteration due to a Full GC
3388 void ConcurrentMark::abort() {
3389 // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
3390 // concurrent bitmap clearing.
3391 _nextMarkBitMap->clearAll();
3393 // Note we cannot clear the previous marking bitmap here
3394 // since VerifyDuringGC verifies the objects marked during
3395 // a full GC against the previous bitmap.
3397 // Clear the liveness counting data
3398 clear_all_count_data();
3399 // Empty mark stack
3400 reset_marking_state();
3401 for (uint i = 0; i < _max_worker_id; ++i) {
3402 _tasks[i]->clear_region_fields();
3403 }
3404 _first_overflow_barrier_sync.abort();
3405 _second_overflow_barrier_sync.abort();
3406 const GCId& gc_id = _g1h->gc_tracer_cm()->gc_id();
3407 if (!gc_id.is_undefined()) {
3408 // We can do multiple full GCs before ConcurrentMarkThread::run() gets a chance
3409 // to detect that it was aborted. Only keep track of the GC id of the first GC that we aborted.
3410 _aborted_gc_id = gc_id;
3411 }
3412 _has_aborted = true;
3414 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3415 satb_mq_set.abandon_partial_marking();
3416 // This can be called either during or outside marking; we'll read
3417 // the expected_active value from the SATB queue set.
3418 satb_mq_set.set_active_all_threads(
3419 false, /* new active value */
3420 satb_mq_set.is_active() /* expected_active */);
3422 _g1h->trace_heap_after_concurrent_cycle();
3423 _g1h->register_concurrent_cycle_end();
3424 }
3426 const GCId& ConcurrentMark::concurrent_gc_id() {
3427 if (has_aborted()) {
3428 return _aborted_gc_id;
3429 }
3430 return _g1h->gc_tracer_cm()->gc_id();
3431 }
3433 static void print_ms_time_info(const char* prefix, const char* name,
3434 NumberSeq& ns) {
3435 gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
3436 prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
3437 if (ns.num() > 0) {
3438 gclog_or_tty->print_cr("%s [std. dev = %8.2f ms, max = %8.2f ms]",
3439 prefix, ns.sd(), ns.maximum());
3440 }
3441 }
3443 void ConcurrentMark::print_summary_info() {
3444 gclog_or_tty->print_cr(" Concurrent marking:");
3445 print_ms_time_info(" ", "init marks", _init_times);
3446 print_ms_time_info(" ", "remarks", _remark_times);
3447 {
3448 print_ms_time_info(" ", "final marks", _remark_mark_times);
3449 print_ms_time_info(" ", "weak refs", _remark_weak_ref_times);
3451 }
3452 print_ms_time_info(" ", "cleanups", _cleanup_times);
3453 gclog_or_tty->print_cr(" Final counting total time = %8.2f s (avg = %8.2f ms).",
3454 _total_counting_time,
3455 (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 /
3456 (double)_cleanup_times.num()
3457 : 0.0));
3458 if (G1ScrubRemSets) {
3459 gclog_or_tty->print_cr(" RS scrub total time = %8.2f s (avg = %8.2f ms).",
3460 _total_rs_scrub_time,
3461 (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 /
3462 (double)_cleanup_times.num()
3463 : 0.0));
3464 }
3465 gclog_or_tty->print_cr(" Total stop_world time = %8.2f s.",
3466 (_init_times.sum() + _remark_times.sum() +
3467 _cleanup_times.sum())/1000.0);
3468 gclog_or_tty->print_cr(" Total concurrent time = %8.2f s "
3469 "(%8.2f s marking).",
3470 cmThread()->vtime_accum(),
3471 cmThread()->vtime_mark_accum());
3472 }
3474 void ConcurrentMark::print_worker_threads_on(outputStream* st) const {
3475 if (use_parallel_marking_threads()) {
3476 _parallel_workers->print_worker_threads_on(st);
3477 }
3478 }
3480 void ConcurrentMark::print_on_error(outputStream* st) const {
3481 st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT,
3482 p2i(_prevMarkBitMap), p2i(_nextMarkBitMap));
3483 _prevMarkBitMap->print_on_error(st, " Prev Bits: ");
3484 _nextMarkBitMap->print_on_error(st, " Next Bits: ");
3485 }
3487 // We take a break if someone is trying to stop the world.
3488 bool ConcurrentMark::do_yield_check(uint worker_id) {
3489 if (SuspendibleThreadSet::should_yield()) {
3490 if (worker_id == 0) {
3491 _g1h->g1_policy()->record_concurrent_pause();
3492 }
3493 SuspendibleThreadSet::yield();
3494 return true;
3495 } else {
3496 return false;
3497 }
3498 }
3500 bool ConcurrentMark::containing_card_is_marked(void* p) {
3501 size_t offset = pointer_delta(p, _g1h->reserved_region().start(), 1);
3502 return _card_bm.at(offset >> CardTableModRefBS::card_shift);
3503 }
3505 bool ConcurrentMark::containing_cards_are_marked(void* start,
3506 void* last) {
3507 return containing_card_is_marked(start) &&
3508 containing_card_is_marked(last);
3509 }
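// Illustrative arithmetic for the card lookup above (not part of the
// algorithm; assumes the usual 512-byte cards): a pointer p that lies
// 0x2345 bytes past the start of the reserved heap maps to card bit
// 0x2345 >> 9 == 17 in _card_bm.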
3511 #ifndef PRODUCT
3512 // for debugging purposes
3513 void ConcurrentMark::print_finger() {
3514 gclog_or_tty->print_cr("heap ["PTR_FORMAT", "PTR_FORMAT"), global finger = "PTR_FORMAT,
3515 p2i(_heap_start), p2i(_heap_end), p2i(_finger));
3516 for (uint i = 0; i < _max_worker_id; ++i) {
3517 gclog_or_tty->print(" %u: " PTR_FORMAT, i, p2i(_tasks[i]->finger()));
3518 }
3519 gclog_or_tty->cr();
3520 }
3521 #endif
3523 void CMTask::scan_object(oop obj) {
3524 assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant");
3526 if (_cm->verbose_high()) {
3527 gclog_or_tty->print_cr("[%u] we're scanning object "PTR_FORMAT,
3528 _worker_id, p2i((void*) obj));
3529 }
3531 size_t obj_size = obj->size();
3532 _words_scanned += obj_size;
3534 obj->oop_iterate(_cm_oop_closure);
3535 statsOnly( ++_objs_scanned );
3536 check_limits();
3537 }
3539 // Closure for iteration over bitmaps
3540 class CMBitMapClosure : public BitMapClosure {
3541 private:
3542 // the bitmap that is being iterated over
3543 CMBitMap* _nextMarkBitMap;
3544 ConcurrentMark* _cm;
3545 CMTask* _task;
3547 public:
3548 CMBitMapClosure(CMTask *task, ConcurrentMark* cm, CMBitMap* nextMarkBitMap) :
3549 _nextMarkBitMap(nextMarkBitMap), _cm(cm), _task(task) { }
3551 bool do_bit(size_t offset) {
3552 HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset);
3553 assert(_nextMarkBitMap->isMarked(addr), "invariant");
3554 assert(addr < _cm->finger(), "invariant");
3556 statsOnly( _task->increase_objs_found_on_bitmap() );
3557 assert(addr >= _task->finger(), "invariant");
3559 // We move that task's local finger along.
3560 _task->move_finger_to(addr);
3562 _task->scan_object(oop(addr));
3563 // we only partially drain the local queue and global stack
3564 _task->drain_local_queue(true);
3565 _task->drain_global_stack(true);
3567 // if the has_aborted flag has been raised, we need to bail out of
3568 // the iteration
3569 return !_task->has_aborted();
3570 }
3571 };
3573 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
3574 ConcurrentMark* cm,
3575 CMTask* task)
3576 : _g1h(g1h), _cm(cm), _task(task) {
3577 assert(_ref_processor == NULL, "should be initialized to NULL");
3579 if (G1UseConcMarkReferenceProcessing) {
3580 _ref_processor = g1h->ref_processor_cm();
3581 assert(_ref_processor != NULL, "should not be NULL");
3582 }
3583 }
3585 void CMTask::setup_for_region(HeapRegion* hr) {
3586 assert(hr != NULL,
3587 "claim_region() should have filtered out NULL regions");
3588 assert(!hr->continuesHumongous(),
3589 "claim_region() should have filtered out continues humongous regions");
3591 if (_cm->verbose_low()) {
3592 gclog_or_tty->print_cr("[%u] setting up for region "PTR_FORMAT,
3593 _worker_id, p2i(hr));
3594 }
3596 _curr_region = hr;
3597 _finger = hr->bottom();
3598 update_region_limit();
3599 }
3601 void CMTask::update_region_limit() {
3602 HeapRegion* hr = _curr_region;
3603 HeapWord* bottom = hr->bottom();
3604 HeapWord* limit = hr->next_top_at_mark_start();
3606 if (limit == bottom) {
3607 if (_cm->verbose_low()) {
3608 gclog_or_tty->print_cr("[%u] found an empty region "
3609 "["PTR_FORMAT", "PTR_FORMAT")",
3610 _worker_id, p2i(bottom), p2i(limit));
3611 }
3612 // The region was collected underneath our feet.
3613 // We set the finger to bottom to ensure that the bitmap
3614 // iteration that will follow this will not do anything.
3615 // (this is not a condition that holds when we set the region up,
3616 // as the region is not supposed to be empty in the first place)
3617 _finger = bottom;
3618 } else if (limit >= _region_limit) {
3619 assert(limit >= _finger, "peace of mind");
3620 } else {
3621 assert(limit < _region_limit, "only way to get here");
3622 // This can happen under some pretty unusual circumstances. An
3623 // evacuation pause empties the region underneath our feet (NTAMS
3624 // at bottom). We then do some allocation in the region (NTAMS
3625 // stays at bottom), followed by the region being used as a GC
3626 // alloc region (NTAMS will move to top() and the objects
3627 // originally below it will be grayed). All objects now marked in
3628 // the region are explicitly grayed, if below the global finger,
3629 // and in fact we do not need to scan anything else. So, we simply
3630 // set _finger to be limit to ensure that the bitmap iteration
3631 // doesn't do anything.
3632 _finger = limit;
3633 }
3635 _region_limit = limit;
3636 }
3638 void CMTask::giveup_current_region() {
3639 assert(_curr_region != NULL, "invariant");
3640 if (_cm->verbose_low()) {
3641 gclog_or_tty->print_cr("[%u] giving up region "PTR_FORMAT,
3642 _worker_id, p2i(_curr_region));
3643 }
3644 clear_region_fields();
3645 }
3647 void CMTask::clear_region_fields() {
3648 // Set the three region fields to values that indicate we're not
3649 // holding on to a region.
3650 _curr_region = NULL;
3651 _finger = NULL;
3652 _region_limit = NULL;
3653 }
3655 void CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
3656 if (cm_oop_closure == NULL) {
3657 assert(_cm_oop_closure != NULL, "invariant");
3658 } else {
3659 assert(_cm_oop_closure == NULL, "invariant");
3660 }
3661 _cm_oop_closure = cm_oop_closure;
3662 }
3664 void CMTask::reset(CMBitMap* nextMarkBitMap) {
3665 guarantee(nextMarkBitMap != NULL, "invariant");
3667 if (_cm->verbose_low()) {
3668 gclog_or_tty->print_cr("[%u] resetting", _worker_id);
3669 }
3671 _nextMarkBitMap = nextMarkBitMap;
3672 clear_region_fields();
3674 _calls = 0;
3675 _elapsed_time_ms = 0.0;
3676 _termination_time_ms = 0.0;
3677 _termination_start_time_ms = 0.0;
3679 #if _MARKING_STATS_
3680 _local_pushes = 0;
3681 _local_pops = 0;
3682 _local_max_size = 0;
3683 _objs_scanned = 0;
3684 _global_pushes = 0;
3685 _global_pops = 0;
3686 _global_max_size = 0;
3687 _global_transfers_to = 0;
3688 _global_transfers_from = 0;
3689 _regions_claimed = 0;
3690 _objs_found_on_bitmap = 0;
3691 _satb_buffers_processed = 0;
3692 _steal_attempts = 0;
3693 _steals = 0;
3694 _aborted = 0;
3695 _aborted_overflow = 0;
3696 _aborted_cm_aborted = 0;
3697 _aborted_yield = 0;
3698 _aborted_timed_out = 0;
3699 _aborted_satb = 0;
3700 _aborted_termination = 0;
3701 #endif // _MARKING_STATS_
3702 }
3704 bool CMTask::should_exit_termination() {
3705 regular_clock_call();
3706 // This is called when we are in the termination protocol. We should
3707 // quit if, for some reason, this task wants to abort or the global
3708 // stack is not empty (this means that we can get work from it).
3709 return !_cm->mark_stack_empty() || has_aborted();
3710 }
3712 void CMTask::reached_limit() {
3713 assert(_words_scanned >= _words_scanned_limit ||
3714 _refs_reached >= _refs_reached_limit,
3715 "shouldn't have been called otherwise");
3716 regular_clock_call();
3717 }
3719 void CMTask::regular_clock_call() {
3720 if (has_aborted()) return;
3722 // First, we need to recalculate the words scanned and refs reached
3723 // limits for the next clock call.
3724 recalculate_limits();
3726 // During the regular clock call we do the following
3728 // (1) If an overflow has been flagged, then we abort.
3729 if (_cm->has_overflown()) {
3730 set_has_aborted();
3731 return;
3732 }
3734 // If we are not concurrent (i.e. we're doing remark) we don't need
3735 // to check anything else. The other steps are only needed during
3736 // the concurrent marking phase.
3737 if (!concurrent()) return;
3739 // (2) If marking has been aborted for Full GC, then we also abort.
3740 if (_cm->has_aborted()) {
3741 set_has_aborted();
3742 statsOnly( ++_aborted_cm_aborted );
3743 return;
3744 }
3746 double curr_time_ms = os::elapsedVTime() * 1000.0;
3748 // (3) If marking stats are enabled, then we update the clock call statistics.
3749 #if _MARKING_STATS_
3750 if (_words_scanned >= _words_scanned_limit) {
3751 ++_clock_due_to_scanning;
3752 }
3753 if (_refs_reached >= _refs_reached_limit) {
3754 ++_clock_due_to_marking;
3755 }
3757 double last_interval_ms = curr_time_ms - _interval_start_time_ms;
3758 _interval_start_time_ms = curr_time_ms;
3759 _all_clock_intervals_ms.add(last_interval_ms);
3761 if (_cm->verbose_medium()) {
3762 gclog_or_tty->print_cr("[%u] regular clock, interval = %1.2lfms, "
3763 "scanned = %d%s, refs reached = %d%s",
3764 _worker_id, last_interval_ms,
3765 _words_scanned,
3766 (_words_scanned >= _words_scanned_limit) ? " (*)" : "",
3767 _refs_reached,
3768 (_refs_reached >= _refs_reached_limit) ? " (*)" : "");
3769 }
3770 #endif // _MARKING_STATS_
3772 // (4) We check whether we should yield. If we have to, then we abort.
3773 if (SuspendibleThreadSet::should_yield()) {
3774 // We should yield. To do this we abort the task. The caller is
3775 // responsible for yielding.
3776 set_has_aborted();
3777 statsOnly( ++_aborted_yield );
3778 return;
3779 }
3781 // (5) We check whether we've reached our time quota. If we have,
3782 // then we abort.
3783 double elapsed_time_ms = curr_time_ms - _start_time_ms;
3784 if (elapsed_time_ms > _time_target_ms) {
3785 set_has_aborted();
3786 _has_timed_out = true;
3787 statsOnly( ++_aborted_timed_out );
3788 return;
3789 }
3791 // (6) Finally, we check whether there are enough completed SATB
3792 // buffers available for processing. If there are, we abort.
3793 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3794 if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
3795 if (_cm->verbose_low()) {
3796 gclog_or_tty->print_cr("[%u] aborting to deal with pending SATB buffers",
3797 _worker_id);
3798 }
3799 // we do need to process SATB buffers, so we'll abort and restart
3800 // the marking task to do so
3801 set_has_aborted();
3802 statsOnly( ++_aborted_satb );
3803 return;
3804 }
3805 }
3807 void CMTask::recalculate_limits() {
3808 _real_words_scanned_limit = _words_scanned + words_scanned_period;
3809 _words_scanned_limit = _real_words_scanned_limit;
3811 _real_refs_reached_limit = _refs_reached + refs_reached_period;
3812 _refs_reached_limit = _real_refs_reached_limit;
3813 }
3815 void CMTask::decrease_limits() {
3816 // This is called when we believe that we're going to do an infrequent
3817 // operation which will increase the per-byte scanning cost (i.e. move
3818 // entries to/from the global stack). It basically tries to decrease the
3819 // scanning limit so that the clock is called earlier.
3821 if (_cm->verbose_medium()) {
3822 gclog_or_tty->print_cr("[%u] decreasing limits", _worker_id);
3823 }
3825 _words_scanned_limit = _real_words_scanned_limit -
3826 3 * words_scanned_period / 4;
3827 _refs_reached_limit = _real_refs_reached_limit -
3828 3 * refs_reached_period / 4;
3829 }
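// Worked example: assuming words_scanned_period is 12*1024, as
// declared in concurrentMark.hpp, decrease_limits() pulls
// _words_scanned_limit back by 3 * 12 * 1024 / 4 == 9216 words,
// leaving only a quarter of the usual scanning budget before
// regular_clock_call() fires again.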
3831 void CMTask::move_entries_to_global_stack() {
3832 // local array where we'll store the entries that will be popped
3833 // from the local queue
3834 oop buffer[global_stack_transfer_size];
3836 int n = 0;
3837 oop obj;
3838 while (n < global_stack_transfer_size && _task_queue->pop_local(obj)) {
3839 buffer[n] = obj;
3840 ++n;
3841 }
3843 if (n > 0) {
3844 // we popped at least one entry from the local queue
3846 statsOnly( ++_global_transfers_to; _local_pops += n );
3848 if (!_cm->mark_stack_push(buffer, n)) {
3849 if (_cm->verbose_low()) {
3850 gclog_or_tty->print_cr("[%u] aborting due to global stack overflow",
3851 _worker_id);
3852 }
3853 set_has_aborted();
3854 } else {
3855 // the transfer was successful
3857 if (_cm->verbose_medium()) {
3858 gclog_or_tty->print_cr("[%u] pushed %d entries to the global stack",
3859 _worker_id, n);
3860 }
3861 statsOnly( int tmp_size = _cm->mark_stack_size();
3862 if (tmp_size > _global_max_size) {
3863 _global_max_size = tmp_size;
3864 }
3865 _global_pushes += n );
3866 }
3867 }
3869 // this operation was quite expensive, so decrease the limits
3870 decrease_limits();
3871 }
3873 void CMTask::get_entries_from_global_stack() {
3874 // local array where we'll store the entries that will be popped
3875 // from the global stack.
3876 oop buffer[global_stack_transfer_size];
3877 int n;
3878 _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n);
3879 assert(n <= global_stack_transfer_size,
3880 "we should not pop more than the given limit");
3881 if (n > 0) {
3882 // yes, we did actually pop at least one entry
3884 statsOnly( ++_global_transfers_from; _global_pops += n );
3885 if (_cm->verbose_medium()) {
3886 gclog_or_tty->print_cr("[%u] popped %d entries from the global stack",
3887 _worker_id, n);
3888 }
3889 for (int i = 0; i < n; ++i) {
3890 bool success = _task_queue->push(buffer[i]);
3891 // We only call this when the local queue is empty or under a
3892 // given target limit. So, we do not expect this push to fail.
3893 assert(success, "invariant");
3894 }
3896 statsOnly( int tmp_size = _task_queue->size();
3897 if (tmp_size > _local_max_size) {
3898 _local_max_size = tmp_size;
3899 }
3900 _local_pushes += n );
3901 }
3903 // this operation was quite expensive, so decrease the limits
3904 decrease_limits();
3905 }
3907 void CMTask::drain_local_queue(bool partially) {
3908 if (has_aborted()) return;
3910 // Decide what the target size is, depending on whether we're going to
3911 // drain it partially (so that other tasks can steal if they run out
3912 // of things to do) or totally (at the very end).
3913 size_t target_size;
3914 if (partially) {
3915 target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
3916 } else {
3917 target_size = 0;
3918 }
3920 if (_task_queue->size() > target_size) {
3921 if (_cm->verbose_high()) {
3922 gclog_or_tty->print_cr("[%u] draining local queue, target size = " SIZE_FORMAT,
3923 _worker_id, target_size);
3924 }
3926 oop obj;
3927 bool ret = _task_queue->pop_local(obj);
3928 while (ret) {
3929 statsOnly( ++_local_pops );
3931 if (_cm->verbose_high()) {
3932 gclog_or_tty->print_cr("[%u] popped "PTR_FORMAT, _worker_id,
3933 p2i((void*) obj));
3934 }
3936 assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" );
3937 assert(!_g1h->is_on_master_free_list(
3938 _g1h->heap_region_containing((HeapWord*) obj)), "invariant");
3940 scan_object(obj);
3942 if (_task_queue->size() <= target_size || has_aborted()) {
3943 ret = false;
3944 } else {
3945 ret = _task_queue->pop_local(obj);
3946 }
3947 }
3949 if (_cm->verbose_high()) {
3950 gclog_or_tty->print_cr("[%u] drained local queue, size = %d",
3951 _worker_id, _task_queue->size());
3952 }
3953 }
3954 }
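// Worked example: assuming a task queue capacity of 16384 entries
// and GCDrainStackTargetSize at its usual default of 64, a partial
// drain stops once the queue is down to MIN2(16384 / 3, 64) == 64
// entries, leaving entries available for other tasks to steal.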
3956 void CMTask::drain_global_stack(bool partially) {
3957 if (has_aborted()) return;
3959 // We have a policy to drain the local queue before we attempt to
3960 // drain the global stack.
3961 assert(partially || _task_queue->size() == 0, "invariant");
3963 // Decide what the target size is, depending on whether we're going to
3964 // drain it partially (so that other tasks can steal if they run out
3965 // of things to do) or totally (at the very end). Notice that,
3966 // because we move entries from the global stack in chunks or
3967 // because another task might be doing the same, we might in fact
3968 // drop below the target. But, this is not a problem.
3969 size_t target_size;
3970 if (partially) {
3971 target_size = _cm->partial_mark_stack_size_target();
3972 } else {
3973 target_size = 0;
3974 }
3976 if (_cm->mark_stack_size() > target_size) {
3977 if (_cm->verbose_low()) {
3978 gclog_or_tty->print_cr("[%u] draining global_stack, target size " SIZE_FORMAT,
3979 _worker_id, target_size);
3980 }
3982 while (!has_aborted() && _cm->mark_stack_size() > target_size) {
3983 get_entries_from_global_stack();
3984 drain_local_queue(partially);
3985 }
3987 if (_cm->verbose_low()) {
3988 gclog_or_tty->print_cr("[%u] drained global stack, size = " SIZE_FORMAT,
3989 _worker_id, _cm->mark_stack_size());
3990 }
3991 }
3992 }
3994 // The SATB queue set makes several assumptions about whether to call
3995 // the par or non-par versions of the methods. This is why some of the
3996 // code is replicated. We should really get rid of the single-threaded
3997 // version of the code to simplify things.
3998 void CMTask::drain_satb_buffers() {
3999 if (has_aborted()) return;
4001 // We set this so that the regular clock knows that we're in the
4002 // middle of draining buffers and doesn't set the abort flag when it
4003 // notices that SATB buffers are available for draining. It'd be
4004 // very counterproductive if it did that. :-)
4005 _draining_satb_buffers = true;
4007 CMObjectClosure oc(this);
4008 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
4009 if (G1CollectedHeap::use_parallel_gc_threads()) {
4010 satb_mq_set.set_par_closure(_worker_id, &oc);
4011 } else {
4012 satb_mq_set.set_closure(&oc);
4013 }
4015 // This keeps claiming and applying the closure to completed buffers
4016 // until we run out of buffers or we need to abort.
4017 if (G1CollectedHeap::use_parallel_gc_threads()) {
4018 while (!has_aborted() &&
4019 satb_mq_set.par_apply_closure_to_completed_buffer(_worker_id)) {
4020 if (_cm->verbose_medium()) {
4021 gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id);
4022 }
4023 statsOnly( ++_satb_buffers_processed );
4024 regular_clock_call();
4025 }
4026 } else {
4027 while (!has_aborted() &&
4028 satb_mq_set.apply_closure_to_completed_buffer()) {
4029 if (_cm->verbose_medium()) {
4030 gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id);
4031 }
4032 statsOnly( ++_satb_buffers_processed );
4033 regular_clock_call();
4034 }
4035 }
4037 _draining_satb_buffers = false;
4039 assert(has_aborted() ||
4040 concurrent() ||
4041 satb_mq_set.completed_buffers_num() == 0, "invariant");
4043 if (G1CollectedHeap::use_parallel_gc_threads()) {
4044 satb_mq_set.set_par_closure(_worker_id, NULL);
4045 } else {
4046 satb_mq_set.set_closure(NULL);
4047 }
4049 // again, this was a potentially expensive operation, so decrease
4050 // the limits to get the regular clock call early
4051 decrease_limits();
4052 }
4054 void CMTask::print_stats() {
4055 gclog_or_tty->print_cr("Marking Stats, task = %u, calls = %d",
4056 _worker_id, _calls);
4057 gclog_or_tty->print_cr(" Elapsed time = %1.2lfms, Termination time = %1.2lfms",
4058 _elapsed_time_ms, _termination_time_ms);
4059 gclog_or_tty->print_cr(" Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
4060 _step_times_ms.num(), _step_times_ms.avg(),
4061 _step_times_ms.sd());
4062 gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms",
4063 _step_times_ms.maximum(), _step_times_ms.sum());
4065 #if _MARKING_STATS_
4066 gclog_or_tty->print_cr(" Clock Intervals (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
4067 _all_clock_intervals_ms.num(), _all_clock_intervals_ms.avg(),
4068 _all_clock_intervals_ms.sd());
4069 gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms",
4070 _all_clock_intervals_ms.maximum(),
4071 _all_clock_intervals_ms.sum());
4072 gclog_or_tty->print_cr(" Clock Causes (cum): scanning = %d, marking = %d",
4073 _clock_due_to_scanning, _clock_due_to_marking);
4074 gclog_or_tty->print_cr(" Objects: scanned = %d, found on the bitmap = %d",
4075 _objs_scanned, _objs_found_on_bitmap);
4076 gclog_or_tty->print_cr(" Local Queue: pushes = %d, pops = %d, max size = %d",
4077 _local_pushes, _local_pops, _local_max_size);
4078 gclog_or_tty->print_cr(" Global Stack: pushes = %d, pops = %d, max size = %d",
4079 _global_pushes, _global_pops, _global_max_size);
4080 gclog_or_tty->print_cr(" transfers to = %d, transfers from = %d",
4081 _global_transfers_to,_global_transfers_from);
4082 gclog_or_tty->print_cr(" Regions: claimed = %d", _regions_claimed);
4083 gclog_or_tty->print_cr(" SATB buffers: processed = %d", _satb_buffers_processed);
4084 gclog_or_tty->print_cr(" Steals: attempts = %d, successes = %d",
4085 _steal_attempts, _steals);
4086 gclog_or_tty->print_cr(" Aborted: %d, due to", _aborted);
4087 gclog_or_tty->print_cr(" overflow: %d, global abort: %d, yield: %d",
4088 _aborted_overflow, _aborted_cm_aborted, _aborted_yield);
4089 gclog_or_tty->print_cr(" time out: %d, SATB: %d, termination: %d",
4090 _aborted_timed_out, _aborted_satb, _aborted_termination);
4091 #endif // _MARKING_STATS_
4092 }
4094 /*****************************************************************************
4096 The do_marking_step(time_target_ms, ...) method is the building
4097 block of the parallel marking framework. It can be called in parallel
4098 with other invocations of do_marking_step() on different tasks
4099 (but only one per task, obviously) and concurrently with the
4100 mutator threads, or during remark, hence it eliminates the need
4101 for two versions of the code. When called during remark, it will
4102 pick up from where the task left off during the concurrent marking
4103 phase. Interestingly, tasks are also claimable during evacuation
4104 pauses, since do_marking_step() ensures that it aborts before
4105 it needs to yield.
4107 The data structures that it uses to do marking work are the
4108 following:
4110 (1) Marking Bitmap. If there are gray objects that appear only
4111 on the bitmap (this happens either when dealing with an overflow
4112 or when the initial marking phase has simply marked the roots
4113 and didn't push them on the stack), then tasks claim heap
4114 regions whose bitmap they then scan to find gray objects. A
4115 global finger indicates where the end of the last claimed region
4116 is. A local finger indicates how far into the region a task has
4117 scanned. The two fingers are used to determine how to gray an
4118 object (i.e. whether simply marking it is OK, as it will be
4119 visited by a task in the future, or whether it also needs to be
4120 pushed on a stack).
4122 (2) Local Queue. Each task has a local queue, which it can
4123 access reasonably efficiently. Other tasks can steal from
4124 it when they run out of work. Throughout the marking phase, a
4125 task attempts to keep its local queue short but not totally
4126 empty, so that entries are available for stealing by other
4127 tasks. Only when there is no more work will a task totally
4128 drain its local queue.
4130 (3) Global Mark Stack. This handles local queue overflow. During
4131 marking only sets of entries are moved between it and the local
4132 queues, as access to it requires a mutex and finer-grained
4133 interaction with it might cause contention. If it
4134 overflows, then the marking phase should restart and iterate
4135 over the bitmap to identify gray objects. Throughout the marking
4136 phase, tasks attempt to keep the global mark stack at a small
4137 length but not totally empty, so that entries are available for
4138 popping by other tasks. Only when there is no more work will
4139 tasks totally drain the global mark stack.
4141 (4) SATB Buffer Queue. This is where completed SATB buffers are
4142 made available. Buffers are regularly removed from this queue
4143 and scanned for roots, so that the queue doesn't get too
4144 long. During remark, all completed buffers are processed, as
4145 well as the filled-in parts of any uncompleted buffers.
4147 The do_marking_step() method tries to abort when the time target
4148 has been reached. There are a few other cases when the
4149 do_marking_step() method also aborts:
4151 (1) When the marking phase has been aborted (after a Full GC).
4153 (2) When a global overflow (on the global stack) has been
4154 triggered. Before the task aborts, it will actually sync up with
4155 the other tasks to ensure that all the marking data structures
4156 (local queues, stacks, fingers etc.) are re-initialized so that
4157 when do_marking_step() completes, the marking phase can
4158 immediately restart.
4160 (3) When enough completed SATB buffers are available. The
4161 do_marking_step() method only tries to drain SATB buffers right
4162 at the beginning. So, if enough buffers are available, the
4163 marking step aborts and the SATB buffers are processed at
4164 the beginning of the next invocation.
4166 (4) To yield. When we have to yield, we abort and yield
4167 right at the end of do_marking_step(). This saves us from a lot
4168 of hassle as, by yielding, we might allow a Full GC. If this
4169 happens then objects will be compacted underneath our feet, the
4170 heap might shrink, etc. We save checking for this by just
4171 aborting and doing the yield right at the end.
4173 From the above it follows that the do_marking_step() method should
4174 be called in a loop (or, otherwise, regularly) until it completes; a sketch of such a driver loop follows this comment.
4176 If a marking step completes without its has_aborted() flag being
4177 true, it means it has completed the current marking phase (and
4178 also all other marking tasks have done so and have all synced up).
4180 A method called regular_clock_call() is invoked "regularly" (in
4181 sub-ms intervals) throughout marking. It is this clock method that
4182 checks all the abort conditions which were mentioned above and
4183 decides when the task should abort. A work-based scheme is used to
4184 trigger this clock method: when the number of object words the
4185 marking phase has scanned or the number of references the marking
4186 phase has visited reaches a given limit. Additional invocations of
4187 the clock method have been planted in a few other strategic places
4188 too. The initial reason for the clock method was to avoid calling
4189 vtime too regularly, as it is quite expensive. So, once it was in
4190 place, it was natural to piggy-back all the other conditions on it
4191 too and not constantly check them throughout the code.
4193 If do_termination is true then do_marking_step will enter its
4194 termination protocol.
4196 The value of is_serial must be true when do_marking_step is being
4197 called serially (i.e. by the VMThread) and do_marking_step should
4198 skip any synchronization in the termination and overflow code.
4199 Examples include the serial remark code and the serial reference
4200 processing closures.
4202 The value of is_serial must be false when do_marking_step is
4203 being called by any of the worker threads in a work gang.
4204 Examples include the concurrent marking code (CMMarkingTask),
4205 the MT remark code, and the MT reference processing closures.
4207 *****************************************************************************/
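// A minimal sketch of the driver loop described above. This is an
// illustration only, not code used by this file; in this file the
// concurrent caller is CMConcurrentMarkingTask::work(), and the
// names task/cm and the 10.0ms target are example placeholders.
//
//   CMTask* task = ...;   // the marking task this worker claimed
//   do {
//     task->do_marking_step(10.0  /* time_target_ms */,
//                           true  /* do_termination */,
//                           false /* is_serial */);
//     // If the step aborted (yield request, SATB backlog, overflow,
//     // time-out), yield if requested, then loop to resume the task.
//   } while (task->has_aborted() && !cm->has_aborted());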
4209 void CMTask::do_marking_step(double time_target_ms,
4210 bool do_termination,
4211 bool is_serial) {
4212 assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
4213 assert(concurrent() == _cm->concurrent(), "they should be the same");
4215 G1CollectorPolicy* g1_policy = _g1h->g1_policy();
4216 assert(_task_queues != NULL, "invariant");
4217 assert(_task_queue != NULL, "invariant");
4218 assert(_task_queues->queue(_worker_id) == _task_queue, "invariant");
4220 assert(!_claimed,
4221 "only one thread should claim this task at any one time");
4223 // OK, this doesn't safeguard against all possible scenarios, as it is
4224 // possible for two threads to set the _claimed flag at the same
4225 // time. But it is only for debugging purposes anyway and it will
4226 // catch most problems.
4227 _claimed = true;
4229 _start_time_ms = os::elapsedVTime() * 1000.0;
4230 statsOnly( _interval_start_time_ms = _start_time_ms );
4232 // If do_stealing is true then do_marking_step will attempt to
4233 // steal work from the other CMTasks. It only makes sense to
4234 // enable stealing when the termination protocol is enabled
4235 // and do_marking_step() is not being called serially.
4236 bool do_stealing = do_termination && !is_serial;
4238 double diff_prediction_ms =
4239 g1_policy->get_new_prediction(&_marking_step_diffs_ms);
4240 _time_target_ms = time_target_ms - diff_prediction_ms;
4242 // set up the variables that are used in the work-based scheme to
4243 // call the regular clock method
4244 _words_scanned = 0;
4245 _refs_reached = 0;
4246 recalculate_limits();
4248 // clear all flags
4249 clear_has_aborted();
4250 _has_timed_out = false;
4251 _draining_satb_buffers = false;
4253 ++_calls;
4255 if (_cm->verbose_low()) {
4256 gclog_or_tty->print_cr("[%u] >>>>>>>>>> START, call = %d, "
4257 "target = %1.2lfms >>>>>>>>>>",
4258 _worker_id, _calls, _time_target_ms);
4259 }
4261 // Set up the bitmap and oop closures. Anything that uses them is
4262 // eventually called from this method, so it is OK to allocate these
4263 // on the stack.
4264 CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap);
4265 G1CMOopClosure cm_oop_closure(_g1h, _cm, this);
4266 set_cm_oop_closure(&cm_oop_closure);
4268 if (_cm->has_overflown()) {
4269 // This can happen if the mark stack overflows during a GC pause
4270 // and this task, after a yield point, restarts. We have to abort
4271 // as we need to get into the overflow protocol which happens
4272 // right at the end of this task.
4273 set_has_aborted();
4274 }
4276 // First drain any available SATB buffers. After this, we will not
4277 // look at SATB buffers before the next invocation of this method.
4278 // If enough completed SATB buffers are queued up, the regular clock
4279 // will abort this task so that it restarts.
4280 drain_satb_buffers();
4281 // ...then partially drain the local queue and the global stack
4282 drain_local_queue(true);
4283 drain_global_stack(true);
4285 do {
4286 if (!has_aborted() && _curr_region != NULL) {
4287 // This means that we're already holding on to a region.
4288 assert(_finger != NULL, "if region is not NULL, then the finger "
4289 "should not be NULL either");
4291 // We might have restarted this task after an evacuation pause
4292 // which might have evacuated the region we're holding on to
4293 // underneath our feet. Let's read its limit again to make sure
4294 // that we do not iterate over a region of the heap that
4295 // contains garbage (update_region_limit() will also move
4296 // _finger to the start of the region if it is found empty).
4297 update_region_limit();
4298 // We will start from _finger not from the start of the region,
4299 // as we might be restarting this task after aborting half-way
4300 // through scanning this region. In this case, _finger points to
4301 // the address where we last found a marked object. If this is a
4302 // fresh region, _finger points to start().
4303 MemRegion mr = MemRegion(_finger, _region_limit);
4305 if (_cm->verbose_low()) {
4306 gclog_or_tty->print_cr("[%u] we're scanning part "
4307 "["PTR_FORMAT", "PTR_FORMAT") "
4308 "of region "HR_FORMAT,
4309 _worker_id, p2i(_finger), p2i(_region_limit),
4310 HR_FORMAT_PARAMS(_curr_region));
4311 }
4313 assert(!_curr_region->isHumongous() || mr.start() == _curr_region->bottom(),
4314 "humongous regions should go around loop once only");
4316 // Some special cases:
4317 // If the memory region is empty, we can just give up the region.
4318 // If the current region is humongous then we only need to check
4319 // the bitmap for the bit associated with the start of the object,
4320 // scan the object if it's live, and give up the region.
4321 // Otherwise, let's iterate over the bitmap of the part of the region
4322 // that is left.
4323 // If the iteration is successful, give up the region.
4324 if (mr.is_empty()) {
4325 giveup_current_region();
4326 regular_clock_call();
4327 } else if (_curr_region->isHumongous() && mr.start() == _curr_region->bottom()) {
4328 if (_nextMarkBitMap->isMarked(mr.start())) {
4329 // The object is marked - apply the closure
4330 BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start());
4331 bitmap_closure.do_bit(offset);
4332 }
4333 // Even if this task aborted while scanning the humongous object
4334 // we can (and should) give up the current region.
4335 giveup_current_region();
4336 regular_clock_call();
4337 } else if (_nextMarkBitMap->iterate(&bitmap_closure, mr)) {
4338 giveup_current_region();
4339 regular_clock_call();
4340 } else {
4341 assert(has_aborted(), "currently the only way to do so");
4342 // The only way to abort the bitmap iteration is to return
4343 // false from the do_bit() method. However, inside the
4344 // do_bit() method we move the _finger to point to the
4345 // object currently being looked at. So, if we bail out, we
4346 // have definitely set _finger to something non-null.
4347 assert(_finger != NULL, "invariant");
4349 // Region iteration was actually aborted. So now _finger
4350 // points to the address of the object we last scanned. If we
4351 // leave it there, when we restart this task, we will rescan
4352 // the object. It is easy to avoid this. We move the finger by
4353 // enough to point to the next possible object header (the
4354 // bitmap knows by how much we need to move it as it knows its
4355 // granularity).
4356 assert(_finger < _region_limit, "invariant");
4357 HeapWord* new_finger = _nextMarkBitMap->nextObject(_finger);
4358 // Check if bitmap iteration was aborted while scanning the last object
4359 if (new_finger >= _region_limit) {
4360 giveup_current_region();
4361 } else {
4362 move_finger_to(new_finger);
4363 }
4364 }
4365 }
4366 // At this point we have either completed iterating over the
4367 // region we were holding on to, or we have aborted.
4369 // We then partially drain the local queue and the global stack.
4370 // (Do we really need this?)
4371 drain_local_queue(true);
4372 drain_global_stack(true);
4374 // Read the note on the claim_region() method on why it might
4375 // return NULL with potentially more regions available for
4376 // claiming and why we have to check out_of_regions() to determine
4377 // whether we're done or not.
4378 while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
4379 // We are going to try to claim a new region. We should have
4380 // given up on the previous one.
4381 // Separated the asserts so that we know which one fires.
4382 assert(_curr_region == NULL, "invariant");
4383 assert(_finger == NULL, "invariant");
4384 assert(_region_limit == NULL, "invariant");
4385 if (_cm->verbose_low()) {
4386 gclog_or_tty->print_cr("[%u] trying to claim a new region", _worker_id);
4387 }
4388 HeapRegion* claimed_region = _cm->claim_region(_worker_id);
4389 if (claimed_region != NULL) {
4390 // Yes, we managed to claim one
4391 statsOnly( ++_regions_claimed );
4393 if (_cm->verbose_low()) {
4394 gclog_or_tty->print_cr("[%u] we successfully claimed "
4395 "region "PTR_FORMAT,
4396 _worker_id, p2i(claimed_region));
4397 }
4399 setup_for_region(claimed_region);
4400 assert(_curr_region == claimed_region, "invariant");
4401 }
4402 // It is important to call the regular clock here. It might take
4403 // a while to claim a region if, for example, we hit a large
4404 // block of empty regions. So we need to call the regular clock
4405 // method once round the loop to make sure it's called
4406 // frequently enough.
4407 regular_clock_call();
4408 }
4410 if (!has_aborted() && _curr_region == NULL) {
4411 assert(_cm->out_of_regions(),
4412 "at this point we should be out of regions");
4413 }
4414 } while ( _curr_region != NULL && !has_aborted());
4416 if (!has_aborted()) {
4417 // We cannot check whether the global stack is empty, since other
4418 // tasks might be pushing objects to it concurrently.
4419 assert(_cm->out_of_regions(),
4420 "at this point we should be out of regions");
4422 if (_cm->verbose_low()) {
4423 gclog_or_tty->print_cr("[%u] all regions claimed", _worker_id);
4424 }
4426 // Try to reduce the number of available SATB buffers so that
4427 // remark has less work to do.
4428 drain_satb_buffers();
4429 }
4431 // Since we've done everything else, we can now totally drain the
4432 // local queue and global stack.
4433 drain_local_queue(false);
4434 drain_global_stack(false);
4436 // Attempt at work stealing from other tasks' queues.
4437 if (do_stealing && !has_aborted()) {
4438 // We have not aborted. This means that we have finished all that
4439 // we could. Let's try to do some stealing...
4441 // We cannot check whether the global stack is empty, since other
4442 // tasks might be pushing objects to it concurrently.
4443 assert(_cm->out_of_regions() && _task_queue->size() == 0,
4444 "only way to reach here");
4446 if (_cm->verbose_low()) {
4447 gclog_or_tty->print_cr("[%u] starting to steal", _worker_id);
4448 }
4450 while (!has_aborted()) {
4451 oop obj;
4452 statsOnly( ++_steal_attempts );
4454 if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) {
4455 if (_cm->verbose_medium()) {
4456 gclog_or_tty->print_cr("[%u] stolen "PTR_FORMAT" successfully",
4457 _worker_id, p2i((void*) obj));
4458 }
4460 statsOnly( ++_steals );
4462 assert(_nextMarkBitMap->isMarked((HeapWord*) obj),
4463 "any stolen object should be marked");
4464 scan_object(obj);
4466 // And since we're towards the end, let's totally drain the
4467 // local queue and global stack.
4468 drain_local_queue(false);
4469 drain_global_stack(false);
4470 } else {
4471 break;
4472 }
4473 }
4474 }
4476 // If we are about to wrap up and go into termination, check if we
4477 // should raise the overflow flag.
4478 if (do_termination && !has_aborted()) {
4479 if (_cm->force_overflow()->should_force()) {
4480 _cm->set_has_overflown();
4481 regular_clock_call();
4482 }
4483 }
4485 // We still haven't aborted. Now, let's try to get into the
4486 // termination protocol.
4487 if (do_termination && !has_aborted()) {
4488 // We cannot check whether the global stack is empty, since other
4489 // tasks might be concurrently pushing objects on it.
4490 // Separated the asserts so that we know which one fires.
4491 assert(_cm->out_of_regions(), "only way to reach here");
4492 assert(_task_queue->size() == 0, "only way to reach here");
4494 if (_cm->verbose_low()) {
4495 gclog_or_tty->print_cr("[%u] starting termination protocol", _worker_id);
4496 }
4498 _termination_start_time_ms = os::elapsedVTime() * 1000.0;
4500 // The CMTask class also extends the TerminatorTerminator class,
4501 // hence its should_exit_termination() method will also decide
4502 // whether to exit the termination protocol or not.
4503 bool finished = (is_serial ||
4504 _cm->terminator()->offer_termination(this));
4505 double termination_end_time_ms = os::elapsedVTime() * 1000.0;
4506 _termination_time_ms +=
4507 termination_end_time_ms - _termination_start_time_ms;
4509 if (finished) {
4510 // We're all done.
4512 if (_worker_id == 0) {
4513 // let's allow task 0 to do this
4514 if (concurrent()) {
4515 assert(_cm->concurrent_marking_in_progress(), "invariant");
4516 // we need to set this to false before the next
4517 // safepoint. This way we ensure that the marking phase
4518 // doesn't observe any more heap expansions.
4519 _cm->clear_concurrent_marking_in_progress();
4520 }
4521 }
4523 // We can now guarantee that the global stack is empty, since
4524 // all other tasks have finished. We separated the guarantees so
4525 // that, if a condition is false, we can immediately find out
4526 // which one.
4527 guarantee(_cm->out_of_regions(), "only way to reach here");
4528 guarantee(_cm->mark_stack_empty(), "only way to reach here");
4529 guarantee(_task_queue->size() == 0, "only way to reach here");
4530 guarantee(!_cm->has_overflown(), "only way to reach here");
4531 guarantee(!_cm->mark_stack_overflow(), "only way to reach here");
4533 if (_cm->verbose_low()) {
4534 gclog_or_tty->print_cr("[%u] all tasks terminated", _worker_id);
4535 }
4536 } else {
4537 // Apparently there's more work to do. Let's abort this task. The
4538 // caller will restart it and we can hopefully find more things to do.
4540 if (_cm->verbose_low()) {
4541 gclog_or_tty->print_cr("[%u] apparently there is more work to do",
4542 _worker_id);
4543 }
4545 set_has_aborted();
4546 statsOnly( ++_aborted_termination );
4547 }
4548 }
4550 // Mainly for debugging purposes to make sure that a pointer to the
4551 // closure which was stack-allocated in this frame doesn't
4552 // escape it by accident.
4553 set_cm_oop_closure(NULL);
4554 double end_time_ms = os::elapsedVTime() * 1000.0;
4555 double elapsed_time_ms = end_time_ms - _start_time_ms;
4556 // Update the step history.
4557 _step_times_ms.add(elapsed_time_ms);
4559 if (has_aborted()) {
4560 // The task was aborted for some reason.
4562 statsOnly( ++_aborted );
4564 if (_has_timed_out) {
4565 double diff_ms = elapsed_time_ms - _time_target_ms;
4566 // Keep statistics of how well we did with respect to hitting
4567 // our target only if we actually timed out (if we aborted for
4568 // other reasons, then the results might get skewed).
4569 _marking_step_diffs_ms.add(diff_ms);
4570 }
4572 if (_cm->has_overflown()) {
4573 // This is the interesting one. We aborted because a global
4574 // overflow was raised. This means we have to restart the
4575 // marking phase and start iterating over regions. However, in
4576 // order to do this we have to make sure that all tasks stop
4577 // what they are doing and re-initialise in a safe manner. We
4578 // will achieve this with the use of two barrier sync points.
4580 if (_cm->verbose_low()) {
4581 gclog_or_tty->print_cr("[%u] detected overflow", _worker_id);
4582 }
4584 if (!is_serial) {
4585 // We only need to enter the sync barrier if being called
4586 // from a parallel context
4587 _cm->enter_first_sync_barrier(_worker_id);
4589 // When we exit this sync barrier we know that all tasks have
4590 // stopped doing marking work. So, it's now safe to
4591 // re-initialise our data structures. At the end of this method,
4592 // task 0 will clear the global data structures.
4593 }
4595 statsOnly( ++_aborted_overflow );
4597 // We clear the local state of this task...
4598 clear_region_fields();
4600 if (!is_serial) {
4601 // ...and enter the second barrier.
4602 _cm->enter_second_sync_barrier(_worker_id);
4603 }
4604 // At this point, if we're in the concurrent phase of
4605 // marking, everything has been re-initialized and we're
4606 // ready to restart.
4607 }
4609 if (_cm->verbose_low()) {
4610 gclog_or_tty->print_cr("[%u] <<<<<<<<<< ABORTING, target = %1.2lfms, "
4611 "elapsed = %1.2lfms <<<<<<<<<<",
4612 _worker_id, _time_target_ms, elapsed_time_ms);
4613 if (_cm->has_aborted()) {
4614 gclog_or_tty->print_cr("[%u] ========== MARKING ABORTED ==========",
4615 _worker_id);
4616 }
4617 }
4618 } else {
4619 if (_cm->verbose_low()) {
4620 gclog_or_tty->print_cr("[%u] <<<<<<<<<< FINISHED, target = %1.2lfms, "
4621 "elapsed = %1.2lfms <<<<<<<<<<",
4622 _worker_id, _time_target_ms, elapsed_time_ms);
4623 }
4624 }
4626 _claimed = false;
4627 }
4629 CMTask::CMTask(uint worker_id,
4630 ConcurrentMark* cm,
4631 size_t* marked_bytes,
4632 BitMap* card_bm,
4633 CMTaskQueue* task_queue,
4634 CMTaskQueueSet* task_queues)
4635 : _g1h(G1CollectedHeap::heap()),
4636 _worker_id(worker_id), _cm(cm),
4637 _claimed(false),
4638 _nextMarkBitMap(NULL), _hash_seed(17),
4639 _task_queue(task_queue),
4640 _task_queues(task_queues),
4641 _cm_oop_closure(NULL),
4642 _marked_bytes_array(marked_bytes),
4643 _card_bm(card_bm) {
4644 guarantee(task_queue != NULL, "invariant");
4645 guarantee(task_queues != NULL, "invariant");
4647 statsOnly( _clock_due_to_scanning = 0;
4648 _clock_due_to_marking = 0 );
4650 _marking_step_diffs_ms.add(0.5);
4651 }
4653 // These are formatting macros that are used below to ensure
4654 // consistent formatting. The *_H_* versions are used to format the
4655 // header for a particular value and they should be kept consistent
4656 // with the corresponding macro. Also note that most of the macros add
4657 // the necessary white space (as a prefix) which makes them a bit
4658 // easier to compose.
4660 // All the output lines are prefixed with this string to be able to
4661 // identify them easily in a large log file.
4662 #define G1PPRL_LINE_PREFIX "###"
4664 #define G1PPRL_ADDR_BASE_FORMAT " "PTR_FORMAT"-"PTR_FORMAT
4665 #ifdef _LP64
4666 #define G1PPRL_ADDR_BASE_H_FORMAT " %37s"
4667 #else // _LP64
4668 #define G1PPRL_ADDR_BASE_H_FORMAT " %21s"
4669 #endif // _LP64
4671 // For per-region info
4672 #define G1PPRL_TYPE_FORMAT " %-4s"
4673 #define G1PPRL_TYPE_H_FORMAT " %4s"
4674 #define G1PPRL_BYTE_FORMAT " "SIZE_FORMAT_W(9)
4675 #define G1PPRL_BYTE_H_FORMAT " %9s"
4676 #define G1PPRL_DOUBLE_FORMAT " %14.1f"
4677 #define G1PPRL_DOUBLE_H_FORMAT " %14s"
4679 // For summary info
4680 #define G1PPRL_SUM_ADDR_FORMAT(tag) " "tag":"G1PPRL_ADDR_BASE_FORMAT
4681 #define G1PPRL_SUM_BYTE_FORMAT(tag) " "tag": "SIZE_FORMAT
4682 #define G1PPRL_SUM_MB_FORMAT(tag) " "tag": %1.2f MB"
4683 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag)" / %1.2f %%"
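// For example, G1PPRL_SUM_MB_PERC_FORMAT("used") expands (by string
// literal concatenation) to " used: %1.2f MB / %1.2f %%", which is
// how the summary lines printed below are assembled.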
4685 G1PrintRegionLivenessInfoClosure::
4686 G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name)
4687 : _out(out),
4688 _total_used_bytes(0), _total_capacity_bytes(0),
4689 _total_prev_live_bytes(0), _total_next_live_bytes(0),
4690 _hum_used_bytes(0), _hum_capacity_bytes(0),
4691 _hum_prev_live_bytes(0), _hum_next_live_bytes(0),
4692 _total_remset_bytes(0), _total_strong_code_roots_bytes(0) {
4693 G1CollectedHeap* g1h = G1CollectedHeap::heap();
4694 MemRegion g1_reserved = g1h->g1_reserved();
4695 double now = os::elapsedTime();
4697 // Print the header of the output.
4698 _out->cr();
4699 _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
4700 _out->print_cr(G1PPRL_LINE_PREFIX" HEAP"
4701 G1PPRL_SUM_ADDR_FORMAT("reserved")
4702 G1PPRL_SUM_BYTE_FORMAT("region-size"),
4703 p2i(g1_reserved.start()), p2i(g1_reserved.end()),
4704 HeapRegion::GrainBytes);
4705 _out->print_cr(G1PPRL_LINE_PREFIX);
4706 _out->print_cr(G1PPRL_LINE_PREFIX
4707 G1PPRL_TYPE_H_FORMAT
4708 G1PPRL_ADDR_BASE_H_FORMAT
4709 G1PPRL_BYTE_H_FORMAT
4710 G1PPRL_BYTE_H_FORMAT
4711 G1PPRL_BYTE_H_FORMAT
4712 G1PPRL_DOUBLE_H_FORMAT
4713 G1PPRL_BYTE_H_FORMAT
4714 G1PPRL_BYTE_H_FORMAT,
4715 "type", "address-range",
4716 "used", "prev-live", "next-live", "gc-eff",
4717 "remset", "code-roots");
4718 _out->print_cr(G1PPRL_LINE_PREFIX
4719 G1PPRL_TYPE_H_FORMAT
4720 G1PPRL_ADDR_BASE_H_FORMAT
4721 G1PPRL_BYTE_H_FORMAT
4722 G1PPRL_BYTE_H_FORMAT
4723 G1PPRL_BYTE_H_FORMAT
4724 G1PPRL_DOUBLE_H_FORMAT
4725 G1PPRL_BYTE_H_FORMAT
4726 G1PPRL_BYTE_H_FORMAT,
4727 "", "",
4728 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
4729 "(bytes)", "(bytes)");
4730 }
4732 // It takes as a parameter a pointer to one of the _hum_* fields,
4733 // deduces the corresponding value for a region in a humongous region
4734 // series (either the region size, or what's left if the _hum_* field
4735 // is < the region size), and updates the _hum_* field accordingly.
4736 size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) {
4737 size_t bytes = 0;
4738 // The > 0 check is to deal with the prev and next live bytes which
4739 // could be 0.
4740 if (*hum_bytes > 0) {
4741 bytes = MIN2(HeapRegion::GrainBytes, *hum_bytes);
4742 *hum_bytes -= bytes;
4743 }
4744 return bytes;
4745 }
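// Worked example: assuming 1 MB regions (HeapRegion::GrainBytes) and
// a humongous series with 2.5 MB of used bytes, _hum_used_bytes
// starts at 2.5 MB and the three regions of the series report 1 MB,
// 1 MB and 0.5 MB in turn, after which the field is back at zero.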
4747 // Deduces the values for a region in a humongous region series
4748 // from the _hum_* fields and updates those accordingly. It assumes
4749 // that the _hum_* fields have already been set up from the "starts
4750 // humongous" region and that the regions are visited in address order.
4751 void G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* used_bytes,
4752 size_t* capacity_bytes,
4753 size_t* prev_live_bytes,
4754 size_t* next_live_bytes) {
4755 assert(_hum_used_bytes > 0 && _hum_capacity_bytes > 0, "pre-condition");
4756 *used_bytes = get_hum_bytes(&_hum_used_bytes);
4757 *capacity_bytes = get_hum_bytes(&_hum_capacity_bytes);
4758 *prev_live_bytes = get_hum_bytes(&_hum_prev_live_bytes);
4759 *next_live_bytes = get_hum_bytes(&_hum_next_live_bytes);
4760 }
4762 bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
4763 const char* type = "";
4764 HeapWord* bottom = r->bottom();
4765 HeapWord* end = r->end();
4766 size_t capacity_bytes = r->capacity();
4767 size_t used_bytes = r->used();
4768 size_t prev_live_bytes = r->live_bytes();
4769 size_t next_live_bytes = r->next_live_bytes();
4770 double gc_eff = r->gc_efficiency();
4771 size_t remset_bytes = r->rem_set()->mem_size();
4772 size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();
4774 if (r->used() == 0) {
4775 type = "FREE";
4776 } else if (r->is_survivor()) {
4777 type = "SURV";
4778 } else if (r->is_young()) {
4779 type = "EDEN";
4780 } else if (r->startsHumongous()) {
4781 type = "HUMS";
4783 assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 &&
4784 _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0,
4785 "they should have been zeroed after the last time we used them");
4786 // Set up the _hum_* fields.
4787 _hum_capacity_bytes = capacity_bytes;
4788 _hum_used_bytes = used_bytes;
4789 _hum_prev_live_bytes = prev_live_bytes;
4790 _hum_next_live_bytes = next_live_bytes;
4791 get_hum_bytes(&used_bytes, &capacity_bytes,
4792 &prev_live_bytes, &next_live_bytes);
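    // For a "starts humongous" region, r->end() extends beyond the
    // first region (see the invariant asserted in the
    // continuesHumongous() branch below), so the next statement clamps
    // the printed address range to a single region's worth.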
4793 end = bottom + HeapRegion::GrainWords;
4794 } else if (r->continuesHumongous()) {
4795 type = "HUMC";
4796 get_hum_bytes(&used_bytes, &capacity_bytes,
4797 &prev_live_bytes, &next_live_bytes);
4798 assert(end == bottom + HeapRegion::GrainWords, "invariant");
4799 } else {
4800 type = "OLD";
4801 }
4803 _total_used_bytes += used_bytes;
4804 _total_capacity_bytes += capacity_bytes;
4805 _total_prev_live_bytes += prev_live_bytes;
4806 _total_next_live_bytes += next_live_bytes;
4807 _total_remset_bytes += remset_bytes;
4808 _total_strong_code_roots_bytes += strong_code_roots_bytes;
4810 // Print a line for this particular region.
4811 _out->print_cr(G1PPRL_LINE_PREFIX
4812 G1PPRL_TYPE_FORMAT
4813 G1PPRL_ADDR_BASE_FORMAT
4814 G1PPRL_BYTE_FORMAT
4815 G1PPRL_BYTE_FORMAT
4816 G1PPRL_BYTE_FORMAT
4817 G1PPRL_DOUBLE_FORMAT
4818 G1PPRL_BYTE_FORMAT
4819 G1PPRL_BYTE_FORMAT,
4820 type, p2i(bottom), p2i(end),
4821 used_bytes, prev_live_bytes, next_live_bytes, gc_eff,
4822 remset_bytes, strong_code_roots_bytes);
4824 return false;
4825 }
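// A minimal sketch of how this closure is typically driven (the call
// site below is illustrative, not reproduced from this file):
//
//   G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking");
//   G1CollectedHeap::heap()->heap_region_iterate(&cl);
//
// Returning false from doHeapRegion() lets heap_region_iterate()
// continue with the remaining regions; returning true would terminate
// the iteration early.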
4827 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
4828 // Add the static memory usage to the total remembered set size.
4829 _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size();
4830 // Print the footer of the output.
4831 _out->print_cr(G1PPRL_LINE_PREFIX);
4832 _out->print_cr(G1PPRL_LINE_PREFIX
4833 " SUMMARY"
4834 G1PPRL_SUM_MB_FORMAT("capacity")
4835 G1PPRL_SUM_MB_PERC_FORMAT("used")
4836 G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
4837 G1PPRL_SUM_MB_PERC_FORMAT("next-live")
4838 G1PPRL_SUM_MB_FORMAT("remset")
4839 G1PPRL_SUM_MB_FORMAT("code-roots"),
4840 bytes_to_mb(_total_capacity_bytes),
4841 bytes_to_mb(_total_used_bytes),
4842 perc(_total_used_bytes, _total_capacity_bytes),
4843 bytes_to_mb(_total_prev_live_bytes),
4844 perc(_total_prev_live_bytes, _total_capacity_bytes),
4845 bytes_to_mb(_total_next_live_bytes),
4846 perc(_total_next_live_bytes, _total_capacity_bytes),
4847 bytes_to_mb(_total_remset_bytes),
4848 bytes_to_mb(_total_strong_code_roots_bytes));
4849 _out->cr();
4850 }
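// For illustration, the footer produced by the destructor looks
// something like this (all values hypothetical):
//   ###
//   ### SUMMARY capacity: 256.00 MB used: 120.00 MB / 46.88 %
//       prev-live: 80.00 MB / 31.25 % next-live: 64.00 MB / 25.00 %
//       remset: 4.00 MB code-roots: 0.50 MB
// (in the real output the summary is printed as a single line)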