Tue, 29 Apr 2014 09:33:20 +0200
7132678: G1: verify that the marking bitmaps have no marks for objects over TAMS
Reviewed-by: jmasa, tschatzl, ehelin
Contributed-by: tony.printezis@oracle.com, bengt.rutisson@oracle.com
/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/g1/concurrentMark.inline.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "memory/allocation.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "services/memTracker.hpp"

// Concurrent marking bit map wrapper
CMBitMapRO::CMBitMapRO(int shifter) :
  _bm(),
  _shifter(shifter) {
  _bmStartWord = 0;
  _bmWordSize = 0;
}
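
// Note (descriptive, not from the original): each bit in the map covers
// (1 << _shifter) heap words; the marking bitmaps are created further down
// with a shifter of log2(MinObjAlignment), which with 8-byte object
// alignment on a 64-bit VM works out to one bit per heap word. The
// heapWordToOffset()/offsetToHeapWord() helpers used below (defined in the
// header) translate between addresses and bit offsets by this shift.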
HeapWord* CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
                                               const HeapWord* limit) const {
  // First we must round addr *up* to a possible object boundary.
  addr = (HeapWord*)align_size_up((intptr_t)addr,
                                  HeapWordSize << _shifter);
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_one postcondition");
  assert(nextAddr == limit || isMarked(nextAddr),
         "get_next_one postcondition");
  return nextAddr;
}
HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(const HeapWord* addr,
                                                 const HeapWord* limit) const {
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_zero_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_zero postcondition");
  assert(nextAddr == limit || !isMarked(nextAddr),
         "get_next_zero postcondition");
  return nextAddr;
}
int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const {
  assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
  return (int) (diff >> _shifter);
}
#ifndef PRODUCT
bool CMBitMapRO::covers(ReservedSpace heap_rs) const {
  // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
  assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
         "size inconsistency");
  return _bmStartWord == (HeapWord*)(heap_rs.base()) &&
         _bmWordSize == heap_rs.size()>>LogHeapWordSize;
}
#endif

void CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
  _bm.print_on_error(st, prefix);
}
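
// Sizing note for allocate() below (descriptive, not from the original):
// the map needs one bit per (1 << _shifter) heap words, i.e.
// _bmWordSize >> _shifter bits, which is
// _bmWordSize >> (_shifter + LogBitsPerByte) bytes of backing store;
// the "+ 1" rounds the byte count up so a partial trailing byte is
// still covered.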
bool CMBitMap::allocate(ReservedSpace heap_rs) {
  _bmStartWord = (HeapWord*)(heap_rs.base());
  _bmWordSize = heap_rs.size()/HeapWordSize;    // heap_rs.size() is in bytes
  ReservedSpace brs(ReservedSpace::allocation_align_size_up(
                    (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
  if (!brs.is_reserved()) {
    warning("ConcurrentMark marking bit map allocation failure");
    return false;
  }
  MemTracker::record_virtual_memory_type((address)brs.base(), mtGC);
  // For now we'll just commit all of the bit map up front.
  // Later on we'll try to be more parsimonious with swap.
  if (!_virtual_space.initialize(brs, brs.size())) {
    warning("ConcurrentMark marking bit map backing store failure");
    return false;
  }
  assert(_virtual_space.committed_size() == brs.size(),
         "didn't reserve backing store for all of concurrent marking bit map?");
  _bm.set_map((BitMap::bm_word_t*)_virtual_space.low());
  assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
         _bmWordSize, "inconsistency in bit map sizing");
  _bm.set_size(_bmWordSize >> _shifter);
  return true;
}
void CMBitMap::clearAll() {
  _bm.clear();
  return;
}
void CMBitMap::markRange(MemRegion mr) {
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  assert((offsetToHeapWord(heapWordToOffset(mr.end())) ==
          ((HeapWord *) mr.end())),
         "markRange memory region end is not card aligned");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), true);
}

void CMBitMap::clearRange(MemRegion mr) {
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), false);
}
MemRegion CMBitMap::getAndClearMarkedRegion(HeapWord* addr,
                                            HeapWord* end_addr) {
  HeapWord* start = getNextMarkedWordAddress(addr);
  start = MIN2(start, end_addr);
  HeapWord* end   = getNextUnmarkedWordAddress(start);
  end = MIN2(end, end_addr);
  assert(start <= end, "Consistency check");
  MemRegion mr(start, end);
  if (!mr.is_empty()) {
    clearRange(mr);
  }
  return mr;
}
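
// Usage sketch (descriptive, not from the original): the method returns the
// first maximal run of marked words in [addr, end_addr) and clears it,
// yielding an empty MemRegion once no marks remain, so a caller can consume
// all marked ranges like this ('process' is a hypothetical callback):
//
//   MemRegion mr = bm->getAndClearMarkedRegion(bottom, top);
//   while (!mr.is_empty()) {
//     process(mr);
//     mr = bm->getAndClearMarkedRegion(mr.end(), top);
//   }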
CMMarkStack::CMMarkStack(ConcurrentMark* cm) :
  _base(NULL), _cm(cm)
#ifdef ASSERT
  , _drain_in_progress(false)
  , _drain_in_progress_yields(false)
#endif
{}
bool CMMarkStack::allocate(size_t capacity) {
  // allocate a stack of the requisite depth
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop)));
  if (!rs.is_reserved()) {
    warning("ConcurrentMark MarkStack allocation failure");
    return false;
  }
  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
  if (!_virtual_space.initialize(rs, rs.size())) {
    warning("ConcurrentMark MarkStack backing store failure");
    // Release the virtual memory reserved for the marking stack
    rs.release();
    return false;
  }
  assert(_virtual_space.committed_size() == rs.size(),
         "Didn't reserve backing store for all of ConcurrentMark stack?");
  _base = (oop*) _virtual_space.low();
  setEmpty();
  _capacity = (jint) capacity;
  _saved_index = -1;
  _should_expand = false;
  NOT_PRODUCT(_max_depth = 0);
  return true;
}
void CMMarkStack::expand() {
  // Called, during remark, if we've overflowed the marking stack during marking.
  assert(isEmpty(), "stack should have been emptied while handling overflow");
  assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted");
  // Clear expansion flag
  _should_expand = false;
  if (_capacity == (jint) MarkStackSizeMax) {
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr(" (benign) Can't expand marking stack capacity, at max size limit");
    }
    return;
  }
  // Double capacity if possible
  jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax);
  // Do not give up existing stack until we have managed to
  // get the double capacity that we desired.
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity *
                                                           sizeof(oop)));
  if (rs.is_reserved()) {
    // Release the backing store associated with old stack
    _virtual_space.release();
    // Reinitialize virtual space for new stack
    if (!_virtual_space.initialize(rs, rs.size())) {
      fatal("Not enough swap for expanded marking stack capacity");
    }
    _base = (oop*)(_virtual_space.low());
    _index = 0;
    _capacity = new_capacity;
  } else {
    if (PrintGCDetails && Verbose) {
      // Failed to double capacity, continue;
      gclog_or_tty->print(" (benign) Failed to expand marking stack capacity from "
                          SIZE_FORMAT"K to " SIZE_FORMAT"K",
                          _capacity / K, new_capacity / K);
    }
  }
}
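
// Note (descriptive, not from the original): expand() can safely discard the
// old backing store before re-reserving because it is only called once the
// stack has been drained -- the isEmpty() assert on entry guarantees there
// is nothing to copy over to the new, larger stack.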
void CMMarkStack::set_should_expand() {
  // If we're resetting the marking state because of a
  // marking stack overflow, record that we should, if
  // possible, expand the stack.
  _should_expand = _cm->has_overflown();
}
CMMarkStack::~CMMarkStack() {
  if (_base != NULL) {
    _base = NULL;
    _virtual_space.release();
  }
}
void CMMarkStack::par_push(oop ptr) {
  while (true) {
    if (isFull()) {
      _overflow = true;
      return;
    }
    // Otherwise...
    jint index = _index;
    jint next_index = index+1;
    jint res = Atomic::cmpxchg(next_index, &_index, index);
    if (res == index) {
      _base[index] = ptr;
      // Note that we don't maintain this atomically. We could, but it
      // doesn't seem necessary.
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
      return;
    }
    // Otherwise, we need to try again.
  }
}
void CMMarkStack::par_adjoin_arr(oop* ptr_arr, int n) {
  while (true) {
    if (isFull()) {
      _overflow = true;
      return;
    }
    // Otherwise...
    jint index = _index;
    jint next_index = index + n;
    if (next_index > _capacity) {
      _overflow = true;
      return;
    }
    jint res = Atomic::cmpxchg(next_index, &_index, index);
    if (res == index) {
      for (int i = 0; i < n; i++) {
        int ind = index + i;
        assert(ind < _capacity, "By overflow test above.");
        _base[ind] = ptr_arr[i];
      }
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
      return;
    }
    // Otherwise, we need to try again.
  }
}
void CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint start = _index;
  jint next_index = start + n;
  if (next_index > _capacity) {
    _overflow = true;
    return;
  }
  // Otherwise.
  _index = next_index;
  for (int i = 0; i < n; i++) {
    int ind = start + i;
    assert(ind < _capacity, "By overflow test above.");
    _base[ind] = ptr_arr[i];
  }
  NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
}
bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint index = _index;
  if (index == 0) {
    *n = 0;
    return false;
  } else {
    int k = MIN2(max, index);
    jint new_ind = index - k;
    for (int j = 0; j < k; j++) {
      ptr_arr[j] = _base[new_ind + j];
    }
    _index = new_ind;
    *n = k;
    return true;
  }
}
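
// Note (descriptive, not from the original): the stack intentionally mixes
// two protocols -- par_push()/par_adjoin_arr() reserve slots lock-free with
// a CAS on _index, while par_push_arr()/par_pop_arr() serialize on
// ParGCRareEvent_lock. The lock's name suggests the bulk transfers between
// task-local queues and this global stack are expected to be rare enough
// that contention on it does not matter.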
template<class OopClosureClass>
bool CMMarkStack::drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after) {
  assert(!_drain_in_progress || !_drain_in_progress_yields || yield_after
         || SafepointSynchronize::is_at_safepoint(),
         "Drain recursion must be yield-safe.");
  bool res = true;
  debug_only(_drain_in_progress = true);
  debug_only(_drain_in_progress_yields = yield_after);
  while (!isEmpty()) {
    oop newOop = pop();
    assert(G1CollectedHeap::heap()->is_in_reserved(newOop), "Bad pop");
    assert(newOop->is_oop(), "Expected an oop");
    assert(bm == NULL || bm->isMarked((HeapWord*)newOop),
           "only grey objects on this stack");
    newOop->oop_iterate(cl);
    if (yield_after && _cm->do_yield_check()) {
      res = false;
      break;
    }
  }
  debug_only(_drain_in_progress = false);
  return res;
}
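
// Note (descriptive, not from the original): drain() pops grey objects and
// applies the closure to each. It returns false only when yield_after is
// set and a yield request interrupted the drain before the stack was empty,
// so callers passing yield_after == true must be prepared to resume
// draining later.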
void CMMarkStack::note_start_of_gc() {
  assert(_saved_index == -1,
         "note_start_of_gc()/end_of_gc() bracketed incorrectly");
  _saved_index = _index;
}
void CMMarkStack::note_end_of_gc() {
  // This is intentionally a guarantee, instead of an assert. If we
  // accidentally add something to the mark stack during GC, it
  // will be a correctness issue so it's better if we crash. We'll
  // only check this once per GC anyway, so it won't be a performance
  // issue in any way.
  guarantee(_saved_index == _index,
            err_msg("saved index: %d index: %d", _saved_index, _index));
  _saved_index = -1;
}
void CMMarkStack::oops_do(OopClosure* f) {
  assert(_saved_index == _index,
         err_msg("saved index: %d index: %d", _saved_index, _index));
  for (int i = 0; i < _index; i += 1) {
    f->do_oop(&_base[i]);
  }
}
bool ConcurrentMark::not_yet_marked(oop obj) const {
  return _g1h->is_obj_ill(obj);
}
CMRootRegions::CMRootRegions() :
  _young_list(NULL), _cm(NULL), _scan_in_progress(false),
  _should_abort(false), _next_survivor(NULL) { }

void CMRootRegions::init(G1CollectedHeap* g1h, ConcurrentMark* cm) {
  _young_list = g1h->young_list();
  _cm = cm;
}

void CMRootRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  assert(_next_survivor == NULL, "pre-condition");
  _next_survivor = _young_list->first_survivor_region();
  _scan_in_progress = (_next_survivor != NULL);
  _should_abort = false;
}
HeapRegion* CMRootRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  // Currently, only survivors can be root regions.
  HeapRegion* res = _next_survivor;
  if (res != NULL) {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    // Read it again in case it changed while we were waiting for the lock.
    res = _next_survivor;
    if (res != NULL) {
      if (res == _young_list->last_survivor_region()) {
        // We just claimed the last survivor so store NULL to indicate
        // that we're done.
        _next_survivor = NULL;
      } else {
        _next_survivor = res->get_next_young_region();
      }
    } else {
      // Someone else claimed the last survivor while we were trying
      // to take the lock so nothing else to do.
    }
  }
  assert(res == NULL || res->is_survivor(), "post-condition");

  return res;
}
void CMRootRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  if (!_should_abort) {
    assert(_next_survivor == NULL, "we should have claimed all survivors");
  }
  _next_survivor = NULL;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    _scan_in_progress = false;
    RootRegionScan_lock->notify_all();
  }
}
bool CMRootRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) return false;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    while (scan_in_progress()) {
      RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
  return true;
}
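
// Note (descriptive, not from the original): the root region protocol above
// works as follows -- prepare_for_scan() publishes the survivor list,
// marking workers pull regions one at a time via claim_next() (a
// double-checked read of _next_survivor under RootRegionScan_lock), and
// scan_finished() wakes any thread blocked in wait_until_scan_finished(),
// so that an evacuation pause can avoid running while root regions are
// still being scanned.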
#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
  return MAX2((n_par_threads + 2) / 4, 1U);
}
ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) :
  _g1h(g1h),
  _markBitMap1(log2_intptr(MinObjAlignment)),
  _markBitMap2(log2_intptr(MinObjAlignment)),
  _parallel_marking_threads(0),
  _max_parallel_marking_threads(0),
  _sleep_factor(0.0),
  _marking_task_overhead(1.0),
  _cleanup_sleep_factor(0.0),
  _cleanup_task_overhead(1.0),
  _cleanup_list("Cleanup List"),
  _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/),
  _card_bm((heap_rs.size() + CardTableModRefBS::card_size - 1) >>
            CardTableModRefBS::card_shift,
            false /* in_resource_area*/),

  _prevMarkBitMap(&_markBitMap1),
  _nextMarkBitMap(&_markBitMap2),

  _markStack(this),
  // _finger set in set_non_marking_state

  _max_worker_id(MAX2((uint)ParallelGCThreads, 1U)),
  // _active_tasks set in set_non_marking_state
  // _tasks set inside the constructor
  _task_queues(new CMTaskQueueSet((int) _max_worker_id)),
  _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),

  _has_overflown(false),
  _concurrent(false),
  _has_aborted(false),
  _aborted_gc_id(GCId::undefined()),
  _restart_for_overflow(false),
  _concurrent_marking_in_progress(false),

  // _verbose_level set below

  _init_times(),
  _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
  _cleanup_times(),
  _total_counting_time(0.0),
  _total_rs_scrub_time(0.0),

  _parallel_workers(NULL),

  _count_card_bitmaps(NULL),
  _count_marked_bytes(NULL),
  _completed_initialization(false) {
  CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
  if (verbose_level < no_verbose) {
    verbose_level = no_verbose;
  }
  if (verbose_level > high_verbose) {
    verbose_level = high_verbose;
  }
  _verbose_level = verbose_level;

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] init, heap start = "PTR_FORMAT", "
                           "heap end = " INTPTR_FORMAT, p2i(_heap_start), p2i(_heap_end));
  }

  if (!_markBitMap1.allocate(heap_rs)) {
    warning("Failed to allocate first CM bit map");
    return;
  }
  if (!_markBitMap2.allocate(heap_rs)) {
    warning("Failed to allocate second CM bit map");
    return;
  }

  // Create & start a ConcurrentMark thread.
  _cmThread = new ConcurrentMarkThread(this);
  assert(cmThread() != NULL, "CM Thread should have been created");
  assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
  if (_cmThread->osthread() == NULL) {
    vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
  }

  assert(CGC_lock != NULL, "Where's the CGC_lock?");
  assert(_markBitMap1.covers(heap_rs), "_markBitMap1 inconsistency");
  assert(_markBitMap2.covers(heap_rs), "_markBitMap2 inconsistency");

  SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
  satb_qs.set_buffer_size(G1SATBBufferSize);

  _root_regions.init(_g1h, this);

  if (ConcGCThreads > ParallelGCThreads) {
    warning("Can't have more ConcGCThreads (" UINTX_FORMAT ") "
            "than ParallelGCThreads (" UINTX_FORMAT ").",
            ConcGCThreads, ParallelGCThreads);
    return;
  }
  if (ParallelGCThreads == 0) {
    // if we are not running with any parallel GC threads we will not
    // spawn any marking threads either
    _parallel_marking_threads = 0;
    _max_parallel_marking_threads = 0;
    _sleep_factor = 0.0;
    _marking_task_overhead = 1.0;
  } else {
    if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) {
      // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent
      // if both are set
      _sleep_factor = 0.0;
      _marking_task_overhead = 1.0;
    } else if (G1MarkingOverheadPercent > 0) {
      // We will calculate the number of parallel marking threads based
      // on a target overhead with respect to the soft real-time goal
      double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
      double overall_cm_overhead =
        (double) MaxGCPauseMillis * marking_overhead /
        (double) GCPauseIntervalMillis;
      double cpu_ratio = 1.0 / (double) os::processor_count();
      double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
      double marking_task_overhead =
        overall_cm_overhead / marking_thread_num *
        (double) os::processor_count();
      double sleep_factor =
        (1.0 - marking_task_overhead) / marking_task_overhead;
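
      // Illustrative example (not from the original): with
      // G1MarkingOverheadPercent=10, MaxGCPauseMillis=200,
      // GCPauseIntervalMillis=1000 and 8 processors:
      //   overall_cm_overhead   = 200 * 0.10 / 1000  = 0.02 (2% of total CPU)
      //   cpu_ratio             = 1/8                = 0.125
      //   marking_thread_num    = ceil(0.02 / 0.125) = 1
      //   marking_task_overhead = 0.02 / 1 * 8       = 0.16
      //   sleep_factor          = (1 - 0.16) / 0.16  = 5.25
      // i.e. one marking thread that sleeps 5.25x as long as it runs.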
      FLAG_SET_ERGO(uintx, ConcGCThreads, (uint) marking_thread_num);
      _sleep_factor = sleep_factor;
      _marking_task_overhead = marking_task_overhead;
    } else {
      // Calculate the number of parallel marking threads by scaling
      // the number of parallel GC threads.
      uint marking_thread_num = scale_parallel_threads((uint) ParallelGCThreads);
      FLAG_SET_ERGO(uintx, ConcGCThreads, marking_thread_num);
      _sleep_factor = 0.0;
      _marking_task_overhead = 1.0;
    }
    assert(ConcGCThreads > 0, "Should have been set");
    _parallel_marking_threads = (uint) ConcGCThreads;
    _max_parallel_marking_threads = _parallel_marking_threads;

    if (parallel_marking_threads() > 1) {
      _cleanup_task_overhead = 1.0;
    } else {
      _cleanup_task_overhead = marking_task_overhead();
    }
    _cleanup_sleep_factor =
      (1.0 - cleanup_task_overhead()) / cleanup_task_overhead();

#if 0
    gclog_or_tty->print_cr("Marking Threads          %d", parallel_marking_threads());
    gclog_or_tty->print_cr("CM Marking Task Overhead %1.4lf", marking_task_overhead());
    gclog_or_tty->print_cr("CM Sleep Factor          %1.4lf", sleep_factor());
    gclog_or_tty->print_cr("CL Marking Task Overhead %1.4lf", cleanup_task_overhead());
    gclog_or_tty->print_cr("CL Sleep Factor          %1.4lf", cleanup_sleep_factor());
#endif

    guarantee(parallel_marking_threads() > 0, "peace of mind");
    _parallel_workers = new FlexibleWorkGang("G1 Parallel Marking Threads",
         _max_parallel_marking_threads, false, true);
    if (_parallel_workers == NULL) {
      vm_exit_during_initialization("Failed necessary allocation.");
    } else {
      _parallel_workers->initialize_workers();
    }
  }
  if (FLAG_IS_DEFAULT(MarkStackSize)) {
    uintx mark_stack_size =
      MIN2(MarkStackSizeMax,
           MAX2(MarkStackSize, (uintx) (parallel_marking_threads() * TASKQUEUE_SIZE)));
    // Verify that the calculated value for MarkStackSize is in range.
    // It would be nice to use the private utility routine from Arguments.
    if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
      warning("Invalid value calculated for MarkStackSize (" UINTX_FORMAT "): "
              "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
              mark_stack_size, (uintx) 1, MarkStackSizeMax);
      return;
    }
    FLAG_SET_ERGO(uintx, MarkStackSize, mark_stack_size);
  } else {
    // Verify MarkStackSize is in range.
    if (FLAG_IS_CMDLINE(MarkStackSize)) {
      if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT "): "
                  "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
                  MarkStackSize, (uintx) 1, MarkStackSizeMax);
          return;
        }
      } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT ")"
                  " or for MarkStackSizeMax (" UINTX_FORMAT ")",
                  MarkStackSize, MarkStackSizeMax);
          return;
        }
      }
    }
  }
  if (!_markStack.allocate(MarkStackSize)) {
    warning("Failed to allocate CM marking stack");
    return;
  }

  _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_worker_id, mtGC);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);

  _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap, _max_worker_id, mtGC);
  _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_worker_id, mtGC);

  BitMap::idx_t card_bm_size = _card_bm.size();

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _active_tasks = _max_worker_id;

  size_t max_regions = (size_t) _g1h->max_regions();
  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* task_queue = new CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _count_card_bitmaps[i] = BitMap(card_bm_size, false);
    _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC);

    _tasks[i] = new CMTask(i, this,
                           _count_marked_bytes[i],
                           &_count_card_bitmaps[i],
                           task_queue, _task_queues);

    _accum_task_vtime[i] = 0.0;
  }

  // Calculate the card number for the bottom of the heap. Used
  // in biasing indexes into the accounting card bitmaps.
  _heap_bottom_card_num =
    intptr_t(uintptr_t(_g1h->reserved_region().start()) >>
             CardTableModRefBS::card_shift);

  // Clear all the liveness counting data
  clear_all_count_data();

  // so that the call below can read a sensible value
  _heap_start = (HeapWord*) heap_rs.base();
  set_non_marking_state();
  _completed_initialization = true;
}
void ConcurrentMark::update_g1_committed(bool force) {
  // If concurrent marking is not in progress, then we do not need to
  // update _heap_end.
  if (!concurrent_marking_in_progress() && !force) return;

  MemRegion committed = _g1h->g1_committed();
  assert(committed.start() == _heap_start, "start shouldn't change");
  HeapWord* new_end = committed.end();
  if (new_end > _heap_end) {
    // The heap has been expanded.

    _heap_end = new_end;
  }
  // Notice that the heap can also shrink. However, this only happens
  // during a Full GC (at least currently) and the entire marking
  // phase will bail out and the task will not be restarted. So, let's
  // do nothing.
}
void ConcurrentMark::reset() {
  // Starting values for these two. This should be called in a STW
  // phase. CM will be notified of any future g1_committed expansions;
  // they will happen at the end of evacuation pauses, when tasks are
  // inactive.
  MemRegion committed = _g1h->g1_committed();
  _heap_start = committed.start();
  _heap_end   = committed.end();

  // Separated the asserts so that we know which one fires.
  assert(_heap_start != NULL, "heap bounds should look ok");
  assert(_heap_end != NULL, "heap bounds should look ok");
  assert(_heap_start < _heap_end, "heap bounds should look ok");

  // Reset all the marking data structures and any necessary flags
  reset_marking_state();

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] resetting");
  }

  // We do reset all of them, since different phases will use
  // different number of active threads. So, it's easiest to have all
  // of them ready.
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->reset(_nextMarkBitMap);
  }

  // we need this to make sure that the flag is on during the evac
  // pause with initial mark piggy-backed
  set_concurrent_marking_in_progress();
}
void ConcurrentMark::reset_marking_state(bool clear_overflow) {
  _markStack.set_should_expand();
  _markStack.setEmpty();        // Also clears the _markStack overflow flag
  if (clear_overflow) {
    clear_has_overflown();
  } else {
    assert(has_overflown(), "pre-condition");
  }
  _finger = _heap_start;

  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* queue = _task_queues->queue(i);
    queue->set_empty();
  }
}
void ConcurrentMark::set_concurrency(uint active_tasks) {
  assert(active_tasks <= _max_worker_id, "we should not have more");

  _active_tasks = active_tasks;
  // Need to update the three data structures below according to the
  // number of active threads for this phase.
  _terminator = ParallelTaskTerminator((int) active_tasks, _task_queues);
  _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
  _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
}
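
// Note (descriptive, not from the original): the terminator and the two
// overflow barriers must all be sized with the same active task count --
// if they disagreed, a barrier enter() would wait for workers that never
// arrive and marking would hang.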
void ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
  set_concurrency(active_tasks);

  _concurrent = concurrent;
  // We propagate this to all tasks, not just the active ones.
  for (uint i = 0; i < _max_worker_id; ++i)
    _tasks[i]->set_concurrent(concurrent);

  if (concurrent) {
    set_concurrent_marking_in_progress();
  } else {
    // We currently assume that the concurrent flag has been set to
    // false before we start remark. At this point we should also be
    // in a STW phase.
    assert(!concurrent_marking_in_progress(), "invariant");
    assert(out_of_regions(),
           err_msg("only way to get here: _finger: "PTR_FORMAT", _heap_end: "PTR_FORMAT,
                   p2i(_finger), p2i(_heap_end)));
    update_g1_committed(true);
  }
}
void ConcurrentMark::set_non_marking_state() {
  // We set the global marking state to some default values when we're
  // not doing marking.
  reset_marking_state();
  _active_tasks = 0;
  clear_concurrent_marking_in_progress();
}

ConcurrentMark::~ConcurrentMark() {
  // The ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}
void ConcurrentMark::clearNextBitmap() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  // Make sure that the concurrent mark thread looks to still be in
  // the current cycle.
  guarantee(cmThread()->during_cycle(), "invariant");

  // We are finishing up the current cycle by clearing the next
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
  guarantee(!g1h->mark_in_progress(), "invariant");

  // clear the mark bitmap (no grey objects to start with).
  // We need to do this in chunks and offer to yield in between
  // each chunk.
  HeapWord* start  = _nextMarkBitMap->startWord();
  HeapWord* end    = _nextMarkBitMap->endWord();
  HeapWord* cur    = start;
  size_t chunkSize = M;
  while (cur < end) {
    HeapWord* next = cur + chunkSize;
    if (next > end) {
      next = end;
    }
    MemRegion mr(cur,next);
    _nextMarkBitMap->clearRange(mr);
    cur = next;
    do_yield_check();

    // Repeat the asserts from above. We'll do them as asserts here to
    // minimize their overhead on the product. However, we'll have
    // them as guarantees at the beginning / end of the bitmap
    // clearing to get some checking in the product.
    assert(cmThread()->during_cycle(), "invariant");
    assert(!g1h->mark_in_progress(), "invariant");
  }

  // Clear the liveness counting data
  clear_all_count_data();

  // Repeat the asserts from above.
  guarantee(cmThread()->during_cycle(), "invariant");
  guarantee(!g1h->mark_in_progress(), "invariant");
}
class NoteStartOfMarkHRClosure: public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    if (!r->continuesHumongous()) {
      r->note_start_of_marking();
    }
    return false;
  }
};
void ConcurrentMark::checkpointRootsInitialPre() {
  G1CollectedHeap*   g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  _has_aborted = false;

#ifndef PRODUCT
  if (G1PrintReachableAtInitialMark) {
    print_reachable("at-cycle-start",
                    VerifyOption_G1UsePrevMarking, true /* all */);
  }
#endif

  // Initialise marking structures. This has to be done in a STW phase.
  reset();

  // For each region note start of marking.
  NoteStartOfMarkHRClosure startcl;
  g1h->heap_region_iterate(&startcl);
}
void ConcurrentMark::checkpointRootsInitialPost() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If we force an overflow during remark, the remark operation will
  // actually abort and we'll restart concurrent marking. If we always
  // force an overflow during remark we'll never actually complete the
  // marking phase. So, we initialize this here, at the start of the
  // cycle, so that the remaining overflow count will decrease at
  // every remark and we'll eventually not need to cause one.
  force_overflow_stw()->init();

  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  // This is the start of the marking cycle, we expect all
  // threads to have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);

  _root_regions.prepare_for_scan();

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on. So, it's also called at the end of the
  // initial-mark pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}
/*
 * Notice that in the next two methods, we actually leave the STS
 * during the barrier sync and join it immediately afterwards. If we
 * do not do this, the following deadlock can occur: one thread could
 * be in the barrier sync code, waiting for the other thread to also
 * sync up, whereas another one could be trying to yield, while also
 * waiting for the other threads to sync up too.
 *
 * Note, however, that this code is also used during remark and in
 * this case we should not attempt to leave / enter the STS, otherwise
 * we'll either hit an assert (debug / fastdebug) or deadlock
 * (product). So we should only leave / enter the STS if we are
 * operating concurrently.
 *
 * Because the thread that does the sync barrier has left the STS, it
 * is possible for a Full GC or an evacuation pause to occur while it
 * is suspended. This is actually safe, since entering the sync
 * barrier is one of the last things do_marking_step() does, and it
 * doesn't manipulate any data structures afterwards.
 */
void ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] entering first barrier", worker_id);
  }

  if (concurrent()) {
    SuspendibleThreadSet::leave();
  }

  bool barrier_aborted = !_first_overflow_barrier_sync.enter();

  if (concurrent()) {
    SuspendibleThreadSet::join();
  }
  // at this point everyone should have synced up and not be doing any
  // more work

  if (verbose_low()) {
    if (barrier_aborted) {
      gclog_or_tty->print_cr("[%u] aborted first barrier", worker_id);
    } else {
      gclog_or_tty->print_cr("[%u] leaving first barrier", worker_id);
    }
  }

  if (barrier_aborted) {
    // If the barrier aborted we ignore the overflow condition and
    // just abort the whole marking phase as quickly as possible.
    return;
  }

  // If we're executing the concurrent phase of marking, reset the marking
  // state; otherwise the marking state is reset after reference processing,
  // during the remark pause.
  // If we reset here as a result of an overflow during the remark we will
  // see assertion failures from any subsequent set_concurrency_and_phase()
  // calls.
  if (concurrent()) {
    // let the task associated with worker 0 do this
    if (worker_id == 0) {
      // task 0 is responsible for clearing the global data structures
      // We should be here because of an overflow. During STW we should
      // not clear the overflow flag since we rely on it being true when
      // we exit this method to abort the pause and restart concurrent
      // marking.
      reset_marking_state(true /* clear_overflow */);
      force_overflow()->update();

      if (G1Log::fine()) {
        gclog_or_tty->gclog_stamp(concurrent_gc_id());
        gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
      }
    }
  }

  // after this, each task should reset its own data structures then
  // go into the second barrier
}
void ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] entering second barrier", worker_id);
  }

  if (concurrent()) {
    SuspendibleThreadSet::leave();
  }

  bool barrier_aborted = !_second_overflow_barrier_sync.enter();

  if (concurrent()) {
    SuspendibleThreadSet::join();
  }
  // at this point everything should be re-initialized and ready to go

  if (verbose_low()) {
    if (barrier_aborted) {
      gclog_or_tty->print_cr("[%u] aborted second barrier", worker_id);
    } else {
      gclog_or_tty->print_cr("[%u] leaving second barrier", worker_id);
    }
  }
}
#ifndef PRODUCT
void ForceOverflowSettings::init() {
  _num_remaining = G1ConcMarkForceOverflow;
  _force = false;
  update();
}

void ForceOverflowSettings::update() {
  if (_num_remaining > 0) {
    _num_remaining -= 1;
    _force = true;
  } else {
    _force = false;
  }
}

bool ForceOverflowSettings::should_force() {
  if (_force) {
    _force = false;
    return true;
  } else {
    return false;
  }
}
#endif // !PRODUCT
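
// Note (descriptive, not from the original): ForceOverflowSettings is a
// non-product testing aid. With -XX:G1ConcMarkForceOverflow=N, the first N
// marking rounds artificially force a mark stack overflow so the
// overflow/restart machinery above gets exercised; _num_remaining counts
// down so marking eventually completes normally.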
class CMConcurrentMarkingTask: public AbstractGangTask {
private:
  ConcurrentMark*       _cm;
  ConcurrentMarkThread* _cmt;

public:
  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");
    ResourceMark rm;

    double start_vtime = os::elapsedVTime();

    SuspendibleThreadSet::join();

    assert(worker_id < _cm->active_tasks(), "invariant");
    CMTask* the_task = _cm->task(worker_id);
    the_task->record_start_time();
    if (!_cm->has_aborted()) {
      do {
        double start_vtime_sec = os::elapsedVTime();
        double start_time_sec = os::elapsedTime();
        double mark_step_duration_ms = G1ConcMarkStepDurationMillis;

        the_task->do_marking_step(mark_step_duration_ms,
                                  true  /* do_termination */,
                                  false /* is_serial*/);

        double end_time_sec = os::elapsedTime();
        double end_vtime_sec = os::elapsedVTime();
        double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
        double elapsed_time_sec = end_time_sec - start_time_sec;
        _cm->clear_has_overflown();

        bool ret = _cm->do_yield_check(worker_id);

        jlong sleep_time_ms;
        if (!_cm->has_aborted() && the_task->has_aborted()) {
          sleep_time_ms =
            (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
          SuspendibleThreadSet::leave();
          os::sleep(Thread::current(), sleep_time_ms, false);
          SuspendibleThreadSet::join();
        }
        double end_time2_sec = os::elapsedTime();
        double elapsed_time2_sec = end_time2_sec - start_time_sec;

#if 0
        gclog_or_tty->print_cr("CM: elapsed %1.4lf ms, sleep %1.4lf ms, "
                               "overhead %1.4lf",
                               elapsed_vtime_sec * 1000.0, (double) sleep_time_ms,
                               the_task->conc_overhead(os::elapsedTime()) * 8.0);
        gclog_or_tty->print_cr("elapsed time %1.4lf ms, time 2: %1.4lf ms",
                               elapsed_time_sec * 1000.0, elapsed_time2_sec * 1000.0);
#endif
      } while (!_cm->has_aborted() && the_task->has_aborted());
    }
    the_task->record_end_time();
    guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");

    SuspendibleThreadSet::leave();

    double end_vtime = os::elapsedVTime();
    _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
  }

  CMConcurrentMarkingTask(ConcurrentMark* cm,
                          ConcurrentMarkThread* cmt) :
      AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }

  ~CMConcurrentMarkingTask() { }
};
// Calculates the number of active workers for a concurrent
// phase.
uint ConcurrentMark::calc_parallel_marking_threads() {
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    uint n_conc_workers = 0;
    if (!UseDynamicNumberOfGCThreads ||
        (!FLAG_IS_DEFAULT(ConcGCThreads) &&
         !ForceDynamicNumberOfGCThreads)) {
      n_conc_workers = max_parallel_marking_threads();
    } else {
      n_conc_workers =
        AdaptiveSizePolicy::calc_default_active_workers(
                                     max_parallel_marking_threads(),
                                     1, /* Minimum workers */
                                     parallel_marking_threads(),
                                     Threads::number_of_non_daemon_threads());
      // Don't scale down "n_conc_workers" by scale_parallel_threads() because
      // that scaling has already gone into "_max_parallel_marking_threads".
    }
    assert(n_conc_workers > 0, "Always need at least 1");
    return n_conc_workers;
  }
  // If we are not running with any parallel GC threads we will not
  // have spawned any marking threads either. Hence the number of
  // concurrent workers should be 0.
  return 0;
}
void ConcurrentMark::scanRootRegion(HeapRegion* hr, uint worker_id) {
  // Currently, only survivors can be root regions.
  assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
  G1RootRegionScanClosure cl(_g1h, this, worker_id);

  const uintx interval = PrefetchScanIntervalInBytes;
  HeapWord* curr = hr->bottom();
  const HeapWord* end = hr->top();
  while (curr < end) {
    Prefetch::read(curr, interval);
    oop obj = oop(curr);
    int size = obj->oop_iterate(&cl);
    assert(size == obj->size(), "sanity");
    curr += size;
  }
}
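
// Note (descriptive, not from the original): a root region is walked object
// by object from bottom to top. Since NTAMS == bottom for these regions
// (asserted above), all of their objects are implicitly live and will never
// be pushed on the mark stack themselves, so this scan is what greys the
// objects they reference.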
class CMRootRegionScanTask : public AbstractGangTask {
private:
  ConcurrentMark* _cm;

public:
  CMRootRegionScanTask(ConcurrentMark* cm) :
    AbstractGangTask("Root Region Scan"), _cm(cm) { }

  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");

    CMRootRegions* root_regions = _cm->root_regions();
    HeapRegion* hr = root_regions->claim_next();
    while (hr != NULL) {
      _cm->scanRootRegion(hr, worker_id);
      hr = root_regions->claim_next();
    }
  }
};
void ConcurrentMark::scanRootRegions() {
  // Start of concurrent marking.
  ClassLoaderDataGraph::clear_claimed_marks();

  // scan_in_progress() will have been set to true only if there was
  // at least one root region to scan. So, if it's false, we
  // should not attempt to do any further work.
  if (root_regions()->scan_in_progress()) {
    _parallel_marking_threads = calc_parallel_marking_threads();
    assert(parallel_marking_threads() <= max_parallel_marking_threads(),
           "Maximum number of marking threads exceeded");
    uint active_workers = MAX2(1U, parallel_marking_threads());

    CMRootRegionScanTask task(this);
    if (use_parallel_marking_threads()) {
      _parallel_workers->set_active_workers((int) active_workers);
      _parallel_workers->run_task(&task);
    } else {
      task.work(0);
    }

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
    root_regions()->scan_finished();
  }
}
void ConcurrentMark::markFromRoots() {
  // we might be tempted to assert that:
  // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
  //        "inconsistent argument?");
  // However that wouldn't be right, because it's possible that
  // a safepoint is indeed in progress as a younger generation
  // stop-the-world GC happens even as we mark in this generation.

  _restart_for_overflow = false;
  force_overflow_conc()->init();

  // _g1h has _n_par_threads
  _parallel_marking_threads = calc_parallel_marking_threads();
  assert(parallel_marking_threads() <= max_parallel_marking_threads(),
         "Maximum number of marking threads exceeded");

  uint active_workers = MAX2(1U, parallel_marking_threads());

  // Parallel task terminator is set in "set_concurrency_and_phase()"
  set_concurrency_and_phase(active_workers, true /* concurrent */);

  CMConcurrentMarkingTask markingTask(this, cmThread());
  if (use_parallel_marking_threads()) {
    _parallel_workers->set_active_workers((int)active_workers);
    // Don't set _n_par_threads because it affects MT in process_roots()
    // and the decisions on that MT processing are made elsewhere.
    assert(_parallel_workers->active_workers() > 0, "Should have been set");
    _parallel_workers->run_task(&markingTask);
  } else {
    markingTask.work(0);
  }
  print_stats();
}
void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->set_marking_complete(); // So bitmap clearing isn't confused
    return;
  }

  SvcGCMarker sgcm(SvcGCMarker::OTHER);

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    Universe::heap()->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking,
                     " VerifyDuringGC:(before)");
  }
  g1h->check_bitmaps("Remark Start");

  G1CollectorPolicy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_remark_start();

  double start = os::elapsedTime();

  checkpointRootsFinalWork();

  double mark_work_end = os::elapsedTime();

  weakRefsWork(clear_all_soft_refs);

  if (has_overflown()) {
    // Oops. We overflowed. Restart concurrent marking.
    _restart_for_overflow = true;
    if (G1TraceMarkStackOverflow) {
      gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
    }

    // Verify the heap w.r.t. the previous marking bitmap.
    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      Universe::heap()->prepare_for_verify();
      Universe::verify(VerifyOption_G1UsePrevMarking,
                       " VerifyDuringGC:(overflow)");
    }

    // Clear the marking state because we will be restarting
    // marking due to overflowing the global mark stack.
    reset_marking_state();
  } else {
    // Aggregate the per-task counting data that we have accumulated
    // while marking.
    aggregate_count_data();

    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
    // We're done with marking.
    // This is the end of the marking cycle, we expect all
    // threads to have SATB queues with active set to true.
    satb_mq_set.set_active_all_threads(false, /* new active value */
                                       true /* expected_active */);

    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      Universe::heap()->prepare_for_verify();
      Universe::verify(VerifyOption_G1UseNextMarking,
                       " VerifyDuringGC:(after)");
    }
    g1h->check_bitmaps("Remark End");
    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed
    set_non_marking_state();
  }

  // Expand the marking stack, if we have to and if we can.
  if (_markStack.should_expand()) {
    _markStack.expand();
  }

  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);

  g1p->record_concurrent_mark_remark_end();

  G1CMIsAliveClosure is_alive(g1h);
  g1h->gc_tracer_cm()->report_object_count_after_gc(&is_alive);
}
// Base class of the closures that finalize and verify the
// liveness counting data.
class CMCountDataClosureBase: public HeapRegionClosure {
protected:
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CardTableModRefBS* _ct_bs;

  BitMap* _region_bm;
  BitMap* _card_bm;

  // Takes a region that's not empty (i.e., it has at least one
  // live object in it) and sets its corresponding bit on the region
  // bitmap to 1. If the region is "starts humongous" it will also set
  // to 1 the bits on the region bitmap that correspond to its
  // associated "continues humongous" regions.
  void set_bit_for_region(HeapRegion* hr) {
    assert(!hr->continuesHumongous(), "should have filtered those out");

    BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();
    if (!hr->startsHumongous()) {
      // Normal (non-humongous) case: just set the bit.
      _region_bm->par_at_put(index, true);
    } else {
      // Starts humongous case: calculate how many regions are part of
      // this humongous region and then set the bit range.
      BitMap::idx_t end_index = (BitMap::idx_t) hr->last_hc_index();
      _region_bm->par_at_put_range(index, end_index, true);
    }
  }

public:
  CMCountDataClosureBase(G1CollectedHeap* g1h,
                         BitMap* region_bm, BitMap* card_bm):
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _ct_bs((CardTableModRefBS*) (g1h->barrier_set())),
    _region_bm(region_bm), _card_bm(card_bm) { }
};
// Closure that calculates the # live objects per region. Used
// for verification purposes during the cleanup pause.
class CalcLiveObjectsClosure: public CMCountDataClosureBase {
  CMBitMapRO* _bm;
  size_t _region_marked_bytes;

public:
  CalcLiveObjectsClosure(CMBitMapRO *bm, G1CollectedHeap* g1h,
                         BitMap* region_bm, BitMap* card_bm) :
    CMCountDataClosureBase(g1h, region_bm, card_bm),
    _bm(bm), _region_marked_bytes(0) { }

  bool doHeapRegion(HeapRegion* hr) {

    if (hr->continuesHumongous()) {
      // We will ignore these here and process them when their
      // associated "starts humongous" region is processed (see
      // set_bit_for_heap_region()). Note that we cannot rely on their
      // associated "starts humongous" region to have their bit set to
      // 1 since, due to the region chunking in the parallel region
      // iteration, a "continues humongous" region might be visited
      // before its associated "starts humongous".
      return false;
    }

    HeapWord* ntams = hr->next_top_at_mark_start();
    HeapWord* start = hr->bottom();

    assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
           err_msg("Preconditions not met - "
                   "start: "PTR_FORMAT", ntams: "PTR_FORMAT", end: "PTR_FORMAT,
                   p2i(start), p2i(ntams), p2i(hr->end())));

    // Find the first marked object at or after "start".
    start = _bm->getNextMarkedWordAddress(start, ntams);

    size_t marked_bytes = 0;

    while (start < ntams) {
      oop obj = oop(start);
      int obj_sz = obj->size();
      HeapWord* obj_end = start + obj_sz;

      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end);

      // Note: if we're looking at the last region in heap - obj_end
      // could be actually just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
      if (_g1h->is_in_g1_reserved(obj_end) && !_ct_bs->is_card_aligned(obj_end)) {
        // end of object is not card aligned - increment to cover
        // all the cards spanned by the object
        end_idx += 1;
      }

      // Set the bits in the card BM for the cards spanned by this object.
      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);

      // Add the size of this object to the number of marked bytes.
      marked_bytes += (size_t)obj_sz * HeapWordSize;

      // Find the next marked object after this one.
      start = _bm->getNextMarkedWordAddress(obj_end, ntams);
    }

    // Mark the allocated-since-marking portion...
    HeapWord* top = hr->top();
    if (ntams < top) {
      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);

      // Note: if we're looking at the last region in heap - top
      // could be actually just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
      if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
        // end of object is not card aligned - increment to cover
        // all the cards spanned by the object
        end_idx += 1;
      }
      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);

      // This definitely means the region has live objects.
      set_bit_for_region(hr);
    }

    // Update the live region bitmap.
    if (marked_bytes > 0) {
      set_bit_for_region(hr);
    }

    // Set the marked bytes for the current region so that
    // it can be queried by a calling verification routine
    _region_marked_bytes = marked_bytes;

    return false;
  }

  size_t region_marked_bytes() const { return _region_marked_bytes; }
};
1522 // Heap region closure used for verifying the counting data
1523 // that was accumulated concurrently and aggregated during
1524 // the remark pause. This closure is applied to the heap
1525 // regions during the STW cleanup pause.
1527 class VerifyLiveObjectDataHRClosure: public HeapRegionClosure {
1528 G1CollectedHeap* _g1h;
1529 ConcurrentMark* _cm;
1530 CalcLiveObjectsClosure _calc_cl;
1531 BitMap* _region_bm; // Region BM to be verified
1532 BitMap* _card_bm; // Card BM to be verified
1533 bool _verbose; // verbose output?
1535 BitMap* _exp_region_bm; // Expected Region BM values
1536 BitMap* _exp_card_bm; // Expected card BM values
1538 int _failures;
1540 public:
1541 VerifyLiveObjectDataHRClosure(G1CollectedHeap* g1h,
1542 BitMap* region_bm,
1543 BitMap* card_bm,
1544 BitMap* exp_region_bm,
1545 BitMap* exp_card_bm,
1546 bool verbose) :
1547 _g1h(g1h), _cm(g1h->concurrent_mark()),
1548 _calc_cl(_cm->nextMarkBitMap(), g1h, exp_region_bm, exp_card_bm),
1549 _region_bm(region_bm), _card_bm(card_bm), _verbose(verbose),
1550 _exp_region_bm(exp_region_bm), _exp_card_bm(exp_card_bm),
1551 _failures(0) { }
1553 int failures() const { return _failures; }
1555 bool doHeapRegion(HeapRegion* hr) {
1556 if (hr->continuesHumongous()) {
1557 // We will ignore these here and process them when their
1558 // associated "starts humongous" region is processed (see
1559 // set_bit_for_heap_region()). Note that we cannot rely on their
1560 // associated "starts humongous" region to have their bit set to
1561 // 1 since, due to the region chunking in the parallel region
1562 // iteration, a "continues humongous" region might be visited
1563 // before its associated "starts humongous".
1564 return false;
1565 }
1567 int failures = 0;
1569 // Call the CalcLiveObjectsClosure to walk the marking bitmap for
1570 // this region and set the corresponding bits in the expected region
1571 // and card bitmaps.
1572 bool res = _calc_cl.doHeapRegion(hr);
1573 assert(res == false, "should be continuing");
1575 MutexLockerEx x((_verbose ? ParGCRareEvent_lock : NULL),
1576 Mutex::_no_safepoint_check_flag);
1578 // Verify the marked bytes for this region.
1579 size_t exp_marked_bytes = _calc_cl.region_marked_bytes();
1580 size_t act_marked_bytes = hr->next_marked_bytes();
1582 // We're not OK if expected marked bytes > actual marked bytes. It means
1583 // we have missed accounting some objects during the actual marking.
1584 if (exp_marked_bytes > act_marked_bytes) {
1585 if (_verbose) {
1586 gclog_or_tty->print_cr("Region %u: marked bytes mismatch: "
1587 "expected: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
1588 hr->hrs_index(), exp_marked_bytes, act_marked_bytes);
1589 }
1590 failures += 1;
1591 }
1593 // Verify the bit, for this region, in the actual and expected
1594 // (which was just calculated) region bit maps.
1595 // We're not OK if the bit in the calculated expected region
1596 // bitmap is set and the bit in the actual region bitmap is not.
1597 BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();
1599 bool expected = _exp_region_bm->at(index);
1600 bool actual = _region_bm->at(index);
1601 if (expected && !actual) {
1602 if (_verbose) {
1603 gclog_or_tty->print_cr("Region %u: region bitmap mismatch: "
1604 "expected: %s, actual: %s",
1605 hr->hrs_index(),
1606 BOOL_TO_STR(expected), BOOL_TO_STR(actual));
1607 }
1608 failures += 1;
1609 }
1611 // Verify that the card bit maps for the cards spanned by the current
1612 // region match. We have an error if we have a set bit in the expected
1613 // bit map and the corresponding bit in the actual bitmap is not set.
1615 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(hr->bottom());
1616 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(hr->top());
1618 for (BitMap::idx_t i = start_idx; i < end_idx; i += 1) {
1619 expected = _exp_card_bm->at(i);
1620 actual = _card_bm->at(i);
1622 if (expected && !actual) {
1623 if (_verbose) {
1624 gclog_or_tty->print_cr("Region %u: card bitmap mismatch at " SIZE_FORMAT ": "
1625 "expected: %s, actual: %s",
1626 hr->hrs_index(), i,
1627 BOOL_TO_STR(expected), BOOL_TO_STR(actual));
1628 }
1629 failures += 1;
1630 }
1631 }
1633 if (failures > 0 && _verbose) {
1634 gclog_or_tty->print_cr("Region " HR_FORMAT ", ntams: " PTR_FORMAT ", "
1635 "marked_bytes: calc/actual " SIZE_FORMAT "/" SIZE_FORMAT,
1636 HR_FORMAT_PARAMS(hr), p2i(hr->next_top_at_mark_start()),
1637 _calc_cl.region_marked_bytes(), hr->next_marked_bytes());
1638 }
1640 _failures += failures;
1642 // We could stop iteration over the heap when we
1643 // find the first violating region by returning true.
1644 return false;
1645 }
1646 };
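// A minimal, self-contained model of the subset check performed by
// VerifyLiveObjectDataHRClosure above: every bit set in the expected
// (recomputed) bitmap must also be set in the actual bitmap, while extra
// actual bits are tolerated. This sketch uses plain 64-bit words rather
// than the VM's BitMap class; all names here are illustrative only.
#if 0 // illustrative sketch -- never compiled
#include <cstdint>
#include <cstddef>

// Returns the number of positions where 'expected' has a set bit but
// 'actual' does not, over 'nwords' 64-bit words.
static size_t count_subset_violations(const uint64_t* expected,
                                      const uint64_t* actual,
                                      size_t nwords) {
  size_t failures = 0;
  for (size_t w = 0; w < nwords; w++) {
    uint64_t missing = expected[w] & ~actual[w]; // set in expected, clear in actual
    while (missing != 0) {
      failures++;
      missing &= missing - 1; // clear the lowest set bit (Kernighan's trick)
    }
  }
  return failures;
}
#endif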
1648 class G1ParVerifyFinalCountTask: public AbstractGangTask {
1649 protected:
1650 G1CollectedHeap* _g1h;
1651 ConcurrentMark* _cm;
1652 BitMap* _actual_region_bm;
1653 BitMap* _actual_card_bm;
1655 uint _n_workers;
1657 BitMap* _expected_region_bm;
1658 BitMap* _expected_card_bm;
1660 int _failures;
1661 bool _verbose;
1663 public:
1664 G1ParVerifyFinalCountTask(G1CollectedHeap* g1h,
1665 BitMap* region_bm, BitMap* card_bm,
1666 BitMap* expected_region_bm, BitMap* expected_card_bm)
1667 : AbstractGangTask("G1 verify final counting"),
1668 _g1h(g1h), _cm(_g1h->concurrent_mark()),
1669 _actual_region_bm(region_bm), _actual_card_bm(card_bm),
1670 _expected_region_bm(expected_region_bm), _expected_card_bm(expected_card_bm),
1671 _failures(0), _verbose(false),
1672 _n_workers(0) {
1673 assert(VerifyDuringGC, "don't call this otherwise");
1675 // Use the value already set as the number of active threads
1676 // in the call to run_task().
1677 if (G1CollectedHeap::use_parallel_gc_threads()) {
1678 assert(_g1h->workers()->active_workers() > 0,
1679 "Should have been previously set");
1680 _n_workers = _g1h->workers()->active_workers();
1681 } else {
1682 _n_workers = 1;
1683 }
1685 assert(_expected_card_bm->size() == _actual_card_bm->size(), "sanity");
1686 assert(_expected_region_bm->size() == _actual_region_bm->size(), "sanity");
1688 _verbose = _cm->verbose_medium();
1689 }
1691 void work(uint worker_id) {
1692 assert(worker_id < _n_workers, "invariant");
1694 VerifyLiveObjectDataHRClosure verify_cl(_g1h,
1695 _actual_region_bm, _actual_card_bm,
1696 _expected_region_bm,
1697 _expected_card_bm,
1698 _verbose);
1700 if (G1CollectedHeap::use_parallel_gc_threads()) {
1701 _g1h->heap_region_par_iterate_chunked(&verify_cl,
1702 worker_id,
1703 _n_workers,
1704 HeapRegion::VerifyCountClaimValue);
1705 } else {
1706 _g1h->heap_region_iterate(&verify_cl);
1707 }
1709 Atomic::add(verify_cl.failures(), &_failures);
1710 }
1712 int failures() const { return _failures; }
1713 };
1715 // Closure that finalizes the liveness counting data.
1716 // Used during the cleanup pause.
1717 // Sets the bits corresponding to the interval [NTAMS, top]
1718 // (which contains the implicitly live objects) in the
1719 // card liveness bitmap. Also sets the bit for each region,
1720 // containing live data, in the region liveness bitmap.
1722 class FinalCountDataUpdateClosure: public CMCountDataClosureBase {
1723 public:
1724 FinalCountDataUpdateClosure(G1CollectedHeap* g1h,
1725 BitMap* region_bm,
1726 BitMap* card_bm) :
1727 CMCountDataClosureBase(g1h, region_bm, card_bm) { }
1729 bool doHeapRegion(HeapRegion* hr) {
1731 if (hr->continuesHumongous()) {
1732 // We will ignore these here and process them when their
1733 // associated "starts humongous" region is processed (see
1734 // set_bit_for_heap_region()). Note that we cannot rely on their
1735 // associated "starts humongous" region to have their bit set to
1736 // 1 since, due to the region chunking in the parallel region
1737 // iteration, a "continues humongous" region might be visited
1738 // before its associated "starts humongous".
1739 return false;
1740 }
1742 HeapWord* ntams = hr->next_top_at_mark_start();
1743 HeapWord* top = hr->top();
1745 assert(hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions.");
1747 // Mark the allocated-since-marking portion...
1748 if (ntams < top) {
1749 // This definitely means the region has live objects.
1750 set_bit_for_region(hr);
1752 // Now set the bits in the card bitmap for [ntams, top)
1753 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
1754 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
1756 // Note: if we're looking at the last region in the heap, top
1757 // could actually be just beyond the end of the heap; end_idx
1758 // will then correspond to a (non-existent) card that is also
1759 // just beyond the heap.
1760 if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
1761 // end of object is not card aligned - increment to cover
1762 // all the cards spanned by the object
1763 end_idx += 1;
1764 }
1766 assert(end_idx <= _card_bm->size(),
1767 err_msg("oob: end_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT,
1768 end_idx, _card_bm->size()));
1769 assert(start_idx < _card_bm->size(),
1770 err_msg("oob: start_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT,
1771 start_idx, _card_bm->size()));
1773 _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1774 }
1776 // Set the bit for the region if it contains live data
1777 if (hr->next_marked_bytes() > 0) {
1778 set_bit_for_region(hr);
1779 }
1781 return false;
1782 }
1783 };
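// A self-contained sketch of the card-range arithmetic used in
// FinalCountDataUpdateClosure above, assuming the usual 512-byte cards
// (a card shift of 9 is an assumption here, taken from the default card
// table geometry). The end index is exclusive, so when 'top' is not card
// aligned the range is widened by one card to cover the partially filled
// last card:
#if 0 // illustrative sketch -- never compiled
#include <cstdint>
#include <cstddef>

static const int kCardShift = 9; // assumed 2^9 = 512-byte cards

static size_t card_index(uintptr_t heap_base, uintptr_t addr) {
  return (size_t)((addr - heap_base) >> kCardShift);
}

// Computes [start_idx, end_idx) covering every card spanned by
// [ntams, top), mirroring the "end_idx += 1" bump above.
static void card_range(uintptr_t heap_base, uintptr_t ntams, uintptr_t top,
                       size_t* start_idx, size_t* end_idx) {
  *start_idx = card_index(heap_base, ntams);
  *end_idx   = card_index(heap_base, top);
  bool top_card_aligned = (top & (((uintptr_t)1 << kCardShift) - 1)) == 0;
  if (!top_card_aligned) {
    *end_idx += 1; // cover the partially filled last card
  }
}
#endif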
1785 class G1ParFinalCountTask: public AbstractGangTask {
1786 protected:
1787 G1CollectedHeap* _g1h;
1788 ConcurrentMark* _cm;
1789 BitMap* _actual_region_bm;
1790 BitMap* _actual_card_bm;
1792 uint _n_workers;
1794 public:
1795 G1ParFinalCountTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm)
1796 : AbstractGangTask("G1 final counting"),
1797 _g1h(g1h), _cm(_g1h->concurrent_mark()),
1798 _actual_region_bm(region_bm), _actual_card_bm(card_bm),
1799 _n_workers(0) {
1800 // Use the value already set as the number of active threads
1801 // in the call to run_task().
1802 if (G1CollectedHeap::use_parallel_gc_threads()) {
1803 assert(_g1h->workers()->active_workers() > 0,
1804 "Should have been previously set");
1805 _n_workers = _g1h->workers()->active_workers();
1806 } else {
1807 _n_workers = 1;
1808 }
1809 }
1811 void work(uint worker_id) {
1812 assert(worker_id < _n_workers, "invariant");
1814 FinalCountDataUpdateClosure final_update_cl(_g1h,
1815 _actual_region_bm,
1816 _actual_card_bm);
1818 if (G1CollectedHeap::use_parallel_gc_threads()) {
1819 _g1h->heap_region_par_iterate_chunked(&final_update_cl,
1820 worker_id,
1821 _n_workers,
1822 HeapRegion::FinalCountClaimValue);
1823 } else {
1824 _g1h->heap_region_iterate(&final_update_cl);
1825 }
1826 }
1827 };
1829 class G1ParNoteEndTask;
1831 class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
1832 G1CollectedHeap* _g1;
1833 size_t _max_live_bytes;
1834 uint _regions_claimed;
1835 size_t _freed_bytes;
1836 FreeRegionList* _local_cleanup_list;
1837 HeapRegionSetCount _old_regions_removed;
1838 HeapRegionSetCount _humongous_regions_removed;
1839 HRRSCleanupTask* _hrrs_cleanup_task;
1840 double _claimed_region_time;
1841 double _max_region_time;
1843 public:
1844 G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
1845 FreeRegionList* local_cleanup_list,
1846 HRRSCleanupTask* hrrs_cleanup_task) :
1847 _g1(g1),
1848 _max_live_bytes(0), _regions_claimed(0),
1849 _freed_bytes(0),
1850 _claimed_region_time(0.0), _max_region_time(0.0),
1851 _local_cleanup_list(local_cleanup_list),
1852 _old_regions_removed(),
1853 _humongous_regions_removed(),
1854 _hrrs_cleanup_task(hrrs_cleanup_task) { }
1856 size_t freed_bytes() { return _freed_bytes; }
1857 const HeapRegionSetCount& old_regions_removed() { return _old_regions_removed; }
1858 const HeapRegionSetCount& humongous_regions_removed() { return _humongous_regions_removed; }
1860 bool doHeapRegion(HeapRegion *hr) {
1861 if (hr->continuesHumongous()) {
1862 return false;
1863 }
1864 // We use a claim value of zero here because all regions
1865 // were claimed with value 1 in the FinalCount task.
1866 _g1->reset_gc_time_stamps(hr);
1867 double start = os::elapsedTime();
1868 _regions_claimed++;
1869 hr->note_end_of_marking();
1870 _max_live_bytes += hr->max_live_bytes();
1872 if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
1873 _freed_bytes += hr->used();
1874 hr->set_containing_set(NULL);
1875 if (hr->isHumongous()) {
1876 assert(hr->startsHumongous(), "we should only see starts humongous");
1877 _humongous_regions_removed.increment(1u, hr->capacity());
1878 _g1->free_humongous_region(hr, _local_cleanup_list, true);
1879 } else {
1880 _old_regions_removed.increment(1u, hr->capacity());
1881 _g1->free_region(hr, _local_cleanup_list, true);
1882 }
1883 } else {
1884 hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
1885 }
1887 double region_time = (os::elapsedTime() - start);
1888 _claimed_region_time += region_time;
1889 if (region_time > _max_region_time) {
1890 _max_region_time = region_time;
1891 }
1892 return false;
1893 }
1895 size_t max_live_bytes() { return _max_live_bytes; }
1896 uint regions_claimed() { return _regions_claimed; }
1897 double claimed_region_time_sec() { return _claimed_region_time; }
1898 double max_region_time_sec() { return _max_region_time; }
1899 };
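// The reclaim decision in G1NoteEndOfConcMarkClosure::doHeapRegion()
// above frees a region when it is in use, marking found no live bytes in
// it, and it is not young (young regions are reclaimed by evacuation
// pauses instead). A hedged restatement of that predicate, with plain
// parameters standing in for the HeapRegion accessors:
#if 0 // illustrative sketch -- never compiled
static bool is_reclaimable_at_cleanup(size_t used_bytes,
                                      size_t max_live_bytes,
                                      bool is_young) {
  return used_bytes > 0        // the region actually holds data
      && max_live_bytes == 0   // marking proved all of it dead
      && !is_young;            // young regions are handled by evacuation
}
#endif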
1901 class G1ParNoteEndTask: public AbstractGangTask {
1902 friend class G1NoteEndOfConcMarkClosure;
1904 protected:
1905 G1CollectedHeap* _g1h;
1906 size_t _max_live_bytes;
1907 size_t _freed_bytes;
1908 FreeRegionList* _cleanup_list;
1910 public:
1911 G1ParNoteEndTask(G1CollectedHeap* g1h,
1912 FreeRegionList* cleanup_list) :
1913 AbstractGangTask("G1 note end"), _g1h(g1h),
1914 _max_live_bytes(0), _freed_bytes(0), _cleanup_list(cleanup_list) { }
1916 void work(uint worker_id) {
1917 double start = os::elapsedTime();
1918 FreeRegionList local_cleanup_list("Local Cleanup List");
1919 HRRSCleanupTask hrrs_cleanup_task;
1920 G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list,
1921 &hrrs_cleanup_task);
1922 if (G1CollectedHeap::use_parallel_gc_threads()) {
1923 _g1h->heap_region_par_iterate_chunked(&g1_note_end, worker_id,
1924 _g1h->workers()->active_workers(),
1925 HeapRegion::NoteEndClaimValue);
1926 } else {
1927 _g1h->heap_region_iterate(&g1_note_end);
1928 }
1929 assert(g1_note_end.complete(), "Shouldn't have yielded!");
1931 // Now update the lists
1932 _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed());
1933 {
1934 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
1935 _g1h->decrement_summary_bytes(g1_note_end.freed_bytes());
1936 _max_live_bytes += g1_note_end.max_live_bytes();
1937 _freed_bytes += g1_note_end.freed_bytes();
1939 // If we iterate over the global cleanup list at the end of
1940 // cleanup to do this printing we cannot guarantee that we will
1941 // only generate output for the newly-reclaimed regions (the list
1942 // might not be empty at the beginning of cleanup; we might
1943 // still be working on its previous contents). So we do the
1944 // printing here, before we append the new regions to the global
1945 // cleanup list.
1947 G1HRPrinter* hr_printer = _g1h->hr_printer();
1948 if (hr_printer->is_active()) {
1949 FreeRegionListIterator iter(&local_cleanup_list);
1950 while (iter.more_available()) {
1951 HeapRegion* hr = iter.get_next();
1952 hr_printer->cleanup(hr);
1953 }
1954 }
1956 _cleanup_list->add_ordered(&local_cleanup_list);
1957 assert(local_cleanup_list.is_empty(), "post-condition");
1959 HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
1960 }
1961 }
1962 size_t max_live_bytes() { return _max_live_bytes; }
1963 size_t freed_bytes() { return _freed_bytes; }
1964 };
1966 class G1ParScrubRemSetTask: public AbstractGangTask {
1967 protected:
1968 G1RemSet* _g1rs;
1969 BitMap* _region_bm;
1970 BitMap* _card_bm;
1971 public:
1972 G1ParScrubRemSetTask(G1CollectedHeap* g1h,
1973 BitMap* region_bm, BitMap* card_bm) :
1974 AbstractGangTask("G1 ScrubRS"), _g1rs(g1h->g1_rem_set()),
1975 _region_bm(region_bm), _card_bm(card_bm) { }
1977 void work(uint worker_id) {
1978 if (G1CollectedHeap::use_parallel_gc_threads()) {
1979 _g1rs->scrub_par(_region_bm, _card_bm, worker_id,
1980 HeapRegion::ScrubRemSetClaimValue);
1981 } else {
1982 _g1rs->scrub(_region_bm, _card_bm);
1983 }
1984 }
1986 };
1988 void ConcurrentMark::cleanup() {
1989 // world is stopped at this checkpoint
1990 assert(SafepointSynchronize::is_at_safepoint(),
1991 "world should be stopped");
1992 G1CollectedHeap* g1h = G1CollectedHeap::heap();
1994 // If a full collection has happened, we shouldn't do this.
1995 if (has_aborted()) {
1996 g1h->set_marking_complete(); // So bitmap clearing isn't confused
1997 return;
1998 }
2000 g1h->verify_region_sets_optional();
2002 if (VerifyDuringGC) {
2003 HandleMark hm; // handle scope
2004 Universe::heap()->prepare_for_verify();
2005 Universe::verify(VerifyOption_G1UsePrevMarking,
2006 " VerifyDuringGC:(before)");
2007 }
2008 g1h->check_bitmaps("Cleanup Start");
2010 G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
2011 g1p->record_concurrent_mark_cleanup_start();
2013 double start = os::elapsedTime();
2015 HeapRegionRemSet::reset_for_cleanup_tasks();
2017 uint n_workers;
2019 // Do counting once more with the world stopped for good measure.
2020 G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm);
2022 if (G1CollectedHeap::use_parallel_gc_threads()) {
2023 assert(g1h->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
2024 "sanity check");
2026 g1h->set_par_threads();
2027 n_workers = g1h->n_par_threads();
2028 assert(g1h->n_par_threads() == n_workers,
2029 "Should not have been reset");
2030 g1h->workers()->run_task(&g1_par_count_task);
2031 // Done with the parallel phase so reset to 0.
2032 g1h->set_par_threads(0);
2034 assert(g1h->check_heap_region_claim_values(HeapRegion::FinalCountClaimValue),
2035 "sanity check");
2036 } else {
2037 n_workers = 1;
2038 g1_par_count_task.work(0);
2039 }
2041 if (VerifyDuringGC) {
2042 // Verify that the counting data accumulated during marking matches
2043 // that calculated by walking the marking bitmap.
2045 // Bitmaps to hold expected values
2046 BitMap expected_region_bm(_region_bm.size(), true);
2047 BitMap expected_card_bm(_card_bm.size(), true);
2049 G1ParVerifyFinalCountTask g1_par_verify_task(g1h,
2050 &_region_bm,
2051 &_card_bm,
2052 &expected_region_bm,
2053 &expected_card_bm);
2055 if (G1CollectedHeap::use_parallel_gc_threads()) {
2056 g1h->set_par_threads((int)n_workers);
2057 g1h->workers()->run_task(&g1_par_verify_task);
2058 // Done with the parallel phase so reset to 0.
2059 g1h->set_par_threads(0);
2061 assert(g1h->check_heap_region_claim_values(HeapRegion::VerifyCountClaimValue),
2062 "sanity check");
2063 } else {
2064 g1_par_verify_task.work(0);
2065 }
2067 guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures");
2068 }
2070 size_t start_used_bytes = g1h->used();
2071 g1h->set_marking_complete();
2073 double count_end = os::elapsedTime();
2074 double this_final_counting_time = (count_end - start);
2075 _total_counting_time += this_final_counting_time;
2077 if (G1PrintRegionLivenessInfo) {
2078 G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking");
2079 _g1h->heap_region_iterate(&cl);
2080 }
2082 // Install newly created mark bitMap as "prev".
2083 swapMarkBitMaps();
2085 g1h->reset_gc_time_stamp();
2087 // Note end of marking in all heap regions.
2088 G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list);
2089 if (G1CollectedHeap::use_parallel_gc_threads()) {
2090 g1h->set_par_threads((int)n_workers);
2091 g1h->workers()->run_task(&g1_par_note_end_task);
2092 g1h->set_par_threads(0);
2094 assert(g1h->check_heap_region_claim_values(HeapRegion::NoteEndClaimValue),
2095 "sanity check");
2096 } else {
2097 g1_par_note_end_task.work(0);
2098 }
2099 g1h->check_gc_time_stamps();
2101 if (!cleanup_list_is_empty()) {
2102 // The cleanup list is not empty, so we'll have to process it
2103 // concurrently. Notify anyone else that might be wanting free
2104 // regions that there will be more free regions coming soon.
2105 g1h->set_free_regions_coming();
2106 }
2108 // Rem set scrubbing must be done before the record_concurrent_mark_cleanup_end()
2109 // call below, since it affects the metric by which we sort the heap regions.
2110 if (G1ScrubRemSets) {
2111 double rs_scrub_start = os::elapsedTime();
2112 G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm);
2113 if (G1CollectedHeap::use_parallel_gc_threads()) {
2114 g1h->set_par_threads((int)n_workers);
2115 g1h->workers()->run_task(&g1_par_scrub_rs_task);
2116 g1h->set_par_threads(0);
2118 assert(g1h->check_heap_region_claim_values(
2119 HeapRegion::ScrubRemSetClaimValue),
2120 "sanity check");
2121 } else {
2122 g1_par_scrub_rs_task.work(0);
2123 }
2125 double rs_scrub_end = os::elapsedTime();
2126 double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start);
2127 _total_rs_scrub_time += this_rs_scrub_time;
2128 }
2130 // this will also free any regions totally full of garbage objects,
2131 // and sort the regions.
2132 g1h->g1_policy()->record_concurrent_mark_cleanup_end((int)n_workers);
2134 // Statistics.
2135 double end = os::elapsedTime();
2136 _cleanup_times.add((end - start) * 1000.0);
2138 if (G1Log::fine()) {
2139 g1h->print_size_transition(gclog_or_tty,
2140 start_used_bytes,
2141 g1h->used(),
2142 g1h->capacity());
2143 }
2145 // Clean up will have freed any regions completely full of garbage.
2146 // Update the soft reference policy with the new heap occupancy.
2147 Universe::update_heap_info_at_gc();
2149 if (VerifyDuringGC) {
2150 HandleMark hm; // handle scope
2151 Universe::heap()->prepare_for_verify();
2152 Universe::verify(VerifyOption_G1UsePrevMarking,
2153 " VerifyDuringGC:(after)");
2154 }
2155 g1h->check_bitmaps("Cleanup End");
2157 g1h->verify_region_sets_optional();
2159 // We need to make this be a "collection" so any collection pause that
2160 // races with it goes around and waits for completeCleanup to finish.
2161 g1h->increment_total_collections();
2163 // Clean out dead classes and update Metaspace sizes.
2164 if (ClassUnloadingWithConcurrentMark) {
2165 ClassLoaderDataGraph::purge();
2166 }
2167 MetaspaceGC::compute_new_size();
2169 // We reclaimed old regions so we should calculate the sizes to make
2170 // sure we update the old gen/space data.
2171 g1h->g1mm()->update_sizes();
2173 g1h->trace_heap_after_concurrent_cycle();
2174 }
2176 void ConcurrentMark::completeCleanup() {
2177 if (has_aborted()) return;
2179 G1CollectedHeap* g1h = G1CollectedHeap::heap();
2181 _cleanup_list.verify_optional();
2182 FreeRegionList tmp_free_list("Tmp Free List");
2184 if (G1ConcRegionFreeingVerbose) {
2185 gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
2186 "cleanup list has %u entries",
2187 _cleanup_list.length());
2188 }
2190 // No one else should be accessing the _cleanup_list at this point,
2191 // so it's not necessary to take any locks
2192 while (!_cleanup_list.is_empty()) {
2193 HeapRegion* hr = _cleanup_list.remove_head();
2194 assert(hr != NULL, "Got NULL from a non-empty list");
2195 hr->par_clear();
2196 tmp_free_list.add_ordered(hr);
2198 // Instead of adding one region at a time to the secondary_free_list,
2199 // we accumulate them in the local list and move them a few at a
2200 // time. This also cuts down on the number of notify_all() calls
2201 // we do during this process. We'll also append the local list when
2202 // _cleanup_list is empty (which means we just removed the last
2203 // region from the _cleanup_list).
2204 if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
2205 _cleanup_list.is_empty()) {
2206 if (G1ConcRegionFreeingVerbose) {
2207 gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
2208 "appending %u entries to the secondary_free_list, "
2209 "cleanup list still has %u entries",
2210 tmp_free_list.length(),
2211 _cleanup_list.length());
2212 }
2214 {
2215 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
2216 g1h->secondary_free_list_add(&tmp_free_list);
2217 SecondaryFreeList_lock->notify_all();
2218 }
2220 if (G1StressConcRegionFreeing) {
2221 for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
2222 os::sleep(Thread::current(), (jlong) 1, false);
2223 }
2224 }
2225 }
2226 }
2227 assert(tmp_free_list.is_empty(), "post-condition");
2228 }
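// A minimal model of the batching pattern used by completeCleanup()
// above: regions are accumulated locally and published to the shared
// free list once per batch (plus once for the final partial batch), so
// the lock and notify_all() are taken O(N / batch) times rather than
// O(N). Standard C++ types stand in for the VM's region lists here:
#if 0 // illustrative sketch -- never compiled
#include <vector>
#include <mutex>
#include <condition_variable>

static void drain_in_batches(std::vector<int>& cleanup_list,
                             std::vector<int>& shared_free_list,
                             std::mutex& lock,
                             std::condition_variable& cv,
                             size_t batch) {
  std::vector<int> local;
  while (!cleanup_list.empty()) {
    local.push_back(cleanup_list.back());
    cleanup_list.pop_back();
    // Publish on a full batch, or when we just removed the last region.
    if (local.size() == batch || cleanup_list.empty()) {
      std::lock_guard<std::mutex> g(lock);
      shared_free_list.insert(shared_free_list.end(), local.begin(), local.end());
      local.clear();
      cv.notify_all(); // wake anyone waiting for free regions
    }
  }
}
#endif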
2230 // Supporting Object and Oop closures for reference discovery
2231 // and processing during marking
2233 bool G1CMIsAliveClosure::do_object_b(oop obj) {
2234 HeapWord* addr = (HeapWord*)obj;
2235 return addr != NULL &&
2236 (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
2237 }
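// Restating the liveness predicate above as a stand-alone sketch: a
// reference is treated as alive when it is non-NULL and either lies
// outside the G1-reserved heap (this cycle cannot reclaim it) or is not
// "ill", i.e. not proven dead by the current marking information. The
// boolean parameters stand in for the is_in_g1_reserved()/is_obj_ill()
// queries:
#if 0 // illustrative sketch -- never compiled
#include <cstddef>

static bool is_alive_model(const void* addr, bool in_g1_reserved, bool obj_ill) {
  return addr != NULL && (!in_g1_reserved || !obj_ill);
}
#endif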
2239 // 'Keep Alive' oop closure used by both serial and parallel reference processing.
2240 // Uses the CMTask associated with a worker thread (for serial reference
2241 // processing the CMTask for worker 0 is used) to preserve (mark) and
2242 // trace referent objects.
2243 //
2244 // Using the CMTask and embedded local queues avoids having the worker
2245 // threads operating on the global mark stack. This reduces the risk
2246 // of overflowing the stack - which we would rather avoid at this late
2247 // stage. Also, using the tasks' local queues removes the potential
2248 // for the workers to interfere with each other, which could occur if
2249 // they operated on the global stack.
2251 class G1CMKeepAliveAndDrainClosure: public OopClosure {
2252 ConcurrentMark* _cm;
2253 CMTask* _task;
2254 int _ref_counter_limit;
2255 int _ref_counter;
2256 bool _is_serial;
2257 public:
2258 G1CMKeepAliveAndDrainClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
2259 _cm(cm), _task(task), _is_serial(is_serial),
2260 _ref_counter_limit(G1RefProcDrainInterval) {
2261 assert(_ref_counter_limit > 0, "sanity");
2262 assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
2263 _ref_counter = _ref_counter_limit;
2264 }
2266 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
2267 virtual void do_oop( oop* p) { do_oop_work(p); }
2269 template <class T> void do_oop_work(T* p) {
2270 if (!_cm->has_overflown()) {
2271 oop obj = oopDesc::load_decode_heap_oop(p);
2272 if (_cm->verbose_high()) {
2273 gclog_or_tty->print_cr("\t[%u] we're looking at location "
2274 "*"PTR_FORMAT" = "PTR_FORMAT,
2275 _task->worker_id(), p2i(p), p2i((void*) obj));
2276 }
2278 _task->deal_with_reference(obj);
2279 _ref_counter--;
2281 if (_ref_counter == 0) {
2282 // We have dealt with _ref_counter_limit references, pushing them
2283 // and objects reachable from them on to the local stack (and
2284 // possibly the global stack). Call CMTask::do_marking_step() to
2285 // process these entries.
2286 //
2287 // We call CMTask::do_marking_step() in a loop, which we'll exit if
2288 // there's nothing more to do (i.e. we're done with the entries that
2289 // were pushed as a result of the CMTask::deal_with_reference() calls
2290 // above) or we overflow.
2291 //
2292 // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
2293 // flag while there may still be some work to do. (See the comment at
2294 // the beginning of CMTask::do_marking_step() for those conditions -
2295 // one of which is reaching the specified time target.) It is only
2296 // when CMTask::do_marking_step() returns without setting the
2297 // has_aborted() flag that the marking step has completed.
2298 do {
2299 double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
2300 _task->do_marking_step(mark_step_duration_ms,
2301 false /* do_termination */,
2302 _is_serial);
2303 } while (_task->has_aborted() && !_cm->has_overflown());
2304 _ref_counter = _ref_counter_limit;
2305 }
2306 } else {
2307 if (_cm->verbose_high()) {
2308 gclog_or_tty->print_cr("\t[%u] CM Overflow", _task->worker_id());
2309 }
2310 }
2311 }
2312 };
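// A schematic restatement of the "process a few, then drain" control
// flow implemented by G1CMKeepAliveAndDrainClosure above. The names
// deal_with(), marking_step_aborted() and overflowed() are stand-ins for
// deal_with_reference(), do_marking_step() and has_overflown():
#if 0 // illustrative sketch -- never compiled
struct MarkTaskModel {
  virtual void deal_with(void* ref) = 0;    // push ref on the local queues
  virtual bool marking_step_aborted() = 0;  // run one bounded marking step
  virtual bool overflowed() = 0;            // global mark stack overflowed?
};

static void keep_alive_and_drain(MarkTaskModel* task, void* ref,
                                 int* counter, int limit) {
  task->deal_with(ref);     // mark/trace the referent via the local queues
  if (--(*counter) == 0) {  // after every 'limit' references...
    do {
      // ...drain the queues; an aborted step means "not finished yet",
      // so keep retrying unless the global stack overflowed.
    } while (task->marking_step_aborted() && !task->overflowed());
    *counter = limit;
  }
}
#endif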
2314 // 'Drain' oop closure used by both serial and parallel reference processing.
2315 // Uses the CMTask associated with a given worker thread (for serial
2316 // reference processing the CMTask for worker 0 is used). Calls the
2317 // do_marking_step routine, with an unbelievably large timeout value,
2318 // to drain the marking data structures of the remaining entries
2319 // added by the 'keep alive' oop closure above.
2321 class G1CMDrainMarkingStackClosure: public VoidClosure {
2322 ConcurrentMark* _cm;
2323 CMTask* _task;
2324 bool _is_serial;
2325 public:
2326 G1CMDrainMarkingStackClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
2327 _cm(cm), _task(task), _is_serial(is_serial) {
2328 assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
2329 }
2331 void do_void() {
2332 do {
2333 if (_cm->verbose_high()) {
2334 gclog_or_tty->print_cr("\t[%u] Drain: Calling do_marking_step - serial: %s",
2335 _task->worker_id(), BOOL_TO_STR(_is_serial));
2336 }
2338 // We call CMTask::do_marking_step() to completely drain the local
2339 // and global marking stacks of entries pushed by the 'keep alive'
2340 // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
2341 //
2342 // CMTask::do_marking_step() is called in a loop, which we'll exit
2343 // if there's nothing more to do (i.e. we've completely drained the
2344 // entries that were pushed as a result of applying the 'keep alive'
2345 // closure to the entries on the discovered ref lists) or we overflow
2346 // the global marking stack.
2347 //
2348 // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
2349 // flag while there may still be some work to do. (See the comment at
2350 // the beginning of CMTask::do_marking_step() for those conditions -
2351 // one of which is reaching the specified time target.) It is only
2352 // when CMTask::do_marking_step() returns without setting the
2353 // has_aborted() flag that the marking step has completed.
2355 _task->do_marking_step(1000000000.0 /* something very large */,
2356 true /* do_termination */,
2357 _is_serial);
2358 } while (_task->has_aborted() && !_cm->has_overflown());
2359 }
2360 };
2362 // Implementation of AbstractRefProcTaskExecutor for parallel
2363 // reference processing at the end of G1 concurrent marking
2365 class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
2366 private:
2367 G1CollectedHeap* _g1h;
2368 ConcurrentMark* _cm;
2369 WorkGang* _workers;
2370 int _active_workers;
2372 public:
2373 G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
2374 ConcurrentMark* cm,
2375 WorkGang* workers,
2376 int n_workers) :
2377 _g1h(g1h), _cm(cm),
2378 _workers(workers), _active_workers(n_workers) { }
2380 // Executes the given task using concurrent marking worker threads.
2381 virtual void execute(ProcessTask& task);
2382 virtual void execute(EnqueueTask& task);
2383 };
2385 class G1CMRefProcTaskProxy: public AbstractGangTask {
2386 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
2387 ProcessTask& _proc_task;
2388 G1CollectedHeap* _g1h;
2389 ConcurrentMark* _cm;
2391 public:
2392 G1CMRefProcTaskProxy(ProcessTask& proc_task,
2393 G1CollectedHeap* g1h,
2394 ConcurrentMark* cm) :
2395 AbstractGangTask("Process reference objects in parallel"),
2396 _proc_task(proc_task), _g1h(g1h), _cm(cm) {
2397 ReferenceProcessor* rp = _g1h->ref_processor_cm();
2398 assert(rp->processing_is_mt(), "shouldn't be here otherwise");
2399 }
2401 virtual void work(uint worker_id) {
2402 CMTask* task = _cm->task(worker_id);
2403 G1CMIsAliveClosure g1_is_alive(_g1h);
2404 G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
2405 G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */);
2407 _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
2408 }
2409 };
2411 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
2412 assert(_workers != NULL, "Need parallel worker threads.");
2413 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
2415 G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);
2417 // We need to reset the concurrency level before each
2418 // proxy task execution, so that the termination protocol
2419 // and overflow handling in CMTask::do_marking_step() knows
2420 // how many workers to wait for.
2421 _cm->set_concurrency(_active_workers);
2422 _g1h->set_par_threads(_active_workers);
2423 _workers->run_task(&proc_task_proxy);
2424 _g1h->set_par_threads(0);
2425 }
2427 class G1CMRefEnqueueTaskProxy: public AbstractGangTask {
2428 typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
2429 EnqueueTask& _enq_task;
2431 public:
2432 G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) :
2433 AbstractGangTask("Enqueue reference objects in parallel"),
2434 _enq_task(enq_task) { }
2436 virtual void work(uint worker_id) {
2437 _enq_task.work(worker_id);
2438 }
2439 };
2441 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
2442 assert(_workers != NULL, "Need parallel worker threads.");
2443 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
2445 G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);
2447 // Not strictly necessary but...
2448 //
2449 // We need to reset the concurrency level before each
2450 // proxy task execution, so that the termination protocol
2451 // and overflow handling in CMTask::do_marking_step() knows
2452 // how many workers to wait for.
2453 _cm->set_concurrency(_active_workers);
2454 _g1h->set_par_threads(_active_workers);
2455 _workers->run_task(&enq_task_proxy);
2456 _g1h->set_par_threads(0);
2457 }
2459 void ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) {
2460 G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes);
2461 }
2463 // Helper class to get rid of some boilerplate code.
2464 class G1RemarkGCTraceTime : public GCTraceTime {
2465 static bool doit_and_prepend(bool doit) {
2466 if (doit) {
2467 gclog_or_tty->put(' ');
2468 }
2469 return doit;
2470 }
2472 public:
2473 G1RemarkGCTraceTime(const char* title, bool doit)
2474 : GCTraceTime(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm(),
2475 G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id()) {
2476 }
2477 };
2479 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
2480 if (has_overflown()) {
2481 // Skip processing the discovered references if we have
2482 // overflown the global marking stack. Reference objects
2483 // only get discovered once so it is OK to not
2484 // de-populate the discovered reference lists. We could have,
2485 // but the only benefit would be that, when marking restarts,
2486 // less reference objects are discovered.
2487 return;
2488 }
2490 ResourceMark rm;
2491 HandleMark hm;
2493 G1CollectedHeap* g1h = G1CollectedHeap::heap();
2495 // Is alive closure.
2496 G1CMIsAliveClosure g1_is_alive(g1h);
2498 // Inner scope to exclude the cleaning of the string and symbol
2499 // tables from the displayed time.
2500 {
2501 if (G1Log::finer()) {
2502 gclog_or_tty->put(' ');
2503 }
2504 GCTraceTime t("GC ref-proc", G1Log::finer(), false, g1h->gc_timer_cm(), concurrent_gc_id());
2506 ReferenceProcessor* rp = g1h->ref_processor_cm();
2508 // See the comment in G1CollectedHeap::ref_processing_init()
2509 // about how reference processing currently works in G1.
2511 // Set the soft reference policy
2512 rp->setup_policy(clear_all_soft_refs);
2513 assert(_markStack.isEmpty(), "mark stack should be empty");
2515 // Instances of the 'Keep Alive' and 'Complete GC' closures used
2516 // in serial reference processing. Note these closures are also
2517 // used for serially processing (by the current thread) the
2518 // JNI references during parallel reference processing.
2519 //
2520 // These closures do not need to synchronize with the worker
2521 // threads involved in parallel reference processing as these
2522 // instances are executed serially by the current thread (i.e.
2523 // reference processing is not multi-threaded and is thus
2524 // performed by the current thread instead of a gang worker).
2525 //
2526 // The gang tasks involved in parallel reference processing create
2527 // their own instances of these closures, which do their own
2528 // synchronization among themselves.
2529 G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
2530 G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);
2532 // We need at least one active thread. If reference processing
2533 // is not multi-threaded we use the current (VMThread) thread,
2534 // otherwise we use the work gang from the G1CollectedHeap and
2535 // we utilize all the worker threads we can.
2536 bool processing_is_mt = rp->processing_is_mt() && g1h->workers() != NULL;
2537 uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U);
2538 active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);
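    // The clamp above is just max(min(requested, _max_worker_id), 1):
    // e.g. a request for 13 workers with _max_worker_id == 8 yields 8,
    // while a request for 0 (no work gang) still yields 1 thread.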
2540 // Parallel processing task executor.
2541 G1CMRefProcTaskExecutor par_task_executor(g1h, this,
2542 g1h->workers(), active_workers);
2543 AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
2545 // Set the concurrency level. The phase was already set prior to
2546 // executing the remark task.
2547 set_concurrency(active_workers);
2549 // Set the degree of MT processing here. If the discovery was done MT,
2550 // the number of threads involved during discovery could differ from
2551 // the number of active workers. This is OK as long as the discovered
2552 // Reference lists are balanced (see balance_all_queues() and balance_queues()).
2553 rp->set_active_mt_degree(active_workers);
2555 // Process the weak references.
2556 const ReferenceProcessorStats& stats =
2557 rp->process_discovered_references(&g1_is_alive,
2558 &g1_keep_alive,
2559 &g1_drain_mark_stack,
2560 executor,
2561 g1h->gc_timer_cm(),
2562 concurrent_gc_id());
2563 g1h->gc_tracer_cm()->report_gc_reference_stats(stats);
2565 // The do_oop work routines of the keep_alive and drain_marking_stack
2566 // oop closures will set the has_overflown flag if we overflow the
2567 // global marking stack.
2569 assert(_markStack.overflow() || _markStack.isEmpty(),
2570 "mark stack should be empty (unless it overflowed)");
2572 if (_markStack.overflow()) {
2573 // This should have been done already when we tried to push an
2574 // entry on to the global mark stack. But let's do it again.
2575 set_has_overflown();
2576 }
2578 assert(rp->num_q() == active_workers, "why not");
2580 rp->enqueue_discovered_references(executor);
2582 rp->verify_no_references_recorded();
2583 assert(!rp->discovery_enabled(), "Post condition");
2584 }
2586 if (has_overflown()) {
2587 // We can not trust g1_is_alive if the marking stack overflowed
2588 return;
2589 }
2591 assert(_markStack.isEmpty(), "Marking should have completed");
2593 // Unload Klasses, String, Symbols, Code Cache, etc.
2594 {
2595 G1RemarkGCTraceTime trace("Unloading", G1Log::finer());
2597 if (ClassUnloadingWithConcurrentMark) {
2598 bool purged_classes;
2600 {
2601 G1RemarkGCTraceTime trace("System Dictionary Unloading", G1Log::finest());
2602 purged_classes = SystemDictionary::do_unloading(&g1_is_alive);
2603 }
2605 {
2606 G1RemarkGCTraceTime trace("Parallel Unloading", G1Log::finest());
2607 weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
2608 }
2609 }
2611 if (G1StringDedup::is_enabled()) {
2612 G1RemarkGCTraceTime trace("String Deduplication Unlink", G1Log::finest());
2613 G1StringDedup::unlink(&g1_is_alive);
2614 }
2615 }
2616 }
2618 void ConcurrentMark::swapMarkBitMaps() {
2619 CMBitMapRO* temp = _prevMarkBitMap;
2620 _prevMarkBitMap = (CMBitMapRO*)_nextMarkBitMap;
2621 _nextMarkBitMap = (CMBitMap*) temp;
2622 }
2624 class CMObjectClosure;
2626 // Closure for iterating over objects, currently only used for
2627 // processing SATB buffers.
2628 class CMObjectClosure : public ObjectClosure {
2629 private:
2630 CMTask* _task;
2632 public:
2633 void do_object(oop obj) {
2634 _task->deal_with_reference(obj);
2635 }
2637 CMObjectClosure(CMTask* task) : _task(task) { }
2638 };
2640 class G1RemarkThreadsClosure : public ThreadClosure {
2641 CMObjectClosure _cm_obj;
2642 G1CMOopClosure _cm_cl;
2643 MarkingCodeBlobClosure _code_cl;
2644 int _thread_parity;
2645 bool _is_par;
2647 public:
2648 G1RemarkThreadsClosure(G1CollectedHeap* g1h, CMTask* task, bool is_par) :
2649 _cm_obj(task), _cm_cl(g1h, g1h->concurrent_mark(), task), _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
2650 _thread_parity(SharedHeap::heap()->strong_roots_parity()), _is_par(is_par) {}
2652 void do_thread(Thread* thread) {
2653 if (thread->is_Java_thread()) {
2654 if (thread->claim_oops_do(_is_par, _thread_parity)) {
2655 JavaThread* jt = (JavaThread*)thread;
2657 // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking;
2658 // however, the oops reachable from nmethods have very complex lifecycles:
2659 // * Alive if on the stack of an executing method
2660 // * Weakly reachable otherwise
2661 // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver, should be
2662 // live by the SATB invariant, but other oops recorded in nmethods may behave differently.
2663 jt->nmethods_do(&_code_cl);
2665 jt->satb_mark_queue().apply_closure_and_empty(&_cm_obj);
2666 }
2667 } else if (thread->is_VM_thread()) {
2668 if (thread->claim_oops_do(_is_par, _thread_parity)) {
2669 JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_obj);
2670 }
2671 }
2672 }
2673 };
2675 class CMRemarkTask: public AbstractGangTask {
2676 private:
2677 ConcurrentMark* _cm;
2678 bool _is_serial;
2679 public:
2680 void work(uint worker_id) {
2681 // Since all available tasks are actually started, we should
2682 // only proceed if we're supposed to be active.
2683 if (worker_id < _cm->active_tasks()) {
2684 CMTask* task = _cm->task(worker_id);
2685 task->record_start_time();
2686 {
2687 ResourceMark rm;
2688 HandleMark hm;
2690 G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task, !_is_serial);
2691 Threads::threads_do(&threads_f);
2692 }
2694 do {
2695 task->do_marking_step(1000000000.0 /* something very large */,
2696 true /* do_termination */,
2697 _is_serial);
2698 } while (task->has_aborted() && !_cm->has_overflown());
2699 // If we overflow, then we do not want to restart. We instead
2700 // want to abort remark and do concurrent marking again.
2701 task->record_end_time();
2702 }
2703 }
2705 CMRemarkTask(ConcurrentMark* cm, int active_workers, bool is_serial) :
2706 AbstractGangTask("Par Remark"), _cm(cm), _is_serial(is_serial) {
2707 _cm->terminator()->reset_for_reuse(active_workers);
2708 }
2709 };
2711 void ConcurrentMark::checkpointRootsFinalWork() {
2712 ResourceMark rm;
2713 HandleMark hm;
2714 G1CollectedHeap* g1h = G1CollectedHeap::heap();
2716 G1RemarkGCTraceTime trace("Finalize Marking", G1Log::finer());
2718 g1h->ensure_parsability(false);
2720 if (G1CollectedHeap::use_parallel_gc_threads()) {
2721 G1CollectedHeap::StrongRootsScope srs(g1h);
2722 // this is remark, so we'll use up all active threads
2723 uint active_workers = g1h->workers()->active_workers();
2724 if (active_workers == 0) {
2725 assert(active_workers > 0, "Should have been set earlier");
2726 active_workers = (uint) ParallelGCThreads;
2727 g1h->workers()->set_active_workers(active_workers);
2728 }
2729 set_concurrency_and_phase(active_workers, false /* concurrent */);
2730 // Leave _parallel_marking_threads at its
2731 // value originally calculated in the ConcurrentMark
2732 // constructor and pass values of the active workers
2733 // through the gang in the task.
2735 CMRemarkTask remarkTask(this, active_workers, false /* is_serial */);
2736 // We will start all available threads, even if we decide that the
2737 // active_workers will be fewer. The extra ones will just bail out
2738 // immediately.
2739 g1h->set_par_threads(active_workers);
2740 g1h->workers()->run_task(&remarkTask);
2741 g1h->set_par_threads(0);
2742 } else {
2743 G1CollectedHeap::StrongRootsScope srs(g1h);
2744 uint active_workers = 1;
2745 set_concurrency_and_phase(active_workers, false /* concurrent */);
2747 // Note - if there's no work gang then the VMThread will be
2748 // the thread to execute the remark - serially. We have
2749 // to pass true for the is_serial parameter so that
2750 // CMTask::do_marking_step() doesn't enter the sync
2751 // barriers in the event of an overflow. Doing so will
2752 // cause an assert that the current thread is not a
2753 // concurrent GC thread.
2754 CMRemarkTask remarkTask(this, active_workers, true /* is_serial */);
2755 remarkTask.work(0);
2756 }
2757 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2758 guarantee(has_overflown() ||
2759 satb_mq_set.completed_buffers_num() == 0,
2760 err_msg("Invariant: has_overflown = %s, num buffers = %d",
2761 BOOL_TO_STR(has_overflown()),
2762 satb_mq_set.completed_buffers_num()));
2764 print_stats();
2765 }
2767 #ifndef PRODUCT
2769 class PrintReachableOopClosure: public OopClosure {
2770 private:
2771 G1CollectedHeap* _g1h;
2772 outputStream* _out;
2773 VerifyOption _vo;
2774 bool _all;
2776 public:
2777 PrintReachableOopClosure(outputStream* out,
2778 VerifyOption vo,
2779 bool all) :
2780 _g1h(G1CollectedHeap::heap()),
2781 _out(out), _vo(vo), _all(all) { }
2783 void do_oop(narrowOop* p) { do_oop_work(p); }
2784 void do_oop( oop* p) { do_oop_work(p); }
2786 template <class T> void do_oop_work(T* p) {
2787 oop obj = oopDesc::load_decode_heap_oop(p);
2788 const char* str = NULL;
2789 const char* str2 = "";
2791 if (obj == NULL) {
2792 str = "";
2793 } else if (!_g1h->is_in_g1_reserved(obj)) {
2794 str = " O";
2795 } else {
2796 HeapRegion* hr = _g1h->heap_region_containing(obj);
2797 guarantee(hr != NULL, "invariant");
2798 bool over_tams = _g1h->allocated_since_marking(obj, hr, _vo);
2799 bool marked = _g1h->is_marked(obj, _vo);
2801 if (over_tams) {
2802 str = " >";
2803 if (marked) {
2804 str2 = " AND MARKED";
2805 }
2806 } else if (marked) {
2807 str = " M";
2808 } else {
2809 str = " NOT";
2810 }
2811 }
2813 _out->print_cr(" "PTR_FORMAT": "PTR_FORMAT"%s%s",
2814 p2i(p), p2i((void*) obj), str, str2);
2815 }
2816 };
2818 class PrintReachableObjectClosure : public ObjectClosure {
2819 private:
2820 G1CollectedHeap* _g1h;
2821 outputStream* _out;
2822 VerifyOption _vo;
2823 bool _all;
2824 HeapRegion* _hr;
2826 public:
2827 PrintReachableObjectClosure(outputStream* out,
2828 VerifyOption vo,
2829 bool all,
2830 HeapRegion* hr) :
2831 _g1h(G1CollectedHeap::heap()),
2832 _out(out), _vo(vo), _all(all), _hr(hr) { }
2834 void do_object(oop o) {
2835 bool over_tams = _g1h->allocated_since_marking(o, _hr, _vo);
2836 bool marked = _g1h->is_marked(o, _vo);
2837 bool print_it = _all || over_tams || marked;
2839 if (print_it) {
2840 _out->print_cr(" "PTR_FORMAT"%s",
2841 p2i((void *)o), (over_tams) ? " >" : (marked) ? " M" : "");
2842 PrintReachableOopClosure oopCl(_out, _vo, _all);
2843 o->oop_iterate_no_header(&oopCl);
2844 }
2845 }
2846 };
2848 class PrintReachableRegionClosure : public HeapRegionClosure {
2849 private:
2850 G1CollectedHeap* _g1h;
2851 outputStream* _out;
2852 VerifyOption _vo;
2853 bool _all;
2855 public:
2856 bool doHeapRegion(HeapRegion* hr) {
2857 HeapWord* b = hr->bottom();
2858 HeapWord* e = hr->end();
2859 HeapWord* t = hr->top();
2860 HeapWord* p = _g1h->top_at_mark_start(hr, _vo);
2861 _out->print_cr("** ["PTR_FORMAT", "PTR_FORMAT"] top: "PTR_FORMAT" "
2862 "TAMS: " PTR_FORMAT, p2i(b), p2i(e), p2i(t), p2i(p));
2863 _out->cr();
2865 HeapWord* from = b;
2866 HeapWord* to = t;
2868 if (to > from) {
2869 _out->print_cr("Objects in [" PTR_FORMAT ", " PTR_FORMAT "]", p2i(from), p2i(to));
2870 _out->cr();
2871 PrintReachableObjectClosure ocl(_out, _vo, _all, hr);
2872 hr->object_iterate_mem_careful(MemRegion(from, to), &ocl);
2873 _out->cr();
2874 }
2876 return false;
2877 }
2879 PrintReachableRegionClosure(outputStream* out,
2880 VerifyOption vo,
2881 bool all) :
2882 _g1h(G1CollectedHeap::heap()), _out(out), _vo(vo), _all(all) { }
2883 };
2885 void ConcurrentMark::print_reachable(const char* str,
2886 VerifyOption vo,
2887 bool all) {
2888 gclog_or_tty->cr();
2889 gclog_or_tty->print_cr("== Doing heap dump... ");
2891 if (G1PrintReachableBaseFile == NULL) {
2892 gclog_or_tty->print_cr(" #### error: no base file defined");
2893 return;
2894 }
2896 if (strlen(G1PrintReachableBaseFile) + 1 + strlen(str) >
2897 (JVM_MAXPATHLEN - 1)) {
2898 gclog_or_tty->print_cr(" #### error: file name too long");
2899 return;
2900 }
2902 char file_name[JVM_MAXPATHLEN];
2903 sprintf(file_name, "%s.%s", G1PrintReachableBaseFile, str);
2904 gclog_or_tty->print_cr(" dumping to file %s", file_name);
2906 fileStream fout(file_name);
2907 if (!fout.is_open()) {
2908 gclog_or_tty->print_cr(" #### error: could not open file");
2909 return;
2910 }
2912 outputStream* out = &fout;
2913 out->print_cr("-- USING %s", _g1h->top_at_mark_start_str(vo));
2914 out->cr();
2916 out->print_cr("--- ITERATING OVER REGIONS");
2917 out->cr();
2918 PrintReachableRegionClosure rcl(out, vo, all);
2919 _g1h->heap_region_iterate(&rcl);
2920 out->cr();
2922 gclog_or_tty->print_cr(" done");
2923 gclog_or_tty->flush();
2924 }
2926 #endif // PRODUCT
2928 void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
2929 // Note we are overriding the read-only view of the prev map here, via
2930 // the cast.
2931 ((CMBitMap*)_prevMarkBitMap)->clearRange(mr);
2932 }
2934 void ConcurrentMark::clearRangeNextBitmap(MemRegion mr) {
2935 _nextMarkBitMap->clearRange(mr);
2936 }
2938 void ConcurrentMark::clearRangeBothBitmaps(MemRegion mr) {
2939 clearRangePrevBitmap(mr);
2940 clearRangeNextBitmap(mr);
2941 }
2943 HeapRegion*
2944 ConcurrentMark::claim_region(uint worker_id) {
2945 // "checkpoint" the finger
2946 HeapWord* finger = _finger;
2948 // _heap_end will not change underneath our feet; it only changes at
2949 // yield points.
2950 while (finger < _heap_end) {
2951 assert(_g1h->is_in_g1_reserved(finger), "invariant");
2953 // Note on how this code handles humongous regions. In the
2954 // normal case the finger will reach the start of a "starts
2955 // humongous" (SH) region. Its end will either be the end of the
2956 // last "continues humongous" (CH) region in the sequence, or the
2957 // standard end of the SH region (if the SH is the only region in
2958 // the sequence). That way claim_region() will skip over the CH
2959 // regions. However, there is a subtle race between a CM thread
2960 // executing this method and a mutator thread doing a humongous
2961 // object allocation. The two are not mutually exclusive as the CM
2962 // thread does not need to hold the Heap_lock when it gets
2963 // here. So there is a chance that claim_region() will come across
2964 // a free region that's in the process of becoming a SH or a CH
2965 // region. In the former case, it will either
2966 // a) Miss the update to the region's end, in which case it will
2967 // visit every subsequent CH region, will find their bitmaps
2968 // empty, and do nothing, or
2969 // b) Observe the update of the region's end (in which case
2970 // it will skip the subsequent CH regions).
2971 // If it comes across a region that suddenly becomes CH, the
2972 // scenario will be similar to b). So, the race between
2973 // claim_region() and a humongous object allocation might force us
2974 // to do a bit of unnecessary work (due to some unnecessary bitmap
2975 // iterations) but it should not introduce any correctness issues.
2976 HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger);
2977 HeapWord* bottom = curr_region->bottom();
2978 HeapWord* end = curr_region->end();
2979 HeapWord* limit = curr_region->next_top_at_mark_start();
2981 if (verbose_low()) {
2982 gclog_or_tty->print_cr("[%u] curr_region = "PTR_FORMAT" "
2983 "["PTR_FORMAT", "PTR_FORMAT"), "
2984 "limit = "PTR_FORMAT,
2985 worker_id, p2i(curr_region), p2i(bottom), p2i(end), p2i(limit));
2986 }
2988 // Is the gap between reading the finger and doing the CAS too long?
2989 HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
2990 if (res == finger) {
2991 // we succeeded
2993 // notice that _finger == end cannot be guaranteed here since
2994 // someone else might have moved the finger even further
2995 assert(_finger >= end, "the finger should have moved forward");
2997 if (verbose_low()) {
2998 gclog_or_tty->print_cr("[%u] we were successful with region = "
2999 PTR_FORMAT, worker_id, p2i(curr_region));
3000 }
3002 if (limit > bottom) {
3003 if (verbose_low()) {
3004 gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is not empty, "
3005 "returning it ", worker_id, p2i(curr_region));
3006 }
3007 return curr_region;
3008 } else {
3009 assert(limit == bottom,
3010 "the region limit should be at bottom");
3011 if (verbose_low()) {
3012 gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is empty, "
3013 "returning NULL", worker_id, p2i(curr_region));
3014 }
3015 // we return NULL and the caller should try calling
3016 // claim_region() again.
3017 return NULL;
3018 }
3019 } else {
3020 assert(_finger > finger, "the finger should have moved forward");
3021 if (verbose_low()) {
3022 gclog_or_tty->print_cr("[%u] somebody else moved the finger, "
3023 "global finger = "PTR_FORMAT", "
3024 "our finger = "PTR_FORMAT,
3025 worker_id, p2i(_finger), p2i(finger));
3026 }
3028 // read it again
3029 finger = _finger;
3030 }
3031 }
3033 return NULL;
3034 }
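// A minimal, self-contained model of the CAS-based claim protocol used
// by claim_region() above, with std::atomic standing in for
// Atomic::cmpxchg_ptr and a flat array standing in for the region table.
// All names here are illustrative:
#if 0 // illustrative sketch -- never compiled
#include <atomic>
#include <cstdint>
#include <cstddef>

struct RegionModel { size_t bottom; size_t end; };

// Returns the claimed region index, or SIZE_MAX if another thread raced
// past us (the caller then re-reads the finger and retries, exactly as
// claim_region() does).
static size_t claim_next(std::atomic<size_t>* finger,
                         const RegionModel* regions,
                         size_t nregions,
                         size_t region_size) {
  size_t f = finger->load();
  if (f >= nregions * region_size) {
    return SIZE_MAX; // finger is past the heap end; nothing left to claim
  }
  size_t idx = f / region_size;
  size_t end = regions[idx].end;
  // Advance the shared finger from our snapshot to the region's end.
  if (finger->compare_exchange_strong(f, end)) {
    return idx;      // success: we own [bottom, end) of this region
  }
  return SIZE_MAX;   // lost the race; retry with the new finger value
}
#endif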
3036 #ifndef PRODUCT
3037 enum VerifyNoCSetOopsPhase {
3038 VerifyNoCSetOopsStack,
3039 VerifyNoCSetOopsQueues,
3040 VerifyNoCSetOopsSATBCompleted,
3041 VerifyNoCSetOopsSATBThread
3042 };
3044 class VerifyNoCSetOopsClosure : public OopClosure, public ObjectClosure {
3045 private:
3046 G1CollectedHeap* _g1h;
3047 VerifyNoCSetOopsPhase _phase;
3048 int _info;
3050 const char* phase_str() {
3051 switch (_phase) {
3052 case VerifyNoCSetOopsStack: return "Stack";
3053 case VerifyNoCSetOopsQueues: return "Queue";
3054 case VerifyNoCSetOopsSATBCompleted: return "Completed SATB Buffers";
3055 case VerifyNoCSetOopsSATBThread: return "Thread SATB Buffers";
3056 default: ShouldNotReachHere();
3057 }
3058 return NULL;
3059 }
3061 void do_object_work(oop obj) {
3062 guarantee(!_g1h->obj_in_cs(obj),
3063 err_msg("obj: "PTR_FORMAT" in CSet, phase: %s, info: %d",
3064 p2i((void*) obj), phase_str(), _info));
3065 }
3067 public:
3068 VerifyNoCSetOopsClosure() : _g1h(G1CollectedHeap::heap()) { }
3070 void set_phase(VerifyNoCSetOopsPhase phase, int info = -1) {
3071 _phase = phase;
3072 _info = info;
3073 }
3075 virtual void do_oop(oop* p) {
3076 oop obj = oopDesc::load_decode_heap_oop(p);
3077 do_object_work(obj);
3078 }
3080 virtual void do_oop(narrowOop* p) {
3081 // We should not come across narrow oops while scanning marking
3082 // stacks and SATB buffers.
3083 ShouldNotReachHere();
3084 }
3086 virtual void do_object(oop obj) {
3087 do_object_work(obj);
3088 }
3089 };
3091 void ConcurrentMark::verify_no_cset_oops(bool verify_stacks,
3092 bool verify_enqueued_buffers,
3093 bool verify_thread_buffers,
3094 bool verify_fingers) {
3095 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
3096 if (!G1CollectedHeap::heap()->mark_in_progress()) {
3097 return;
3098 }
3100 VerifyNoCSetOopsClosure cl;
3102 if (verify_stacks) {
3103 // Verify entries on the global mark stack
3104 cl.set_phase(VerifyNoCSetOopsStack);
3105 _markStack.oops_do(&cl);
3107 // Verify entries on the task queues
3108 for (uint i = 0; i < _max_worker_id; i += 1) {
3109 cl.set_phase(VerifyNoCSetOopsQueues, i);
3110 CMTaskQueue* queue = _task_queues->queue(i);
3111 queue->oops_do(&cl);
3112 }
3113 }
3115 SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
3117 // Verify entries on the enqueued SATB buffers
3118 if (verify_enqueued_buffers) {
3119 cl.set_phase(VerifyNoCSetOopsSATBCompleted);
3120 satb_qs.iterate_completed_buffers_read_only(&cl);
3121 }
3123 // Verify entries on the per-thread SATB buffers
3124 if (verify_thread_buffers) {
3125 cl.set_phase(VerifyNoCSetOopsSATBThread);
3126 satb_qs.iterate_thread_buffers_read_only(&cl);
3127 }
3129 if (verify_fingers) {
3130 // Verify the global finger
3131 HeapWord* global_finger = finger();
3132 if (global_finger != NULL && global_finger < _heap_end) {
3133 // The global finger always points to a heap region boundary. We
3134 // use heap_region_containing_raw() to get the containing region
3135 // given that the global finger could be pointing to a free region
3136 // which subsequently becomes continues humongous. If that
3137 // happens, heap_region_containing() will return the bottom of the
3138 // corresponding starts humongous region and the check below will
3139 // not hold any more.
3140 HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger);
3141 guarantee(global_finger == global_hr->bottom(),
3142 err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT,
3143 p2i(global_finger), HR_FORMAT_PARAMS(global_hr)));
3144 }
3146 // Verify the task fingers
3147 assert(parallel_marking_threads() <= _max_worker_id, "sanity");
3148 for (int i = 0; i < (int) parallel_marking_threads(); i += 1) {
3149 CMTask* task = _tasks[i];
3150 HeapWord* task_finger = task->finger();
3151 if (task_finger != NULL && task_finger < _heap_end) {
3152 // See above note on the global finger verification.
3153 HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger);
3154 guarantee(task_finger == task_hr->bottom() ||
3155 !task_hr->in_collection_set(),
3156 err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT,
3157 p2i(task_finger), HR_FORMAT_PARAMS(task_hr)));
3158 }
3159 }
3160 }
3161 }
3162 #endif // PRODUCT
3164 // Aggregate the counting data that was constructed concurrently
3165 // with marking.
3166 class AggregateCountDataHRClosure: public HeapRegionClosure {
3167 G1CollectedHeap* _g1h;
3168 ConcurrentMark* _cm;
3169 CardTableModRefBS* _ct_bs;
3170 BitMap* _cm_card_bm;
3171 uint _max_worker_id;
3173 public:
3174 AggregateCountDataHRClosure(G1CollectedHeap* g1h,
3175 BitMap* cm_card_bm,
3176 uint max_worker_id) :
3177 _g1h(g1h), _cm(g1h->concurrent_mark()),
3178 _ct_bs((CardTableModRefBS*) (g1h->barrier_set())),
3179 _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { }
3181 bool doHeapRegion(HeapRegion* hr) {
3182 if (hr->continuesHumongous()) {
3183 // We will ignore these here and process them when their
3184 // associated "starts humongous" region is processed.
3185 // Note that we cannot rely on their associated
3186 // "starts humongous" region to have their bit set to 1
3187 // since, due to the region chunking in the parallel region
3188 // iteration, a "continues humongous" region might be visited
3189 // before its associated "starts humongous".
3190 return false;
3191 }
3193 HeapWord* start = hr->bottom();
3194 HeapWord* limit = hr->next_top_at_mark_start();
3195 HeapWord* end = hr->end();
3197 assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(),
3198 err_msg("Preconditions not met - "
3199 "start: "PTR_FORMAT", limit: "PTR_FORMAT", "
3200 "top: "PTR_FORMAT", end: "PTR_FORMAT,
3201 p2i(start), p2i(limit), p2i(hr->top()), p2i(hr->end())));
3203 assert(hr->next_marked_bytes() == 0, "Precondition");
3205 if (start == limit) {
3206 // NTAMS of this region has not been set so nothing to do.
3207 return false;
3208 }
3210 // 'start' should be in the heap.
3211 assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity");
3212 // 'end' *may* be just beyond the end of the heap (if hr is the last region)
3213 assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity");
3215 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
3216 BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit);
3217 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(end);
3219 // If ntams is not card aligned then we bump the card bitmap index
3220 // for limit so that we get all the cards spanned by
3221 // the object ending at ntams.
3222 // Note: if this is the last region in the heap then ntams
3223 // could actually be just beyond the end of the heap;
3224 // limit_idx will then correspond to a (non-existent) card
3225 // that is also outside the heap.
3226 if (_g1h->is_in_g1_reserved(limit) && !_ct_bs->is_card_aligned(limit)) {
3227 limit_idx += 1;
3228 }
3230 assert(limit_idx <= end_idx, "or else use atomics");
3232 // Aggregate the "stripe" in the count data associated with hr.
3233 uint hrs_index = hr->hrs_index();
3234 size_t marked_bytes = 0;
3236 for (uint i = 0; i < _max_worker_id; i += 1) {
3237 size_t* marked_bytes_array = _cm->count_marked_bytes_array_for(i);
3238 BitMap* task_card_bm = _cm->count_card_bitmap_for(i);
3240 // Fetch the marked_bytes in this region for task i and
3241 // add it to the running total for this region.
3242 marked_bytes += marked_bytes_array[hrs_index];
3244 // Now union the bitmaps[0,max_worker_id)[start_idx..limit_idx)
3245 // into the global card bitmap.
3246 BitMap::idx_t scan_idx = task_card_bm->get_next_one_offset(start_idx, limit_idx);
3248 while (scan_idx < limit_idx) {
3249 assert(task_card_bm->at(scan_idx) == true, "should be");
3250 _cm_card_bm->set_bit(scan_idx);
3251 assert(_cm_card_bm->at(scan_idx) == true, "should be");
3253 // BitMap::get_next_one_offset() can handle the case when
3254 // its left_offset parameter is greater than its right_offset
3255 // parameter. It does, however, have an early exit if
3256 // left_offset == right_offset. So let's limit the value
3257 // passed in for left offset here.
3258 BitMap::idx_t next_idx = MIN2(scan_idx + 1, limit_idx);
3259 scan_idx = task_card_bm->get_next_one_offset(next_idx, limit_idx);
3260 }
3261 }
3263 // Update the marked bytes for this region.
3264 hr->add_to_marked_bytes(marked_bytes);
3266 // Next heap region
3267 return false;
3268 }
3269 };
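// ---------------------------------------------------------------------------
// [Editorial sketch -- not part of this changeset.] To make the "stripe"
// aggregation above easier to follow, here is a minimal free-standing
// version of the union step, with std::vector<bool> standing in for the
// BitMap class. All names below are hypothetical.

#include <vector>
#include <cstddef>

static void union_worker_stripe(const std::vector<bool>& task_card_bm,
                                std::vector<bool>& global_card_bm,
                                size_t start_idx, size_t limit_idx) {
  // Copy every set bit in [start_idx, limit_idx) into the global bitmap.
  // The closure above does the same thing more efficiently by jumping
  // from one set bit to the next with get_next_one_offset().
  for (size_t idx = start_idx; idx < limit_idx; ++idx) {
    if (task_card_bm[idx]) {
      global_card_bm[idx] = true;
    }
  }
}

// No atomics are needed for the global bitmap updates because a given
// region's stripe is only aggregated by the worker that claimed the region
// (cf. the "or else use atomics" assert above).
// ---------------------------------------------------------------------------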
3271 class G1AggregateCountDataTask: public AbstractGangTask {
3272 protected:
3273 G1CollectedHeap* _g1h;
3274 ConcurrentMark* _cm;
3275 BitMap* _cm_card_bm;
3276 uint _max_worker_id;
3277 int _active_workers;
3279 public:
3280 G1AggregateCountDataTask(G1CollectedHeap* g1h,
3281 ConcurrentMark* cm,
3282 BitMap* cm_card_bm,
3283 uint max_worker_id,
3284 int n_workers) :
3285 AbstractGangTask("Count Aggregation"),
3286 _g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm),
3287 _max_worker_id(max_worker_id),
3288 _active_workers(n_workers) { }
3290 void work(uint worker_id) {
3291 AggregateCountDataHRClosure cl(_g1h, _cm_card_bm, _max_worker_id);
3293 if (G1CollectedHeap::use_parallel_gc_threads()) {
3294 _g1h->heap_region_par_iterate_chunked(&cl, worker_id,
3295 _active_workers,
3296 HeapRegion::AggregateCountClaimValue);
3297 } else {
3298 _g1h->heap_region_iterate(&cl);
3299 }
3300 }
3301 };
3304 void ConcurrentMark::aggregate_count_data() {
3305 int n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
3306 _g1h->workers()->active_workers() :
3307 1);
3309 G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm,
3310 _max_worker_id, n_workers);
3312 if (G1CollectedHeap::use_parallel_gc_threads()) {
3313 assert(_g1h->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
3314 "sanity check");
3315 _g1h->set_par_threads(n_workers);
3316 _g1h->workers()->run_task(&g1_par_agg_task);
3317 _g1h->set_par_threads(0);
3319 assert(_g1h->check_heap_region_claim_values(HeapRegion::AggregateCountClaimValue),
3320 "sanity check");
3321 _g1h->reset_heap_region_claim_values();
3322 } else {
3323 g1_par_agg_task.work(0);
3324 }
3325 }
3327 // Clear the per-worker arrays used to store the per-region counting data
3328 void ConcurrentMark::clear_all_count_data() {
3329 // Clear the global card bitmap - it will be filled during
3330 // liveness count aggregation (during remark) and the
3331 // final counting task.
3332 _card_bm.clear();
3334 // Clear the global region bitmap - it will be filled as part
3335 // of the final counting task.
3336 _region_bm.clear();
3338 uint max_regions = _g1h->max_regions();
3339 assert(_max_worker_id > 0, "uninitialized");
3341 for (uint i = 0; i < _max_worker_id; i += 1) {
3342 BitMap* task_card_bm = count_card_bitmap_for(i);
3343 size_t* marked_bytes_array = count_marked_bytes_array_for(i);
3345 assert(task_card_bm->size() == _card_bm.size(), "size mismatch");
3346 assert(marked_bytes_array != NULL, "uninitialized");
3348 memset(marked_bytes_array, 0, (size_t) max_regions * sizeof(size_t));
3349 task_card_bm->clear();
3350 }
3351 }
3353 void ConcurrentMark::print_stats() {
3354 if (verbose_stats()) {
3355 gclog_or_tty->print_cr("---------------------------------------------------------------------");
3356 for (size_t i = 0; i < _active_tasks; ++i) {
3357 _tasks[i]->print_stats();
3358 gclog_or_tty->print_cr("---------------------------------------------------------------------");
3359 }
3360 }
3361 }
3363 // abandon current marking iteration due to a Full GC
3364 void ConcurrentMark::abort() {
3365 // Clear all marks to force marking thread to do nothing
3366 _nextMarkBitMap->clearAll();
3368 // Note we cannot clear the previous marking bitmap here
3369 // since VerifyDuringGC verifies the objects marked during
3370 // a full GC against the previous bitmap.
3372 // Clear the liveness counting data
3373 clear_all_count_data();
3374 // Empty mark stack
3375 reset_marking_state();
3376 for (uint i = 0; i < _max_worker_id; ++i) {
3377 _tasks[i]->clear_region_fields();
3378 }
3379 _first_overflow_barrier_sync.abort();
3380 _second_overflow_barrier_sync.abort();
3381 const GCId& gc_id = _g1h->gc_tracer_cm()->gc_id();
3382 if (!gc_id.is_undefined()) {
3383 // We can do multiple full GCs before ConcurrentMarkThread::run() gets a chance
3384 // to detect that it was aborted. Only keep track of the first GC id we aborted in.
3385 _aborted_gc_id = gc_id;
3386 }
3387 _has_aborted = true;
3389 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3390 satb_mq_set.abandon_partial_marking();
3391 // This can be called either during or outside marking; we'll read
3392 // the expected_active value from the SATB queue set.
3393 satb_mq_set.set_active_all_threads(
3394 false, /* new active value */
3395 satb_mq_set.is_active() /* expected_active */);
3397 _g1h->trace_heap_after_concurrent_cycle();
3398 _g1h->register_concurrent_cycle_end();
3399 }
3401 const GCId& ConcurrentMark::concurrent_gc_id() {
3402 if (has_aborted()) {
3403 return _aborted_gc_id;
3404 }
3405 return _g1h->gc_tracer_cm()->gc_id();
3406 }
3408 static void print_ms_time_info(const char* prefix, const char* name,
3409 NumberSeq& ns) {
3410 gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
3411 prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
3412 if (ns.num() > 0) {
3413 gclog_or_tty->print_cr("%s [std. dev = %8.2f ms, max = %8.2f ms]",
3414 prefix, ns.sd(), ns.maximum());
3415 }
3416 }
3418 void ConcurrentMark::print_summary_info() {
3419 gclog_or_tty->print_cr(" Concurrent marking:");
3420 print_ms_time_info(" ", "init marks", _init_times);
3421 print_ms_time_info(" ", "remarks", _remark_times);
3422 {
3423 print_ms_time_info(" ", "final marks", _remark_mark_times);
3424 print_ms_time_info(" ", "weak refs", _remark_weak_ref_times);
3426 }
3427 print_ms_time_info(" ", "cleanups", _cleanup_times);
3428 gclog_or_tty->print_cr(" Final counting total time = %8.2f s (avg = %8.2f ms).",
3429 _total_counting_time,
3430 (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 /
3431 (double)_cleanup_times.num()
3432 : 0.0));
3433 if (G1ScrubRemSets) {
3434 gclog_or_tty->print_cr(" RS scrub total time = %8.2f s (avg = %8.2f ms).",
3435 _total_rs_scrub_time,
3436 (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 /
3437 (double)_cleanup_times.num()
3438 : 0.0));
3439 }
3440 gclog_or_tty->print_cr(" Total stop_world time = %8.2f s.",
3441 (_init_times.sum() + _remark_times.sum() +
3442 _cleanup_times.sum())/1000.0);
3443 gclog_or_tty->print_cr(" Total concurrent time = %8.2f s "
3444 "(%8.2f s marking).",
3445 cmThread()->vtime_accum(),
3446 cmThread()->vtime_mark_accum());
3447 }
3449 void ConcurrentMark::print_worker_threads_on(outputStream* st) const {
3450 if (use_parallel_marking_threads()) {
3451 _parallel_workers->print_worker_threads_on(st);
3452 }
3453 }
3455 void ConcurrentMark::print_on_error(outputStream* st) const {
3456 st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT,
3457 p2i(_prevMarkBitMap), p2i(_nextMarkBitMap));
3458 _prevMarkBitMap->print_on_error(st, " Prev Bits: ");
3459 _nextMarkBitMap->print_on_error(st, " Next Bits: ");
3460 }
3462 // We take a break if someone is trying to stop the world.
3463 bool ConcurrentMark::do_yield_check(uint worker_id) {
3464 if (SuspendibleThreadSet::should_yield()) {
3465 if (worker_id == 0) {
3466 _g1h->g1_policy()->record_concurrent_pause();
3467 }
3468 SuspendibleThreadSet::yield();
3469 return true;
3470 } else {
3471 return false;
3472 }
3473 }
3475 bool ConcurrentMark::containing_card_is_marked(void* p) {
3476 size_t offset = pointer_delta(p, _g1h->reserved_region().start(), 1);
3477 return _card_bm.at(offset >> CardTableModRefBS::card_shift);
3478 }
3480 bool ConcurrentMark::containing_cards_are_marked(void* start,
3481 void* last) {
3482 return containing_card_is_marked(start) &&
3483 containing_card_is_marked(last);
3484 }
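// ---------------------------------------------------------------------------
// [Editorial sketch -- not part of this changeset.] The address-to-card
// mapping used above is a plain byte offset from the start of the reserved
// heap, shifted by the card size. A stand-alone version, assuming the usual
// 512-byte cards (card_shift == 9; the concrete constant is an assumption
// here):

#include <cstddef>

static size_t card_index_for(const void* p, const void* heap_start,
                             unsigned card_shift /* e.g. 9 */) {
  size_t byte_offset = (size_t)((const char*) p - (const char*) heap_start);
  return byte_offset >> card_shift;
}

// With card_shift == 9, heap offsets 0..511 map to card 0, 512..1023 to
// card 1, and so on; containing_cards_are_marked() simply checks the cards
// of the first and last address of a range.
// ---------------------------------------------------------------------------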
3486 #ifndef PRODUCT
3487 // for debugging purposes
3488 void ConcurrentMark::print_finger() {
3489 gclog_or_tty->print_cr("heap ["PTR_FORMAT", "PTR_FORMAT"), global finger = "PTR_FORMAT,
3490 p2i(_heap_start), p2i(_heap_end), p2i(_finger));
3491 for (uint i = 0; i < _max_worker_id; ++i) {
3492 gclog_or_tty->print(" %u: " PTR_FORMAT, i, p2i(_tasks[i]->finger()));
3493 }
3494 gclog_or_tty->cr();
3495 }
3496 #endif
3498 void CMTask::scan_object(oop obj) {
3499 assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant");
3501 if (_cm->verbose_high()) {
3502 gclog_or_tty->print_cr("[%u] we're scanning object "PTR_FORMAT,
3503 _worker_id, p2i((void*) obj));
3504 }
3506 size_t obj_size = obj->size();
3507 _words_scanned += obj_size;
3509 obj->oop_iterate(_cm_oop_closure);
3510 statsOnly( ++_objs_scanned );
3511 check_limits();
3512 }
3514 // Closure for iteration over bitmaps
3515 class CMBitMapClosure : public BitMapClosure {
3516 private:
3517 // the bitmap that is being iterated over
3518 CMBitMap* _nextMarkBitMap;
3519 ConcurrentMark* _cm;
3520 CMTask* _task;
3522 public:
3523 CMBitMapClosure(CMTask* task, ConcurrentMark* cm, CMBitMap* nextMarkBitMap) :
3524 _nextMarkBitMap(nextMarkBitMap), _cm(cm), _task(task) { }
3526 bool do_bit(size_t offset) {
3527 HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset);
3528 assert(_nextMarkBitMap->isMarked(addr), "invariant");
3529 assert(addr < _cm->finger(), "invariant");
3531 statsOnly( _task->increase_objs_found_on_bitmap() );
3532 assert(addr >= _task->finger(), "invariant");
3534 // We move that task's local finger along.
3535 _task->move_finger_to(addr);
3537 _task->scan_object(oop(addr));
3538 // we only partially drain the local queue and global stack
3539 _task->drain_local_queue(true);
3540 _task->drain_global_stack(true);
3542 // if the has_aborted flag has been raised, we need to bail out of
3543 // the iteration
3544 return !_task->has_aborted();
3545 }
3546 };
3548 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
3549 ConcurrentMark* cm,
3550 CMTask* task)
3551 : _g1h(g1h), _cm(cm), _task(task) {
3552 assert(_ref_processor == NULL, "should be initialized to NULL");
3554 if (G1UseConcMarkReferenceProcessing) {
3555 _ref_processor = g1h->ref_processor_cm();
3556 assert(_ref_processor != NULL, "should not be NULL");
3557 }
3558 }
3560 void CMTask::setup_for_region(HeapRegion* hr) {
3561 // Separated the asserts so that we know which one fires.
3562 assert(hr != NULL,
3563 "claim_region() should have filtered out continues humongous regions");
3564 assert(!hr->continuesHumongous(),
3565 "claim_region() should have filtered out continues humongous regions");
3567 if (_cm->verbose_low()) {
3568 gclog_or_tty->print_cr("[%u] setting up for region "PTR_FORMAT,
3569 _worker_id, p2i(hr));
3570 }
3572 _curr_region = hr;
3573 _finger = hr->bottom();
3574 update_region_limit();
3575 }
3577 void CMTask::update_region_limit() {
3578 HeapRegion* hr = _curr_region;
3579 HeapWord* bottom = hr->bottom();
3580 HeapWord* limit = hr->next_top_at_mark_start();
3582 if (limit == bottom) {
3583 if (_cm->verbose_low()) {
3584 gclog_or_tty->print_cr("[%u] found an empty region "
3585 "["PTR_FORMAT", "PTR_FORMAT")",
3586 _worker_id, p2i(bottom), p2i(limit));
3587 }
3588 // The region was collected underneath our feet.
3589 // We set the finger to bottom to ensure that the bitmap
3590 // iteration that will follow this will not do anything.
3591 // (this is not a condition that holds when we set the region up,
3592 // as the region is not supposed to be empty in the first place)
3593 _finger = bottom;
3594 } else if (limit >= _region_limit) {
3595 assert(limit >= _finger, "peace of mind");
3596 } else {
3597 assert(limit < _region_limit, "only way to get here");
3598 // This can happen under some pretty unusual circumstances. An
3599 // evacuation pause empties the region underneath our feet (NTAMS
3600 // at bottom). We then do some allocation in the region (NTAMS
3601 // stays at bottom), followed by the region being used as a GC
3602 // alloc region (NTAMS will move to top() and the objects
3603 // originally below it will be grayed). All objects now marked in
3604 // the region are explicitly grayed, if below the global finger,
3605 // and in fact we do not need to scan anything else. So, we simply
3606 // set _finger to limit to ensure that the bitmap iteration
3607 // doesn't do anything.
3608 _finger = limit;
3609 }
3611 _region_limit = limit;
3612 }
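// ---------------------------------------------------------------------------
// [Editorial note -- not part of this changeset.] The three cases above,
// in brief:
//
//   limit == bottom        -> region was emptied under us; park _finger at
//                             bottom so the coming bitmap iteration is a no-op
//   limit >= _region_limit -> NTAMS stayed put or moved forward; nothing to do
//   limit <  _region_limit -> region was emptied, refilled, and then used as a
//                             GC alloc region; everything newly marked is
//                             already explicitly grayed, so park _finger at
//                             limit and skip the bitmap iteration
// ---------------------------------------------------------------------------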
3614 void CMTask::giveup_current_region() {
3615 assert(_curr_region != NULL, "invariant");
3616 if (_cm->verbose_low()) {
3617 gclog_or_tty->print_cr("[%u] giving up region "PTR_FORMAT,
3618 _worker_id, p2i(_curr_region));
3619 }
3620 clear_region_fields();
3621 }
3623 void CMTask::clear_region_fields() {
3624 // Values for these three fields that indicate that we're not
3625 // holding on to a region.
3626 _curr_region = NULL;
3627 _finger = NULL;
3628 _region_limit = NULL;
3629 }
3631 void CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
3632 if (cm_oop_closure == NULL) {
3633 assert(_cm_oop_closure != NULL, "invariant");
3634 } else {
3635 assert(_cm_oop_closure == NULL, "invariant");
3636 }
3637 _cm_oop_closure = cm_oop_closure;
3638 }
3640 void CMTask::reset(CMBitMap* nextMarkBitMap) {
3641 guarantee(nextMarkBitMap != NULL, "invariant");
3643 if (_cm->verbose_low()) {
3644 gclog_or_tty->print_cr("[%u] resetting", _worker_id);
3645 }
3647 _nextMarkBitMap = nextMarkBitMap;
3648 clear_region_fields();
3650 _calls = 0;
3651 _elapsed_time_ms = 0.0;
3652 _termination_time_ms = 0.0;
3653 _termination_start_time_ms = 0.0;
3655 #if _MARKING_STATS_
3656 _local_pushes = 0;
3657 _local_pops = 0;
3658 _local_max_size = 0;
3659 _objs_scanned = 0;
3660 _global_pushes = 0;
3661 _global_pops = 0;
3662 _global_max_size = 0;
3663 _global_transfers_to = 0;
3664 _global_transfers_from = 0;
3665 _regions_claimed = 0;
3666 _objs_found_on_bitmap = 0;
3667 _satb_buffers_processed = 0;
3668 _steal_attempts = 0;
3669 _steals = 0;
3670 _aborted = 0;
3671 _aborted_overflow = 0;
3672 _aborted_cm_aborted = 0;
3673 _aborted_yield = 0;
3674 _aborted_timed_out = 0;
3675 _aborted_satb = 0;
3676 _aborted_termination = 0;
3677 #endif // _MARKING_STATS_
3678 }
3680 bool CMTask::should_exit_termination() {
3681 regular_clock_call();
3682 // This is called when we are in the termination protocol. We should
3683 // quit if, for some reason, this task wants to abort or the global
3684 // stack is not empty (this means that we can get work from it).
3685 return !_cm->mark_stack_empty() || has_aborted();
3686 }
3688 void CMTask::reached_limit() {
3689 assert(_words_scanned >= _words_scanned_limit ||
3690 _refs_reached >= _refs_reached_limit,
3691 "shouldn't have been called otherwise");
3692 regular_clock_call();
3693 }
3695 void CMTask::regular_clock_call() {
3696 if (has_aborted()) return;
3698 // First, we need to recalculate the words scanned and refs reached
3699 // limits for the next clock call.
3700 recalculate_limits();
3702 // During the regular clock call we do the following
3704 // (1) If an overflow has been flagged, then we abort.
3705 if (_cm->has_overflown()) {
3706 set_has_aborted();
3707 return;
3708 }
3710 // If we are not concurrent (i.e. we're doing remark) we don't need
3711 // to check anything else. The other steps are only needed during
3712 // the concurrent marking phase.
3713 if (!concurrent()) return;
3715 // (2) If marking has been aborted for Full GC, then we also abort.
3716 if (_cm->has_aborted()) {
3717 set_has_aborted();
3718 statsOnly( ++_aborted_cm_aborted );
3719 return;
3720 }
3722 double curr_time_ms = os::elapsedVTime() * 1000.0;
3724 // (3) If marking stats are enabled, then we update the step history.
3725 #if _MARKING_STATS_
3726 if (_words_scanned >= _words_scanned_limit) {
3727 ++_clock_due_to_scanning;
3728 }
3729 if (_refs_reached >= _refs_reached_limit) {
3730 ++_clock_due_to_marking;
3731 }
3733 double last_interval_ms = curr_time_ms - _interval_start_time_ms;
3734 _interval_start_time_ms = curr_time_ms;
3735 _all_clock_intervals_ms.add(last_interval_ms);
3737 if (_cm->verbose_medium()) {
3738 gclog_or_tty->print_cr("[%u] regular clock, interval = %1.2lfms, "
3739 "scanned = %d%s, refs reached = %d%s",
3740 _worker_id, last_interval_ms,
3741 _words_scanned,
3742 (_words_scanned >= _words_scanned_limit) ? " (*)" : "",
3743 _refs_reached,
3744 (_refs_reached >= _refs_reached_limit) ? " (*)" : "");
3745 }
3746 #endif // _MARKING_STATS_
3748 // (4) We check whether we should yield. If we have to, then we abort.
3749 if (SuspendibleThreadSet::should_yield()) {
3750 // We should yield. To do this we abort the task. The caller is
3751 // responsible for yielding.
3752 set_has_aborted();
3753 statsOnly( ++_aborted_yield );
3754 return;
3755 }
3757 // (5) We check whether we've reached our time quota. If we have,
3758 // then we abort.
3759 double elapsed_time_ms = curr_time_ms - _start_time_ms;
3760 if (elapsed_time_ms > _time_target_ms) {
3761 set_has_aborted();
3762 _has_timed_out = true;
3763 statsOnly( ++_aborted_timed_out );
3764 return;
3765 }
3767 // (6) Finally, we check whether there are enough completed SATB
3768 // buffers available for processing. If there are, we abort.
3769 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3770 if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
3771 if (_cm->verbose_low()) {
3772 gclog_or_tty->print_cr("[%u] aborting to deal with pending SATB buffers",
3773 _worker_id);
3774 }
3775 // We do need to process SATB buffers; we'll abort and restart
3776 // the marking task to do so.
3777 set_has_aborted();
3778 statsOnly( ++_aborted_satb );
3779 return;
3780 }
3781 }
3783 void CMTask::recalculate_limits() {
3784 _real_words_scanned_limit = _words_scanned + words_scanned_period;
3785 _words_scanned_limit = _real_words_scanned_limit;
3787 _real_refs_reached_limit = _refs_reached + refs_reached_period;
3788 _refs_reached_limit = _real_refs_reached_limit;
3789 }
3791 void CMTask::decrease_limits() {
3792 // This is called when we believe that we're going to do an infrequent
3793 // operation which will increase the per-byte scanned cost (i.e. moving
3794 // entries to/from the global stack). It basically tries to decrease the
3795 // scanning limit so that the clock is called earlier.
3797 if (_cm->verbose_medium()) {
3798 gclog_or_tty->print_cr("[%u] decreasing limits", _worker_id);
3799 }
3801 _words_scanned_limit = _real_words_scanned_limit -
3802 3 * words_scanned_period / 4;
3803 _refs_reached_limit = _real_refs_reached_limit -
3804 3 * refs_reached_period / 4;
3805 }
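// ---------------------------------------------------------------------------
// [Editorial note -- not part of this changeset.] Worked example of the
// arithmetic above: if the real limit is S + P (S = words scanned so far,
// P = words_scanned_period), then after decrease_limits()
//
//   _words_scanned_limit = (S + P) - 3 * P / 4 = S + P / 4
//
// i.e. the next regular_clock_call() fires after only a quarter period.
// For a hypothetical period of 12 * 1024 words, that is 3 * 1024 more
// words instead of 12 * 1024. The refs limit is reduced the same way.
// ---------------------------------------------------------------------------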
3807 void CMTask::move_entries_to_global_stack() {
3808 // local array where we'll store the entries that will be popped
3809 // from the local queue
3810 oop buffer[global_stack_transfer_size];
3812 int n = 0;
3813 oop obj;
3814 while (n < global_stack_transfer_size && _task_queue->pop_local(obj)) {
3815 buffer[n] = obj;
3816 ++n;
3817 }
3819 if (n > 0) {
3820 // we popped at least one entry from the local queue
3822 statsOnly( ++_global_transfers_to; _local_pops += n );
3824 if (!_cm->mark_stack_push(buffer, n)) {
3825 if (_cm->verbose_low()) {
3826 gclog_or_tty->print_cr("[%u] aborting due to global stack overflow",
3827 _worker_id);
3828 }
3829 set_has_aborted();
3830 } else {
3831 // the transfer was successful
3833 if (_cm->verbose_medium()) {
3834 gclog_or_tty->print_cr("[%u] pushed %d entries to the global stack",
3835 _worker_id, n);
3836 }
3837 statsOnly( int tmp_size = _cm->mark_stack_size();
3838 if (tmp_size > _global_max_size) {
3839 _global_max_size = tmp_size;
3840 }
3841 _global_pushes += n );
3842 }
3843 }
3845 // this operation was quite expensive, so decrease the limits
3846 decrease_limits();
3847 }
3849 void CMTask::get_entries_from_global_stack() {
3850 // local array where we'll store the entries that will be popped
3851 // from the global stack.
3852 oop buffer[global_stack_transfer_size];
3853 int n;
3854 _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n);
3855 assert(n <= global_stack_transfer_size,
3856 "we should not pop more than the given limit");
3857 if (n > 0) {
3858 // yes, we did actually pop at least one entry
3860 statsOnly( ++_global_transfers_from; _global_pops += n );
3861 if (_cm->verbose_medium()) {
3862 gclog_or_tty->print_cr("[%u] popped %d entries from the global stack",
3863 _worker_id, n);
3864 }
3865 for (int i = 0; i < n; ++i) {
3866 bool success = _task_queue->push(buffer[i]);
3867 // We only call this when the local queue is empty or under a
3868 // given target limit. So, we do not expect this push to fail.
3869 assert(success, "invariant");
3870 }
3872 statsOnly( int tmp_size = _task_queue->size();
3873 if (tmp_size > _local_max_size) {
3874 _local_max_size = tmp_size;
3875 }
3876 _local_pushes += n );
3877 }
3879 // this operation was quite expensive, so decrease the limits
3880 decrease_limits();
3881 }
3883 void CMTask::drain_local_queue(bool partially) {
3884 if (has_aborted()) return;
3886 // Decide what the target size is, depending on whether we're going to
3887 // drain it partially (so that other tasks can steal if they run out
3888 // of things to do) or totally (at the very end).
3889 size_t target_size;
3890 if (partially) {
3891 target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
3892 } else {
3893 target_size = 0;
3894 }
3896 if (_task_queue->size() > target_size) {
3897 if (_cm->verbose_high()) {
3898 gclog_or_tty->print_cr("[%u] draining local queue, target size = " SIZE_FORMAT,
3899 _worker_id, target_size);
3900 }
3902 oop obj;
3903 bool ret = _task_queue->pop_local(obj);
3904 while (ret) {
3905 statsOnly( ++_local_pops );
3907 if (_cm->verbose_high()) {
3908 gclog_or_tty->print_cr("[%u] popped "PTR_FORMAT, _worker_id,
3909 p2i((void*) obj));
3910 }
3912 assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" );
3913 assert(!_g1h->is_on_master_free_list(
3914 _g1h->heap_region_containing((HeapWord*) obj)), "invariant");
3916 scan_object(obj);
3918 if (_task_queue->size() <= target_size || has_aborted()) {
3919 ret = false;
3920 } else {
3921 ret = _task_queue->pop_local(obj);
3922 }
3923 }
3925 if (_cm->verbose_high()) {
3926 gclog_or_tty->print_cr("[%u] drained local queue, size = %d",
3927 _worker_id, _task_queue->size());
3928 }
3929 }
3930 }
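// ---------------------------------------------------------------------------
// [Editorial note -- not part of this changeset.] The partial drain target
// above works out to
//
//   target_size = min(max_elems / 3, GCDrainStackTargetSize)
//
// so a partial drain leaves up to a third of the queue capacity (capped by
// GCDrainStackTargetSize) available for other tasks to steal. Assuming,
// for illustration, a queue capacity in the tens of thousands of entries
// and the default GCDrainStackTargetSize of 64, the cap wins and a partial
// drain stops once roughly 64 entries remain; a total drain (target 0)
// empties the queue.
// ---------------------------------------------------------------------------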
3932 void CMTask::drain_global_stack(bool partially) {
3933 if (has_aborted()) return;
3935 // We have a policy to drain the local queue before we attempt to
3936 // drain the global stack.
3937 assert(partially || _task_queue->size() == 0, "invariant");
3939 // Decide what the target size is, depending on whether we're going to
3940 // drain it partially (so that other tasks can steal if they run out
3941 // of things to do) or totally (at the very end). Notice that,
3942 // because we move entries from the global stack in chunks or
3943 // because another task might be doing the same, we might in fact
3944 // drop below the target. But, this is not a problem.
3945 size_t target_size;
3946 if (partially) {
3947 target_size = _cm->partial_mark_stack_size_target();
3948 } else {
3949 target_size = 0;
3950 }
3952 if (_cm->mark_stack_size() > target_size) {
3953 if (_cm->verbose_low()) {
3954 gclog_or_tty->print_cr("[%u] draining global_stack, target size " SIZE_FORMAT,
3955 _worker_id, target_size);
3956 }
3958 while (!has_aborted() && _cm->mark_stack_size() > target_size) {
3959 get_entries_from_global_stack();
3960 drain_local_queue(partially);
3961 }
3963 if (_cm->verbose_low()) {
3964 gclog_or_tty->print_cr("[%u] drained global stack, size = " SIZE_FORMAT,
3965 _worker_id, _cm->mark_stack_size());
3966 }
3967 }
3968 }
3970 // The SATB queue set makes several assumptions about whether to call the
3971 // par or non-par versions of its methods. This is why some of the code is
3972 // replicated. We should really get rid of the single-threaded version
3973 // of the code to simplify things.
3974 void CMTask::drain_satb_buffers() {
3975 if (has_aborted()) return;
3977 // We set this so that the regular clock knows that we're in the
3978 // middle of draining buffers and doesn't set the abort flag when it
3979 // notices that SATB buffers are available for draining. It'd be
3980 // very counterproductive if it did that. :-)
3981 _draining_satb_buffers = true;
3983 CMObjectClosure oc(this);
3984 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3985 if (G1CollectedHeap::use_parallel_gc_threads()) {
3986 satb_mq_set.set_par_closure(_worker_id, &oc);
3987 } else {
3988 satb_mq_set.set_closure(&oc);
3989 }
3991 // This keeps claiming and applying the closure to completed buffers
3992 // until we run out of buffers or we need to abort.
3993 if (G1CollectedHeap::use_parallel_gc_threads()) {
3994 while (!has_aborted() &&
3995 satb_mq_set.par_apply_closure_to_completed_buffer(_worker_id)) {
3996 if (_cm->verbose_medium()) {
3997 gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id);
3998 }
3999 statsOnly( ++_satb_buffers_processed );
4000 regular_clock_call();
4001 }
4002 } else {
4003 while (!has_aborted() &&
4004 satb_mq_set.apply_closure_to_completed_buffer()) {
4005 if (_cm->verbose_medium()) {
4006 gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id);
4007 }
4008 statsOnly( ++_satb_buffers_processed );
4009 regular_clock_call();
4010 }
4011 }
4013 _draining_satb_buffers = false;
4015 assert(has_aborted() ||
4016 concurrent() ||
4017 satb_mq_set.completed_buffers_num() == 0, "invariant");
4019 if (G1CollectedHeap::use_parallel_gc_threads()) {
4020 satb_mq_set.set_par_closure(_worker_id, NULL);
4021 } else {
4022 satb_mq_set.set_closure(NULL);
4023 }
4025 // Again, this was a potentially expensive operation; decrease the
4026 // limits to get the regular clock call earlier.
4027 decrease_limits();
4028 }
4030 void CMTask::print_stats() {
4031 gclog_or_tty->print_cr("Marking Stats, task = %u, calls = %d",
4032 _worker_id, _calls);
4033 gclog_or_tty->print_cr(" Elapsed time = %1.2lfms, Termination time = %1.2lfms",
4034 _elapsed_time_ms, _termination_time_ms);
4035 gclog_or_tty->print_cr(" Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
4036 _step_times_ms.num(), _step_times_ms.avg(),
4037 _step_times_ms.sd());
4038 gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms",
4039 _step_times_ms.maximum(), _step_times_ms.sum());
4041 #if _MARKING_STATS_
4042 gclog_or_tty->print_cr(" Clock Intervals (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
4043 _all_clock_intervals_ms.num(), _all_clock_intervals_ms.avg(),
4044 _all_clock_intervals_ms.sd());
4045 gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms",
4046 _all_clock_intervals_ms.maximum(),
4047 _all_clock_intervals_ms.sum());
4048 gclog_or_tty->print_cr(" Clock Causes (cum): scanning = %d, marking = %d",
4049 _clock_due_to_scanning, _clock_due_to_marking);
4050 gclog_or_tty->print_cr(" Objects: scanned = %d, found on the bitmap = %d",
4051 _objs_scanned, _objs_found_on_bitmap);
4052 gclog_or_tty->print_cr(" Local Queue: pushes = %d, pops = %d, max size = %d",
4053 _local_pushes, _local_pops, _local_max_size);
4054 gclog_or_tty->print_cr(" Global Stack: pushes = %d, pops = %d, max size = %d",
4055 _global_pushes, _global_pops, _global_max_size);
4056 gclog_or_tty->print_cr(" transfers to = %d, transfers from = %d",
4057 _global_transfers_to,_global_transfers_from);
4058 gclog_or_tty->print_cr(" Regions: claimed = %d", _regions_claimed);
4059 gclog_or_tty->print_cr(" SATB buffers: processed = %d", _satb_buffers_processed);
4060 gclog_or_tty->print_cr(" Steals: attempts = %d, successes = %d",
4061 _steal_attempts, _steals);
4062 gclog_or_tty->print_cr(" Aborted: %d, due to", _aborted);
4063 gclog_or_tty->print_cr(" overflow: %d, global abort: %d, yield: %d",
4064 _aborted_overflow, _aborted_cm_aborted, _aborted_yield);
4065 gclog_or_tty->print_cr(" time out: %d, SATB: %d, termination: %d",
4066 _aborted_timed_out, _aborted_satb, _aborted_termination);
4067 #endif // _MARKING_STATS_
4068 }
4070 /*****************************************************************************
4072 The do_marking_step(time_target_ms, ...) method is the building
4073 block of the parallel marking framework. It can be called in parallel
4074 with other invocations of do_marking_step() on different tasks
4075 (but only one per task, obviously) and concurrently with the
4076 mutator threads, or during remark, hence it eliminates the need
4077 for two versions of the code. When called during remark, it will
4078 pick up from where the task left off during the concurrent marking
4079 phase. Interestingly, tasks are also claimable during evacuation
4080 pauses, since do_marking_step() ensures that it aborts before
4081 it needs to yield.
4083 The data structures that it uses to do marking work are the
4084 following:
4086 (1) Marking Bitmap. If there are gray objects that appear only
4087 on the bitmap (this happens either when dealing with an overflow
4088 or when the initial marking phase has simply marked the roots
4089 and didn't push them on the stack), then tasks claim heap
4090 regions whose bitmap they then scan to find gray objects. A
4091 global finger indicates where the end of the last claimed region
4092 is. A local finger indicates how far into the region a task has
4093 scanned. The two fingers are used to determine how to gray an
4094 object (i.e. whether simply marking it is OK, as it will be
4095 visited by a task in the future, or whether it also needs to be
4096 pushed on a stack).
4098 (2) Local Queue. The local queue of the task which is accessed
4099 reasonably efficiently by the task. Other tasks can steal from
4100 it when they run out of work. Throughout the marking phase, a
4101 task attempts to keep its local queue short but not totally
4102 empty, so that entries are available for stealing by other
4103 tasks. Only when there is no more work does a task totally
4104 drain its local queue.
4106 (3) Global Mark Stack. This handles local queue overflow. During
4107 marking, only sets of entries are moved between it and the local
4108 queues, as access to it requires a mutex and finer-grained
4109 interaction with it might cause contention. If it
4110 overflows, then the marking phase should restart and iterate
4111 over the bitmap to identify gray objects. Throughout the marking
4112 phase, tasks attempt to keep the global mark stack at a small
4113 length but not totally empty, so that entries are available for
4114 popping by other tasks. Only when there is no more work do tasks
4115 totally drain the global mark stack.
4117 (4) SATB Buffer Queue. This is where completed SATB buffers are
4118 made available. Buffers are regularly removed from this queue
4119 and scanned for roots, so that the queue doesn't get too
4120 long. During remark, all completed buffers are processed, as
4121 well as the filled-in parts of any uncompleted buffers.
4123 The do_marking_step() method tries to abort when the time target
4124 has been reached. There are a few other cases when the
4125 do_marking_step() method also aborts:
4127 (1) When the marking phase has been aborted (after a Full GC).
4129 (2) When a global overflow (on the global stack) has been
4130 triggered. Before the task aborts, it will actually sync up with
4131 the other tasks to ensure that all the marking data structures
4132 (local queues, stacks, fingers etc.) are re-initialized so that
4133 when do_marking_step() completes, the marking phase can
4134 immediately restart.
4136 (3) When enough completed SATB buffers are available. The
4137 do_marking_step() method only tries to drain SATB buffers right
4138 at the beginning. So, if enough buffers are available, the
4139 marking step aborts and the SATB buffers are processed at
4140 the beginning of the next invocation.
4142 (4) To yield. When we have to yield, we abort and yield
4143 right at the end of do_marking_step(). This saves us a lot
4144 of hassle as, by yielding, we might allow a Full GC. If this
4145 happens then objects will be compacted underneath our feet, the
4146 heap might shrink, etc. We avoid checking for this by just
4147 aborting and doing the yield right at the end.
4149 From the above it follows that the do_marking_step() method should
4150 be called in a loop (or, otherwise, regularly) until it completes.
4152 If a marking step completes without its has_aborted() flag being
4153 true, it means it has completed the current marking phase (and
4154 also all other marking tasks have done so and have all synced up).
4156 A method called regular_clock_call() is invoked "regularly" (in
4157 sub ms intervals) throughout marking. It is this clock method that
4158 checks all the abort conditions which were mentioned above and
4159 decides when the task should abort. A work-based scheme is used to
4160 trigger this clock method: when the number of object words the
4161 marking phase has scanned or the number of references the marking
4162 phase has visited reach a given limit. Additional invocations of
4163 the clock method have been planted in a few other strategic places
4164 too. The initial reason for the clock method was to avoid calling
4165 vtime too regularly, as it is quite expensive. So, once it was in
4166 place, it was natural to piggy-back all the other conditions on it
4167 too and not constantly check them throughout the code.
4169 If do_termination is true then do_marking_step will enter its
4170 termination protocol.
4172 The value of is_serial must be true when do_marking_step is being
4173 called serially (i.e. by the VMThread) and do_marking_step should
4174 skip any synchronization in the termination and overflow code.
4175 Examples include the serial remark code and the serial reference
4176 processing closures.
4178 The value of is_serial must be false when do_marking_step is
4179 being called by any of the worker threads in a work gang.
4180 Examples include the concurrent marking code (CMMarkingTask),
4181 the MT remark code, and the MT reference processing closures.
4183 *****************************************************************************/
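// ---------------------------------------------------------------------------
// [Editorial outline -- not part of this changeset.] A condensed view of
// the control flow implemented by do_marking_step() below, for orientation:
//
//   claim task, compute time target, reset limits and flags
//   drain_satb_buffers(); drain_local_queue(true); drain_global_stack(true);
//   do {
//     finish the currently held region, if any
//       (bitmap iteration from _finger to _region_limit);
//     drain_local_queue(true); drain_global_stack(true);
//     while (!aborted && no current region && regions remain)
//       claim_region(); regular_clock_call();
//   } while (holding a region && !has_aborted());
//   drain_satb_buffers();                    // only if not aborted
//   drain_local_queue(false); drain_global_stack(false);
//   if (do_stealing) steal from other queues until empty or aborted;
//   if (do_termination) enter the termination protocol;
//   if aborted due to overflow: sync on two barriers and re-initialize.
// ---------------------------------------------------------------------------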
4185 void CMTask::do_marking_step(double time_target_ms,
4186 bool do_termination,
4187 bool is_serial) {
4188 assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
4189 assert(concurrent() == _cm->concurrent(), "they should be the same");
4191 G1CollectorPolicy* g1_policy = _g1h->g1_policy();
4192 assert(_task_queues != NULL, "invariant");
4193 assert(_task_queue != NULL, "invariant");
4194 assert(_task_queues->queue(_worker_id) == _task_queue, "invariant");
4196 assert(!_claimed,
4197 "only one thread should claim this task at any one time");
4199 // OK, this doesn't safeguard against all possible scenarios, as it is
4200 // possible for two threads to set the _claimed flag at the same
4201 // time. But it is only for debugging purposes anyway and it will
4202 // catch most problems.
4203 _claimed = true;
4205 _start_time_ms = os::elapsedVTime() * 1000.0;
4206 statsOnly( _interval_start_time_ms = _start_time_ms );
4208 // If do_stealing is true then do_marking_step will attempt to
4209 // steal work from the other CMTasks. It only makes sense to
4210 // enable stealing when the termination protocol is enabled
4211 // and do_marking_step() is not being called serially.
4212 bool do_stealing = do_termination && !is_serial;
4214 double diff_prediction_ms =
4215 g1_policy->get_new_prediction(&_marking_step_diffs_ms);
4216 _time_target_ms = time_target_ms - diff_prediction_ms;
4218 // set up the variables that are used in the work-based scheme to
4219 // call the regular clock method
4220 _words_scanned = 0;
4221 _refs_reached = 0;
4222 recalculate_limits();
4224 // clear all flags
4225 clear_has_aborted();
4226 _has_timed_out = false;
4227 _draining_satb_buffers = false;
4229 ++_calls;
4231 if (_cm->verbose_low()) {
4232 gclog_or_tty->print_cr("[%u] >>>>>>>>>> START, call = %d, "
4233 "target = %1.2lfms >>>>>>>>>>",
4234 _worker_id, _calls, _time_target_ms);
4235 }
4237 // Set up the bitmap and oop closures. Anything that uses them is
4238 // eventually called from this method, so it is OK to allocate these
4239 // statically.
4240 CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap);
4241 G1CMOopClosure cm_oop_closure(_g1h, _cm, this);
4242 set_cm_oop_closure(&cm_oop_closure);
4244 if (_cm->has_overflown()) {
4245 // This can happen if the mark stack overflows during a GC pause
4246 // and this task, after a yield point, restarts. We have to abort
4247 // as we need to get into the overflow protocol which happens
4248 // right at the end of this task.
4249 set_has_aborted();
4250 }
4252 // First drain any available SATB buffers. After this, we will not
4253 // look at SATB buffers before the next invocation of this method.
4254 // If enough completed SATB buffers are queued up, the regular clock
4255 // will abort this task so that it restarts.
4256 drain_satb_buffers();
4257 // ...then partially drain the local queue and the global stack
4258 drain_local_queue(true);
4259 drain_global_stack(true);
4261 do {
4262 if (!has_aborted() && _curr_region != NULL) {
4263 // This means that we're already holding on to a region.
4264 assert(_finger != NULL, "if region is not NULL, then the finger "
4265 "should not be NULL either");
4267 // We might have restarted this task after an evacuation pause
4268 // which might have evacuated the region we're holding on to
4269 // underneath our feet. Let's read its limit again to make sure
4270 // that we do not iterate over a region of the heap that
4271 // contains garbage (update_region_limit() will also move
4272 // _finger to the start of the region if it is found empty).
4273 update_region_limit();
4274 // We will start from _finger not from the start of the region,
4275 // as we might be restarting this task after aborting half-way
4276 // through scanning this region. In this case, _finger points to
4277 // the address where we last found a marked object. If this is a
4278 // fresh region, _finger points to start().
4279 MemRegion mr = MemRegion(_finger, _region_limit);
4281 if (_cm->verbose_low()) {
4282 gclog_or_tty->print_cr("[%u] we're scanning part "
4283 "["PTR_FORMAT", "PTR_FORMAT") "
4284 "of region "HR_FORMAT,
4285 _worker_id, p2i(_finger), p2i(_region_limit),
4286 HR_FORMAT_PARAMS(_curr_region));
4287 }
4289 assert(!_curr_region->isHumongous() || mr.start() == _curr_region->bottom(),
4290 "humongous regions should go around loop once only");
4292 // Some special cases:
4293 // If the memory region is empty, we can just give up the region.
4294 // If the current region is humongous then we only need to check
4295 // the bitmap for the bit associated with the start of the object,
4296 // scan the object if it's live, and give up the region.
4297 // Otherwise, let's iterate over the bitmap of the part of the region
4298 // that is left.
4299 // If the iteration is successful, give up the region.
4300 if (mr.is_empty()) {
4301 giveup_current_region();
4302 regular_clock_call();
4303 } else if (_curr_region->isHumongous() && mr.start() == _curr_region->bottom()) {
4304 if (_nextMarkBitMap->isMarked(mr.start())) {
4305 // The object is marked - apply the closure
4306 BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start());
4307 bitmap_closure.do_bit(offset);
4308 }
4309 // Even if this task aborted while scanning the humongous object
4310 // we can (and should) give up the current region.
4311 giveup_current_region();
4312 regular_clock_call();
4313 } else if (_nextMarkBitMap->iterate(&bitmap_closure, mr)) {
4314 giveup_current_region();
4315 regular_clock_call();
4316 } else {
4317 assert(has_aborted(), "currently the only way to do so");
4318 // The only way to abort the bitmap iteration is to return
4319 // false from the do_bit() method. However, inside the
4320 // do_bit() method we move the _finger to point to the
4321 // object currently being looked at. So, if we bail out, we
4322 // have definitely set _finger to something non-null.
4323 assert(_finger != NULL, "invariant");
4325 // Region iteration was actually aborted. So now _finger
4326 // points to the address of the object we last scanned. If we
4327 // leave it there, when we restart this task, we will rescan
4328 // the object. It is easy to avoid this. We move the finger by
4329 // enough to point to the next possible object header (the
4330 // bitmap knows by how much we need to move it as it knows its
4331 // granularity).
4332 assert(_finger < _region_limit, "invariant");
4333 HeapWord* new_finger = _nextMarkBitMap->nextObject(_finger);
4334 // Check if bitmap iteration was aborted while scanning the last object
4335 if (new_finger >= _region_limit) {
4336 giveup_current_region();
4337 } else {
4338 move_finger_to(new_finger);
4339 }
4340 }
4341 }
4342 // At this point we have either completed iterating over the
4343 // region we were holding on to, or we have aborted.
4345 // We then partially drain the local queue and the global stack.
4346 // (Do we really need this?)
4347 drain_local_queue(true);
4348 drain_global_stack(true);
4350 // Read the note on the claim_region() method on why it might
4351 // return NULL with potentially more regions available for
4352 // claiming and why we have to check out_of_regions() to determine
4353 // whether we're done or not.
4354 while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
4355 // We are going to try to claim a new region. We should have
4356 // given up on the previous one.
4357 // Separated the asserts so that we know which one fires.
4358 assert(_curr_region == NULL, "invariant");
4359 assert(_finger == NULL, "invariant");
4360 assert(_region_limit == NULL, "invariant");
4361 if (_cm->verbose_low()) {
4362 gclog_or_tty->print_cr("[%u] trying to claim a new region", _worker_id);
4363 }
4364 HeapRegion* claimed_region = _cm->claim_region(_worker_id);
4365 if (claimed_region != NULL) {
4366 // Yes, we managed to claim one
4367 statsOnly( ++_regions_claimed );
4369 if (_cm->verbose_low()) {
4370 gclog_or_tty->print_cr("[%u] we successfully claimed "
4371 "region "PTR_FORMAT,
4372 _worker_id, p2i(claimed_region));
4373 }
4375 setup_for_region(claimed_region);
4376 assert(_curr_region == claimed_region, "invariant");
4377 }
4378 // It is important to call the regular clock here. It might take
4379 // a while to claim a region if, for example, we hit a large
4380 // block of empty regions. So we need to call the regular clock
4381 // method once round the loop to make sure it's called
4382 // frequently enough.
4383 regular_clock_call();
4384 }
4386 if (!has_aborted() && _curr_region == NULL) {
4387 assert(_cm->out_of_regions(),
4388 "at this point we should be out of regions");
4389 }
4390 } while ( _curr_region != NULL && !has_aborted());
4392 if (!has_aborted()) {
4393 // We cannot check whether the global stack is empty, since other
4394 // tasks might be pushing objects to it concurrently.
4395 assert(_cm->out_of_regions(),
4396 "at this point we should be out of regions");
4398 if (_cm->verbose_low()) {
4399 gclog_or_tty->print_cr("[%u] all regions claimed", _worker_id);
4400 }
4402 // Try to reduce the number of available SATB buffers so that
4403 // remark has less work to do.
4404 drain_satb_buffers();
4405 }
4407 // Since we've done everything else, we can now totally drain the
4408 // local queue and global stack.
4409 drain_local_queue(false);
4410 drain_global_stack(false);
4412 // Attempt at work stealing from other tasks' queues.
4413 if (do_stealing && !has_aborted()) {
4414 // We have not aborted. This means that we have finished all that
4415 // we could. Let's try to do some stealing...
4417 // We cannot check whether the global stack is empty, since other
4418 // tasks might be pushing objects to it concurrently.
4419 assert(_cm->out_of_regions() && _task_queue->size() == 0,
4420 "only way to reach here");
4422 if (_cm->verbose_low()) {
4423 gclog_or_tty->print_cr("[%u] starting to steal", _worker_id);
4424 }
4426 while (!has_aborted()) {
4427 oop obj;
4428 statsOnly( ++_steal_attempts );
4430 if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) {
4431 if (_cm->verbose_medium()) {
4432 gclog_or_tty->print_cr("[%u] stolen "PTR_FORMAT" successfully",
4433 _worker_id, p2i((void*) obj));
4434 }
4436 statsOnly( ++_steals );
4438 assert(_nextMarkBitMap->isMarked((HeapWord*) obj),
4439 "any stolen object should be marked");
4440 scan_object(obj);
4442 // And since we're towards the end, let's totally drain the
4443 // local queue and global stack.
4444 drain_local_queue(false);
4445 drain_global_stack(false);
4446 } else {
4447 break;
4448 }
4449 }
4450 }
4452 // If we are about to wrap up and go into termination, check if we
4453 // should raise the overflow flag.
4454 if (do_termination && !has_aborted()) {
4455 if (_cm->force_overflow()->should_force()) {
4456 _cm->set_has_overflown();
4457 regular_clock_call();
4458 }
4459 }
4461 // We still haven't aborted. Now, let's try to get into the
4462 // termination protocol.
4463 if (do_termination && !has_aborted()) {
4464 // We cannot check whether the global stack is empty, since other
4465 // tasks might be concurrently pushing objects on it.
4466 // Separated the asserts so that we know which one fires.
4467 assert(_cm->out_of_regions(), "only way to reach here");
4468 assert(_task_queue->size() == 0, "only way to reach here");
4470 if (_cm->verbose_low()) {
4471 gclog_or_tty->print_cr("[%u] starting termination protocol", _worker_id);
4472 }
4474 _termination_start_time_ms = os::elapsedVTime() * 1000.0;
4476 // The CMTask class also extends the TerminatorTerminator class,
4477 // hence its should_exit_termination() method will also decide
4478 // whether to exit the termination protocol or not.
4479 bool finished = (is_serial ||
4480 _cm->terminator()->offer_termination(this));
4481 double termination_end_time_ms = os::elapsedVTime() * 1000.0;
4482 _termination_time_ms +=
4483 termination_end_time_ms - _termination_start_time_ms;
4485 if (finished) {
4486 // We're all done.
4488 if (_worker_id == 0) {
4489 // let's allow task 0 to do this
4490 if (concurrent()) {
4491 assert(_cm->concurrent_marking_in_progress(), "invariant");
4492 // we need to set this to false before the next
4493 // safepoint. This way we ensure that the marking phase
4494 // doesn't observe any more heap expansions.
4495 _cm->clear_concurrent_marking_in_progress();
4496 }
4497 }
4499 // We can now guarantee that the global stack is empty, since
4500 // all other tasks have finished. We separated the guarantees so
4501 // that, if a condition is false, we can immediately find out
4502 // which one.
4503 guarantee(_cm->out_of_regions(), "only way to reach here");
4504 guarantee(_cm->mark_stack_empty(), "only way to reach here");
4505 guarantee(_task_queue->size() == 0, "only way to reach here");
4506 guarantee(!_cm->has_overflown(), "only way to reach here");
4507 guarantee(!_cm->mark_stack_overflow(), "only way to reach here");
4509 if (_cm->verbose_low()) {
4510 gclog_or_tty->print_cr("[%u] all tasks terminated", _worker_id);
4511 }
4512 } else {
4513 // Apparently there's more work to do. Let's abort this task; the
4514 // caller will restart it and we can hopefully find more things to do.
4516 if (_cm->verbose_low()) {
4517 gclog_or_tty->print_cr("[%u] apparently there is more work to do",
4518 _worker_id);
4519 }
4521 set_has_aborted();
4522 statsOnly( ++_aborted_termination );
4523 }
4524 }
4526 // Mainly for debugging purposes to make sure that a pointer to the
4527 // closure which was statically allocated in this frame doesn't
4528 // escape it by accident.
4529 set_cm_oop_closure(NULL);
4530 double end_time_ms = os::elapsedVTime() * 1000.0;
4531 double elapsed_time_ms = end_time_ms - _start_time_ms;
4532 // Update the step history.
4533 _step_times_ms.add(elapsed_time_ms);
4535 if (has_aborted()) {
4536 // The task was aborted for some reason.
4538 statsOnly( ++_aborted );
4540 if (_has_timed_out) {
4541 double diff_ms = elapsed_time_ms - _time_target_ms;
4542 // Keep statistics of how well we did with respect to hitting
4543 // our target only if we actually timed out (if we aborted for
4544 // other reasons, then the results might get skewed).
4545 _marking_step_diffs_ms.add(diff_ms);
4546 }
4548 if (_cm->has_overflown()) {
4549 // This is the interesting one. We aborted because a global
4550 // overflow was raised. This means we have to restart the
4551 // marking phase and start iterating over regions. However, in
4552 // order to do this we have to make sure that all tasks stop
4553 // what they are doing and re-initialise in a safe manner. We
4554 // will achieve this with the use of two barrier sync points.
4556 if (_cm->verbose_low()) {
4557 gclog_or_tty->print_cr("[%u] detected overflow", _worker_id);
4558 }
4560 if (!is_serial) {
4561 // We only need to enter the sync barrier if being called
4562 // from a parallel context
4563 _cm->enter_first_sync_barrier(_worker_id);
4565 // When we exit this sync barrier we know that all tasks have
4566 // stopped doing marking work. So, it's now safe to
4567 // re-initialise our data structures. At the end of this method,
4568 // task 0 will clear the global data structures.
4569 }
4571 statsOnly( ++_aborted_overflow );
4573 // We clear the local state of this task...
4574 clear_region_fields();
4576 if (!is_serial) {
4577 // ...and enter the second barrier.
4578 _cm->enter_second_sync_barrier(_worker_id);
4579 }
4580 // At this point, if we're during the concurrent phase of
4581 // marking, everything has been re-initialized and we're
4582 // ready to restart.
4583 }
4585 if (_cm->verbose_low()) {
4586 gclog_or_tty->print_cr("[%u] <<<<<<<<<< ABORTING, target = %1.2lfms, "
4587 "elapsed = %1.2lfms <<<<<<<<<<",
4588 _worker_id, _time_target_ms, elapsed_time_ms);
4589 if (_cm->has_aborted()) {
4590 gclog_or_tty->print_cr("[%u] ========== MARKING ABORTED ==========",
4591 _worker_id);
4592 }
4593 }
4594 } else {
4595 if (_cm->verbose_low()) {
4596 gclog_or_tty->print_cr("[%u] <<<<<<<<<< FINISHED, target = %1.2lfms, "
4597 "elapsed = %1.2lfms <<<<<<<<<<",
4598 _worker_id, _time_target_ms, elapsed_time_ms);
4599 }
4600 }
4602 _claimed = false;
4603 }
4605 CMTask::CMTask(uint worker_id,
4606 ConcurrentMark* cm,
4607 size_t* marked_bytes,
4608 BitMap* card_bm,
4609 CMTaskQueue* task_queue,
4610 CMTaskQueueSet* task_queues)
4611 : _g1h(G1CollectedHeap::heap()),
4612 _worker_id(worker_id), _cm(cm),
4613 _claimed(false),
4614 _nextMarkBitMap(NULL), _hash_seed(17),
4615 _task_queue(task_queue),
4616 _task_queues(task_queues),
4617 _cm_oop_closure(NULL),
4618 _marked_bytes_array(marked_bytes),
4619 _card_bm(card_bm) {
4620 guarantee(task_queue != NULL, "invariant");
4621 guarantee(task_queues != NULL, "invariant");
4623 statsOnly( _clock_due_to_scanning = 0;
4624 _clock_due_to_marking = 0 );
4626 _marking_step_diffs_ms.add(0.5);
4627 }
4629 // These are formatting macros that are used below to ensure
4630 // consistent formatting. The *_H_* versions are used to format the
4631 // header for a particular value and they should be kept consistent
4632 // with the corresponding macro. Also note that most of the macros add
4633 // the necessary white space (as a prefix) which makes them a bit
4634 // easier to compose.
4636 // All the output lines are prefixed with this string to be able to
4637 // identify them easily in a large log file.
4638 #define G1PPRL_LINE_PREFIX "###"
4640 #define G1PPRL_ADDR_BASE_FORMAT " "PTR_FORMAT"-"PTR_FORMAT
4641 #ifdef _LP64
4642 #define G1PPRL_ADDR_BASE_H_FORMAT " %37s"
4643 #else // _LP64
4644 #define G1PPRL_ADDR_BASE_H_FORMAT " %21s"
4645 #endif // _LP64
4647 // For per-region info
4648 #define G1PPRL_TYPE_FORMAT " %-4s"
4649 #define G1PPRL_TYPE_H_FORMAT " %4s"
4650 #define G1PPRL_BYTE_FORMAT " "SIZE_FORMAT_W(9)
4651 #define G1PPRL_BYTE_H_FORMAT " %9s"
4652 #define G1PPRL_DOUBLE_FORMAT " %14.1f"
4653 #define G1PPRL_DOUBLE_H_FORMAT " %14s"
4655 // For summary info
4656 #define G1PPRL_SUM_ADDR_FORMAT(tag) " "tag":"G1PPRL_ADDR_BASE_FORMAT
4657 #define G1PPRL_SUM_BYTE_FORMAT(tag) " "tag": "SIZE_FORMAT
4658 #define G1PPRL_SUM_MB_FORMAT(tag) " "tag": %1.2f MB"
4659 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag)" / %1.2f %%"
4661 G1PrintRegionLivenessInfoClosure::
4662 G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name)
4663 : _out(out),
4664 _total_used_bytes(0), _total_capacity_bytes(0),
4665 _total_prev_live_bytes(0), _total_next_live_bytes(0),
4666 _hum_used_bytes(0), _hum_capacity_bytes(0),
4667 _hum_prev_live_bytes(0), _hum_next_live_bytes(0),
4668 _total_remset_bytes(0), _total_strong_code_roots_bytes(0) {
4669 G1CollectedHeap* g1h = G1CollectedHeap::heap();
4670 MemRegion g1_committed = g1h->g1_committed();
4671 MemRegion g1_reserved = g1h->g1_reserved();
4672 double now = os::elapsedTime();
4674 // Print the header of the output.
4675 _out->cr();
4676 _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
4677 _out->print_cr(G1PPRL_LINE_PREFIX" HEAP"
4678 G1PPRL_SUM_ADDR_FORMAT("committed")
4679 G1PPRL_SUM_ADDR_FORMAT("reserved")
4680 G1PPRL_SUM_BYTE_FORMAT("region-size"),
4681 p2i(g1_committed.start()), p2i(g1_committed.end()),
4682 p2i(g1_reserved.start()), p2i(g1_reserved.end()),
4683 HeapRegion::GrainBytes);
4684 _out->print_cr(G1PPRL_LINE_PREFIX);
4685 _out->print_cr(G1PPRL_LINE_PREFIX
4686 G1PPRL_TYPE_H_FORMAT
4687 G1PPRL_ADDR_BASE_H_FORMAT
4688 G1PPRL_BYTE_H_FORMAT
4689 G1PPRL_BYTE_H_FORMAT
4690 G1PPRL_BYTE_H_FORMAT
4691 G1PPRL_DOUBLE_H_FORMAT
4692 G1PPRL_BYTE_H_FORMAT
4693 G1PPRL_BYTE_H_FORMAT,
4694 "type", "address-range",
4695 "used", "prev-live", "next-live", "gc-eff",
4696 "remset", "code-roots");
4697 _out->print_cr(G1PPRL_LINE_PREFIX
4698 G1PPRL_TYPE_H_FORMAT
4699 G1PPRL_ADDR_BASE_H_FORMAT
4700 G1PPRL_BYTE_H_FORMAT
4701 G1PPRL_BYTE_H_FORMAT
4702 G1PPRL_BYTE_H_FORMAT
4703 G1PPRL_DOUBLE_H_FORMAT
4704 G1PPRL_BYTE_H_FORMAT
4705 G1PPRL_BYTE_H_FORMAT,
4706 "", "",
4707 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
4708 "(bytes)", "(bytes)");
4709 }
4711 // It takes as a parameter a pointer to one of the _hum_* fields,
4712 // deduces the corresponding value for a region in a humongous region
4713 // series (either the region size, or what's left if the _hum_* field
4714 // is < the region size), and updates the _hum_* field accordingly.
4715 size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) {
4716 size_t bytes = 0;
4717 // The > 0 check is to deal with the prev and next live bytes which
4718 // could be 0.
4719 if (*hum_bytes > 0) {
4720 bytes = MIN2(HeapRegion::GrainBytes, *hum_bytes);
4721 *hum_bytes -= bytes;
4722 }
4723 return bytes;
4724 }
4726 // It deduces the values for a region in a humongous region series
4727 // from the _hum_* fields and updates those accordingly. It assumes
4728 // that the _hum_* fields have already been set up from the "starts
4729 // humongous" region and we visit the regions in address order.
4730 void G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* used_bytes,
4731 size_t* capacity_bytes,
4732 size_t* prev_live_bytes,
4733 size_t* next_live_bytes) {
4734 assert(_hum_used_bytes > 0 && _hum_capacity_bytes > 0, "pre-condition");
4735 *used_bytes = get_hum_bytes(&_hum_used_bytes);
4736 *capacity_bytes = get_hum_bytes(&_hum_capacity_bytes);
4737 *prev_live_bytes = get_hum_bytes(&_hum_prev_live_bytes);
4738 *next_live_bytes = get_hum_bytes(&_hum_next_live_bytes);
4739 }
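// ---------------------------------------------------------------------------
// [Editorial note -- not part of this changeset.] Worked example of the
// two get_hum_bytes() helpers: for a humongous object carrying 2.5 regions'
// worth of used bytes on its "starts humongous" row and a (hypothetical)
// 1M region size,
//
//   _hum_used_bytes starts at 2.5M:
//     HUMS row takes min(1M, 2.5M) = 1M, leaving 1.5M
//     first HUMC row takes min(1M, 1.5M) = 1M, leaving 0.5M
//     second HUMC row takes min(1M, 0.5M) = 0.5M, leaving 0
//
// so successive rows account for exactly the object's footprint, relying
// on the regions being visited in address order.
// ---------------------------------------------------------------------------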
4741 bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
4742 const char* type = "";
4743 HeapWord* bottom = r->bottom();
4744 HeapWord* end = r->end();
4745 size_t capacity_bytes = r->capacity();
4746 size_t used_bytes = r->used();
4747 size_t prev_live_bytes = r->live_bytes();
4748 size_t next_live_bytes = r->next_live_bytes();
4749 double gc_eff = r->gc_efficiency();
4750 size_t remset_bytes = r->rem_set()->mem_size();
4751 size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();
4753 if (r->used() == 0) {
4754 type = "FREE";
4755 } else if (r->is_survivor()) {
4756 type = "SURV";
4757 } else if (r->is_young()) {
4758 type = "EDEN";
4759 } else if (r->startsHumongous()) {
4760 type = "HUMS";
4762 assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 &&
4763 _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0,
4764 "they should have been zeroed after the last time we used them");
4765 // Set up the _hum_* fields.
4766 _hum_capacity_bytes = capacity_bytes;
4767 _hum_used_bytes = used_bytes;
4768 _hum_prev_live_bytes = prev_live_bytes;
4769 _hum_next_live_bytes = next_live_bytes;
4770 get_hum_bytes(&used_bytes, &capacity_bytes,
4771 &prev_live_bytes, &next_live_bytes);
4772 end = bottom + HeapRegion::GrainWords;
4773 } else if (r->continuesHumongous()) {
4774 type = "HUMC";
4775 get_hum_bytes(&used_bytes, &capacity_bytes,
4776 &prev_live_bytes, &next_live_bytes);
4777 assert(end == bottom + HeapRegion::GrainWords, "invariant");
4778 } else {
4779 type = "OLD";
4780 }
4782 _total_used_bytes += used_bytes;
4783 _total_capacity_bytes += capacity_bytes;
4784 _total_prev_live_bytes += prev_live_bytes;
4785 _total_next_live_bytes += next_live_bytes;
4786 _total_remset_bytes += remset_bytes;
4787 _total_strong_code_roots_bytes += strong_code_roots_bytes;
4789 // Print a line for this particular region.
4790 _out->print_cr(G1PPRL_LINE_PREFIX
4791 G1PPRL_TYPE_FORMAT
4792 G1PPRL_ADDR_BASE_FORMAT
4793 G1PPRL_BYTE_FORMAT
4794 G1PPRL_BYTE_FORMAT
4795 G1PPRL_BYTE_FORMAT
4796 G1PPRL_DOUBLE_FORMAT
4797 G1PPRL_BYTE_FORMAT
4798 G1PPRL_BYTE_FORMAT,
4799 type, p2i(bottom), p2i(end),
4800 used_bytes, prev_live_bytes, next_live_bytes, gc_eff,
4801 remset_bytes, strong_code_roots_bytes);
4803 return false;
4804 }
4806 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
4807 // Add static memory usage to the remembered set sizes.
4808 _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size();
4809 // Print the footer of the output.
4810 _out->print_cr(G1PPRL_LINE_PREFIX);
4811 _out->print_cr(G1PPRL_LINE_PREFIX
4812 " SUMMARY"
4813 G1PPRL_SUM_MB_FORMAT("capacity")
4814 G1PPRL_SUM_MB_PERC_FORMAT("used")
4815 G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
4816 G1PPRL_SUM_MB_PERC_FORMAT("next-live")
4817 G1PPRL_SUM_MB_FORMAT("remset")
4818 G1PPRL_SUM_MB_FORMAT("code-roots"),
4819 bytes_to_mb(_total_capacity_bytes),
4820 bytes_to_mb(_total_used_bytes),
4821 perc(_total_used_bytes, _total_capacity_bytes),
4822 bytes_to_mb(_total_prev_live_bytes),
4823 perc(_total_prev_live_bytes, _total_capacity_bytes),
4824 bytes_to_mb(_total_next_live_bytes),
4825 perc(_total_next_live_bytes, _total_capacity_bytes),
4826 bytes_to_mb(_total_remset_bytes),
4827 bytes_to_mb(_total_strong_code_roots_bytes));
4828 _out->cr();
4829 }