Thu, 03 Apr 2014 17:49:31 +0400
8016302: Change type of the number of GC workers to unsigned int (2)
Reviewed-by: tschatzl, jwilhelm
/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "gc_implementation/g1/concurrentMark.inline.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "services/memTracker.hpp"

// Concurrent marking bit map wrapper
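// Each bit in the map covers (1 << _shifter) heap words: an address maps
// to a bit offset via heapWordToOffset() (essentially
// (addr - _bmStartWord) >> _shifter) and back via offsetToHeapWord().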
CMBitMapRO::CMBitMapRO(int shifter) :
  _bm(),
  _shifter(shifter) {
  _bmStartWord = 0;
  _bmWordSize = 0;
}

HeapWord* CMBitMapRO::getNextMarkedWordAddress(HeapWord* addr,
                                               HeapWord* limit) const {
  // First we must round addr *up* to a possible object boundary.
  addr = (HeapWord*)align_size_up((intptr_t)addr,
                                  HeapWordSize << _shifter);
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_one postcondition");
  assert(nextAddr == limit || isMarked(nextAddr),
         "get_next_one postcondition");
  return nextAddr;
}

HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(HeapWord* addr,
                                                 HeapWord* limit) const {
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_zero_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_one postcondition");
  assert(nextAddr == limit || !isMarked(nextAddr),
         "get_next_one postcondition");
  return nextAddr;
}

int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const {
  assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
  return (int) (diff >> _shifter);
}

#ifndef PRODUCT
bool CMBitMapRO::covers(ReservedSpace heap_rs) const {
  // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
  assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
         "size inconsistency");
  return _bmStartWord == (HeapWord*)(heap_rs.base()) &&
         _bmWordSize == heap_rs.size()>>LogHeapWordSize;
}
#endif

void CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
  _bm.print_on_error(st, prefix);
}
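
// Sizing sketch: the backing store needs one bit per (1 << _shifter) heap
// words. For example, with 8-byte HeapWords and the default object
// alignment (_shifter == 0), a 1 GB heap spans 2^27 words and so needs a
// 2^27-bit (16 MB) bitmap; the "+ 1" below adds a byte of slack so the
// final partial byte is always covered.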
bool CMBitMap::allocate(ReservedSpace heap_rs) {
  _bmStartWord = (HeapWord*)(heap_rs.base());
  _bmWordSize = heap_rs.size()/HeapWordSize;    // heap_rs.size() is in bytes
  ReservedSpace brs(ReservedSpace::allocation_align_size_up(
                    (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
  if (!brs.is_reserved()) {
    warning("ConcurrentMark marking bit map allocation failure");
    return false;
  }
  MemTracker::record_virtual_memory_type((address)brs.base(), mtGC);
  // For now we'll just commit all of the bit map up front.
  // Later on we'll try to be more parsimonious with swap.
  if (!_virtual_space.initialize(brs, brs.size())) {
    warning("ConcurrentMark marking bit map backing store failure");
    return false;
  }
  assert(_virtual_space.committed_size() == brs.size(),
         "didn't reserve backing store for all of concurrent marking bit map?");
  _bm.set_map((uintptr_t*)_virtual_space.low());
  assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
         _bmWordSize, "inconsistency in bit map sizing");
  _bm.set_size(_bmWordSize >> _shifter);
  return true;
}

void CMBitMap::clearAll() {
  _bm.clear();
  return;
}

void CMBitMap::markRange(MemRegion mr) {
  mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  assert((offsetToHeapWord(heapWordToOffset(mr.end())) ==
          ((HeapWord *) mr.end())),
         "markRange memory region end is not card aligned");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), true);
}

void CMBitMap::clearRange(MemRegion mr) {
  mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), false);
}

MemRegion CMBitMap::getAndClearMarkedRegion(HeapWord* addr,
                                            HeapWord* end_addr) {
  HeapWord* start = getNextMarkedWordAddress(addr);
  start = MIN2(start, end_addr);
  HeapWord* end = getNextUnmarkedWordAddress(start);
  end = MIN2(end, end_addr);
  assert(start <= end, "Consistency check");
  MemRegion mr(start, end);
  if (!mr.is_empty()) {
    clearRange(mr);
  }
  return mr;
}

CMMarkStack::CMMarkStack(ConcurrentMark* cm) :
  _base(NULL), _cm(cm)
#ifdef ASSERT
  , _drain_in_progress(false)
  , _drain_in_progress_yields(false)
#endif
{}

bool CMMarkStack::allocate(size_t capacity) {
  // allocate a stack of the requisite depth
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop)));
  if (!rs.is_reserved()) {
    warning("ConcurrentMark MarkStack allocation failure");
    return false;
  }
  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
  if (!_virtual_space.initialize(rs, rs.size())) {
    warning("ConcurrentMark MarkStack backing store failure");
    // Release the virtual memory reserved for the marking stack
    rs.release();
    return false;
  }
  assert(_virtual_space.committed_size() == rs.size(),
         "Didn't reserve backing store for all of ConcurrentMark stack?");
  _base = (oop*) _virtual_space.low();
  setEmpty();
  _capacity = (jint) capacity;
  _saved_index = -1;
  _should_expand = false;
  NOT_PRODUCT(_max_depth = 0);
  return true;
}
void CMMarkStack::expand() {
  // Called during remark if we've overflown the marking stack while marking.
  assert(isEmpty(), "stack should have been emptied while handling overflow");
  assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted");
  // Clear expansion flag
  _should_expand = false;
  if (_capacity == (jint) MarkStackSizeMax) {
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr(" (benign) Can't expand marking stack capacity, at max size limit");
    }
    return;
  }
  // Double capacity if possible
  jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax);
  // Do not give up existing stack until we have managed to
  // get the double capacity that we desired.
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity *
                                                           sizeof(oop)));
  if (rs.is_reserved()) {
    // Release the backing store associated with old stack
    _virtual_space.release();
    // Reinitialize virtual space for new stack
    if (!_virtual_space.initialize(rs, rs.size())) {
      fatal("Not enough swap for expanded marking stack capacity");
    }
    _base = (oop*)(_virtual_space.low());
    _index = 0;
    _capacity = new_capacity;
  } else {
    if (PrintGCDetails && Verbose) {
      // Failed to double capacity; continue.
      gclog_or_tty->print(" (benign) Failed to expand marking stack capacity from "
                          SIZE_FORMAT"K to " SIZE_FORMAT"K",
                          _capacity / K, new_capacity / K);
    }
  }
}

void CMMarkStack::set_should_expand() {
  // If we're resetting the marking state because of a
  // marking stack overflow, record that we should, if
  // possible, expand the stack.
  _should_expand = _cm->has_overflown();
}

CMMarkStack::~CMMarkStack() {
  if (_base != NULL) {
    _base = NULL;
    _virtual_space.release();
  }
}
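
// Lock-free push: a slot is claimed by advancing _index with a CAS and the
// oop is then written into the claimed slot. If the CAS fails another
// thread won the slot and we retry; if the stack is full we just record
// the overflow and let the caller handle it.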
void CMMarkStack::par_push(oop ptr) {
  while (true) {
    if (isFull()) {
      _overflow = true;
      return;
    }
    // Otherwise...
    jint index = _index;
    jint next_index = index+1;
    jint res = Atomic::cmpxchg(next_index, &_index, index);
    if (res == index) {
      _base[index] = ptr;
      // Note that we don't maintain this atomically. We could, but it
      // doesn't seem necessary.
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
      return;
    }
    // Otherwise, we need to try again.
  }
}

void CMMarkStack::par_adjoin_arr(oop* ptr_arr, int n) {
  while (true) {
    if (isFull()) {
      _overflow = true;
      return;
    }
    // Otherwise...
    jint index = _index;
    jint next_index = index + n;
    if (next_index > _capacity) {
      _overflow = true;
      return;
    }
    jint res = Atomic::cmpxchg(next_index, &_index, index);
    if (res == index) {
      for (int i = 0; i < n; i++) {
        int ind = index + i;
        assert(ind < _capacity, "By overflow test above.");
        _base[ind] = ptr_arr[i];
      }
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
      return;
    }
    // Otherwise, we need to try again.
  }
}
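
// Unlike par_adjoin_arr() above, this variant serializes pushers with
// ParGCRareEvent_lock rather than a CAS loop; judging by the lock's name
// it is only expected on rare paths where contention is not a concern.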
void CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint start = _index;
  jint next_index = start + n;
  if (next_index > _capacity) {
    _overflow = true;
    return;
  }
  // Otherwise.
  _index = next_index;
  for (int i = 0; i < n; i++) {
    int ind = start + i;
    assert(ind < _capacity, "By overflow test above.");
    _base[ind] = ptr_arr[i];
  }
  NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
}

bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint index = _index;
  if (index == 0) {
    *n = 0;
    return false;
  } else {
    int k = MIN2(max, index);
    jint new_ind = index - k;
    for (int j = 0; j < k; j++) {
      ptr_arr[j] = _base[new_ind + j];
    }
    _index = new_ind;
    *n = k;
    return true;
  }
}
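
// Drains the stack by applying cl to every popped (grey) object. When
// yield_after is true we offer to yield after each object and return
// false if the drain was interrupted, so the caller can resume later.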
template<class OopClosureClass>
bool CMMarkStack::drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after) {
  assert(!_drain_in_progress || !_drain_in_progress_yields || yield_after
         || SafepointSynchronize::is_at_safepoint(),
         "Drain recursion must be yield-safe.");
  bool res = true;
  debug_only(_drain_in_progress = true);
  debug_only(_drain_in_progress_yields = yield_after);
  while (!isEmpty()) {
    oop newOop = pop();
    assert(G1CollectedHeap::heap()->is_in_reserved(newOop), "Bad pop");
    assert(newOop->is_oop(), "Expected an oop");
    assert(bm == NULL || bm->isMarked((HeapWord*)newOop),
           "only grey objects on this stack");
    newOop->oop_iterate(cl);
    if (yield_after && _cm->do_yield_check()) {
      res = false;
      break;
    }
  }
  debug_only(_drain_in_progress = false);
  return res;
}
void CMMarkStack::note_start_of_gc() {
  assert(_saved_index == -1,
         "note_start_of_gc()/end_of_gc() bracketed incorrectly");
  _saved_index = _index;
}

void CMMarkStack::note_end_of_gc() {
  // This is intentionally a guarantee, instead of an assert. If we
  // accidentally add something to the mark stack during GC, it
  // will be a correctness issue so it's better if we crash. We'll
  // only check this once per GC anyway, so it won't be a performance
  // issue in any way.
  guarantee(_saved_index == _index,
            err_msg("saved index: %d index: %d", _saved_index, _index));
  _saved_index = -1;
}

void CMMarkStack::oops_do(OopClosure* f) {
  assert(_saved_index == _index,
         err_msg("saved index: %d index: %d", _saved_index, _index));
  for (int i = 0; i < _index; i += 1) {
    f->do_oop(&_base[i]);
  }
}

bool ConcurrentMark::not_yet_marked(oop obj) const {
  return _g1h->is_obj_ill(obj);
}

CMRootRegions::CMRootRegions() :
  _young_list(NULL), _cm(NULL), _scan_in_progress(false),
  _should_abort(false), _next_survivor(NULL) { }

void CMRootRegions::init(G1CollectedHeap* g1h, ConcurrentMark* cm) {
  _young_list = g1h->young_list();
  _cm = cm;
}

void CMRootRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  assert(_next_survivor == NULL, "pre-condition");
  _next_survivor = _young_list->first_survivor_region();
  _scan_in_progress = (_next_survivor != NULL);
  _should_abort = false;
}
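
// Double-checked claiming: _next_survivor is read optimistically and then
// re-read under RootRegionScan_lock before being advanced, so each
// survivor region is claimed by at most one thread.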
HeapRegion* CMRootRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  // Currently, only survivors can be root regions.
  HeapRegion* res = _next_survivor;
  if (res != NULL) {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    // Read it again in case it changed while we were waiting for the lock.
    res = _next_survivor;
    if (res != NULL) {
      if (res == _young_list->last_survivor_region()) {
        // We just claimed the last survivor so store NULL to indicate
        // that we're done.
        _next_survivor = NULL;
      } else {
        _next_survivor = res->get_next_young_region();
      }
    } else {
      // Someone else claimed the last survivor while we were trying
      // to take the lock so nothing else to do.
    }
  }
  assert(res == NULL || res->is_survivor(), "post-condition");

  return res;
}

void CMRootRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  if (!_should_abort) {
    assert(_next_survivor == NULL, "we should have claimed all survivors");
  }
  _next_survivor = NULL;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    _scan_in_progress = false;
    RootRegionScan_lock->notify_all();
  }
}

bool CMRootRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) return false;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    while (scan_in_progress()) {
      RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
  return true;
}

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER
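
// Heuristic: roughly one marking thread for every four parallel GC
// threads, but never fewer than one. E.g. with 8 ParallelGCThreads,
// (8 + 2) / 4 == 2 marking threads under integer division.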
uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
  return MAX2((n_par_threads + 2) / 4, 1U);
}

ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) :
  _g1h(g1h),
  _markBitMap1(log2_intptr(MinObjAlignment)),
  _markBitMap2(log2_intptr(MinObjAlignment)),
  _parallel_marking_threads(0),
  _max_parallel_marking_threads(0),
  _sleep_factor(0.0),
  _marking_task_overhead(1.0),
  _cleanup_sleep_factor(0.0),
  _cleanup_task_overhead(1.0),
  _cleanup_list("Cleanup List"),
  _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/),
  _card_bm((heap_rs.size() + CardTableModRefBS::card_size - 1) >>
            CardTableModRefBS::card_shift,
            false /* in_resource_area*/),

  _prevMarkBitMap(&_markBitMap1),
  _nextMarkBitMap(&_markBitMap2),

  _markStack(this),
  // _finger set in set_non_marking_state

  _max_worker_id(MAX2((uint)ParallelGCThreads, 1U)),
  // _active_tasks set in set_non_marking_state
  // _tasks set inside the constructor
  _task_queues(new CMTaskQueueSet((int) _max_worker_id)),
  _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),

  _has_overflown(false),
  _concurrent(false),
  _has_aborted(false),
  _restart_for_overflow(false),
  _concurrent_marking_in_progress(false),

  // _verbose_level set below

  _init_times(),
  _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
  _cleanup_times(),
  _total_counting_time(0.0),
  _total_rs_scrub_time(0.0),

  _parallel_workers(NULL),

  _count_card_bitmaps(NULL),
  _count_marked_bytes(NULL),
  _completed_initialization(false) {
  CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
  if (verbose_level < no_verbose) {
    verbose_level = no_verbose;
  }
  if (verbose_level > high_verbose) {
    verbose_level = high_verbose;
  }
  _verbose_level = verbose_level;

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] init, heap start = "PTR_FORMAT", "
                           "heap end = "PTR_FORMAT, _heap_start, _heap_end);
  }

  if (!_markBitMap1.allocate(heap_rs)) {
    warning("Failed to allocate first CM bit map");
    return;
  }
  if (!_markBitMap2.allocate(heap_rs)) {
    warning("Failed to allocate second CM bit map");
    return;
  }

  // Create & start a ConcurrentMark thread.
  _cmThread = new ConcurrentMarkThread(this);
  assert(cmThread() != NULL, "CM Thread should have been created");
  assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
  if (_cmThread->osthread() == NULL) {
    vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
  }

  assert(CGC_lock != NULL, "Where's the CGC_lock?");
  assert(_markBitMap1.covers(heap_rs), "_markBitMap1 inconsistency");
  assert(_markBitMap2.covers(heap_rs), "_markBitMap2 inconsistency");

  SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
  satb_qs.set_buffer_size(G1SATBBufferSize);

  _root_regions.init(_g1h, this);

  if (ConcGCThreads > ParallelGCThreads) {
    warning("Can't have more ConcGCThreads (" UINT32_FORMAT ") "
            "than ParallelGCThreads (" UINT32_FORMAT ").",
            ConcGCThreads, ParallelGCThreads);
    return;
  }
  if (ParallelGCThreads == 0) {
    // if we are not running with any parallel GC threads we will not
    // spawn any marking threads either
    _parallel_marking_threads = 0;
    _max_parallel_marking_threads = 0;
    _sleep_factor = 0.0;
    _marking_task_overhead = 1.0;
  } else {
    if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) {
      // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent
      // if both are set
      _sleep_factor = 0.0;
      _marking_task_overhead = 1.0;
    } else if (G1MarkingOverheadPercent > 0) {
      // We will calculate the number of parallel marking threads based
      // on a target overhead with respect to the soft real-time goal
      double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
      double overall_cm_overhead =
        (double) MaxGCPauseMillis * marking_overhead /
        (double) GCPauseIntervalMillis;
      double cpu_ratio = 1.0 / (double) os::processor_count();
      double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
      double marking_task_overhead =
        overall_cm_overhead / marking_thread_num *
        (double) os::processor_count();
      double sleep_factor =
        (1.0 - marking_task_overhead) / marking_task_overhead;
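
      // Worked example (hypothetical values): with G1MarkingOverheadPercent
      // = 10, MaxGCPauseMillis = 200, GCPauseIntervalMillis = 1000 and 8
      // processors: overall_cm_overhead = 200 * 0.10 / 1000 = 0.02 and
      // cpu_ratio = 0.125, so marking_thread_num = ceil(0.02 / 0.125) = 1,
      // marking_task_overhead = 0.02 * 8 = 0.16 and sleep_factor =
      // (1 - 0.16) / 0.16 = 5.25, i.e. the single marking thread sleeps
      // about 5.25x as long as it runs.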
      FLAG_SET_ERGO(uintx, ConcGCThreads, (uint) marking_thread_num);
      _sleep_factor = sleep_factor;
      _marking_task_overhead = marking_task_overhead;
    } else {
      // Calculate the number of parallel marking threads by scaling
      // the number of parallel GC threads.
      uint marking_thread_num = scale_parallel_threads((uint) ParallelGCThreads);
      FLAG_SET_ERGO(uintx, ConcGCThreads, marking_thread_num);
      _sleep_factor = 0.0;
      _marking_task_overhead = 1.0;
    }

    assert(ConcGCThreads > 0, "Should have been set");
    _parallel_marking_threads = (uint) ConcGCThreads;
    _max_parallel_marking_threads = _parallel_marking_threads;

    if (parallel_marking_threads() > 1) {
      _cleanup_task_overhead = 1.0;
    } else {
      _cleanup_task_overhead = marking_task_overhead();
    }
    _cleanup_sleep_factor =
      (1.0 - cleanup_task_overhead()) / cleanup_task_overhead();

#if 0
    gclog_or_tty->print_cr("Marking Threads %d", parallel_marking_threads());
    gclog_or_tty->print_cr("CM Marking Task Overhead %1.4lf", marking_task_overhead());
    gclog_or_tty->print_cr("CM Sleep Factor %1.4lf", sleep_factor());
    gclog_or_tty->print_cr("CL Marking Task Overhead %1.4lf", cleanup_task_overhead());
    gclog_or_tty->print_cr("CL Sleep Factor %1.4lf", cleanup_sleep_factor());
#endif

    guarantee(parallel_marking_threads() > 0, "peace of mind");
    _parallel_workers = new FlexibleWorkGang("G1 Parallel Marking Threads",
                                             _max_parallel_marking_threads, false, true);
    if (_parallel_workers == NULL) {
      vm_exit_during_initialization("Failed necessary allocation.");
    } else {
      _parallel_workers->initialize_workers();
    }
  }

  if (FLAG_IS_DEFAULT(MarkStackSize)) {
    uintx mark_stack_size =
      MIN2(MarkStackSizeMax,
           MAX2(MarkStackSize, (uintx) (parallel_marking_threads() * TASKQUEUE_SIZE)));
    // Verify that the calculated value for MarkStackSize is in range.
    // It would be nice to use the private utility routine from Arguments.
    if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
      warning("Invalid value calculated for MarkStackSize (" UINTX_FORMAT "): "
              "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
              mark_stack_size, 1, MarkStackSizeMax);
      return;
    }
    FLAG_SET_ERGO(uintx, MarkStackSize, mark_stack_size);
  } else {
    // Verify MarkStackSize is in range.
    if (FLAG_IS_CMDLINE(MarkStackSize)) {
      if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT "): "
                  "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
                  MarkStackSize, 1, MarkStackSizeMax);
          return;
        }
      } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT ")"
                  " or for MarkStackSizeMax (" UINTX_FORMAT ")",
                  MarkStackSize, MarkStackSizeMax);
          return;
        }
      }
    }
  }
  if (!_markStack.allocate(MarkStackSize)) {
    warning("Failed to allocate CM marking stack");
    return;
  }

  _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_worker_id, mtGC);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);

  _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap, _max_worker_id, mtGC);
  _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_worker_id, mtGC);

  BitMap::idx_t card_bm_size = _card_bm.size();

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _active_tasks = _max_worker_id;

  size_t max_regions = (size_t) _g1h->max_regions();
  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* task_queue = new CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _count_card_bitmaps[i] = BitMap(card_bm_size, false);
    _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC);

    _tasks[i] = new CMTask(i, this,
                           _count_marked_bytes[i],
                           &_count_card_bitmaps[i],
                           task_queue, _task_queues);

    _accum_task_vtime[i] = 0.0;
  }

  // Calculate the card number for the bottom of the heap. Used
  // in biasing indexes into the accounting card bitmaps.
  _heap_bottom_card_num =
    intptr_t(uintptr_t(_g1h->reserved_region().start()) >>
             CardTableModRefBS::card_shift);

  // Clear all the liveness counting data
  clear_all_count_data();

  // so that the call below can read a sensible value
  _heap_start = (HeapWord*) heap_rs.base();
  set_non_marking_state();
  _completed_initialization = true;
}

void ConcurrentMark::update_g1_committed(bool force) {
  // If concurrent marking is not in progress, then we do not need to
  // update _heap_end.
  if (!concurrent_marking_in_progress() && !force) return;

  MemRegion committed = _g1h->g1_committed();
  assert(committed.start() == _heap_start, "start shouldn't change");
  HeapWord* new_end = committed.end();
  if (new_end > _heap_end) {
    // The heap has been expanded.

    _heap_end = new_end;
  }
  // Notice that the heap can also shrink. However, this only happens
  // during a Full GC (at least currently) and the entire marking
  // phase will bail out and the task will not be restarted. So, let's
  // do nothing.
}
void ConcurrentMark::reset() {
  // Starting values for these two. This should be called in a STW
  // phase. CM will be notified of any future g1_committed expansions;
  // these will happen at the end of evacuation pauses, when tasks are
  // inactive.
  MemRegion committed = _g1h->g1_committed();
  _heap_start = committed.start();
  _heap_end = committed.end();

  // Separated the asserts so that we know which one fires.
  assert(_heap_start != NULL, "heap bounds should look ok");
  assert(_heap_end != NULL, "heap bounds should look ok");
  assert(_heap_start < _heap_end, "heap bounds should look ok");

  // Reset all the marking data structures and any necessary flags
  reset_marking_state();

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] resetting");
  }

  // We do reset all of them, since different phases will use
  // different number of active threads. So, it's easiest to have all
  // of them ready.
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->reset(_nextMarkBitMap);
  }

  // We need this to make sure that the flag is on during the evac
  // pause with initial mark piggy-backed
  set_concurrent_marking_in_progress();
}

void ConcurrentMark::reset_marking_state(bool clear_overflow) {
  _markStack.set_should_expand();
  _markStack.setEmpty(); // Also clears the _markStack overflow flag
  if (clear_overflow) {
    clear_has_overflown();
  } else {
    assert(has_overflown(), "pre-condition");
  }
  _finger = _heap_start;

  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* queue = _task_queues->queue(i);
    queue->set_empty();
  }
}
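
// Resize the terminator and the two overflow barriers to match the number
// of tasks that will actually participate in the upcoming phase; inactive
// tasks keep their data structures but take no part in it.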
void ConcurrentMark::set_concurrency(uint active_tasks) {
  assert(active_tasks <= _max_worker_id, "we should not have more");

  _active_tasks = active_tasks;
  // Need to update the three data structures below according to the
  // number of active threads for this phase.
  _terminator = ParallelTaskTerminator((int) active_tasks, _task_queues);
  _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
  _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
}

void ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
  set_concurrency(active_tasks);

  _concurrent = concurrent;
  // We propagate this to all tasks, not just the active ones.
  for (uint i = 0; i < _max_worker_id; ++i)
    _tasks[i]->set_concurrent(concurrent);

  if (concurrent) {
    set_concurrent_marking_in_progress();
  } else {
    // We currently assume that the concurrent flag has been set to
    // false before we start remark. At this point we should also be
    // in a STW phase.
    assert(!concurrent_marking_in_progress(), "invariant");
    assert(_finger == _heap_end,
           err_msg("only way to get here: _finger: "PTR_FORMAT", _heap_end: "PTR_FORMAT,
                   _finger, _heap_end));
    update_g1_committed(true);
  }
}

void ConcurrentMark::set_non_marking_state() {
  // We set the global marking state to some default values when we're
  // not doing marking.
  reset_marking_state();
  _active_tasks = 0;
  clear_concurrent_marking_in_progress();
}
ConcurrentMark::~ConcurrentMark() {
  // The ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}

void ConcurrentMark::clearNextBitmap() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  // Make sure that the concurrent mark thread still appears to be in
  // the current cycle.
  guarantee(cmThread()->during_cycle(), "invariant");

  // We are finishing up the current cycle by clearing the next
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
  guarantee(!g1h->mark_in_progress(), "invariant");

  // Clear the mark bitmap (no grey objects to start with).
  // We need to do this in chunks and offer to yield in between
  // each chunk.
  HeapWord* start = _nextMarkBitMap->startWord();
  HeapWord* end = _nextMarkBitMap->endWord();
  HeapWord* cur = start;
  size_t chunkSize = M;
  while (cur < end) {
    HeapWord* next = cur + chunkSize;
    if (next > end) {
      next = end;
    }
    MemRegion mr(cur,next);
    _nextMarkBitMap->clearRange(mr);
    cur = next;
    do_yield_check();

    // Repeat the asserts from above. We'll do them as asserts here to
    // minimize their overhead on the product. However, we'll have
    // them as guarantees at the beginning / end of the bitmap
    // clearing to get some checking in the product.
    assert(cmThread()->during_cycle(), "invariant");
    assert(!g1h->mark_in_progress(), "invariant");
  }

  // Clear the liveness counting data
  clear_all_count_data();

  // Repeat the asserts from above.
  guarantee(cmThread()->during_cycle(), "invariant");
  guarantee(!g1h->mark_in_progress(), "invariant");
}
class NoteStartOfMarkHRClosure: public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    if (!r->continuesHumongous()) {
      r->note_start_of_marking();
    }
    return false;
  }
};

void ConcurrentMark::checkpointRootsInitialPre() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  _has_aborted = false;

#ifndef PRODUCT
  if (G1PrintReachableAtInitialMark) {
    print_reachable("at-cycle-start",
                    VerifyOption_G1UsePrevMarking, true /* all */);
  }
#endif

  // Initialise marking structures. This has to be done in a STW phase.
  reset();

  // For each region note start of marking.
  NoteStartOfMarkHRClosure startcl;
  g1h->heap_region_iterate(&startcl);
}
void ConcurrentMark::checkpointRootsInitialPost() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If we force an overflow during remark, the remark operation will
  // actually abort and we'll restart concurrent marking. If we always
  // force an overflow during remark we'll never actually complete the
  // marking phase. So, we initialize this here, at the start of the
  // cycle, so that the remaining overflow count will decrease at
  // every remark and we'll eventually not need to cause one.
  force_overflow_stw()->init();

  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  // This is the start of the marking cycle; we expect all
  // threads to have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);

  _root_regions.prepare_for_scan();

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on. So, it's also called at the end of the
  // initial-mark pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}
/*
 * Notice that in the next two methods, we actually leave the STS
 * during the barrier sync and join it immediately afterwards. If we
 * do not do this, the following deadlock can occur: one thread could
 * be in the barrier sync code, waiting for the other thread to also
 * sync up, whereas another one could be trying to yield, while also
 * waiting for the other threads to sync up too.
 *
 * Note, however, that this code is also used during remark and in
 * this case we should not attempt to leave / enter the STS, otherwise
 * we'll either hit an assert (debug / fastdebug) or deadlock
 * (product). So we should only leave / enter the STS if we are
 * operating concurrently.
 *
 * Because the thread that does the sync barrier has left the STS, it
 * is possible for it to be suspended while a Full GC or an evacuation
 * pause occurs. This is actually safe, since entering the sync
 * barrier is one of the last things do_marking_step() does, and it
 * doesn't manipulate any data structures afterwards.
 */

void ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] entering first barrier", worker_id);
  }

  if (concurrent()) {
    ConcurrentGCThread::stsLeave();
  }
  _first_overflow_barrier_sync.enter();
  if (concurrent()) {
    ConcurrentGCThread::stsJoin();
  }
  // at this point everyone should have synced up and not be doing any
  // more work

  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] leaving first barrier", worker_id);
  }

  // If we're executing the concurrent phase of marking, reset the marking
  // state; otherwise the marking state is reset after reference processing,
  // during the remark pause.
  // If we reset here as a result of an overflow during the remark we will
  // see assertion failures from any subsequent set_concurrency_and_phase()
  // calls.
  if (concurrent()) {
    // let the task associated with worker 0 do this
    if (worker_id == 0) {
      // task 0 is responsible for clearing the global data structures
      // We should be here because of an overflow. During STW we should
      // not clear the overflow flag since we rely on it being true when
      // we exit this method to abort the pause and restart concurrent
      // marking.
      reset_marking_state(true /* clear_overflow */);
      force_overflow()->update();

      if (G1Log::fine()) {
        gclog_or_tty->date_stamp(PrintGCDateStamps);
        gclog_or_tty->stamp(PrintGCTimeStamps);
        gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
      }
    }
  }

  // after this, each task should reset its own data structures then
  // go into the second barrier
}
void ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] entering second barrier", worker_id);
  }

  if (concurrent()) {
    ConcurrentGCThread::stsLeave();
  }
  _second_overflow_barrier_sync.enter();
  if (concurrent()) {
    ConcurrentGCThread::stsJoin();
  }
  // at this point everything should be re-initialized and ready to go

  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] leaving second barrier", worker_id);
  }
}

#ifndef PRODUCT
void ForceOverflowSettings::init() {
  _num_remaining = G1ConcMarkForceOverflow;
  _force = false;
  update();
}

void ForceOverflowSettings::update() {
  if (_num_remaining > 0) {
    _num_remaining -= 1;
    _force = true;
  } else {
    _force = false;
  }
}

bool ForceOverflowSettings::should_force() {
  if (_force) {
    _force = false;
    return true;
  } else {
    return false;
  }
}
#endif // !PRODUCT
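
// The ForceOverflowSettings machinery above appears to be a testing aid:
// with G1ConcMarkForceOverflow set to N, should_force() answers true once
// per update() for the first N rounds, artificially triggering the
// overflow-and-restart path so that it gets exercised.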
class CMConcurrentMarkingTask: public AbstractGangTask {
private:
  ConcurrentMark* _cm;
  ConcurrentMarkThread* _cmt;

public:
  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");
    ResourceMark rm;

    double start_vtime = os::elapsedVTime();

    ConcurrentGCThread::stsJoin();

    assert(worker_id < _cm->active_tasks(), "invariant");
    CMTask* the_task = _cm->task(worker_id);
    the_task->record_start_time();
    if (!_cm->has_aborted()) {
      do {
        double start_vtime_sec = os::elapsedVTime();
        double start_time_sec = os::elapsedTime();
        double mark_step_duration_ms = G1ConcMarkStepDurationMillis;

        the_task->do_marking_step(mark_step_duration_ms,
                                  true  /* do_termination */,
                                  false /* is_serial*/);

        double end_time_sec = os::elapsedTime();
        double end_vtime_sec = os::elapsedVTime();
        double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
        double elapsed_time_sec = end_time_sec - start_time_sec;
        _cm->clear_has_overflown();

        bool ret = _cm->do_yield_check(worker_id);

        jlong sleep_time_ms;
        if (!_cm->has_aborted() && the_task->has_aborted()) {
          sleep_time_ms =
            (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
          ConcurrentGCThread::stsLeave();
          os::sleep(Thread::current(), sleep_time_ms, false);
          ConcurrentGCThread::stsJoin();
        }
        double end_time2_sec = os::elapsedTime();
        double elapsed_time2_sec = end_time2_sec - start_time_sec;

#if 0
        gclog_or_tty->print_cr("CM: elapsed %1.4lf ms, sleep %1.4lf ms, "
                               "overhead %1.4lf",
                               elapsed_vtime_sec * 1000.0, (double) sleep_time_ms,
                               the_task->conc_overhead(os::elapsedTime()) * 8.0);
        gclog_or_tty->print_cr("elapsed time %1.4lf ms, time 2: %1.4lf ms",
                               elapsed_time_sec * 1000.0, elapsed_time2_sec * 1000.0);
#endif
      } while (!_cm->has_aborted() && the_task->has_aborted());
    }
    the_task->record_end_time();
    guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");

    ConcurrentGCThread::stsLeave();

    double end_vtime = os::elapsedVTime();
    _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
  }

  CMConcurrentMarkingTask(ConcurrentMark* cm,
                          ConcurrentMarkThread* cmt) :
    AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }

  ~CMConcurrentMarkingTask() { }
};

// Calculates the number of active workers for a concurrent
// phase.
uint ConcurrentMark::calc_parallel_marking_threads() {
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    uint n_conc_workers = 0;
    if (!UseDynamicNumberOfGCThreads ||
        (!FLAG_IS_DEFAULT(ConcGCThreads) &&
         !ForceDynamicNumberOfGCThreads)) {
      n_conc_workers = max_parallel_marking_threads();
    } else {
      n_conc_workers =
        AdaptiveSizePolicy::calc_default_active_workers(
                                     max_parallel_marking_threads(),
                                     1, /* Minimum workers */
                                     parallel_marking_threads(),
                                     Threads::number_of_non_daemon_threads());
      // Don't scale down "n_conc_workers" by scale_parallel_threads() because
      // that scaling has already gone into "_max_parallel_marking_threads".
    }
    assert(n_conc_workers > 0, "Always need at least 1");
    return n_conc_workers;
  }
  // If we are not running with any parallel GC threads we will not
  // have spawned any marking threads either. Hence the number of
  // concurrent workers should be 0.
  return 0;
}

void ConcurrentMark::scanRootRegion(HeapRegion* hr, uint worker_id) {
  // Currently, only survivors can be root regions.
  assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
  G1RootRegionScanClosure cl(_g1h, this, worker_id);

  const uintx interval = PrefetchScanIntervalInBytes;
  HeapWord* curr = hr->bottom();
  const HeapWord* end = hr->top();
  while (curr < end) {
    Prefetch::read(curr, interval);
    oop obj = oop(curr);
    int size = obj->oop_iterate(&cl);
    assert(size == obj->size(), "sanity");
    curr += size;
  }
}

class CMRootRegionScanTask : public AbstractGangTask {
private:
  ConcurrentMark* _cm;

public:
  CMRootRegionScanTask(ConcurrentMark* cm) :
    AbstractGangTask("Root Region Scan"), _cm(cm) { }

  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");

    CMRootRegions* root_regions = _cm->root_regions();
    HeapRegion* hr = root_regions->claim_next();
    while (hr != NULL) {
      _cm->scanRootRegion(hr, worker_id);
      hr = root_regions->claim_next();
    }
  }
};
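
// Root region scanning marks everything reachable from the root regions
// (currently only survivor regions) before concurrent marking proper gets
// going: objects in those regions survived the initial-mark pause and are
// treated as implicitly live roots.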
void ConcurrentMark::scanRootRegions() {
  // scan_in_progress() will have been set to true only if there was
  // at least one root region to scan. So, if it's false, we
  // should not attempt to do any further work.
  if (root_regions()->scan_in_progress()) {
    _parallel_marking_threads = calc_parallel_marking_threads();
    assert(parallel_marking_threads() <= max_parallel_marking_threads(),
           "Maximum number of marking threads exceeded");
    uint active_workers = MAX2(1U, parallel_marking_threads());

    CMRootRegionScanTask task(this);
    if (use_parallel_marking_threads()) {
      _parallel_workers->set_active_workers((int) active_workers);
      _parallel_workers->run_task(&task);
    } else {
      task.work(0);
    }

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
    root_regions()->scan_finished();
  }
}

void ConcurrentMark::markFromRoots() {
  // we might be tempted to assert that:
  // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
  //        "inconsistent argument?");
  // However that wouldn't be right, because it's possible that
  // a safepoint is indeed in progress as a younger generation
  // stop-the-world GC happens even as we mark in this generation.

  _restart_for_overflow = false;
  force_overflow_conc()->init();

  // _g1h has _n_par_threads
  _parallel_marking_threads = calc_parallel_marking_threads();
  assert(parallel_marking_threads() <= max_parallel_marking_threads(),
         "Maximum number of marking threads exceeded");

  uint active_workers = MAX2(1U, parallel_marking_threads());

  // Parallel task terminator is set in "set_concurrency_and_phase()"
  set_concurrency_and_phase(active_workers, true /* concurrent */);

  CMConcurrentMarkingTask markingTask(this, cmThread());
  if (use_parallel_marking_threads()) {
    _parallel_workers->set_active_workers((int)active_workers);
    // Don't set _n_par_threads because it affects MT in process_strong_roots()
    // and the decisions on that MT processing are made elsewhere.
    assert(_parallel_workers->active_workers() > 0, "Should have been set");
    _parallel_workers->run_task(&markingTask);
  } else {
    markingTask.work(0);
  }
  print_stats();
}

void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->set_marking_complete(); // So bitmap clearing isn't confused
    return;
  }

  SvcGCMarker sgcm(SvcGCMarker::OTHER);

  if (VerifyDuringGC) {
    HandleMark hm; // handle scope
    Universe::heap()->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking,
                     " VerifyDuringGC:(before)");
  }

  G1CollectorPolicy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_remark_start();

  double start = os::elapsedTime();

  checkpointRootsFinalWork();

  double mark_work_end = os::elapsedTime();

  weakRefsWork(clear_all_soft_refs);

  if (has_overflown()) {
    // Oops. We overflowed. Restart concurrent marking.
    _restart_for_overflow = true;
    if (G1TraceMarkStackOverflow) {
      gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
    }

    // Verify the heap w.r.t. the previous marking bitmap.
    if (VerifyDuringGC) {
      HandleMark hm; // handle scope
      Universe::heap()->prepare_for_verify();
      Universe::verify(VerifyOption_G1UsePrevMarking,
                       " VerifyDuringGC:(overflow)");
    }

    // Clear the marking state because we will be restarting
    // marking due to overflowing the global mark stack.
    reset_marking_state();
  } else {
    // Aggregate the per-task counting data that we have accumulated
    // while marking.
    aggregate_count_data();

    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
    // We're done with marking.
    // This is the end of the marking cycle; we expect all
    // threads to have SATB queues with active set to true.
    satb_mq_set.set_active_all_threads(false, /* new active value */
                                       true /* expected_active */);

    if (VerifyDuringGC) {
      HandleMark hm; // handle scope
      Universe::heap()->prepare_for_verify();
      Universe::verify(VerifyOption_G1UseNextMarking,
                       " VerifyDuringGC:(after)");
    }
    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed
    set_non_marking_state();
  }

  // Expand the marking stack, if we have to and if we can.
  if (_markStack.should_expand()) {
    _markStack.expand();
  }

  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);

  g1p->record_concurrent_mark_remark_end();

  G1CMIsAliveClosure is_alive(g1h);
  g1h->gc_tracer_cm()->report_object_count_after_gc(&is_alive);
}
// Base class of the closures that finalize and verify the
// liveness counting data.
class CMCountDataClosureBase: public HeapRegionClosure {
protected:
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CardTableModRefBS* _ct_bs;

  BitMap* _region_bm;
  BitMap* _card_bm;

  // Takes a region that's not empty (i.e., it has at least one
  // live object in it) and sets its corresponding bit on the region
  // bitmap to 1. If the region is "starts humongous" it will also set
  // to 1 the bits on the region bitmap that correspond to its
  // associated "continues humongous" regions.
  void set_bit_for_region(HeapRegion* hr) {
    assert(!hr->continuesHumongous(), "should have filtered those out");

    BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();
    if (!hr->startsHumongous()) {
      // Normal (non-humongous) case: just set the bit.
      _region_bm->par_at_put(index, true);
    } else {
      // Starts humongous case: calculate how many regions are part of
      // this humongous region and then set the bit range.
      BitMap::idx_t end_index = (BitMap::idx_t) hr->last_hc_index();
      _region_bm->par_at_put_range(index, end_index, true);
    }
  }

public:
  CMCountDataClosureBase(G1CollectedHeap* g1h,
                         BitMap* region_bm, BitMap* card_bm):
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _ct_bs((CardTableModRefBS*) (g1h->barrier_set())),
    _region_bm(region_bm), _card_bm(card_bm) { }
};

// Closure that calculates the # live objects per region. Used
// for verification purposes during the cleanup pause.
class CalcLiveObjectsClosure: public CMCountDataClosureBase {
  CMBitMapRO* _bm;
  size_t _region_marked_bytes;

public:
  CalcLiveObjectsClosure(CMBitMapRO *bm, G1CollectedHeap* g1h,
                         BitMap* region_bm, BitMap* card_bm) :
    CMCountDataClosureBase(g1h, region_bm, card_bm),
    _bm(bm), _region_marked_bytes(0) { }

  bool doHeapRegion(HeapRegion* hr) {

    if (hr->continuesHumongous()) {
      // We will ignore these here and process them when their
      // associated "starts humongous" region is processed (see
      // set_bit_for_heap_region()). Note that we cannot rely on their
      // associated "starts humongous" region to have their bit set to
      // 1 since, due to the region chunking in the parallel region
      // iteration, a "continues humongous" region might be visited
      // before its associated "starts humongous".
      return false;
    }

    HeapWord* ntams = hr->next_top_at_mark_start();
    HeapWord* start = hr->bottom();

    assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
           err_msg("Preconditions not met - "
                   "start: "PTR_FORMAT", ntams: "PTR_FORMAT", end: "PTR_FORMAT,
                   start, ntams, hr->end()));

    // Find the first marked object at or after "start".
    start = _bm->getNextMarkedWordAddress(start, ntams);

    size_t marked_bytes = 0;

    while (start < ntams) {
      oop obj = oop(start);
      int obj_sz = obj->size();
      HeapWord* obj_end = start + obj_sz;

      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end);

      // Note: if we're looking at the last region in heap - obj_end
      // could be actually just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
      if (_g1h->is_in_g1_reserved(obj_end) && !_ct_bs->is_card_aligned(obj_end)) {
        // end of object is not card aligned - increment to cover
        // all the cards spanned by the object
        end_idx += 1;
      }

      // Set the bits in the card BM for the cards spanned by this object.
      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);

      // Add the size of this object to the number of marked bytes.
      marked_bytes += (size_t)obj_sz * HeapWordSize;

      // Find the next marked object after this one.
      start = _bm->getNextMarkedWordAddress(obj_end, ntams);
    }

    // Mark the allocated-since-marking portion...
    HeapWord* top = hr->top();
    if (ntams < top) {
      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);

      // Note: if we're looking at the last region in heap - top
      // could be actually just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
      if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
        // end of object is not card aligned - increment to cover
        // all the cards spanned by the object
        end_idx += 1;
      }
      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);

      // This definitely means the region has live objects.
      set_bit_for_region(hr);
    }

    // Update the live region bitmap.
    if (marked_bytes > 0) {
      set_bit_for_region(hr);
    }

    // Set the marked bytes for the current region so that
    // it can be queried by a calling verification routine
    _region_marked_bytes = marked_bytes;

    return false;
  }

  size_t region_marked_bytes() const { return _region_marked_bytes; }
};
1496 // Heap region closure used for verifying the counting data
1497 // that was accumulated concurrently and aggregated during
1498 // the remark pause. This closure is applied to the heap
1499 // regions during the STW cleanup pause.
1501 class VerifyLiveObjectDataHRClosure: public HeapRegionClosure {
1502 G1CollectedHeap* _g1h;
1503 ConcurrentMark* _cm;
1504 CalcLiveObjectsClosure _calc_cl;
1505 BitMap* _region_bm; // Region BM to be verified
1506 BitMap* _card_bm; // Card BM to be verified
1507 bool _verbose; // verbose output?
1509 BitMap* _exp_region_bm; // Expected Region BM values
1510 BitMap* _exp_card_bm; // Expected card BM values
1512 int _failures;
1514 public:
1515 VerifyLiveObjectDataHRClosure(G1CollectedHeap* g1h,
1516 BitMap* region_bm,
1517 BitMap* card_bm,
1518 BitMap* exp_region_bm,
1519 BitMap* exp_card_bm,
1520 bool verbose) :
1521 _g1h(g1h), _cm(g1h->concurrent_mark()),
1522 _calc_cl(_cm->nextMarkBitMap(), g1h, exp_region_bm, exp_card_bm),
1523 _region_bm(region_bm), _card_bm(card_bm), _verbose(verbose),
1524 _exp_region_bm(exp_region_bm), _exp_card_bm(exp_card_bm),
1525 _failures(0) { }
1527 int failures() const { return _failures; }
1529 bool doHeapRegion(HeapRegion* hr) {
1530 if (hr->continuesHumongous()) {
1531 // We will ignore these here and process them when their
1532 // associated "starts humongous" region is processed (see
1533 // set_bit_for_heap_region()). Note that we cannot rely on their
1534 // associated "starts humongous" region to have their bit set to
1535 // 1 since, due to the region chunking in the parallel region
1536 // iteration, a "continues humongous" region might be visited
1537 // before its associated "starts humongous".
1538 return false;
1539 }
1541 int failures = 0;
1543 // Call the CalcLiveObjectsClosure to walk the marking bitmap for
1544 // this region and set the corresponding bits in the expected region
1545 // and card bitmaps.
1546 bool res = _calc_cl.doHeapRegion(hr);
1547 assert(res == false, "should be continuing");
1549 MutexLockerEx x((_verbose ? ParGCRareEvent_lock : NULL),
1550 Mutex::_no_safepoint_check_flag);
1552 // Verify the marked bytes for this region.
1553 size_t exp_marked_bytes = _calc_cl.region_marked_bytes();
1554 size_t act_marked_bytes = hr->next_marked_bytes();
1556 // We're not OK if expected marked bytes > actual marked bytes. It means
1557 // we have missed accounting some objects during the actual marking.
1558 if (exp_marked_bytes > act_marked_bytes) {
1559 if (_verbose) {
1560 gclog_or_tty->print_cr("Region %u: marked bytes mismatch: "
1561 "expected: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
1562 hr->hrs_index(), exp_marked_bytes, act_marked_bytes);
1563 }
1564 failures += 1;
1565 }
1567 // Verify the bit, for this region, in the actual and expected
1568 // (which was just calculated) region bit maps.
1569 // We're not OK if the bit in the calculated expected region
1570 // bitmap is set and the bit in the actual region bitmap is not.
1571 BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();
1573 bool expected = _exp_region_bm->at(index);
1574 bool actual = _region_bm->at(index);
1575 if (expected && !actual) {
1576 if (_verbose) {
1577 gclog_or_tty->print_cr("Region %u: region bitmap mismatch: "
1578 "expected: %s, actual: %s",
1579 hr->hrs_index(),
1580 BOOL_TO_STR(expected), BOOL_TO_STR(actual));
1581 }
1582 failures += 1;
1583 }
1585 // Verify that the card bit maps for the cards spanned by the current
1586 // region match. We have an error if we have a set bit in the expected
1587 // bit map and the corresponding bit in the actual bitmap is not set.
1589 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(hr->bottom());
1590 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(hr->top());
1592 for (BitMap::idx_t i = start_idx; i < end_idx; i+=1) {
1593 expected = _exp_card_bm->at(i);
1594 actual = _card_bm->at(i);
1596 if (expected && !actual) {
1597 if (_verbose) {
1598 gclog_or_tty->print_cr("Region %u: card bitmap mismatch at " SIZE_FORMAT ": "
1599 "expected: %s, actual: %s",
1600 hr->hrs_index(), i,
1601 BOOL_TO_STR(expected), BOOL_TO_STR(actual));
1602 }
1603 failures += 1;
1604 }
1605 }
1607 if (failures > 0 && _verbose) {
1608 gclog_or_tty->print_cr("Region " HR_FORMAT ", ntams: " PTR_FORMAT ", "
1609 "marked_bytes: calc/actual " SIZE_FORMAT "/" SIZE_FORMAT,
1610 HR_FORMAT_PARAMS(hr), hr->next_top_at_mark_start(),
1611 _calc_cl.region_marked_bytes(), hr->next_marked_bytes());
1612 }
1614 _failures += failures;
1616 // We could stop iteration over the heap when we
1617 // find the first violating region by returning true.
1618 return false;
1619 }
1620 };
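// [Editor's note] A minimal, self-contained sketch (not HotSpot code) of the
// one-way check performed by the closure above: every bit set in the freshly
// calculated "expected" bitmap must also be set in the "actual" bitmap that
// was accumulated during marking; the reverse is allowed. All names below
// are illustrative, with std::vector<bool> standing in for BitMap.

#include <cstddef>
#include <vector>

// Returns the number of positions where `expected` is set but `actual` is
// not, i.e. liveness information that marking failed to record.
static size_t count_missing_bits(const std::vector<bool>& expected,
                                 const std::vector<bool>& actual) {
  size_t failures = 0;
  for (size_t i = 0; i < expected.size() && i < actual.size(); ++i) {
    if (expected[i] && !actual[i]) {
      failures += 1;
    }
  }
  return failures;
}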
1622 class G1ParVerifyFinalCountTask: public AbstractGangTask {
1623 protected:
1624 G1CollectedHeap* _g1h;
1625 ConcurrentMark* _cm;
1626 BitMap* _actual_region_bm;
1627 BitMap* _actual_card_bm;
1629 uint _n_workers;
1631 BitMap* _expected_region_bm;
1632 BitMap* _expected_card_bm;
1634 int _failures;
1635 bool _verbose;
1637 public:
1638 G1ParVerifyFinalCountTask(G1CollectedHeap* g1h,
1639 BitMap* region_bm, BitMap* card_bm,
1640 BitMap* expected_region_bm, BitMap* expected_card_bm)
1641 : AbstractGangTask("G1 verify final counting"),
1642 _g1h(g1h), _cm(_g1h->concurrent_mark()),
1643 _actual_region_bm(region_bm), _actual_card_bm(card_bm),
1644 _expected_region_bm(expected_region_bm), _expected_card_bm(expected_card_bm),
1645 _failures(0), _verbose(false),
1646 _n_workers(0) {
1647 assert(VerifyDuringGC, "don't call this otherwise");
1649 // Use the value already set as the number of active threads
1650 // in the call to run_task().
1651 if (G1CollectedHeap::use_parallel_gc_threads()) {
1652 assert( _g1h->workers()->active_workers() > 0,
1653 "Should have been previously set");
1654 _n_workers = _g1h->workers()->active_workers();
1655 } else {
1656 _n_workers = 1;
1657 }
1659 assert(_expected_card_bm->size() == _actual_card_bm->size(), "sanity");
1660 assert(_expected_region_bm->size() == _actual_region_bm->size(), "sanity");
1662 _verbose = _cm->verbose_medium();
1663 }
1665 void work(uint worker_id) {
1666 assert(worker_id < _n_workers, "invariant");
1668 VerifyLiveObjectDataHRClosure verify_cl(_g1h,
1669 _actual_region_bm, _actual_card_bm,
1670 _expected_region_bm,
1671 _expected_card_bm,
1672 _verbose);
1674 if (G1CollectedHeap::use_parallel_gc_threads()) {
1675 _g1h->heap_region_par_iterate_chunked(&verify_cl,
1676 worker_id,
1677 _n_workers,
1678 HeapRegion::VerifyCountClaimValue);
1679 } else {
1680 _g1h->heap_region_iterate(&verify_cl);
1681 }
1683 Atomic::add(verify_cl.failures(), &_failures);
1684 }
1686 int failures() const { return _failures; }
1687 };
1689 // Closure that finalizes the liveness counting data.
1690 // Used during the cleanup pause.
1691 // Sets the bits corresponding to the interval [NTAMS, top)
1692 // (which contains the implicitly live objects) in the
1693 // card liveness bitmap. Also sets the bit, in the region liveness
1694 // bitmap, for each region that contains live data.
1696 class FinalCountDataUpdateClosure: public CMCountDataClosureBase {
1697 public:
1698 FinalCountDataUpdateClosure(G1CollectedHeap* g1h,
1699 BitMap* region_bm,
1700 BitMap* card_bm) :
1701 CMCountDataClosureBase(g1h, region_bm, card_bm) { }
1703 bool doHeapRegion(HeapRegion* hr) {
1705 if (hr->continuesHumongous()) {
1706 // We will ignore these here and process them when their
1707 // associated "starts humongous" region is processed (see
1708 // set_bit_for_heap_region()). Note that we cannot rely on their
1709 // associated "starts humongous" region to have its bit set to
1710 // 1 since, due to the region chunking in the parallel region
1711 // iteration, a "continues humongous" region might be visited
1712 // before its associated "starts humongous".
1713 return false;
1714 }
1716 HeapWord* ntams = hr->next_top_at_mark_start();
1717 HeapWord* top = hr->top();
1719 assert(hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions.");
1721 // Mark the allocated-since-marking portion...
1722 if (ntams < top) {
1723 // This definitely means the region has live objects.
1724 set_bit_for_region(hr);
1726 // Now set the bits in the card bitmap for [ntams, top)
1727 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
1728 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
1730 // Note: if we're looking at the last region in the heap, top
1731 // could actually be just beyond the end of the heap; end_idx
1732 // will then correspond to a (non-existent) card that is also
1733 // just beyond the heap.
1734 if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
1735 // The end of the object is not card aligned - increment end_idx
1736 // to cover all the cards spanned by the object.
1737 end_idx += 1;
1738 }
1740 assert(end_idx <= _card_bm->size(),
1741 err_msg("oob: end_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT,
1742 end_idx, _card_bm->size()));
1743 assert(start_idx < _card_bm->size(),
1744 err_msg("oob: start_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT,
1745 start_idx, _card_bm->size()));
1747 _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1748 }
1750 // Set the bit for the region if it contains live data
1751 if (hr->next_marked_bytes() > 0) {
1752 set_bit_for_region(hr);
1753 }
1755 return false;
1756 }
1757 };
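// [Editor's note] A standalone sketch (not HotSpot code) of the card-index
// arithmetic used in doHeapRegion() above, assuming 512-byte cards (a shift
// of 9) as with the default card table; constants and names are illustrative.

#include <cstddef>
#include <cstdint>

static const int kIllustrativeCardShift = 9; // 512-byte cards

// Computes the half-open card index range [*start_idx, *end_idx) covering
// the byte range [start, top): when `top` is not card aligned, the final
// partial card is included by bumping *end_idx, mirroring the closure above.
static void card_range_for(uintptr_t heap_base, uintptr_t start, uintptr_t top,
                           size_t* start_idx, size_t* end_idx) {
  *start_idx = (start - heap_base) >> kIllustrativeCardShift;
  *end_idx   = (top   - heap_base) >> kIllustrativeCardShift;
  const uintptr_t card_mask = (uintptr_t(1) << kIllustrativeCardShift) - 1;
  if (((top - heap_base) & card_mask) != 0) {
    *end_idx += 1; // cover all cards spanned by the object ending at top
  }
}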
1759 class G1ParFinalCountTask: public AbstractGangTask {
1760 protected:
1761 G1CollectedHeap* _g1h;
1762 ConcurrentMark* _cm;
1763 BitMap* _actual_region_bm;
1764 BitMap* _actual_card_bm;
1766 uint _n_workers;
1768 public:
1769 G1ParFinalCountTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm)
1770 : AbstractGangTask("G1 final counting"),
1771 _g1h(g1h), _cm(_g1h->concurrent_mark()),
1772 _actual_region_bm(region_bm), _actual_card_bm(card_bm),
1773 _n_workers(0) {
1774 // Use the value already set as the number of active threads
1775 // in the call to run_task().
1776 if (G1CollectedHeap::use_parallel_gc_threads()) {
1777 assert( _g1h->workers()->active_workers() > 0,
1778 "Should have been previously set");
1779 _n_workers = _g1h->workers()->active_workers();
1780 } else {
1781 _n_workers = 1;
1782 }
1783 }
1785 void work(uint worker_id) {
1786 assert(worker_id < _n_workers, "invariant");
1788 FinalCountDataUpdateClosure final_update_cl(_g1h,
1789 _actual_region_bm,
1790 _actual_card_bm);
1792 if (G1CollectedHeap::use_parallel_gc_threads()) {
1793 _g1h->heap_region_par_iterate_chunked(&final_update_cl,
1794 worker_id,
1795 _n_workers,
1796 HeapRegion::FinalCountClaimValue);
1797 } else {
1798 _g1h->heap_region_iterate(&final_update_cl);
1799 }
1800 }
1801 };
1803 class G1ParNoteEndTask;
1805 class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
1806 G1CollectedHeap* _g1;
1807 size_t _max_live_bytes;
1808 uint _regions_claimed;
1809 size_t _freed_bytes;
1810 FreeRegionList* _local_cleanup_list;
1811 HeapRegionSetCount _old_regions_removed;
1812 HeapRegionSetCount _humongous_regions_removed;
1813 HRRSCleanupTask* _hrrs_cleanup_task;
1814 double _claimed_region_time;
1815 double _max_region_time;
1817 public:
1818 G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
1819 FreeRegionList* local_cleanup_list,
1820 HRRSCleanupTask* hrrs_cleanup_task) :
1821 _g1(g1),
1822 _max_live_bytes(0), _regions_claimed(0),
1823 _freed_bytes(0),
1824 _claimed_region_time(0.0), _max_region_time(0.0),
1825 _local_cleanup_list(local_cleanup_list),
1826 _old_regions_removed(),
1827 _humongous_regions_removed(),
1828 _hrrs_cleanup_task(hrrs_cleanup_task) { }
1830 size_t freed_bytes() { return _freed_bytes; }
1831 const HeapRegionSetCount& old_regions_removed() { return _old_regions_removed; }
1832 const HeapRegionSetCount& humongous_regions_removed() { return _humongous_regions_removed; }
1834 bool doHeapRegion(HeapRegion *hr) {
1835 if (hr->continuesHumongous()) {
1836 return false;
1837 }
1838 // We use a claim value of zero here because all regions
1839 // were claimed with value 1 in the FinalCount task.
1840 _g1->reset_gc_time_stamps(hr);
1841 double start = os::elapsedTime();
1842 _regions_claimed++;
1843 hr->note_end_of_marking();
1844 _max_live_bytes += hr->max_live_bytes();
1846 if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
1847 _freed_bytes += hr->used();
1848 hr->set_containing_set(NULL);
1849 if (hr->isHumongous()) {
1850 assert(hr->startsHumongous(), "we should only see starts humongous");
1851 _humongous_regions_removed.increment(1u, hr->capacity());
1852 _g1->free_humongous_region(hr, _local_cleanup_list, true);
1853 } else {
1854 _old_regions_removed.increment(1u, hr->capacity());
1855 _g1->free_region(hr, _local_cleanup_list, true);
1856 }
1857 } else {
1858 hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
1859 }
1861 double region_time = (os::elapsedTime() - start);
1862 _claimed_region_time += region_time;
1863 if (region_time > _max_region_time) {
1864 _max_region_time = region_time;
1865 }
1866 return false;
1867 }
1869 size_t max_live_bytes() { return _max_live_bytes; }
1870 uint regions_claimed() { return _regions_claimed; }
1871 double claimed_region_time_sec() { return _claimed_region_time; }
1872 double max_region_time_sec() { return _max_region_time; }
1873 };
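// [Editor's note] The reclamation test in doHeapRegion() above frees a region
// at cleanup only when it is non-empty, has no live data, and is not young.
// A sketch of that predicate over a plain struct (all names illustrative):

#include <cstddef>

struct RegionInfoSketch {
  size_t used_bytes;      // bytes allocated in the region
  size_t max_live_bytes;  // upper bound on live bytes after marking
  bool   is_young;
};

// True when every allocated byte is garbage, so the whole region can be
// handed back to the free list without evacuating anything.
static bool fully_reclaimable(const RegionInfoSketch& r) {
  return r.used_bytes > 0 && r.max_live_bytes == 0 && !r.is_young;
}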
1875 class G1ParNoteEndTask: public AbstractGangTask {
1876 friend class G1NoteEndOfConcMarkClosure;
1878 protected:
1879 G1CollectedHeap* _g1h;
1880 size_t _max_live_bytes;
1881 size_t _freed_bytes;
1882 FreeRegionList* _cleanup_list;
1884 public:
1885 G1ParNoteEndTask(G1CollectedHeap* g1h,
1886 FreeRegionList* cleanup_list) :
1887 AbstractGangTask("G1 note end"), _g1h(g1h),
1888 _max_live_bytes(0), _freed_bytes(0), _cleanup_list(cleanup_list) { }
1890 void work(uint worker_id) {
1891 double start = os::elapsedTime();
1892 FreeRegionList local_cleanup_list("Local Cleanup List");
1893 HRRSCleanupTask hrrs_cleanup_task;
1894 G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list,
1895 &hrrs_cleanup_task);
1896 if (G1CollectedHeap::use_parallel_gc_threads()) {
1897 _g1h->heap_region_par_iterate_chunked(&g1_note_end, worker_id,
1898 _g1h->workers()->active_workers(),
1899 HeapRegion::NoteEndClaimValue);
1900 } else {
1901 _g1h->heap_region_iterate(&g1_note_end);
1902 }
1903 assert(g1_note_end.complete(), "Shouldn't have yielded!");
1905 // Now update the lists
1906 _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed());
1907 {
1908 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
1909 _g1h->decrement_summary_bytes(g1_note_end.freed_bytes());
1910 _max_live_bytes += g1_note_end.max_live_bytes();
1911 _freed_bytes += g1_note_end.freed_bytes();
1913 // If we iterate over the global cleanup list at the end of
1914 // cleanup to do this printing, we cannot guarantee to only
1915 // generate output for the newly-reclaimed regions (the list
1916 // might not be empty at the beginning of cleanup; we might
1917 // still be working on its previous contents). So we do the
1918 // printing here, before we append the new regions to the global
1919 // cleanup list.
1921 G1HRPrinter* hr_printer = _g1h->hr_printer();
1922 if (hr_printer->is_active()) {
1923 FreeRegionListIterator iter(&local_cleanup_list);
1924 while (iter.more_available()) {
1925 HeapRegion* hr = iter.get_next();
1926 hr_printer->cleanup(hr);
1927 }
1928 }
1930 _cleanup_list->add_ordered(&local_cleanup_list);
1931 assert(local_cleanup_list.is_empty(), "post-condition");
1933 HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
1934 }
1935 }
1936 size_t max_live_bytes() { return _max_live_bytes; }
1937 size_t freed_bytes() { return _freed_bytes; }
1938 };
1940 class G1ParScrubRemSetTask: public AbstractGangTask {
1941 protected:
1942 G1RemSet* _g1rs;
1943 BitMap* _region_bm;
1944 BitMap* _card_bm;
1945 public:
1946 G1ParScrubRemSetTask(G1CollectedHeap* g1h,
1947 BitMap* region_bm, BitMap* card_bm) :
1948 AbstractGangTask("G1 ScrubRS"), _g1rs(g1h->g1_rem_set()),
1949 _region_bm(region_bm), _card_bm(card_bm) { }
1951 void work(uint worker_id) {
1952 if (G1CollectedHeap::use_parallel_gc_threads()) {
1953 _g1rs->scrub_par(_region_bm, _card_bm, worker_id,
1954 HeapRegion::ScrubRemSetClaimValue);
1955 } else {
1956 _g1rs->scrub(_region_bm, _card_bm);
1957 }
1958 }
1960 };
1962 void ConcurrentMark::cleanup() {
1963 // world is stopped at this checkpoint
1964 assert(SafepointSynchronize::is_at_safepoint(),
1965 "world should be stopped");
1966 G1CollectedHeap* g1h = G1CollectedHeap::heap();
1968 // If a full collection has happened, we shouldn't do this.
1969 if (has_aborted()) {
1970 g1h->set_marking_complete(); // So bitmap clearing isn't confused
1971 return;
1972 }
1974 g1h->verify_region_sets_optional();
1976 if (VerifyDuringGC) {
1977 HandleMark hm; // handle scope
1978 Universe::heap()->prepare_for_verify();
1979 Universe::verify(VerifyOption_G1UsePrevMarking,
1980 " VerifyDuringGC:(before)");
1981 }
1983 G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
1984 g1p->record_concurrent_mark_cleanup_start();
1986 double start = os::elapsedTime();
1988 HeapRegionRemSet::reset_for_cleanup_tasks();
1990 uint n_workers;
1992 // Do counting once more with the world stopped for good measure.
1993 G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm);
1995 if (G1CollectedHeap::use_parallel_gc_threads()) {
1996 assert(g1h->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
1997 "sanity check");
1999 g1h->set_par_threads();
2000 n_workers = g1h->n_par_threads();
2001 assert(g1h->n_par_threads() == n_workers,
2002 "Should not have been reset");
2003 g1h->workers()->run_task(&g1_par_count_task);
2004 // Done with the parallel phase so reset to 0.
2005 g1h->set_par_threads(0);
2007 assert(g1h->check_heap_region_claim_values(HeapRegion::FinalCountClaimValue),
2008 "sanity check");
2009 } else {
2010 n_workers = 1;
2011 g1_par_count_task.work(0);
2012 }
2014 if (VerifyDuringGC) {
2015 // Verify that the counting data accumulated during marking matches
2016 // that calculated by walking the marking bitmap.
2018 // Bitmaps to hold expected values
2019 BitMap expected_region_bm(_region_bm.size(), false);
2020 BitMap expected_card_bm(_card_bm.size(), false);
2022 G1ParVerifyFinalCountTask g1_par_verify_task(g1h,
2023 &_region_bm,
2024 &_card_bm,
2025 &expected_region_bm,
2026 &expected_card_bm);
2028 if (G1CollectedHeap::use_parallel_gc_threads()) {
2029 g1h->set_par_threads((int)n_workers);
2030 g1h->workers()->run_task(&g1_par_verify_task);
2031 // Done with the parallel phase so reset to 0.
2032 g1h->set_par_threads(0);
2034 assert(g1h->check_heap_region_claim_values(HeapRegion::VerifyCountClaimValue),
2035 "sanity check");
2036 } else {
2037 g1_par_verify_task.work(0);
2038 }
2040 guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures");
2041 }
2043 size_t start_used_bytes = g1h->used();
2044 g1h->set_marking_complete();
2046 double count_end = os::elapsedTime();
2047 double this_final_counting_time = (count_end - start);
2048 _total_counting_time += this_final_counting_time;
2050 if (G1PrintRegionLivenessInfo) {
2051 G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking");
2052 _g1h->heap_region_iterate(&cl);
2053 }
2055 // Install newly created mark bitMap as "prev".
2056 swapMarkBitMaps();
2058 g1h->reset_gc_time_stamp();
2060 // Note end of marking in all heap regions.
2061 G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list);
2062 if (G1CollectedHeap::use_parallel_gc_threads()) {
2063 g1h->set_par_threads((int)n_workers);
2064 g1h->workers()->run_task(&g1_par_note_end_task);
2065 g1h->set_par_threads(0);
2067 assert(g1h->check_heap_region_claim_values(HeapRegion::NoteEndClaimValue),
2068 "sanity check");
2069 } else {
2070 g1_par_note_end_task.work(0);
2071 }
2072 g1h->check_gc_time_stamps();
2074 if (!cleanup_list_is_empty()) {
2075 // The cleanup list is not empty, so we'll have to process it
2076 // concurrently. Notify anyone else that might be wanting free
2077 // regions that there will be more free regions coming soon.
2078 g1h->set_free_regions_coming();
2079 }
2081 // Scrub the remembered sets here, before the record_concurrent_mark_cleanup_end()
2082 // call below, since scrubbing affects the metric by which we sort the heap regions.
2083 if (G1ScrubRemSets) {
2084 double rs_scrub_start = os::elapsedTime();
2085 G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm);
2086 if (G1CollectedHeap::use_parallel_gc_threads()) {
2087 g1h->set_par_threads((int)n_workers);
2088 g1h->workers()->run_task(&g1_par_scrub_rs_task);
2089 g1h->set_par_threads(0);
2091 assert(g1h->check_heap_region_claim_values(
2092 HeapRegion::ScrubRemSetClaimValue),
2093 "sanity check");
2094 } else {
2095 g1_par_scrub_rs_task.work(0);
2096 }
2098 double rs_scrub_end = os::elapsedTime();
2099 double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start);
2100 _total_rs_scrub_time += this_rs_scrub_time;
2101 }
2103 // This will also free any regions totally full of garbage objects,
2104 // and sort the regions.
2105 g1h->g1_policy()->record_concurrent_mark_cleanup_end((int)n_workers);
2107 // Statistics.
2108 double end = os::elapsedTime();
2109 _cleanup_times.add((end - start) * 1000.0);
2111 if (G1Log::fine()) {
2112 g1h->print_size_transition(gclog_or_tty,
2113 start_used_bytes,
2114 g1h->used(),
2115 g1h->capacity());
2116 }
2118 // Clean up will have freed any regions completely full of garbage.
2119 // Update the soft reference policy with the new heap occupancy.
2120 Universe::update_heap_info_at_gc();
2122 // We need to make this count as a "collection" so that any collection pause that
2123 // races with it goes around and waits for completeCleanup to finish.
2124 g1h->increment_total_collections();
2126 // We reclaimed old regions so we should calculate the sizes to make
2127 // sure we update the old gen/space data.
2128 g1h->g1mm()->update_sizes();
2130 if (VerifyDuringGC) {
2131 HandleMark hm; // handle scope
2132 Universe::heap()->prepare_for_verify();
2133 Universe::verify(VerifyOption_G1UsePrevMarking,
2134 " VerifyDuringGC:(after)");
2135 }
2137 g1h->verify_region_sets_optional();
2138 g1h->trace_heap_after_concurrent_cycle();
2139 }
2141 void ConcurrentMark::completeCleanup() {
2142 if (has_aborted()) return;
2144 G1CollectedHeap* g1h = G1CollectedHeap::heap();
2146 _cleanup_list.verify_optional();
2147 FreeRegionList tmp_free_list("Tmp Free List");
2149 if (G1ConcRegionFreeingVerbose) {
2150 gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
2151 "cleanup list has %u entries",
2152 _cleanup_list.length());
2153 }
2155 // No one else should be accessing the _cleanup_list at this point,
2156 // so it's not necessary to take any locks.
2157 while (!_cleanup_list.is_empty()) {
2158 HeapRegion* hr = _cleanup_list.remove_head();
2159 assert(hr != NULL, "Got NULL from a non-empty list");
2160 hr->par_clear();
2161 tmp_free_list.add_ordered(hr);
2163 // Instead of adding one region at a time to the secondary_free_list,
2164 // we accumulate them in the local list and move them a few at a
2165 // time. This also cuts down on the number of notify_all() calls
2166 // we do during this process. We'll also append the local list when
2167 // _cleanup_list is empty (which means we just removed the last
2168 // region from the _cleanup_list).
2169 if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
2170 _cleanup_list.is_empty()) {
2171 if (G1ConcRegionFreeingVerbose) {
2172 gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
2173 "appending %u entries to the secondary_free_list, "
2174 "cleanup list still has %u entries",
2175 tmp_free_list.length(),
2176 _cleanup_list.length());
2177 }
2179 {
2180 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
2181 g1h->secondary_free_list_add(&tmp_free_list);
2182 SecondaryFreeList_lock->notify_all();
2183 }
2185 if (G1StressConcRegionFreeing) {
2186 for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
2187 os::sleep(Thread::current(), (jlong) 1, false);
2188 }
2189 }
2190 }
2191 }
2192 assert(tmp_free_list.is_empty(), "post-condition");
2193 }
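// [Editor's note] completeCleanup() drains regions into a thread-local list
// and hands them to the shared list in batches, so the lock is taken and
// notify_all() issued once per batch rather than once per region. A minimal
// standalone sketch of that pattern (names and batch size are illustrative;
// `batch` must be > 0):

#include <condition_variable>
#include <mutex>
#include <vector>

static std::mutex              shared_lock;
static std::condition_variable shared_cv;
static std::vector<int>        shared_list;

static void drain_in_batches(std::vector<int>& source, size_t batch) {
  std::vector<int> local;
  while (!source.empty()) {
    local.push_back(source.back());
    source.pop_back();
    // Hand over a full batch, or whatever is left once the source is empty.
    if (local.size() % batch == 0 || source.empty()) {
      std::lock_guard<std::mutex> g(shared_lock);
      shared_list.insert(shared_list.end(), local.begin(), local.end());
      local.clear();
      shared_cv.notify_all(); // one wakeup per batch, not per element
    }
  }
}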
2195 // Supporting Object and Oop closures for reference discovery
2196 // and processing during marking.
2198 bool G1CMIsAliveClosure::do_object_b(oop obj) {
2199 HeapWord* addr = (HeapWord*)obj;
2200 return addr != NULL &&
2201 (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
2202 }
2204 // 'Keep Alive' oop closure used by both serial and parallel reference processing.
2205 // Uses the CMTask associated with a worker thread (for serial reference
2206 // processing the CMTask for worker 0 is used) to preserve (mark) and
2207 // trace referent objects.
2208 //
2209 // Using the CMTask and embedded local queues avoids having the worker
2210 // threads operating on the global mark stack. This reduces the risk
2211 // of overflowing the stack - which we would rather avoid at this late
2212 // stage. Also, using the tasks' local queues removes the potential
2213 // for the workers to interfere with each other, which could occur
2214 // when operating on the global stack.
2216 class G1CMKeepAliveAndDrainClosure: public OopClosure {
2217 ConcurrentMark* _cm;
2218 CMTask* _task;
2219 int _ref_counter_limit;
2220 int _ref_counter;
2221 bool _is_serial;
2222 public:
2223 G1CMKeepAliveAndDrainClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
2224 _cm(cm), _task(task), _is_serial(is_serial),
2225 _ref_counter_limit(G1RefProcDrainInterval) {
2226 assert(_ref_counter_limit > 0, "sanity");
2227 assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
2228 _ref_counter = _ref_counter_limit;
2229 }
2231 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
2232 virtual void do_oop( oop* p) { do_oop_work(p); }
2234 template <class T> void do_oop_work(T* p) {
2235 if (!_cm->has_overflown()) {
2236 oop obj = oopDesc::load_decode_heap_oop(p);
2237 if (_cm->verbose_high()) {
2238 gclog_or_tty->print_cr("\t[%u] we're looking at location "
2239 "*"PTR_FORMAT" = "PTR_FORMAT,
2240 _task->worker_id(), p, (void*) obj);
2241 }
2243 _task->deal_with_reference(obj);
2244 _ref_counter--;
2246 if (_ref_counter == 0) {
2247 // We have dealt with _ref_counter_limit references, pushing them
2248 // and objects reachable from them on to the local stack (and
2249 // possibly the global stack). Call CMTask::do_marking_step() to
2250 // process these entries.
2251 //
2252 // We call CMTask::do_marking_step() in a loop, which we'll exit if
2253 // there's nothing more to do (i.e. we're done with the entries that
2254 // were pushed as a result of the CMTask::deal_with_reference() calls
2255 // above) or we overflow.
2256 //
2257 // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
2258 // flag while there may still be some work to do. (See the comment at
2259 // the beginning of CMTask::do_marking_step() for those conditions -
2260 // one of which is reaching the specified time target.) It is only
2261 // when CMTask::do_marking_step() returns without setting the
2262 // has_aborted() flag that the marking step has completed.
2263 do {
2264 double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
2265 _task->do_marking_step(mark_step_duration_ms,
2266 false /* do_termination */,
2267 _is_serial);
2268 } while (_task->has_aborted() && !_cm->has_overflown());
2269 _ref_counter = _ref_counter_limit;
2270 }
2271 } else {
2272 if (_cm->verbose_high()) {
2273 gclog_or_tty->print_cr("\t[%u] CM Overflow", _task->worker_id());
2274 }
2275 }
2276 }
2277 };
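// [Editor's note] The closure above counts down from G1RefProcDrainInterval
// and drains its local queues whenever the counter hits zero, bounding how
// much pending work can accumulate between drains. A schematic standalone
// version of that rhythm (all names below are illustrative):

#include <cstddef>

struct DrainingVisitor {
  size_t limit;   // e.g. initialized from a drain-interval flag
  size_t counter;

  explicit DrainingVisitor(size_t interval) : limit(interval), counter(interval) {}

  template <typename Ref, typename PushFn, typename DrainFn>
  void visit(Ref ref, PushFn push, DrainFn drain) {
    push(ref);            // enqueue the referent for tracing
    if (--counter == 0) { // every `limit` references...
      drain();            // ...process everything queued so far
      counter = limit;
    }
  }
};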
2279 // 'Drain' closure used by both serial and parallel reference processing.
2280 // Uses the CMTask associated with a given worker thread (for serial
2281 // reference processing the CMTask for worker 0 is used). Calls the
2282 // do_marking_step routine, with an unbelievably large timeout value,
2283 // to drain the marking data structures of the remaining entries
2284 // added by the 'keep alive' oop closure above.
2286 class G1CMDrainMarkingStackClosure: public VoidClosure {
2287 ConcurrentMark* _cm;
2288 CMTask* _task;
2289 bool _is_serial;
2290 public:
2291 G1CMDrainMarkingStackClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
2292 _cm(cm), _task(task), _is_serial(is_serial) {
2293 assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
2294 }
2296 void do_void() {
2297 do {
2298 if (_cm->verbose_high()) {
2299 gclog_or_tty->print_cr("\t[%u] Drain: Calling do_marking_step - serial: %s",
2300 _task->worker_id(), BOOL_TO_STR(_is_serial));
2301 }
2303 // We call CMTask::do_marking_step() to completely drain the local
2304 // and global marking stacks of entries pushed by the 'keep alive'
2305 // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
2306 //
2307 // CMTask::do_marking_step() is called in a loop, which we'll exit
2308 // if there's nothing more to do (i.e. we've completely drained the
2309 // entries that were pushed as a result of applying the 'keep alive'
2310 // closure to the entries on the discovered ref lists) or we overflow
2311 // the global marking stack.
2312 //
2313 // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
2314 // flag while there may still be some work to do. (See the comment at
2315 // the beginning of CMTask::do_marking_step() for those conditions -
2316 // one of which is reaching the specified time target.) It is only
2317 // when CMTask::do_marking_step() returns without setting the
2318 // has_aborted() flag that the marking step has completed.
2320 _task->do_marking_step(1000000000.0 /* something very large */,
2321 true /* do_termination */,
2322 _is_serial);
2323 } while (_task->has_aborted() && !_cm->has_overflown());
2324 }
2325 };
2327 // Implementation of AbstractRefProcTaskExecutor for parallel
2328 // reference processing at the end of G1 concurrent marking
2330 class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
2331 private:
2332 G1CollectedHeap* _g1h;
2333 ConcurrentMark* _cm;
2334 WorkGang* _workers;
2335 int _active_workers;
2337 public:
2338 G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
2339 ConcurrentMark* cm,
2340 WorkGang* workers,
2341 int n_workers) :
2342 _g1h(g1h), _cm(cm),
2343 _workers(workers), _active_workers(n_workers) { }
2345 // Executes the given task using concurrent marking worker threads.
2346 virtual void execute(ProcessTask& task);
2347 virtual void execute(EnqueueTask& task);
2348 };
2350 class G1CMRefProcTaskProxy: public AbstractGangTask {
2351 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
2352 ProcessTask& _proc_task;
2353 G1CollectedHeap* _g1h;
2354 ConcurrentMark* _cm;
2356 public:
2357 G1CMRefProcTaskProxy(ProcessTask& proc_task,
2358 G1CollectedHeap* g1h,
2359 ConcurrentMark* cm) :
2360 AbstractGangTask("Process reference objects in parallel"),
2361 _proc_task(proc_task), _g1h(g1h), _cm(cm) {
2362 ReferenceProcessor* rp = _g1h->ref_processor_cm();
2363 assert(rp->processing_is_mt(), "shouldn't be here otherwise");
2364 }
2366 virtual void work(uint worker_id) {
2367 CMTask* task = _cm->task(worker_id);
2368 G1CMIsAliveClosure g1_is_alive(_g1h);
2369 G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
2370 G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */);
2372 _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
2373 }
2374 };
2376 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
2377 assert(_workers != NULL, "Need parallel worker threads.");
2378 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
2380 G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);
2382 // We need to reset the concurrency level before each
2383 // proxy task execution, so that the termination protocol
2384 // and overflow handling in CMTask::do_marking_step() knows
2385 // how many workers to wait for.
2386 _cm->set_concurrency(_active_workers);
2387 _g1h->set_par_threads(_active_workers);
2388 _workers->run_task(&proc_task_proxy);
2389 _g1h->set_par_threads(0);
2390 }
2392 class G1CMRefEnqueueTaskProxy: public AbstractGangTask {
2393 typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
2394 EnqueueTask& _enq_task;
2396 public:
2397 G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) :
2398 AbstractGangTask("Enqueue reference objects in parallel"),
2399 _enq_task(enq_task) { }
2401 virtual void work(uint worker_id) {
2402 _enq_task.work(worker_id);
2403 }
2404 };
2406 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
2407 assert(_workers != NULL, "Need parallel worker threads.");
2408 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
2410 G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);
2412 // Not strictly necessary but...
2413 //
2414 // We need to reset the concurrency level before each
2415 // proxy task execution, so that the termination protocol
2416 // and overflow handling in CMTask::do_marking_step() knows
2417 // how many workers to wait for.
2418 _cm->set_concurrency(_active_workers);
2419 _g1h->set_par_threads(_active_workers);
2420 _workers->run_task(&enq_task_proxy);
2421 _g1h->set_par_threads(0);
2422 }
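// [Editor's note] Both execute() overloads re-arm the concurrency level
// before running the proxy task because the termination protocol waits for
// exactly that many workers. A standalone sketch (not HotSpot code) of why
// the armed count must match the gang actually launched:

#include <atomic>

struct TerminationSketch {
  std::atomic<int> expected; // set_concurrency(n) analogue
  std::atomic<int> arrived;

  void arm(int n) { expected.store(n); arrived.store(0); }

  // Each worker calls this when out of work; the last one to arrive
  // releases the rest. If `expected` were stale (larger than the number of
  // workers actually running), everyone would wait forever.
  void offer_termination() {
    arrived.fetch_add(1);
    while (arrived.load() < expected.load()) { /* spin (sketch only) */ }
  }
};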
2424 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
2425 if (has_overflown()) {
2426 // Skip processing the discovered references if we have
2427 // overflown the global marking stack. Reference objects
2428 // only get discovered once so it is OK to not
2429 // de-populate the discovered reference lists. We could have,
2430 // but the only benefit would be that, when marking restarts,
2431 // fewer reference objects are discovered.
2432 return;
2433 }
2435 ResourceMark rm;
2436 HandleMark hm;
2438 G1CollectedHeap* g1h = G1CollectedHeap::heap();
2440 // Is alive closure.
2441 G1CMIsAliveClosure g1_is_alive(g1h);
2443 // Inner scope to exclude the cleaning of the string and symbol
2444 // tables from the displayed time.
2445 {
2446 if (G1Log::finer()) {
2447 gclog_or_tty->put(' ');
2448 }
2449 GCTraceTime t("GC ref-proc", G1Log::finer(), false, g1h->gc_timer_cm());
2451 ReferenceProcessor* rp = g1h->ref_processor_cm();
2453 // See the comment in G1CollectedHeap::ref_processing_init()
2454 // about how reference processing currently works in G1.
2456 // Set the soft reference policy
2457 rp->setup_policy(clear_all_soft_refs);
2458 assert(_markStack.isEmpty(), "mark stack should be empty");
2460 // Instances of the 'Keep Alive' and 'Complete GC' closures used
2461 // in serial reference processing. Note these closures are also
2462 // used for serially processing (by the current thread) the
2463 // JNI references during parallel reference processing.
2464 //
2465 // These closures do not need to synchronize with the worker
2466 // threads involved in parallel reference processing as these
2467 // instances are executed serially by the current thread (e.g.
2468 // reference processing is not multi-threaded and is thus
2469 // performed by the current thread instead of a gang worker).
2470 //
2471 // The gang tasks involved in parallel reference processing create
2472 // their own instances of these closures, which do their own
2473 // synchronization among themselves.
2474 G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
2475 G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);
2477 // We need at least one active thread. If reference processing
2478 // is not multi-threaded we use the current (VMThread) thread,
2479 // otherwise we use the work gang from the G1CollectedHeap and
2480 // we utilize all the worker threads we can.
2481 bool processing_is_mt = rp->processing_is_mt() && g1h->workers() != NULL;
2482 uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U);
2483 active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);
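// [Editor's note] A hypothetical worked example of the clamp above: with
// 8 active gang workers but only 4 marking tasks (_max_worker_id == 4),
//   MAX2(MIN2(8U, 4U), 1U) == 4U
// and with MT processing disabled (active_workers starts at 1U) it stays
// 1U, so the result always lies in the range [1, _max_worker_id].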
2485 // Parallel processing task executor.
2486 G1CMRefProcTaskExecutor par_task_executor(g1h, this,
2487 g1h->workers(), active_workers);
2488 AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
2490 // Set the concurrency level. The phase was already set prior to
2491 // executing the remark task.
2492 set_concurrency(active_workers);
2494 // Set the degree of MT processing here. If the discovery was done MT,
2495 // the number of threads involved during discovery could differ from
2496 // the number of active workers. This is OK as long as the discovered
2497 // Reference lists are balanced (see balance_all_queues() and balance_queues()).
2498 rp->set_active_mt_degree(active_workers);
2500 // Process the weak references.
2501 const ReferenceProcessorStats& stats =
2502 rp->process_discovered_references(&g1_is_alive,
2503 &g1_keep_alive,
2504 &g1_drain_mark_stack,
2505 executor,
2506 g1h->gc_timer_cm());
2507 g1h->gc_tracer_cm()->report_gc_reference_stats(stats);
2509 // The do_oop work routines of the keep_alive and drain_marking_stack
2510 // oop closures will set the has_overflown flag if we overflow the
2511 // global marking stack.
2513 assert(_markStack.overflow() || _markStack.isEmpty(),
2514 "mark stack should be empty (unless it overflowed)");
2516 if (_markStack.overflow()) {
2517 // This should have been done already when we tried to push an
2518 // entry on to the global mark stack. But let's do it again.
2519 set_has_overflown();
2520 }
2522 assert(rp->num_q() == active_workers, "why not");
2524 rp->enqueue_discovered_references(executor);
2526 rp->verify_no_references_recorded();
2527 assert(!rp->discovery_enabled(), "Post condition");
2528 }
2530 if (has_overflown()) {
2531 // We cannot trust g1_is_alive if the marking stack overflowed.
2532 return;
2533 }
2535 g1h->unlink_string_and_symbol_table(&g1_is_alive,
2536 /* process_strings */ false, // currently strings are always roots
2537 /* process_symbols */ true);
2538 }
2540 void ConcurrentMark::swapMarkBitMaps() {
2541 CMBitMapRO* temp = _prevMarkBitMap;
2542 _prevMarkBitMap = (CMBitMapRO*)_nextMarkBitMap;
2543 _nextMarkBitMap = (CMBitMap*) temp;
2544 }
2546 class CMRemarkTask: public AbstractGangTask {
2547 private:
2548 ConcurrentMark* _cm;
2549 bool _is_serial;
2550 public:
2551 void work(uint worker_id) {
2552 // Since all available tasks are actually started, we should
2553 // only proceed if we're supposed to be active.
2554 if (worker_id < _cm->active_tasks()) {
2555 CMTask* task = _cm->task(worker_id);
2556 task->record_start_time();
2557 do {
2558 task->do_marking_step(1000000000.0 /* something very large */,
2559 true /* do_termination */,
2560 _is_serial);
2561 } while (task->has_aborted() && !_cm->has_overflown());
2562 // If we overflow, then we do not want to restart. We instead
2563 // want to abort remark and do concurrent marking again.
2564 task->record_end_time();
2565 }
2566 }
2568 CMRemarkTask(ConcurrentMark* cm, int active_workers, bool is_serial) :
2569 AbstractGangTask("Par Remark"), _cm(cm), _is_serial(is_serial) {
2570 _cm->terminator()->reset_for_reuse(active_workers);
2571 }
2572 };
2574 void ConcurrentMark::checkpointRootsFinalWork() {
2575 ResourceMark rm;
2576 HandleMark hm;
2577 G1CollectedHeap* g1h = G1CollectedHeap::heap();
2579 g1h->ensure_parsability(false);
2581 if (G1CollectedHeap::use_parallel_gc_threads()) {
2582 G1CollectedHeap::StrongRootsScope srs(g1h);
2583 // this is remark, so we'll use up all active threads
2584 uint active_workers = g1h->workers()->active_workers();
2585 if (active_workers == 0) {
2586 assert(active_workers > 0, "Should have been set earlier");
2587 active_workers = (uint) ParallelGCThreads;
2588 g1h->workers()->set_active_workers(active_workers);
2589 }
2590 set_concurrency_and_phase(active_workers, false /* concurrent */);
2591 // Leave _parallel_marking_threads at its
2592 // value originally calculated in the ConcurrentMark
2593 // constructor and pass values of the active workers
2594 // through the gang in the task.
2596 CMRemarkTask remarkTask(this, active_workers, false /* is_serial */);
2597 // We will start all available threads, even if we decide that the
2598 // active_workers will be fewer. The extra ones will just bail out
2599 // immediately.
2600 g1h->set_par_threads(active_workers);
2601 g1h->workers()->run_task(&remarkTask);
2602 g1h->set_par_threads(0);
2603 } else {
2604 G1CollectedHeap::StrongRootsScope srs(g1h);
2605 uint active_workers = 1;
2606 set_concurrency_and_phase(active_workers, false /* concurrent */);
2608 // Note - if there's no work gang then the VMThread will be
2609 // the thread to execute the remark - serially. We have
2610 // to pass true for the is_serial parameter so that
2611 // CMTask::do_marking_step() doesn't enter the sync
2612 // barriers in the event of an overflow. Doing so will
2613 // cause an assert that the current thread is not a
2614 // concurrent GC thread.
2615 CMRemarkTask remarkTask(this, active_workers, true /* is_serial*/);
2616 remarkTask.work(0);
2617 }
2618 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2619 guarantee(has_overflown() ||
2620 satb_mq_set.completed_buffers_num() == 0,
2621 err_msg("Invariant: has_overflown = %s, num buffers = %d",
2622 BOOL_TO_STR(has_overflown()),
2623 satb_mq_set.completed_buffers_num()));
2625 print_stats();
2626 }
2628 #ifndef PRODUCT
2630 class PrintReachableOopClosure: public OopClosure {
2631 private:
2632 G1CollectedHeap* _g1h;
2633 outputStream* _out;
2634 VerifyOption _vo;
2635 bool _all;
2637 public:
2638 PrintReachableOopClosure(outputStream* out,
2639 VerifyOption vo,
2640 bool all) :
2641 _g1h(G1CollectedHeap::heap()),
2642 _out(out), _vo(vo), _all(all) { }
2644 void do_oop(narrowOop* p) { do_oop_work(p); }
2645 void do_oop( oop* p) { do_oop_work(p); }
2647 template <class T> void do_oop_work(T* p) {
2648 oop obj = oopDesc::load_decode_heap_oop(p);
2649 const char* str = NULL;
2650 const char* str2 = "";
2652 if (obj == NULL) {
2653 str = "";
2654 } else if (!_g1h->is_in_g1_reserved(obj)) {
2655 str = " O";
2656 } else {
2657 HeapRegion* hr = _g1h->heap_region_containing(obj);
2658 guarantee(hr != NULL, "invariant");
2659 bool over_tams = _g1h->allocated_since_marking(obj, hr, _vo);
2660 bool marked = _g1h->is_marked(obj, _vo);
2662 if (over_tams) {
2663 str = " >";
2664 if (marked) {
2665 str2 = " AND MARKED";
2666 }
2667 } else if (marked) {
2668 str = " M";
2669 } else {
2670 str = " NOT";
2671 }
2672 }
2674 _out->print_cr(" "PTR_FORMAT": "PTR_FORMAT"%s%s",
2675 p, (void*) obj, str, str2);
2676 }
2677 };
2679 class PrintReachableObjectClosure : public ObjectClosure {
2680 private:
2681 G1CollectedHeap* _g1h;
2682 outputStream* _out;
2683 VerifyOption _vo;
2684 bool _all;
2685 HeapRegion* _hr;
2687 public:
2688 PrintReachableObjectClosure(outputStream* out,
2689 VerifyOption vo,
2690 bool all,
2691 HeapRegion* hr) :
2692 _g1h(G1CollectedHeap::heap()),
2693 _out(out), _vo(vo), _all(all), _hr(hr) { }
2695 void do_object(oop o) {
2696 bool over_tams = _g1h->allocated_since_marking(o, _hr, _vo);
2697 bool marked = _g1h->is_marked(o, _vo);
2698 bool print_it = _all || over_tams || marked;
2700 if (print_it) {
2701 _out->print_cr(" "PTR_FORMAT"%s",
2702 (void *)o, (over_tams) ? " >" : (marked) ? " M" : "");
2703 PrintReachableOopClosure oopCl(_out, _vo, _all);
2704 o->oop_iterate_no_header(&oopCl);
2705 }
2706 }
2707 };
2709 class PrintReachableRegionClosure : public HeapRegionClosure {
2710 private:
2711 G1CollectedHeap* _g1h;
2712 outputStream* _out;
2713 VerifyOption _vo;
2714 bool _all;
2716 public:
2717 bool doHeapRegion(HeapRegion* hr) {
2718 HeapWord* b = hr->bottom();
2719 HeapWord* e = hr->end();
2720 HeapWord* t = hr->top();
2721 HeapWord* p = _g1h->top_at_mark_start(hr, _vo);
2722 _out->print_cr("** ["PTR_FORMAT", "PTR_FORMAT"] top: "PTR_FORMAT" "
2723 "TAMS: "PTR_FORMAT, b, e, t, p);
2724 _out->cr();
2726 HeapWord* from = b;
2727 HeapWord* to = t;
2729 if (to > from) {
2730 _out->print_cr("Objects in ["PTR_FORMAT", "PTR_FORMAT"]", from, to);
2731 _out->cr();
2732 PrintReachableObjectClosure ocl(_out, _vo, _all, hr);
2733 hr->object_iterate_mem_careful(MemRegion(from, to), &ocl);
2734 _out->cr();
2735 }
2737 return false;
2738 }
2740 PrintReachableRegionClosure(outputStream* out,
2741 VerifyOption vo,
2742 bool all) :
2743 _g1h(G1CollectedHeap::heap()), _out(out), _vo(vo), _all(all) { }
2744 };
2746 void ConcurrentMark::print_reachable(const char* str,
2747 VerifyOption vo,
2748 bool all) {
2749 gclog_or_tty->cr();
2750 gclog_or_tty->print_cr("== Doing heap dump... ");
2752 if (G1PrintReachableBaseFile == NULL) {
2753 gclog_or_tty->print_cr(" #### error: no base file defined");
2754 return;
2755 }
2757 if (strlen(G1PrintReachableBaseFile) + 1 + strlen(str) >
2758 (JVM_MAXPATHLEN - 1)) {
2759 gclog_or_tty->print_cr(" #### error: file name too long");
2760 return;
2761 }
2763 char file_name[JVM_MAXPATHLEN];
2764 sprintf(file_name, "%s.%s", G1PrintReachableBaseFile, str);
2765 gclog_or_tty->print_cr(" dumping to file %s", file_name);
2767 fileStream fout(file_name);
2768 if (!fout.is_open()) {
2769 gclog_or_tty->print_cr(" #### error: could not open file");
2770 return;
2771 }
2773 outputStream* out = &fout;
2774 out->print_cr("-- USING %s", _g1h->top_at_mark_start_str(vo));
2775 out->cr();
2777 out->print_cr("--- ITERATING OVER REGIONS");
2778 out->cr();
2779 PrintReachableRegionClosure rcl(out, vo, all);
2780 _g1h->heap_region_iterate(&rcl);
2781 out->cr();
2783 gclog_or_tty->print_cr(" done");
2784 gclog_or_tty->flush();
2785 }
2787 #endif // PRODUCT
2789 void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
2790 // Note we are overriding the read-only view of the prev map here, via
2791 // the cast.
2792 ((CMBitMap*)_prevMarkBitMap)->clearRange(mr);
2793 }
2795 void ConcurrentMark::clearRangeNextBitmap(MemRegion mr) {
2796 _nextMarkBitMap->clearRange(mr);
2797 }
2799 void ConcurrentMark::clearRangeBothBitmaps(MemRegion mr) {
2800 clearRangePrevBitmap(mr);
2801 clearRangeNextBitmap(mr);
2802 }
2804 HeapRegion*
2805 ConcurrentMark::claim_region(uint worker_id) {
2806 // "checkpoint" the finger
2807 HeapWord* finger = _finger;
2809 // _heap_end will not change underneath our feet; it only changes at
2810 // yield points.
2811 while (finger < _heap_end) {
2812 assert(_g1h->is_in_g1_reserved(finger), "invariant");
2814 // Note on how this code handles humongous regions. In the
2815 // normal case the finger will reach the start of a "starts
2816 // humongous" (SH) region. Its end will either be the end of the
2817 // last "continues humongous" (CH) region in the sequence, or the
2818 // standard end of the SH region (if the SH is the only region in
2819 // the sequence). That way claim_region() will skip over the CH
2820 // regions. However, there is a subtle race between a CM thread
2821 // executing this method and a mutator thread doing a humongous
2822 // object allocation. The two are not mutually exclusive as the CM
2823 // thread does not need to hold the Heap_lock when it gets
2824 // here. So there is a chance that claim_region() will come across
2825 // a free region that's in the process of becoming a SH or a CH
2826 // region. In the former case, it will either
2827 // a) Miss the update to the region's end, in which case it will
2828 // visit every subsequent CH region, will find their bitmaps
2829 // empty, and do nothing, or
2830 // b) Observe the update of the region's end (in which case
2831 // it will skip the subsequent CH regions).
2832 // If it comes across a region that suddenly becomes CH, the
2833 // scenario will be similar to b). So, the race between
2834 // claim_region() and a humongous object allocation might force us
2835 // to do a bit of unnecessary work (due to some unnecessary bitmap
2836 // iterations) but it should not introduce any correctness issues.
2837 HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger);
2838 HeapWord* bottom = curr_region->bottom();
2839 HeapWord* end = curr_region->end();
2840 HeapWord* limit = curr_region->next_top_at_mark_start();
2842 if (verbose_low()) {
2843 gclog_or_tty->print_cr("[%u] curr_region = "PTR_FORMAT" "
2844 "["PTR_FORMAT", "PTR_FORMAT"), "
2845 "limit = "PTR_FORMAT,
2846 worker_id, curr_region, bottom, end, limit);
2847 }
2849 // Is the gap between reading the finger and doing the CAS too long?
2850 HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
2851 if (res == finger) {
2852 // we succeeded
2854 // notice that _finger == end cannot be guaranteed here since
2855 // someone else might have moved the finger even further
2856 assert(_finger >= end, "the finger should have moved forward");
2858 if (verbose_low()) {
2859 gclog_or_tty->print_cr("[%u] we were successful with region = "
2860 PTR_FORMAT, worker_id, curr_region);
2861 }
2863 if (limit > bottom) {
2864 if (verbose_low()) {
2865 gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is not empty, "
2866 "returning it ", worker_id, curr_region);
2867 }
2868 return curr_region;
2869 } else {
2870 assert(limit == bottom,
2871 "the region limit should be at bottom");
2872 if (verbose_low()) {
2873 gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is empty, "
2874 "returning NULL", worker_id, curr_region);
2875 }
2876 // we return NULL and the caller should try calling
2877 // claim_region() again.
2878 return NULL;
2879 }
2880 } else {
2881 assert(_finger > finger, "the finger should have moved forward");
2882 if (verbose_low()) {
2883 gclog_or_tty->print_cr("[%u] somebody else moved the finger, "
2884 "global finger = "PTR_FORMAT", "
2885 "our finger = "PTR_FORMAT,
2886 worker_id, _finger, finger);
2887 }
2889 // read it again
2890 finger = _finger;
2891 }
2892 }
2894 return NULL;
2895 }
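// [Editor's note] claim_region() above is a classic lock-free claim loop:
// read the shared finger, compute the end of the region it points at, and
// CAS the finger forward; whoever wins the CAS owns the region, losers
// re-read and retry. A minimal standalone model (not HotSpot code) using a
// fixed region size for simplicity:

#include <atomic>
#include <cstdint>

static const uintptr_t kRegionBytes = uintptr_t(1) << 20; // illustrative size

// Returns the base of a claimed region, or 0 when `finger` reaches `end`.
static uintptr_t claim(std::atomic<uintptr_t>& finger, uintptr_t end) {
  uintptr_t cur = finger.load();
  while (cur < end) {
    uintptr_t next = cur + kRegionBytes;
    // On success we own [cur, next); on failure `cur` is refreshed with the
    // value some other worker installed, and we retry from there.
    if (finger.compare_exchange_weak(cur, next)) {
      return cur;
    }
  }
  return 0;
}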
2897 #ifndef PRODUCT
2898 enum VerifyNoCSetOopsPhase {
2899 VerifyNoCSetOopsStack,
2900 VerifyNoCSetOopsQueues,
2901 VerifyNoCSetOopsSATBCompleted,
2902 VerifyNoCSetOopsSATBThread
2903 };
2905 class VerifyNoCSetOopsClosure : public OopClosure, public ObjectClosure {
2906 private:
2907 G1CollectedHeap* _g1h;
2908 VerifyNoCSetOopsPhase _phase;
2909 int _info;
2911 const char* phase_str() {
2912 switch (_phase) {
2913 case VerifyNoCSetOopsStack: return "Stack";
2914 case VerifyNoCSetOopsQueues: return "Queue";
2915 case VerifyNoCSetOopsSATBCompleted: return "Completed SATB Buffers";
2916 case VerifyNoCSetOopsSATBThread: return "Thread SATB Buffers";
2917 default: ShouldNotReachHere();
2918 }
2919 return NULL;
2920 }
2922 void do_object_work(oop obj) {
2923 guarantee(!_g1h->obj_in_cs(obj),
2924 err_msg("obj: "PTR_FORMAT" in CSet, phase: %s, info: %d",
2925 (void*) obj, phase_str(), _info));
2926 }
2928 public:
2929 VerifyNoCSetOopsClosure() : _g1h(G1CollectedHeap::heap()) { }
2931 void set_phase(VerifyNoCSetOopsPhase phase, int info = -1) {
2932 _phase = phase;
2933 _info = info;
2934 }
2936 virtual void do_oop(oop* p) {
2937 oop obj = oopDesc::load_decode_heap_oop(p);
2938 do_object_work(obj);
2939 }
2941 virtual void do_oop(narrowOop* p) {
2942 // We should not come across narrow oops while scanning marking
2943 // stacks and SATB buffers.
2944 ShouldNotReachHere();
2945 }
2947 virtual void do_object(oop obj) {
2948 do_object_work(obj);
2949 }
2950 };
2952 void ConcurrentMark::verify_no_cset_oops(bool verify_stacks,
2953 bool verify_enqueued_buffers,
2954 bool verify_thread_buffers,
2955 bool verify_fingers) {
2956 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
2957 if (!G1CollectedHeap::heap()->mark_in_progress()) {
2958 return;
2959 }
2961 VerifyNoCSetOopsClosure cl;
2963 if (verify_stacks) {
2964 // Verify entries on the global mark stack
2965 cl.set_phase(VerifyNoCSetOopsStack);
2966 _markStack.oops_do(&cl);
2968 // Verify entries on the task queues
2969 for (uint i = 0; i < _max_worker_id; i += 1) {
2970 cl.set_phase(VerifyNoCSetOopsQueues, i);
2971 CMTaskQueue* queue = _task_queues->queue(i);
2972 queue->oops_do(&cl);
2973 }
2974 }
2976 SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
2978 // Verify entries on the enqueued SATB buffers
2979 if (verify_enqueued_buffers) {
2980 cl.set_phase(VerifyNoCSetOopsSATBCompleted);
2981 satb_qs.iterate_completed_buffers_read_only(&cl);
2982 }
2984 // Verify entries on the per-thread SATB buffers
2985 if (verify_thread_buffers) {
2986 cl.set_phase(VerifyNoCSetOopsSATBThread);
2987 satb_qs.iterate_thread_buffers_read_only(&cl);
2988 }
2990 if (verify_fingers) {
2991 // Verify the global finger
2992 HeapWord* global_finger = finger();
2993 if (global_finger != NULL && global_finger < _heap_end) {
2994 // The global finger always points to a heap region boundary. We
2995 // use heap_region_containing_raw() to get the containing region
2996 // given that the global finger could be pointing to a free region
2997 // which subsequently becomes a "continues humongous" region. If that
2998 // happens, heap_region_containing() will return the bottom of the
2999 // corresponding "starts humongous" region and the check below will
3000 // no longer hold.
3001 HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger);
3002 guarantee(global_finger == global_hr->bottom(),
3003 err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT,
3004 global_finger, HR_FORMAT_PARAMS(global_hr)));
3005 }
3007 // Verify the task fingers
3008 assert(parallel_marking_threads() <= _max_worker_id, "sanity");
3009 for (int i = 0; i < (int) parallel_marking_threads(); i += 1) {
3010 CMTask* task = _tasks[i];
3011 HeapWord* task_finger = task->finger();
3012 if (task_finger != NULL && task_finger < _heap_end) {
3013 // See above note on the global finger verification.
3014 HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger);
3015 guarantee(task_finger == task_hr->bottom() ||
3016 !task_hr->in_collection_set(),
3017 err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT,
3018 task_finger, HR_FORMAT_PARAMS(task_hr)));
3019 }
3020 }
3021 }
3022 }
3023 #endif // PRODUCT
3025 // Aggregate the counting data that was constructed concurrently
3026 // with marking.
3027 class AggregateCountDataHRClosure: public HeapRegionClosure {
3028 G1CollectedHeap* _g1h;
3029 ConcurrentMark* _cm;
3030 CardTableModRefBS* _ct_bs;
3031 BitMap* _cm_card_bm;
3032 uint _max_worker_id;
3034 public:
3035 AggregateCountDataHRClosure(G1CollectedHeap* g1h,
3036 BitMap* cm_card_bm,
3037 uint max_worker_id) :
3038 _g1h(g1h), _cm(g1h->concurrent_mark()),
3039 _ct_bs((CardTableModRefBS*) (g1h->barrier_set())),
3040 _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { }
3042 bool doHeapRegion(HeapRegion* hr) {
3043 if (hr->continuesHumongous()) {
3044 // We will ignore these here and process them when their
3045 // associated "starts humongous" region is processed.
3046 // Note that we cannot rely on their associated
3047 // "starts humongous" region to have their bit set to 1
3048 // since, due to the region chunking in the parallel region
3049 // iteration, a "continues humongous" region might be visited
3050 // before its associated "starts humongous".
3051 return false;
3052 }
3054 HeapWord* start = hr->bottom();
3055 HeapWord* limit = hr->next_top_at_mark_start();
3056 HeapWord* end = hr->end();
3058 assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(),
3059 err_msg("Preconditions not met - "
3060 "start: "PTR_FORMAT", limit: "PTR_FORMAT", "
3061 "top: "PTR_FORMAT", end: "PTR_FORMAT,
3062 start, limit, hr->top(), hr->end()));
3064 assert(hr->next_marked_bytes() == 0, "Precondition");
3066 if (start == limit) {
3067 // NTAMS of this region has not been set so nothing to do.
3068 return false;
3069 }
3071 // 'start' should be in the heap.
3072 assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity");
3073 // 'end' *may* be just beyond the end of the heap (if hr is the last region)
3074 assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity");
3076 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
3077 BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit);
3078 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(end);
3080 // If ntams is not card aligned then we bump the card bitmap index
3081 // for limit so that we get all the cards spanned by
3082 // the object ending at ntams.
3083 // Note: if this is the last region in the heap then ntams
3084 // could actually be just beyond the end of the heap;
3085 // limit_idx will then correspond to a (non-existent) card
3086 // that is also outside the heap.
3087 if (_g1h->is_in_g1_reserved(limit) && !_ct_bs->is_card_aligned(limit)) {
3088 limit_idx += 1;
3089 }
3091 assert(limit_idx <= end_idx, "or else use atomics");
3093 // Aggregate the "stripe" in the count data associated with hr.
3094 uint hrs_index = hr->hrs_index();
3095 size_t marked_bytes = 0;
3097 for (uint i = 0; i < _max_worker_id; i += 1) {
3098 size_t* marked_bytes_array = _cm->count_marked_bytes_array_for(i);
3099 BitMap* task_card_bm = _cm->count_card_bitmap_for(i);
3101 // Fetch the marked_bytes in this region for task i and
3102 // add it to the running total for this region.
3103 marked_bytes += marked_bytes_array[hrs_index];
3105 // Now union this task's card bitmap, over [start_idx, limit_idx),
3106 // into the global card bitmap.
3107 BitMap::idx_t scan_idx = task_card_bm->get_next_one_offset(start_idx, limit_idx);
3109 while (scan_idx < limit_idx) {
3110 assert(task_card_bm->at(scan_idx) == true, "should be");
3111 _cm_card_bm->set_bit(scan_idx);
3112 assert(_cm_card_bm->at(scan_idx) == true, "should be");
3114 // BitMap::get_next_one_offset() can handle the case when
3115 // its left_offset parameter is greater than its right_offset
3116 // parameter. It does, however, have an early exit if
3117 // left_offset == right_offset. So let's limit the value
3118 // passed in for left offset here.
3119 BitMap::idx_t next_idx = MIN2(scan_idx + 1, limit_idx);
3120 scan_idx = task_card_bm->get_next_one_offset(next_idx, limit_idx);
3121 }
3122 }
3124 // Update the marked bytes for this region.
3125 hr->add_to_marked_bytes(marked_bytes);
3127 // Next heap region
3128 return false;
3129 }
3130 };
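// [Editor's note] The aggregation loop above ORs each worker's card bitmap
// into the global one by jumping from set bit to set bit instead of testing
// every index. A standalone sketch (not HotSpot code) with std::vector<bool>;
// a real implementation would use word-wise OR for speed:

#include <cstddef>
#include <vector>

// Returns the index of the next set bit in [from, limit), or limit if none.
static size_t next_one(const std::vector<bool>& bm, size_t from, size_t limit) {
  while (from < limit && !bm[from]) ++from;
  return from;
}

static void union_range(const std::vector<bool>& task_bm,
                        std::vector<bool>& global_bm,
                        size_t start_idx, size_t limit_idx) {
  for (size_t i = next_one(task_bm, start_idx, limit_idx);
       i < limit_idx;
       i = next_one(task_bm, i + 1, limit_idx)) {
    global_bm[i] = true; // set only the bits the task actually marked
  }
}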
3132 class G1AggregateCountDataTask: public AbstractGangTask {
3133 protected:
3134 G1CollectedHeap* _g1h;
3135 ConcurrentMark* _cm;
3136 BitMap* _cm_card_bm;
3137 uint _max_worker_id;
3138 int _active_workers;
3140 public:
3141 G1AggregateCountDataTask(G1CollectedHeap* g1h,
3142 ConcurrentMark* cm,
3143 BitMap* cm_card_bm,
3144 uint max_worker_id,
3145 int n_workers) :
3146 AbstractGangTask("Count Aggregation"),
3147 _g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm),
3148 _max_worker_id(max_worker_id),
3149 _active_workers(n_workers) { }
3151 void work(uint worker_id) {
3152 AggregateCountDataHRClosure cl(_g1h, _cm_card_bm, _max_worker_id);
3154 if (G1CollectedHeap::use_parallel_gc_threads()) {
3155 _g1h->heap_region_par_iterate_chunked(&cl, worker_id,
3156 _active_workers,
3157 HeapRegion::AggregateCountClaimValue);
3158 } else {
3159 _g1h->heap_region_iterate(&cl);
3160 }
3161 }
3162 };
3165 void ConcurrentMark::aggregate_count_data() {
3166 int n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
3167 _g1h->workers()->active_workers() :
3168 1);
3170 G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm,
3171 _max_worker_id, n_workers);
3173 if (G1CollectedHeap::use_parallel_gc_threads()) {
3174 assert(_g1h->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
3175 "sanity check");
3176 _g1h->set_par_threads(n_workers);
3177 _g1h->workers()->run_task(&g1_par_agg_task);
3178 _g1h->set_par_threads(0);
3180 assert(_g1h->check_heap_region_claim_values(HeapRegion::AggregateCountClaimValue),
3181 "sanity check");
3182 _g1h->reset_heap_region_claim_values();
3183 } else {
3184 g1_par_agg_task.work(0);
3185 }
3186 }
3188 // Clear the per-worker arrays used to store the per-region counting data
3189 void ConcurrentMark::clear_all_count_data() {
3190 // Clear the global card bitmap - it will be filled during
3191 // liveness count aggregation (during remark) and the
3192 // final counting task.
3193 _card_bm.clear();
3195 // Clear the global region bitmap - it will be filled as part
3196 // of the final counting task.
3197 _region_bm.clear();
3199 uint max_regions = _g1h->max_regions();
3200 assert(_max_worker_id > 0, "uninitialized");
3202 for (uint i = 0; i < _max_worker_id; i += 1) {
3203 BitMap* task_card_bm = count_card_bitmap_for(i);
3204 size_t* marked_bytes_array = count_marked_bytes_array_for(i);
3206 assert(task_card_bm->size() == _card_bm.size(), "size mismatch");
3207 assert(marked_bytes_array != NULL, "uninitialized");
3209 memset(marked_bytes_array, 0, (size_t) max_regions * sizeof(size_t));
3210 task_card_bm->clear();
3211 }
3212 }
3214 void ConcurrentMark::print_stats() {
3215 if (verbose_stats()) {
3216 gclog_or_tty->print_cr("---------------------------------------------------------------------");
3217 for (size_t i = 0; i < _active_tasks; ++i) {
3218 _tasks[i]->print_stats();
3219 gclog_or_tty->print_cr("---------------------------------------------------------------------");
3220 }
3221 }
3222 }
3224 // Abandon the current marking iteration due to a Full GC.
3225 void ConcurrentMark::abort() {
3226 // Clear all marks to force the marking thread to do nothing.
3227 _nextMarkBitMap->clearAll();
3228 // Clear the liveness counting data
3229 clear_all_count_data();
3230 // Empty mark stack
3231 reset_marking_state();
3232 for (uint i = 0; i < _max_worker_id; ++i) {
3233 _tasks[i]->clear_region_fields();
3234 }
3235 _has_aborted = true;
3237 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3238 satb_mq_set.abandon_partial_marking();
3239 // This can be called either during or outside marking; we'll read
3240 // the expected_active value from the SATB queue set.
3241 satb_mq_set.set_active_all_threads(
3242 false, /* new active value */
3243 satb_mq_set.is_active() /* expected_active */);
3245 _g1h->trace_heap_after_concurrent_cycle();
3246 _g1h->register_concurrent_cycle_end();
3247 }
3249 static void print_ms_time_info(const char* prefix, const char* name,
3250 NumberSeq& ns) {
3251 gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
3252 prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
3253 if (ns.num() > 0) {
3254 gclog_or_tty->print_cr("%s [std. dev = %8.2f ms, max = %8.2f ms]",
3255 prefix, ns.sd(), ns.maximum());
3256 }
3257 }
3259 void ConcurrentMark::print_summary_info() {
3260 gclog_or_tty->print_cr(" Concurrent marking:");
3261 print_ms_time_info(" ", "init marks", _init_times);
3262 print_ms_time_info(" ", "remarks", _remark_times);
3263 {
3264 print_ms_time_info(" ", "final marks", _remark_mark_times);
3265 print_ms_time_info(" ", "weak refs", _remark_weak_ref_times);
3267 }
3268 print_ms_time_info(" ", "cleanups", _cleanup_times);
3269 gclog_or_tty->print_cr(" Final counting total time = %8.2f s (avg = %8.2f ms).",
3270 _total_counting_time,
3271 (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 /
3272 (double)_cleanup_times.num()
3273 : 0.0));
3274 if (G1ScrubRemSets) {
3275 gclog_or_tty->print_cr(" RS scrub total time = %8.2f s (avg = %8.2f ms).",
3276 _total_rs_scrub_time,
3277 (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 /
3278 (double)_cleanup_times.num()
3279 : 0.0));
3280 }
3281 gclog_or_tty->print_cr(" Total stop_world time = %8.2f s.",
3282 (_init_times.sum() + _remark_times.sum() +
3283 _cleanup_times.sum())/1000.0);
3284 gclog_or_tty->print_cr(" Total concurrent time = %8.2f s "
3285 "(%8.2f s marking).",
3286 cmThread()->vtime_accum(),
3287 cmThread()->vtime_mark_accum());
3288 }
3290 void ConcurrentMark::print_worker_threads_on(outputStream* st) const {
3291 if (use_parallel_marking_threads()) {
3292 _parallel_workers->print_worker_threads_on(st);
3293 }
3294 }
3296 void ConcurrentMark::print_on_error(outputStream* st) const {
3297 st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT,
3298 _prevMarkBitMap, _nextMarkBitMap);
3299 _prevMarkBitMap->print_on_error(st, " Prev Bits: ");
3300 _nextMarkBitMap->print_on_error(st, " Next Bits: ");
3301 }
3303 // We take a break if someone is trying to stop the world.
3304 bool ConcurrentMark::do_yield_check(uint worker_id) {
3305 if (should_yield()) {
3306 if (worker_id == 0) {
3307 _g1h->g1_policy()->record_concurrent_pause();
3308 }
3309 cmThread()->yield();
3310 return true;
3311 } else {
3312 return false;
3313 }
3314 }
3316 bool ConcurrentMark::should_yield() {
3317 return cmThread()->should_yield();
3318 }
3320 bool ConcurrentMark::containing_card_is_marked(void* p) {
3321 size_t offset = pointer_delta(p, _g1h->reserved_region().start(), 1);
3322 return _card_bm.at(offset >> CardTableModRefBS::card_shift);
3323 }
3325 bool ConcurrentMark::containing_cards_are_marked(void* start,
3326 void* last) {
3327 return containing_card_is_marked(start) &&
3328 containing_card_is_marked(last);
3329 }
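// For illustration (assuming the usual 512-byte cards, i.e.
// CardTableModRefBS::card_shift == 9, an assumption rather than
// something this file guarantees): a pointer 4608 bytes past the start
// of the reserved region gives offset 4608, and 4608 >> 9 == 9, so
// containing_card_is_marked() tests bit 9 of _card_bm.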
3331 #ifndef PRODUCT
3332 // for debugging purposes
3333 void ConcurrentMark::print_finger() {
3334 gclog_or_tty->print_cr("heap ["PTR_FORMAT", "PTR_FORMAT"), global finger = "PTR_FORMAT,
3335 _heap_start, _heap_end, _finger);
3336 for (uint i = 0; i < _max_worker_id; ++i) {
3337 gclog_or_tty->print(" %u: "PTR_FORMAT, i, _tasks[i]->finger());
3338 }
3339 gclog_or_tty->print_cr("");
3340 }
3341 #endif
3343 void CMTask::scan_object(oop obj) {
3344 assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant");
3346 if (_cm->verbose_high()) {
3347 gclog_or_tty->print_cr("[%u] we're scanning object "PTR_FORMAT,
3348 _worker_id, (void*) obj);
3349 }
3351 size_t obj_size = obj->size();
3352 _words_scanned += obj_size;
3354 obj->oop_iterate(_cm_oop_closure);
3355 statsOnly( ++_objs_scanned );
3356 check_limits();
3357 }
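// check_limits(), called above, lives in the companion .inline.hpp. A
// sketch of what that guard presumably looks like, consistent with the
// assert in reached_limit() further down (an assumption, not a quote):
//
//   inline void CMTask::check_limits() {
//     if (_words_scanned >= _words_scanned_limit ||
//         _refs_reached >= _refs_reached_limit) {
//       reached_limit();  // which in turn calls regular_clock_call()
//     }
//   }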
3359 // Closure for iteration over bitmaps
3360 class CMBitMapClosure : public BitMapClosure {
3361 private:
3362 // the bitmap that is being iterated over
3363 CMBitMap* _nextMarkBitMap;
3364 ConcurrentMark* _cm;
3365 CMTask* _task;
3367 public:
3368 CMBitMapClosure(CMTask *task, ConcurrentMark* cm, CMBitMap* nextMarkBitMap) :
3369 _task(task), _cm(cm), _nextMarkBitMap(nextMarkBitMap) { }
3371 bool do_bit(size_t offset) {
3372 HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset);
3373 assert(_nextMarkBitMap->isMarked(addr), "invariant");
3374 assert( addr < _cm->finger(), "invariant");
3376 statsOnly( _task->increase_objs_found_on_bitmap() );
3377 assert(addr >= _task->finger(), "invariant");
3379 // We move that task's local finger along.
3380 _task->move_finger_to(addr);
3382 _task->scan_object(oop(addr));
3383 // we only partially drain the local queue and global stack
3384 _task->drain_local_queue(true);
3385 _task->drain_global_stack(true);
3387 // if the has_aborted flag has been raised, we need to bail out of
3388 // the iteration
3389 return !_task->has_aborted();
3390 }
3391 };
3393 // Closure for iterating over objects, currently only used for
3394 // processing SATB buffers.
3395 class CMObjectClosure : public ObjectClosure {
3396 private:
3397 CMTask* _task;
3399 public:
3400 void do_object(oop obj) {
3401 _task->deal_with_reference(obj);
3402 }
3404 CMObjectClosure(CMTask* task) : _task(task) { }
3405 };
3407 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
3408 ConcurrentMark* cm,
3409 CMTask* task)
3410 : _g1h(g1h), _cm(cm), _task(task) {
3411 assert(_ref_processor == NULL, "should be initialized to NULL");
3413 if (G1UseConcMarkReferenceProcessing) {
3414 _ref_processor = g1h->ref_processor_cm();
3415 assert(_ref_processor != NULL, "should not be NULL");
3416 }
3417 }
3419 void CMTask::setup_for_region(HeapRegion* hr) {
3420 // Separated the asserts so that we know which one fires.
3421 assert(hr != NULL,
3422 "claim_region() should have filtered out continues humongous regions");
3423 assert(!hr->continuesHumongous(),
3424 "claim_region() should have filtered out continues humongous regions");
3426 if (_cm->verbose_low()) {
3427 gclog_or_tty->print_cr("[%u] setting up for region "PTR_FORMAT,
3428 _worker_id, hr);
3429 }
3431 _curr_region = hr;
3432 _finger = hr->bottom();
3433 update_region_limit();
3434 }
3436 void CMTask::update_region_limit() {
3437 HeapRegion* hr = _curr_region;
3438 HeapWord* bottom = hr->bottom();
3439 HeapWord* limit = hr->next_top_at_mark_start();
3441 if (limit == bottom) {
3442 if (_cm->verbose_low()) {
3443 gclog_or_tty->print_cr("[%u] found an empty region "
3444 "["PTR_FORMAT", "PTR_FORMAT")",
3445 _worker_id, bottom, limit);
3446 }
3447 // The region was collected underneath our feet.
3448 // We set the finger to bottom to ensure that the bitmap
3449 // iteration that will follow this will not do anything.
3450 // (this is not a condition that holds when we set the region up,
3451 // as the region is not supposed to be empty in the first place)
3452 _finger = bottom;
3453 } else if (limit >= _region_limit) {
3454 assert(limit >= _finger, "peace of mind");
3455 } else {
3456 assert(limit < _region_limit, "only way to get here");
3457 // This can happen under some pretty unusual circumstances. An
3458 // evacuation pause empties the region underneath our feet (NTAMS
3459 // at bottom). We then do some allocation in the region (NTAMS
3460 // stays at bottom), followed by the region being used as a GC
3461 // alloc region (NTAMS will move to top() and the objects
3462 // originally below it will be grayed). All objects now marked in
3463 // the region are explicitly grayed, if below the global finger,
3464 // and in fact we do not need to scan anything else. So, we simply
3465 // set _finger to be limit to ensure that the bitmap iteration
3466 // doesn't do anything.
3467 _finger = limit;
3468 }
3470 _region_limit = limit;
3471 }
3473 void CMTask::giveup_current_region() {
3474 assert(_curr_region != NULL, "invariant");
3475 if (_cm->verbose_low()) {
3476 gclog_or_tty->print_cr("[%u] giving up region "PTR_FORMAT,
3477 _worker_id, _curr_region);
3478 }
3479 clear_region_fields();
3480 }
3482 void CMTask::clear_region_fields() {
3483 // Set these three fields to values that indicate that we're not
3484 // holding on to a region.
3485 _curr_region = NULL;
3486 _finger = NULL;
3487 _region_limit = NULL;
3488 }
3490 void CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
3491 if (cm_oop_closure == NULL) {
3492 assert(_cm_oop_closure != NULL, "invariant");
3493 } else {
3494 assert(_cm_oop_closure == NULL, "invariant");
3495 }
3496 _cm_oop_closure = cm_oop_closure;
3497 }
3499 void CMTask::reset(CMBitMap* nextMarkBitMap) {
3500 guarantee(nextMarkBitMap != NULL, "invariant");
3502 if (_cm->verbose_low()) {
3503 gclog_or_tty->print_cr("[%u] resetting", _worker_id);
3504 }
3506 _nextMarkBitMap = nextMarkBitMap;
3507 clear_region_fields();
3509 _calls = 0;
3510 _elapsed_time_ms = 0.0;
3511 _termination_time_ms = 0.0;
3512 _termination_start_time_ms = 0.0;
3514 #if _MARKING_STATS_
3515 _local_pushes = 0;
3516 _local_pops = 0;
3517 _local_max_size = 0;
3518 _objs_scanned = 0;
3519 _global_pushes = 0;
3520 _global_pops = 0;
3521 _global_max_size = 0;
3522 _global_transfers_to = 0;
3523 _global_transfers_from = 0;
3524 _regions_claimed = 0;
3525 _objs_found_on_bitmap = 0;
3526 _satb_buffers_processed = 0;
3527 _steal_attempts = 0;
3528 _steals = 0;
3529 _aborted = 0;
3530 _aborted_overflow = 0;
3531 _aborted_cm_aborted = 0;
3532 _aborted_yield = 0;
3533 _aborted_timed_out = 0;
3534 _aborted_satb = 0;
3535 _aborted_termination = 0;
3536 #endif // _MARKING_STATS_
3537 }
3539 bool CMTask::should_exit_termination() {
3540 regular_clock_call();
3541 // This is called when we are in the termination protocol. We should
3542 // quit if, for some reason, this task wants to abort or the global
3543 // stack is not empty (this means that we can get work from it).
3544 return !_cm->mark_stack_empty() || has_aborted();
3545 }
3547 void CMTask::reached_limit() {
3548 assert(_words_scanned >= _words_scanned_limit ||
3549 _refs_reached >= _refs_reached_limit ,
3550 "shouldn't have been called otherwise");
3551 regular_clock_call();
3552 }
3554 void CMTask::regular_clock_call() {
3555 if (has_aborted()) return;
3557 // First, we need to recalculate the words scanned and refs reached
3558 // limits for the next clock call.
3559 recalculate_limits();
3561 // During the regular clock call we do the following:
3563 // (1) If an overflow has been flagged, then we abort.
3564 if (_cm->has_overflown()) {
3565 set_has_aborted();
3566 return;
3567 }
3569 // If we are not concurrent (i.e. we're doing remark) we don't need
3570 // to check anything else. The other steps are only needed during
3571 // the concurrent marking phase.
3572 if (!concurrent()) return;
3574 // (2) If marking has been aborted for Full GC, then we also abort.
3575 if (_cm->has_aborted()) {
3576 set_has_aborted();
3577 statsOnly( ++_aborted_cm_aborted );
3578 return;
3579 }
3581 double curr_time_ms = os::elapsedVTime() * 1000.0;
3583 // (3) If marking stats are enabled, then we update the clock call statistics.
3584 #if _MARKING_STATS_
3585 if (_words_scanned >= _words_scanned_limit) {
3586 ++_clock_due_to_scanning;
3587 }
3588 if (_refs_reached >= _refs_reached_limit) {
3589 ++_clock_due_to_marking;
3590 }
3592 double last_interval_ms = curr_time_ms - _interval_start_time_ms;
3593 _interval_start_time_ms = curr_time_ms;
3594 _all_clock_intervals_ms.add(last_interval_ms);
3596 if (_cm->verbose_medium()) {
3597 gclog_or_tty->print_cr("[%u] regular clock, interval = %1.2lfms, "
3598 "scanned = %d%s, refs reached = %d%s",
3599 _worker_id, last_interval_ms,
3600 _words_scanned,
3601 (_words_scanned >= _words_scanned_limit) ? " (*)" : "",
3602 _refs_reached,
3603 (_refs_reached >= _refs_reached_limit) ? " (*)" : "");
3604 }
3605 #endif // _MARKING_STATS_
3607 // (4) We check whether we should yield. If we have to, then we abort.
3608 if (_cm->should_yield()) {
3609 // We should yield. To do this we abort the task. The caller is
3610 // responsible for yielding.
3611 set_has_aborted();
3612 statsOnly( ++_aborted_yield );
3613 return;
3614 }
3616 // (5) We check whether we've reached our time quota. If we have,
3617 // then we abort.
3618 double elapsed_time_ms = curr_time_ms - _start_time_ms;
3619 if (elapsed_time_ms > _time_target_ms) {
3620 set_has_aborted();
3621 _has_timed_out = true;
3622 statsOnly( ++_aborted_timed_out );
3623 return;
3624 }
3626 // (6) Finally, we check whether there are enough completed SATB
3627 // buffers available for processing. If there are, we abort.
3628 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3629 if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
3630 if (_cm->verbose_low()) {
3631 gclog_or_tty->print_cr("[%u] aborting to deal with pending SATB buffers",
3632 _worker_id);
3633 }
3634 // We do need to process SATB buffers, so we'll abort and restart
3635 // the marking task to do so
3636 set_has_aborted();
3637 statsOnly( ++_aborted_satb );
3638 return;
3639 }
3640 }
3642 void CMTask::recalculate_limits() {
3643 _real_words_scanned_limit = _words_scanned + words_scanned_period;
3644 _words_scanned_limit = _real_words_scanned_limit;
3646 _real_refs_reached_limit = _refs_reached + refs_reached_period;
3647 _refs_reached_limit = _real_refs_reached_limit;
3648 }
3650 void CMTask::decrease_limits() {
3651 // This is called when we believe that we're going to do an infrequent
3652 // operation which will increase the per byte scanned cost (i.e. move
3653 // entries to/from the global stack). It basically tries to decrease the
3654 // scanning limit so that the clock is called earlier.
3656 if (_cm->verbose_medium()) {
3657 gclog_or_tty->print_cr("[%u] decreasing limits", _worker_id);
3658 }
3660 _words_scanned_limit = _real_words_scanned_limit -
3661 3 * words_scanned_period / 4;
3662 _refs_reached_limit = _real_refs_reached_limit -
3663 3 * refs_reached_period / 4;
3664 }
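// Worked example (with a hypothetical words_scanned_period of 12K):
// if the real limit was _words_scanned + 12288, the code above pulls
// the limit back by 3 * 12288 / 4 == 9216 words, so the next clock
// call fires after only ~3K more words instead of a full period.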
3666 void CMTask::move_entries_to_global_stack() {
3667 // local array where we'll store the entries that will be popped
3668 // from the local queue
3669 oop buffer[global_stack_transfer_size];
3671 int n = 0;
3672 oop obj;
3673 while (n < global_stack_transfer_size && _task_queue->pop_local(obj)) {
3674 buffer[n] = obj;
3675 ++n;
3676 }
3678 if (n > 0) {
3679 // we popped at least one entry from the local queue
3681 statsOnly( ++_global_transfers_to; _local_pops += n );
3683 if (!_cm->mark_stack_push(buffer, n)) {
3684 if (_cm->verbose_low()) {
3685 gclog_or_tty->print_cr("[%u] aborting due to global stack overflow",
3686 _worker_id);
3687 }
3688 set_has_aborted();
3689 } else {
3690 // the transfer was successful
3692 if (_cm->verbose_medium()) {
3693 gclog_or_tty->print_cr("[%u] pushed %d entries to the global stack",
3694 _worker_id, n);
3695 }
3696 statsOnly( int tmp_size = _cm->mark_stack_size();
3697 if (tmp_size > _global_max_size) {
3698 _global_max_size = tmp_size;
3699 }
3700 _global_pushes += n );
3701 }
3702 }
3704 // this operation was quite expensive, so decrease the limits
3705 decrease_limits();
3706 }
3708 void CMTask::get_entries_from_global_stack() {
3709 // local array where we'll store the entries that will be popped
3710 // from the global stack.
3711 oop buffer[global_stack_transfer_size];
3712 int n;
3713 _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n);
3714 assert(n <= global_stack_transfer_size,
3715 "we should not pop more than the given limit");
3716 if (n > 0) {
3717 // yes, we did actually pop at least one entry
3719 statsOnly( ++_global_transfers_from; _global_pops += n );
3720 if (_cm->verbose_medium()) {
3721 gclog_or_tty->print_cr("[%u] popped %d entries from the global stack",
3722 _worker_id, n);
3723 }
3724 for (int i = 0; i < n; ++i) {
3725 bool success = _task_queue->push(buffer[i]);
3726 // We only call this when the local queue is empty or under a
3727 // given target limit. So, we do not expect this push to fail.
3728 assert(success, "invariant");
3729 }
3731 statsOnly( int tmp_size = _task_queue->size();
3732 if (tmp_size > _local_max_size) {
3733 _local_max_size = tmp_size;
3734 }
3735 _local_pushes += n );
3736 }
3738 // this operation was quite expensive, so decrease the limits
3739 decrease_limits();
3740 }
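// Both transfer routines above move oops in fixed-size chunks of
// global_stack_transfer_size. With a hypothetical chunk size of 1024,
// one lock-protected global-stack operation amortizes over up to 1024
// local-queue pushes or pops, which is what keeps contention on the
// global stack low (see the comment block before do_marking_step()).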
3742 void CMTask::drain_local_queue(bool partially) {
3743 if (has_aborted()) return;
3745 // Decide what the target size is, depending on whether we're going to
3746 // drain it partially (so that other tasks can steal if they run out
3747 // of things to do) or totally (at the very end).
3748 size_t target_size;
3749 if (partially) {
3750 target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
3751 } else {
3752 target_size = 0;
3753 }
3755 if (_task_queue->size() > target_size) {
3756 if (_cm->verbose_high()) {
3757 gclog_or_tty->print_cr("[%u] draining local queue, target size = %d",
3758 _worker_id, target_size);
3759 }
3761 oop obj;
3762 bool ret = _task_queue->pop_local(obj);
3763 while (ret) {
3764 statsOnly( ++_local_pops );
3766 if (_cm->verbose_high()) {
3767 gclog_or_tty->print_cr("[%u] popped "PTR_FORMAT, _worker_id,
3768 (void*) obj);
3769 }
3771 assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" );
3772 assert(!_g1h->is_on_master_free_list(
3773 _g1h->heap_region_containing((HeapWord*) obj)), "invariant");
3775 scan_object(obj);
3777 if (_task_queue->size() <= target_size || has_aborted()) {
3778 ret = false;
3779 } else {
3780 ret = _task_queue->pop_local(obj);
3781 }
3782 }
3784 if (_cm->verbose_high()) {
3785 gclog_or_tty->print_cr("[%u] drained local queue, size = %d",
3786 _worker_id, _task_queue->size());
3787 }
3788 }
3789 }
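// For a sense of scale (assuming the default GCDrainStackTargetSize of
// 64 and a task-queue capacity in the thousands): a partial drain stops
// once the queue is down to 64 entries, deliberately leaving work
// available for stealing, while a total drain (partially == false)
// runs the queue all the way to empty.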
3791 void CMTask::drain_global_stack(bool partially) {
3792 if (has_aborted()) return;
3794 // We have a policy to drain the local queue before we attempt to
3795 // drain the global stack.
3796 assert(partially || _task_queue->size() == 0, "invariant");
3798 // Decide what the target size is, depending on whether we're going to
3799 // drain it partially (so that other tasks can steal if they run out
3800 // of things to do) or totally (at the very end). Notice that,
3801 // because we move entries from the global stack in chunks or
3802 // because another task might be doing the same, we might in fact
3803 // drop below the target. But, this is not a problem.
3804 size_t target_size;
3805 if (partially) {
3806 target_size = _cm->partial_mark_stack_size_target();
3807 } else {
3808 target_size = 0;
3809 }
3811 if (_cm->mark_stack_size() > target_size) {
3812 if (_cm->verbose_low()) {
3813 gclog_or_tty->print_cr("[%u] draining global_stack, target size %d",
3814 _worker_id, target_size);
3815 }
3817 while (!has_aborted() && _cm->mark_stack_size() > target_size) {
3818 get_entries_from_global_stack();
3819 drain_local_queue(partially);
3820 }
3822 if (_cm->verbose_low()) {
3823 gclog_or_tty->print_cr("[%u] drained global stack, size = %d",
3824 _worker_id, _cm->mark_stack_size());
3825 }
3826 }
3827 }
3829 // The SATB queue set makes several assumptions about whether its par
3830 // or non-par methods get called. This is why some of the code is
3831 // replicated. We should really get rid of the single-threaded version
3832 // of the code to simplify things.
3833 void CMTask::drain_satb_buffers() {
3834 if (has_aborted()) return;
3836 // We set this so that the regular clock knows that we're in the
3837 // middle of draining buffers and doesn't set the abort flag when it
3838 // notices that SATB buffers are available for draining. It'd be
3839 // very counterproductive if it did that. :-)
3840 _draining_satb_buffers = true;
3842 CMObjectClosure oc(this);
3843 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3844 if (G1CollectedHeap::use_parallel_gc_threads()) {
3845 satb_mq_set.set_par_closure(_worker_id, &oc);
3846 } else {
3847 satb_mq_set.set_closure(&oc);
3848 }
3850 // This keeps claiming and applying the closure to completed buffers
3851 // until we run out of buffers or we need to abort.
3852 if (G1CollectedHeap::use_parallel_gc_threads()) {
3853 while (!has_aborted() &&
3854 satb_mq_set.par_apply_closure_to_completed_buffer(_worker_id)) {
3855 if (_cm->verbose_medium()) {
3856 gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id);
3857 }
3858 statsOnly( ++_satb_buffers_processed );
3859 regular_clock_call();
3860 }
3861 } else {
3862 while (!has_aborted() &&
3863 satb_mq_set.apply_closure_to_completed_buffer()) {
3864 if (_cm->verbose_medium()) {
3865 gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id);
3866 }
3867 statsOnly( ++_satb_buffers_processed );
3868 regular_clock_call();
3869 }
3870 }
3872 if (!concurrent() && !has_aborted()) {
3873 // We should only do this during remark.
3874 if (G1CollectedHeap::use_parallel_gc_threads()) {
3875 satb_mq_set.par_iterate_closure_all_threads(_worker_id);
3876 } else {
3877 satb_mq_set.iterate_closure_all_threads();
3878 }
3879 }
3881 _draining_satb_buffers = false;
3883 assert(has_aborted() ||
3884 concurrent() ||
3885 satb_mq_set.completed_buffers_num() == 0, "invariant");
3887 if (G1CollectedHeap::use_parallel_gc_threads()) {
3888 satb_mq_set.set_par_closure(_worker_id, NULL);
3889 } else {
3890 satb_mq_set.set_closure(NULL);
3891 }
3893 // again, this was a potentially expensive operation, decrease the
3894 // limits to get the regular clock call early
3895 decrease_limits();
3896 }
3898 void CMTask::print_stats() {
3899 gclog_or_tty->print_cr("Marking Stats, task = %u, calls = %d",
3900 _worker_id, _calls);
3901 gclog_or_tty->print_cr(" Elapsed time = %1.2lfms, Termination time = %1.2lfms",
3902 _elapsed_time_ms, _termination_time_ms);
3903 gclog_or_tty->print_cr(" Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
3904 _step_times_ms.num(), _step_times_ms.avg(),
3905 _step_times_ms.sd());
3906 gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms",
3907 _step_times_ms.maximum(), _step_times_ms.sum());
3909 #if _MARKING_STATS_
3910 gclog_or_tty->print_cr(" Clock Intervals (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
3911 _all_clock_intervals_ms.num(), _all_clock_intervals_ms.avg(),
3912 _all_clock_intervals_ms.sd());
3913 gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms",
3914 _all_clock_intervals_ms.maximum(),
3915 _all_clock_intervals_ms.sum());
3916 gclog_or_tty->print_cr(" Clock Causes (cum): scanning = %d, marking = %d",
3917 _clock_due_to_scanning, _clock_due_to_marking);
3918 gclog_or_tty->print_cr(" Objects: scanned = %d, found on the bitmap = %d",
3919 _objs_scanned, _objs_found_on_bitmap);
3920 gclog_or_tty->print_cr(" Local Queue: pushes = %d, pops = %d, max size = %d",
3921 _local_pushes, _local_pops, _local_max_size);
3922 gclog_or_tty->print_cr(" Global Stack: pushes = %d, pops = %d, max size = %d",
3923 _global_pushes, _global_pops, _global_max_size);
3924 gclog_or_tty->print_cr(" transfers to = %d, transfers from = %d",
3925 _global_transfers_to,_global_transfers_from);
3926 gclog_or_tty->print_cr(" Regions: claimed = %d", _regions_claimed);
3927 gclog_or_tty->print_cr(" SATB buffers: processed = %d", _satb_buffers_processed);
3928 gclog_or_tty->print_cr(" Steals: attempts = %d, successes = %d",
3929 _steal_attempts, _steals);
3930 gclog_or_tty->print_cr(" Aborted: %d, due to", _aborted);
3931 gclog_or_tty->print_cr(" overflow: %d, global abort: %d, yield: %d",
3932 _aborted_overflow, _aborted_cm_aborted, _aborted_yield);
3933 gclog_or_tty->print_cr(" time out: %d, SATB: %d, termination: %d",
3934 _aborted_timed_out, _aborted_satb, _aborted_termination);
3935 #endif // _MARKING_STATS_
3936 }
3938 /*****************************************************************************
3940 The do_marking_step(time_target_ms, ...) method is the building
3941 block of the parallel marking framework. It can be called in parallel
3942 with other invocations of do_marking_step() on different tasks
3943 (but only one per task, obviously) and concurrently with the
3944 mutator threads, or during remark; this eliminates the need
3945 for two versions of the code. When called during remark, it will
3946 pick up from where the task left off during the concurrent marking
3947 phase. Interestingly, tasks are also claimable during evacuation
3948 pauses, since do_marking_step() ensures that it aborts before
3949 it needs to yield.
3951 The data structures that it uses to do marking work are the
3952 following:
3954 (1) Marking Bitmap. If there are gray objects that appear only
3955 on the bitmap (this happens either when dealing with an overflow
3956 or when the initial marking phase has simply marked the roots
3957 and didn't push them on the stack), then tasks claim heap
3958 regions whose bitmap they then scan to find gray objects. A
3959 global finger indicates where the end of the last claimed region
3960 is. A local finger indicates how far into the region a task has
3961 scanned. The two fingers are used to determine how to gray an
3962 object (i.e. whether simply marking it is OK, as it will be
3963 visited by a task in the future, or whether it needs to be also
3964 pushed on a stack).
3966 (2) Local Queue. The local queue of the task which is accessed
3967 reasonably efficiently by the task. Other tasks can steal from
3968 it when they run out of work. Throughout the marking phase, a
3969 task attempts to keep its local queue short but not totally
3970 empty, so that entries are available for stealing by other
3971 tasks. Only when there is no more work does a task totally
3972 drain its local queue.
3974 (3) Global Mark Stack. This handles local queue overflow. During
3975 marking only sets of entries are moved between it and the local
3976 queues, as access to it requires a mutex and finer-grained
3977 interaction with it might cause contention. If it
3978 overflows, then the marking phase should restart and iterate
3979 over the bitmap to identify gray objects. Throughout the marking
3980 phase, tasks attempt to keep the global mark stack at a small
3981 length but not totally empty, so that entries are available for
3982 popping by other tasks. Only when there is no more work do tasks
3983 totally drain the global mark stack.
3985 (4) SATB Buffer Queue. This is where completed SATB buffers are
3986 made available. Buffers are regularly removed from this queue
3987 and scanned for roots, so that the queue doesn't get too
3988 long. During remark, all completed buffers are processed, as
3989 well as the filled-in parts of any uncompleted buffers.
3991 The do_marking_step() method tries to abort when the time target
3992 has been reached. There are a few other cases when the
3993 do_marking_step() method also aborts:
3995 (1) When the marking phase has been aborted (after a Full GC).
3997 (2) When a global overflow (on the global stack) has been
3998 triggered. Before the task aborts, it will actually sync up with
3999 the other tasks to ensure that all the marking data structures
4000 (local queues, stacks, fingers etc.) are re-initialized so that
4001 when do_marking_step() completes, the marking phase can
4002 immediately restart.
4004 (3) When enough completed SATB buffers are available. The
4005 do_marking_step() method only tries to drain SATB buffers right
4006 at the beginning. So, if enough buffers are available, the
4007 marking step aborts and the SATB buffers are processed at
4008 the beginning of the next invocation.
4010 (4) To yield. When we have to yield, we abort and do the yield
4011 right at the end of do_marking_step(). This saves us a lot of
4012 hassle: by yielding we might allow a Full GC, in which case
4013 objects would be compacted underneath our feet, the heap might
4014 shrink, etc. Aborting and yielding right at the end spares us
4015 from checking for all that throughout the step.
4017 From the above it follows that the do_marking_step() method should
4018 be called in a loop (or, otherwise, regularly) until it completes.
4020 If a marking step completes without its has_aborted() flag being
4021 true, it means it has completed the current marking phase (and
4022 also all other marking tasks have done so and have all synced up).
4024 A method called regular_clock_call() is invoked "regularly" (in
4025 sub-ms intervals) throughout marking. It is this clock method that
4026 checks all the abort conditions which were mentioned above and
4027 decides when the task should abort. A work-based scheme is used to
4028 trigger this clock method: when the number of object words the
4029 marking phase has scanned or the number of references the marking
4030 phase has visited reaches a given limit. Additional calls to
4031 the clock method have been planted in a few other strategic places
4032 too. The initial reason for the clock method was to avoid calling
4033 vtime too regularly, as it is quite expensive. So, once it was in
4034 place, it was natural to piggy-back all the other conditions on it
4035 too and not constantly check them throughout the code.
4037 If do_termination is true then do_marking_step will enter its
4038 termination protocol.
4040 The value of is_serial must be true when do_marking_step is being
4041 called serially (i.e. by the VMThread) and do_marking_step should
4042 skip any synchronization in the termination and overflow code.
4043 Examples include the serial remark code and the serial reference
4044 processing closures.
4046 The value of is_serial must be false when do_marking_step is
4047 being called by any of the worker threads in a work gang.
4048 Examples include the concurrent marking code (CMMarkingTask),
4049 the MT remark code, and the MT reference processing closures.
4051 *****************************************************************************/
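// A minimal sketch of the driver loop described above (the real
// callers, e.g. the concurrent marking task, add step timing and
// sleep-on-yield handling; the names below follow the surrounding
// code but the loop itself is illustrative, not quoted):
//
//   do {
//     task->do_marking_step(mark_step_duration_ms,
//                           true  /* do_termination */,
//                           false /* is_serial */);
//     // If the step aborted (yield, SATB backlog, overflow), simply
//     // loop and call it again; it resumes where it left off.
//   } while (task->has_aborted() && !cm->has_aborted());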
4053 void CMTask::do_marking_step(double time_target_ms,
4054 bool do_termination,
4055 bool is_serial) {
4056 assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
4057 assert(concurrent() == _cm->concurrent(), "they should be the same");
4059 G1CollectorPolicy* g1_policy = _g1h->g1_policy();
4060 assert(_task_queues != NULL, "invariant");
4061 assert(_task_queue != NULL, "invariant");
4062 assert(_task_queues->queue(_worker_id) == _task_queue, "invariant");
4064 assert(!_claimed,
4065 "only one thread should claim this task at any one time");
4067 // OK, this doesn't safeguard against all possible scenarios, as it is
4068 // possible for two threads to set the _claimed flag at the same
4069 // time. But it is only for debugging purposes anyway and it will
4070 // catch most problems.
4071 _claimed = true;
4073 _start_time_ms = os::elapsedVTime() * 1000.0;
4074 statsOnly( _interval_start_time_ms = _start_time_ms );
4076 // If do_stealing is true then do_marking_step will attempt to
4077 // steal work from the other CMTasks. It only makes sense to
4078 // enable stealing when the termination protocol is enabled
4079 // and do_marking_step() is not being called serially.
4080 bool do_stealing = do_termination && !is_serial;
4082 double diff_prediction_ms =
4083 g1_policy->get_new_prediction(&_marking_step_diffs_ms);
4084 _time_target_ms = time_target_ms - diff_prediction_ms;
4086 // set up the variables that are used in the work-based scheme to
4087 // call the regular clock method
4088 _words_scanned = 0;
4089 _refs_reached = 0;
4090 recalculate_limits();
4092 // clear all flags
4093 clear_has_aborted();
4094 _has_timed_out = false;
4095 _draining_satb_buffers = false;
4097 ++_calls;
4099 if (_cm->verbose_low()) {
4100 gclog_or_tty->print_cr("[%u] >>>>>>>>>> START, call = %d, "
4101 "target = %1.2lfms >>>>>>>>>>",
4102 _worker_id, _calls, _time_target_ms);
4103 }
4105 // Set up the bitmap and oop closures. Anything that uses them is
4106 // eventually called from this method, so it is OK to allocate these
4107 // statically.
4108 CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap);
4109 G1CMOopClosure cm_oop_closure(_g1h, _cm, this);
4110 set_cm_oop_closure(&cm_oop_closure);
4112 if (_cm->has_overflown()) {
4113 // This can happen if the mark stack overflows during a GC pause
4114 // and this task, after a yield point, restarts. We have to abort
4115 // as we need to get into the overflow protocol which happens
4116 // right at the end of this task.
4117 set_has_aborted();
4118 }
4120 // First drain any available SATB buffers. After this, we will not
4121 // look at SATB buffers before the next invocation of this method.
4122 // If enough completed SATB buffers are queued up, the regular clock
4123 // will abort this task so that it restarts.
4124 drain_satb_buffers();
4125 // ...then partially drain the local queue and the global stack
4126 drain_local_queue(true);
4127 drain_global_stack(true);
4129 do {
4130 if (!has_aborted() && _curr_region != NULL) {
4131 // This means that we're already holding on to a region.
4132 assert(_finger != NULL, "if region is not NULL, then the finger "
4133 "should not be NULL either");
4135 // We might have restarted this task after an evacuation pause
4136 // which might have evacuated the region we're holding on to
4137 // underneath our feet. Let's read its limit again to make sure
4138 // that we do not iterate over a region of the heap that
4139 // contains garbage (update_region_limit() will also move
4140 // _finger to the start of the region if it is found empty).
4141 update_region_limit();
4142 // We will start from _finger not from the start of the region,
4143 // as we might be restarting this task after aborting half-way
4144 // through scanning this region. In this case, _finger points to
4145 // the address where we last found a marked object. If this is a
4146 // fresh region, _finger points to start().
4147 MemRegion mr = MemRegion(_finger, _region_limit);
4149 if (_cm->verbose_low()) {
4150 gclog_or_tty->print_cr("[%u] we're scanning part "
4151 "["PTR_FORMAT", "PTR_FORMAT") "
4152 "of region "HR_FORMAT,
4153 _worker_id, _finger, _region_limit,
4154 HR_FORMAT_PARAMS(_curr_region));
4155 }
4157 assert(!_curr_region->isHumongous() || mr.start() == _curr_region->bottom(),
4158 "humongous regions should go around loop once only");
4160 // Some special cases:
4161 // If the memory region is empty, we can just give up the region.
4162 // If the current region is humongous then we only need to check
4163 // the bitmap for the bit associated with the start of the object,
4164 // scan the object if it's live, and give up the region.
4165 // Otherwise, let's iterate over the bitmap of the part of the region
4166 // that is left.
4167 // If the iteration is successful, give up the region.
4168 if (mr.is_empty()) {
4169 giveup_current_region();
4170 regular_clock_call();
4171 } else if (_curr_region->isHumongous() && mr.start() == _curr_region->bottom()) {
4172 if (_nextMarkBitMap->isMarked(mr.start())) {
4173 // The object is marked - apply the closure
4174 BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start());
4175 bitmap_closure.do_bit(offset);
4176 }
4177 // Even if this task aborted while scanning the humongous object
4178 // we can (and should) give up the current region.
4179 giveup_current_region();
4180 regular_clock_call();
4181 } else if (_nextMarkBitMap->iterate(&bitmap_closure, mr)) {
4182 giveup_current_region();
4183 regular_clock_call();
4184 } else {
4185 assert(has_aborted(), "currently the only way to do so");
4186 // The only way to abort the bitmap iteration is to return
4187 // false from the do_bit() method. However, inside the
4188 // do_bit() method we move the _finger to point to the
4189 // object currently being looked at. So, if we bail out, we
4190 // have definitely set _finger to something non-null.
4191 assert(_finger != NULL, "invariant");
4193 // Region iteration was actually aborted. So now _finger
4194 // points to the address of the object we last scanned. If we
4195 // leave it there, when we restart this task, we will rescan
4196 // the object. It is easy to avoid this. We move the finger by
4197 // enough to point to the next possible object header (the
4198 // bitmap knows by how much we need to move it as it knows its
4199 // granularity).
4200 assert(_finger < _region_limit, "invariant");
4201 HeapWord* new_finger = _nextMarkBitMap->nextObject(_finger);
4202 // Check if bitmap iteration was aborted while scanning the last object
4203 if (new_finger >= _region_limit) {
4204 giveup_current_region();
4205 } else {
4206 move_finger_to(new_finger);
4207 }
4208 }
4209 }
4210 // At this point we have either completed iterating over the
4211 // region we were holding on to, or we have aborted.
4213 // We then partially drain the local queue and the global stack.
4214 // (Do we really need this?)
4215 drain_local_queue(true);
4216 drain_global_stack(true);
4218 // Read the note on the claim_region() method on why it might
4219 // return NULL with potentially more regions available for
4220 // claiming and why we have to check out_of_regions() to determine
4221 // whether we're done or not.
4222 while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
4223 // We are going to try to claim a new region. We should have
4224 // given up on the previous one.
4225 // Separated the asserts so that we know which one fires.
4226 assert(_curr_region == NULL, "invariant");
4227 assert(_finger == NULL, "invariant");
4228 assert(_region_limit == NULL, "invariant");
4229 if (_cm->verbose_low()) {
4230 gclog_or_tty->print_cr("[%u] trying to claim a new region", _worker_id);
4231 }
4232 HeapRegion* claimed_region = _cm->claim_region(_worker_id);
4233 if (claimed_region != NULL) {
4234 // Yes, we managed to claim one
4235 statsOnly( ++_regions_claimed );
4237 if (_cm->verbose_low()) {
4238 gclog_or_tty->print_cr("[%u] we successfully claimed "
4239 "region "PTR_FORMAT,
4240 _worker_id, claimed_region);
4241 }
4243 setup_for_region(claimed_region);
4244 assert(_curr_region == claimed_region, "invariant");
4245 }
4246 // It is important to call the regular clock here. It might take
4247 // a while to claim a region if, for example, we hit a large
4248 // block of empty regions. So we need to call the regular clock
4249 // method once round the loop to make sure it's called
4250 // frequently enough.
4251 regular_clock_call();
4252 }
4254 if (!has_aborted() && _curr_region == NULL) {
4255 assert(_cm->out_of_regions(),
4256 "at this point we should be out of regions");
4257 }
4258 } while ( _curr_region != NULL && !has_aborted());
4260 if (!has_aborted()) {
4261 // We cannot check whether the global stack is empty, since other
4262 // tasks might be pushing objects to it concurrently.
4263 assert(_cm->out_of_regions(),
4264 "at this point we should be out of regions");
4266 if (_cm->verbose_low()) {
4267 gclog_or_tty->print_cr("[%u] all regions claimed", _worker_id);
4268 }
4270 // Try to reduce the number of available SATB buffers so that
4271 // remark has less work to do.
4272 drain_satb_buffers();
4273 }
4275 // Since we've done everything else, we can now totally drain the
4276 // local queue and global stack.
4277 drain_local_queue(false);
4278 drain_global_stack(false);
4280 // Attempt at work stealing from other task's queues.
4281 if (do_stealing && !has_aborted()) {
4282 // We have not aborted. This means that we have finished all that
4283 // we could. Let's try to do some stealing...
4285 // We cannot check whether the global stack is empty, since other
4286 // tasks might be pushing objects to it concurrently.
4287 assert(_cm->out_of_regions() && _task_queue->size() == 0,
4288 "only way to reach here");
4290 if (_cm->verbose_low()) {
4291 gclog_or_tty->print_cr("[%u] starting to steal", _worker_id);
4292 }
4294 while (!has_aborted()) {
4295 oop obj;
4296 statsOnly( ++_steal_attempts );
4298 if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) {
4299 if (_cm->verbose_medium()) {
4300 gclog_or_tty->print_cr("[%u] stolen "PTR_FORMAT" successfully",
4301 _worker_id, (void*) obj);
4302 }
4304 statsOnly( ++_steals );
4306 assert(_nextMarkBitMap->isMarked((HeapWord*) obj),
4307 "any stolen object should be marked");
4308 scan_object(obj);
4310 // And since we're towards the end, let's totally drain the
4311 // local queue and global stack.
4312 drain_local_queue(false);
4313 drain_global_stack(false);
4314 } else {
4315 break;
4316 }
4317 }
4318 }
4320 // If we are about to wrap up and go into termination, check if we
4321 // should raise the overflow flag.
4322 if (do_termination && !has_aborted()) {
4323 if (_cm->force_overflow()->should_force()) {
4324 _cm->set_has_overflown();
4325 regular_clock_call();
4326 }
4327 }
4329 // We still haven't aborted. Now, let's try to get into the
4330 // termination protocol.
4331 if (do_termination && !has_aborted()) {
4332 // We cannot check whether the global stack is empty, since other
4333 // tasks might be concurrently pushing objects on it.
4334 // Separated the asserts so that we know which one fires.
4335 assert(_cm->out_of_regions(), "only way to reach here");
4336 assert(_task_queue->size() == 0, "only way to reach here");
4338 if (_cm->verbose_low()) {
4339 gclog_or_tty->print_cr("[%u] starting termination protocol", _worker_id);
4340 }
4342 _termination_start_time_ms = os::elapsedVTime() * 1000.0;
4344 // The CMTask class also extends the TerminatorTerminator class,
4345 // hence its should_exit_termination() method will also decide
4346 // whether to exit the termination protocol or not.
4347 bool finished = (is_serial ||
4348 _cm->terminator()->offer_termination(this));
4349 double termination_end_time_ms = os::elapsedVTime() * 1000.0;
4350 _termination_time_ms +=
4351 termination_end_time_ms - _termination_start_time_ms;
4353 if (finished) {
4354 // We're all done.
4356 if (_worker_id == 0) {
4357 // let's allow task 0 to do this
4358 if (concurrent()) {
4359 assert(_cm->concurrent_marking_in_progress(), "invariant");
4360 // we need to set this to false before the next
4361 // safepoint. This way we ensure that the marking phase
4362 // doesn't observe any more heap expansions.
4363 _cm->clear_concurrent_marking_in_progress();
4364 }
4365 }
4367 // We can now guarantee that the global stack is empty, since
4368 // all other tasks have finished. We separated the guarantees so
4369 // that, if a condition is false, we can immediately find out
4370 // which one.
4371 guarantee(_cm->out_of_regions(), "only way to reach here");
4372 guarantee(_cm->mark_stack_empty(), "only way to reach here");
4373 guarantee(_task_queue->size() == 0, "only way to reach here");
4374 guarantee(!_cm->has_overflown(), "only way to reach here");
4375 guarantee(!_cm->mark_stack_overflow(), "only way to reach here");
4377 if (_cm->verbose_low()) {
4378 gclog_or_tty->print_cr("[%u] all tasks terminated", _worker_id);
4379 }
4380 } else {
4381 // Apparently there's more work to do. Let's abort this task; it
4382 // will be restarted and we can hopefully find more things to do.
4384 if (_cm->verbose_low()) {
4385 gclog_or_tty->print_cr("[%u] apparently there is more work to do",
4386 _worker_id);
4387 }
4389 set_has_aborted();
4390 statsOnly( ++_aborted_termination );
4391 }
4392 }
4394 // Mainly for debugging purposes to make sure that a pointer to the
4395 // closure which was statically allocated in this frame doesn't
4396 // escape it by accident.
4397 set_cm_oop_closure(NULL);
4398 double end_time_ms = os::elapsedVTime() * 1000.0;
4399 double elapsed_time_ms = end_time_ms - _start_time_ms;
4400 // Update the step history.
4401 _step_times_ms.add(elapsed_time_ms);
4403 if (has_aborted()) {
4404 // The task was aborted for some reason.
4406 statsOnly( ++_aborted );
4408 if (_has_timed_out) {
4409 double diff_ms = elapsed_time_ms - _time_target_ms;
4410 // Keep statistics of how well we did with respect to hitting
4411 // our target only if we actually timed out (if we aborted for
4412 // other reasons, then the results might get skewed).
4413 _marking_step_diffs_ms.add(diff_ms);
4414 }
4416 if (_cm->has_overflown()) {
4417 // This is the interesting one. We aborted because a global
4418 // overflow was raised. This means we have to restart the
4419 // marking phase and start iterating over regions. However, in
4420 // order to do this we have to make sure that all tasks stop
4421 // what they are doing and re-initialise in a safe manner. We
4422 // will achieve this with the use of two barrier sync points.
4424 if (_cm->verbose_low()) {
4425 gclog_or_tty->print_cr("[%u] detected overflow", _worker_id);
4426 }
4428 if (!is_serial) {
4429 // We only need to enter the sync barrier if being called
4430 // from a parallel context
4431 _cm->enter_first_sync_barrier(_worker_id);
4433 // When we exit this sync barrier we know that all tasks have
4434 // stopped doing marking work. So, it's now safe to
4435 // re-initialise our data structures. At the end of this method,
4436 // task 0 will clear the global data structures.
4437 }
4439 statsOnly( ++_aborted_overflow );
4441 // We clear the local state of this task...
4442 clear_region_fields();
4444 if (!is_serial) {
4445 // ...and enter the second barrier.
4446 _cm->enter_second_sync_barrier(_worker_id);
4447 }
4448 // At this point, if we're during the concurrent phase of
4449 // marking, everything has been re-initialized and we're
4450 // ready to restart.
4451 }
4453 if (_cm->verbose_low()) {
4454 gclog_or_tty->print_cr("[%u] <<<<<<<<<< ABORTING, target = %1.2lfms, "
4455 "elapsed = %1.2lfms <<<<<<<<<<",
4456 _worker_id, _time_target_ms, elapsed_time_ms);
4457 if (_cm->has_aborted()) {
4458 gclog_or_tty->print_cr("[%u] ========== MARKING ABORTED ==========",
4459 _worker_id);
4460 }
4461 }
4462 } else {
4463 if (_cm->verbose_low()) {
4464 gclog_or_tty->print_cr("[%u] <<<<<<<<<< FINISHED, target = %1.2lfms, "
4465 "elapsed = %1.2lfms <<<<<<<<<<",
4466 _worker_id, _time_target_ms, elapsed_time_ms);
4467 }
4468 }
4470 _claimed = false;
4471 }
4473 CMTask::CMTask(uint worker_id,
4474 ConcurrentMark* cm,
4475 size_t* marked_bytes,
4476 BitMap* card_bm,
4477 CMTaskQueue* task_queue,
4478 CMTaskQueueSet* task_queues)
4479 : _g1h(G1CollectedHeap::heap()),
4480 _worker_id(worker_id), _cm(cm),
4481 _claimed(false),
4482 _nextMarkBitMap(NULL), _hash_seed(17),
4483 _task_queue(task_queue),
4484 _task_queues(task_queues),
4485 _cm_oop_closure(NULL),
4486 _marked_bytes_array(marked_bytes),
4487 _card_bm(card_bm) {
4488 guarantee(task_queue != NULL, "invariant");
4489 guarantee(task_queues != NULL, "invariant");
4491 statsOnly( _clock_due_to_scanning = 0;
4492 _clock_due_to_marking = 0 );
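// Seed the step-duration prediction with a small non-zero sample so
// that the very first get_new_prediction() call in do_marking_step()
// has data to work with.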
4494 _marking_step_diffs_ms.add(0.5);
4495 }
4497 // These are formatting macros that are used below to ensure
4498 // consistent formatting. The *_H_* versions are used to format the
4499 // header for a particular value and they should be kept consistent
4500 // with the corresponding macro. Also note that most of the macros add
4501 // the necessary white space (as a prefix) which makes them a bit
4502 // easier to compose.
4504 // All the output lines are prefixed with this string to be able to
4505 // identify them easily in a large log file.
4506 #define G1PPRL_LINE_PREFIX "###"
4508 #define G1PPRL_ADDR_BASE_FORMAT " "PTR_FORMAT"-"PTR_FORMAT
4509 #ifdef _LP64
4510 #define G1PPRL_ADDR_BASE_H_FORMAT " %37s"
4511 #else // _LP64
4512 #define G1PPRL_ADDR_BASE_H_FORMAT " %21s"
4513 #endif // _LP64
4515 // For per-region info
4516 #define G1PPRL_TYPE_FORMAT " %-4s"
4517 #define G1PPRL_TYPE_H_FORMAT " %4s"
4518 #define G1PPRL_BYTE_FORMAT " "SIZE_FORMAT_W(9)
4519 #define G1PPRL_BYTE_H_FORMAT " %9s"
4520 #define G1PPRL_DOUBLE_FORMAT " %14.1f"
4521 #define G1PPRL_DOUBLE_H_FORMAT " %14s"
4523 // For summary info
4524 #define G1PPRL_SUM_ADDR_FORMAT(tag) " "tag":"G1PPRL_ADDR_BASE_FORMAT
4525 #define G1PPRL_SUM_BYTE_FORMAT(tag) " "tag": "SIZE_FORMAT
4526 #define G1PPRL_SUM_MB_FORMAT(tag) " "tag": %1.2f MB"
4527 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag)" / %1.2f %%"
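// For example, G1PPRL_SUM_MB_PERC_FORMAT("used") expands to
// " used: %1.2f MB / %1.2f %%", which is how the SUMMARY line printed
// by the destructor below composes each per-metric field.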
4529 G1PrintRegionLivenessInfoClosure::
4530 G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name)
4531 : _out(out),
4532 _total_used_bytes(0), _total_capacity_bytes(0),
4533 _total_prev_live_bytes(0), _total_next_live_bytes(0),
4534 _hum_used_bytes(0), _hum_capacity_bytes(0),
4535 _hum_prev_live_bytes(0), _hum_next_live_bytes(0),
4536 _total_remset_bytes(0), _total_strong_code_roots_bytes(0) {
4537 G1CollectedHeap* g1h = G1CollectedHeap::heap();
4538 MemRegion g1_committed = g1h->g1_committed();
4539 MemRegion g1_reserved = g1h->g1_reserved();
4540 double now = os::elapsedTime();
4542 // Print the header of the output.
4543 _out->cr();
4544 _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
4545 _out->print_cr(G1PPRL_LINE_PREFIX" HEAP"
4546 G1PPRL_SUM_ADDR_FORMAT("committed")
4547 G1PPRL_SUM_ADDR_FORMAT("reserved")
4548 G1PPRL_SUM_BYTE_FORMAT("region-size"),
4549 g1_committed.start(), g1_committed.end(),
4550 g1_reserved.start(), g1_reserved.end(),
4551 HeapRegion::GrainBytes);
4552 _out->print_cr(G1PPRL_LINE_PREFIX);
4553 _out->print_cr(G1PPRL_LINE_PREFIX
4554 G1PPRL_TYPE_H_FORMAT
4555 G1PPRL_ADDR_BASE_H_FORMAT
4556 G1PPRL_BYTE_H_FORMAT
4557 G1PPRL_BYTE_H_FORMAT
4558 G1PPRL_BYTE_H_FORMAT
4559 G1PPRL_DOUBLE_H_FORMAT
4560 G1PPRL_BYTE_H_FORMAT
4561 G1PPRL_BYTE_H_FORMAT,
4562 "type", "address-range",
4563 "used", "prev-live", "next-live", "gc-eff",
4564 "remset", "code-roots");
4565 _out->print_cr(G1PPRL_LINE_PREFIX
4566 G1PPRL_TYPE_H_FORMAT
4567 G1PPRL_ADDR_BASE_H_FORMAT
4568 G1PPRL_BYTE_H_FORMAT
4569 G1PPRL_BYTE_H_FORMAT
4570 G1PPRL_BYTE_H_FORMAT
4571 G1PPRL_DOUBLE_H_FORMAT
4572 G1PPRL_BYTE_H_FORMAT
4573 G1PPRL_BYTE_H_FORMAT,
4574 "", "",
4575 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
4576 "(bytes)", "(bytes)");
4577 }
4579 // It takes as a parameter a pointer to one of the _hum_* fields; it
4580 // deduces the corresponding value for a region in a humongous region
4581 // series (either the region size, or what's left if the _hum_* field
4582 // is < the region size), and updates the _hum_* field accordingly.
4583 size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) {
4584 size_t bytes = 0;
4585 // The > 0 check is to deal with the prev and next live bytes which
4586 // could be 0.
4587 if (*hum_bytes > 0) {
4588 bytes = MIN2(HeapRegion::GrainBytes, *hum_bytes);
4589 *hum_bytes -= bytes;
4590 }
4591 return bytes;
4592 }
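// Worked example (assuming 1 MB regions): for a humongous series with
// _hum_used_bytes == 2.5 MB, three successive calls return 1 MB, 1 MB
// and 0.5 MB and leave the field at zero, so each region in the series
// reports its own share of the series total.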
4594 // It deduces the values for a region in a humongous region series
4595 // from the _hum_* fields and updates those accordingly. It assumes
4596 // that that _hum_* fields have already been set up from the "starts
4597 // humongous" region and we visit the regions in address order.
4598 void G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* used_bytes,
4599 size_t* capacity_bytes,
4600 size_t* prev_live_bytes,
4601 size_t* next_live_bytes) {
4602 assert(_hum_used_bytes > 0 && _hum_capacity_bytes > 0, "pre-condition");
4603 *used_bytes = get_hum_bytes(&_hum_used_bytes);
4604 *capacity_bytes = get_hum_bytes(&_hum_capacity_bytes);
4605 *prev_live_bytes = get_hum_bytes(&_hum_prev_live_bytes);
4606 *next_live_bytes = get_hum_bytes(&_hum_next_live_bytes);
4607 }
4609 bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
4610 const char* type = "";
4611 HeapWord* bottom = r->bottom();
4612 HeapWord* end = r->end();
4613 size_t capacity_bytes = r->capacity();
4614 size_t used_bytes = r->used();
4615 size_t prev_live_bytes = r->live_bytes();
4616 size_t next_live_bytes = r->next_live_bytes();
4617 double gc_eff = r->gc_efficiency();
4618 size_t remset_bytes = r->rem_set()->mem_size();
4619 size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();
4621 if (r->used() == 0) {
4622 type = "FREE";
4623 } else if (r->is_survivor()) {
4624 type = "SURV";
4625 } else if (r->is_young()) {
4626 type = "EDEN";
4627 } else if (r->startsHumongous()) {
4628 type = "HUMS";
4630 assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 &&
4631 _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0,
4632 "they should have been zeroed after the last time we used them");
4633 // Set up the _hum_* fields.
4634 _hum_capacity_bytes = capacity_bytes;
4635 _hum_used_bytes = used_bytes;
4636 _hum_prev_live_bytes = prev_live_bytes;
4637 _hum_next_live_bytes = next_live_bytes;
4638 get_hum_bytes(&used_bytes, &capacity_bytes,
4639 &prev_live_bytes, &next_live_bytes);
4640 end = bottom + HeapRegion::GrainWords;
4641 } else if (r->continuesHumongous()) {
4642 type = "HUMC";
4643 get_hum_bytes(&used_bytes, &capacity_bytes,
4644 &prev_live_bytes, &next_live_bytes);
4645 assert(end == bottom + HeapRegion::GrainWords, "invariant");
4646 } else {
4647 type = "OLD";
4648 }
4650 _total_used_bytes += used_bytes;
4651 _total_capacity_bytes += capacity_bytes;
4652 _total_prev_live_bytes += prev_live_bytes;
4653 _total_next_live_bytes += next_live_bytes;
4654 _total_remset_bytes += remset_bytes;
4655 _total_strong_code_roots_bytes += strong_code_roots_bytes;
4657 // Print a line for this particular region.
4658 _out->print_cr(G1PPRL_LINE_PREFIX
4659 G1PPRL_TYPE_FORMAT
4660 G1PPRL_ADDR_BASE_FORMAT
4661 G1PPRL_BYTE_FORMAT
4662 G1PPRL_BYTE_FORMAT
4663 G1PPRL_BYTE_FORMAT
4664 G1PPRL_DOUBLE_FORMAT
4665 G1PPRL_BYTE_FORMAT
4666 G1PPRL_BYTE_FORMAT,
4667 type, bottom, end,
4668 used_bytes, prev_live_bytes, next_live_bytes, gc_eff,
4669 remset_bytes, strong_code_roots_bytes);
4671 return false;
4672 }
4674 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
4675 // add static memory usages to remembered set sizes
4676 _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size();
4677 // Print the footer of the output.
4678 _out->print_cr(G1PPRL_LINE_PREFIX);
4679 _out->print_cr(G1PPRL_LINE_PREFIX
4680 " SUMMARY"
4681 G1PPRL_SUM_MB_FORMAT("capacity")
4682 G1PPRL_SUM_MB_PERC_FORMAT("used")
4683 G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
4684 G1PPRL_SUM_MB_PERC_FORMAT("next-live")
4685 G1PPRL_SUM_MB_FORMAT("remset")
4686 G1PPRL_SUM_MB_FORMAT("code-roots"),
4687 bytes_to_mb(_total_capacity_bytes),
4688 bytes_to_mb(_total_used_bytes),
4689 perc(_total_used_bytes, _total_capacity_bytes),
4690 bytes_to_mb(_total_prev_live_bytes),
4691 perc(_total_prev_live_bytes, _total_capacity_bytes),
4692 bytes_to_mb(_total_next_live_bytes),
4693 perc(_total_next_live_bytes, _total_capacity_bytes),
4694 bytes_to_mb(_total_remset_bytes),
4695 bytes_to_mb(_total_strong_code_roots_bytes));
4696 _out->cr();
4697 }