Mon, 24 Mar 2014 15:30:14 +0100
8035406: Improve data structure for Code Cache remembered sets
Summary: Change the code cache remembered sets data structure from a GrowableArray to a chunked list of nmethods. This makes the data structure more amenable to parallelization and reduces freeing time.
Reviewed-by: mgerdin, brutisso
1 /*
2 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "classfile/symbolTable.hpp"
27 #include "gc_implementation/g1/concurrentMark.inline.hpp"
28 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
29 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
30 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
31 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
32 #include "gc_implementation/g1/g1Log.hpp"
33 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
34 #include "gc_implementation/g1/g1RemSet.hpp"
35 #include "gc_implementation/g1/heapRegion.inline.hpp"
36 #include "gc_implementation/g1/heapRegionRemSet.hpp"
37 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
38 #include "gc_implementation/shared/vmGCOperations.hpp"
39 #include "gc_implementation/shared/gcTimer.hpp"
40 #include "gc_implementation/shared/gcTrace.hpp"
41 #include "gc_implementation/shared/gcTraceTime.hpp"
42 #include "memory/genOopClosures.inline.hpp"
43 #include "memory/referencePolicy.hpp"
44 #include "memory/resourceArea.hpp"
45 #include "oops/oop.inline.hpp"
46 #include "runtime/handles.inline.hpp"
47 #include "runtime/java.hpp"
48 #include "services/memTracker.hpp"
50 // Concurrent marking bit map wrapper
52 CMBitMapRO::CMBitMapRO(int shifter) :
53 _bm(),
54 _shifter(shifter) {
55 _bmStartWord = 0;
56 _bmWordSize = 0;
57 }
59 HeapWord* CMBitMapRO::getNextMarkedWordAddress(HeapWord* addr,
60 HeapWord* limit) const {
61 // First we must round addr *up* to a possible object boundary.
62 addr = (HeapWord*)align_size_up((intptr_t)addr,
63 HeapWordSize << _shifter);
64 size_t addrOffset = heapWordToOffset(addr);
65 if (limit == NULL) {
66 limit = _bmStartWord + _bmWordSize;
67 }
68 size_t limitOffset = heapWordToOffset(limit);
69 size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
70 HeapWord* nextAddr = offsetToHeapWord(nextOffset);
71 assert(nextAddr >= addr, "get_next_one postcondition");
72 assert(nextAddr == limit || isMarked(nextAddr),
73 "get_next_one postcondition");
74 return nextAddr;
75 }
77 HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(HeapWord* addr,
78 HeapWord* limit) const {
79 size_t addrOffset = heapWordToOffset(addr);
80 if (limit == NULL) {
81 limit = _bmStartWord + _bmWordSize;
82 }
83 size_t limitOffset = heapWordToOffset(limit);
84 size_t nextOffset = _bm.get_next_zero_offset(addrOffset, limitOffset);
85 HeapWord* nextAddr = offsetToHeapWord(nextOffset);
86 assert(nextAddr >= addr, "get_next_zero postcondition");
87 assert(nextAddr == limit || !isMarked(nextAddr),
88 "get_next_zero postcondition");
89 return nextAddr;
90 }
92 int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const {
93 assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
94 return (int) (diff >> _shifter);
95 }
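// Illustrative only: the address <-> bit-offset mapping that the accessors
// above rely on (heapWordToOffset() / offsetToHeapWord() are assumed to be
// defined in the header in the usual way):
#if 0
//   offset = (addr - _bmStartWord) >> _shifter;
//   addr   = _bmStartWord + (offset << _shifter);
// With MinObjAlignment of one heap word, _shifter is 0 and each heap
// word maps to exactly one bit in the map.
#endif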
97 #ifndef PRODUCT
98 bool CMBitMapRO::covers(ReservedSpace heap_rs) const {
99 // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
100 assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
101 "size inconsistency");
102 return _bmStartWord == (HeapWord*)(heap_rs.base()) &&
103 _bmWordSize == heap_rs.size()>>LogHeapWordSize;
104 }
105 #endif
107 void CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
108 _bm.print_on_error(st, prefix);
109 }
111 bool CMBitMap::allocate(ReservedSpace heap_rs) {
112 _bmStartWord = (HeapWord*)(heap_rs.base());
113 _bmWordSize = heap_rs.size()/HeapWordSize; // heap_rs.size() is in bytes
114 ReservedSpace brs(ReservedSpace::allocation_align_size_up(
115 (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
116 if (!brs.is_reserved()) {
117 warning("ConcurrentMark marking bit map allocation failure");
118 return false;
119 }
120 MemTracker::record_virtual_memory_type((address)brs.base(), mtGC);
121 // For now we'll just commit all of the bit map up front.
122 // Later on we'll try to be more parsimonious with swap.
123 if (!_virtual_space.initialize(brs, brs.size())) {
124 warning("ConcurrentMark marking bit map backing store failure");
125 return false;
126 }
127 assert(_virtual_space.committed_size() == brs.size(),
128 "didn't reserve backing store for all of concurrent marking bit map?");
129 _bm.set_map((uintptr_t*)_virtual_space.low());
130 assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
131 _bmWordSize, "inconsistency in bit map sizing");
132 _bm.set_size(_bmWordSize >> _shifter);
133 return true;
134 }
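// A worked instance of the sizing arithmetic above (illustrative only):
// for a 1 GB heap with 8-byte heap words and _shifter == 0, one bit is
// needed per heap word:
#if 0
//   _bmWordSize  = 1G / 8 bytes               = 128M heap words
//   bitmap bytes = (128M >> (0 + LogBitsPerByte)) + 1
//                = (128M >> 3) + 1            = 16M + 1 bytes,
//   which ReservedSpace::allocation_align_size_up() then rounds up to the
//   platform allocation granularity.
#endif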
136 void CMBitMap::clearAll() {
137 _bm.clear();
138 return;
139 }
141 void CMBitMap::markRange(MemRegion mr) {
142 mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
143 assert(!mr.is_empty(), "unexpected empty region");
144 assert((offsetToHeapWord(heapWordToOffset(mr.end())) ==
145 ((HeapWord *) mr.end())),
146 "markRange memory region end is not card aligned");
147 // convert address range into offset range
148 _bm.at_put_range(heapWordToOffset(mr.start()),
149 heapWordToOffset(mr.end()), true);
150 }
152 void CMBitMap::clearRange(MemRegion mr) {
153 mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
154 assert(!mr.is_empty(), "unexpected empty region");
155 // convert address range into offset range
156 _bm.at_put_range(heapWordToOffset(mr.start()),
157 heapWordToOffset(mr.end()), false);
158 }
160 MemRegion CMBitMap::getAndClearMarkedRegion(HeapWord* addr,
161 HeapWord* end_addr) {
162 HeapWord* start = getNextMarkedWordAddress(addr);
163 start = MIN2(start, end_addr);
164 HeapWord* end = getNextUnmarkedWordAddress(start);
165 end = MIN2(end, end_addr);
166 assert(start <= end, "Consistency check");
167 MemRegion mr(start, end);
168 if (!mr.is_empty()) {
169 clearRange(mr);
170 }
171 return mr;
172 }
174 CMMarkStack::CMMarkStack(ConcurrentMark* cm) :
175 _base(NULL), _cm(cm)
176 #ifdef ASSERT
177 , _drain_in_progress(false)
178 , _drain_in_progress_yields(false)
179 #endif
180 {}
182 bool CMMarkStack::allocate(size_t capacity) {
183 // allocate a stack of the requisite depth
184 ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop)));
185 if (!rs.is_reserved()) {
186 warning("ConcurrentMark MarkStack allocation failure");
187 return false;
188 }
189 MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
190 if (!_virtual_space.initialize(rs, rs.size())) {
191 warning("ConcurrentMark MarkStack backing store failure");
192 // Release the virtual memory reserved for the marking stack
193 rs.release();
194 return false;
195 }
196 assert(_virtual_space.committed_size() == rs.size(),
197 "Didn't reserve backing store for all of ConcurrentMark stack?");
198 _base = (oop*) _virtual_space.low();
199 setEmpty();
200 _capacity = (jint) capacity;
201 _saved_index = -1;
202 _should_expand = false;
203 NOT_PRODUCT(_max_depth = 0);
204 return true;
205 }
207 void CMMarkStack::expand() {
208 // Called during remark if we've overflowed the marking stack while marking.
209 assert(isEmpty(), "stack should have been emptied while handling overflow");
210 assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted");
211 // Clear expansion flag
212 _should_expand = false;
213 if (_capacity == (jint) MarkStackSizeMax) {
214 if (PrintGCDetails && Verbose) {
215 gclog_or_tty->print_cr(" (benign) Can't expand marking stack capacity, at max size limit");
216 }
217 return;
218 }
219 // Double capacity if possible
220 jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax);
221 // Do not give up the existing stack until we have managed to
222 // allocate the doubled capacity that we want.
223 ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity *
224 sizeof(oop)));
225 if (rs.is_reserved()) {
226 // Release the backing store associated with old stack
227 _virtual_space.release();
228 // Reinitialize virtual space for new stack
229 if (!_virtual_space.initialize(rs, rs.size())) {
230 fatal("Not enough swap for expanded marking stack capacity");
231 }
232 _base = (oop*)(_virtual_space.low());
233 _index = 0;
234 _capacity = new_capacity;
235 } else {
236 if (PrintGCDetails && Verbose) {
237 // Failed to double the capacity; continue with the current stack.
238 gclog_or_tty->print(" (benign) Failed to expand marking stack capacity from "
239 SIZE_FORMAT"K to " SIZE_FORMAT"K",
240 _capacity / K, new_capacity / K);
241 }
242 }
243 }
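// A worked example of the doubling policy above (illustrative only), with
// MarkStackSize = 128K entries and MarkStackSizeMax = 512K entries:
#if 0
//   overflow 1: 128K -> 256K
//   overflow 2: 256K -> 512K
//   overflow 3: _capacity == MarkStackSizeMax, so expand() just returns.
#endif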
245 void CMMarkStack::set_should_expand() {
246 // If we're resetting the marking state because of a
247 // marking stack overflow, record that we should, if
248 // possible, expand the stack.
249 _should_expand = _cm->has_overflown();
250 }
252 CMMarkStack::~CMMarkStack() {
253 if (_base != NULL) {
254 _base = NULL;
255 _virtual_space.release();
256 }
257 }
259 void CMMarkStack::par_push(oop ptr) {
260 while (true) {
261 if (isFull()) {
262 _overflow = true;
263 return;
264 }
265 // Otherwise...
266 jint index = _index;
267 jint next_index = index+1;
268 jint res = Atomic::cmpxchg(next_index, &_index, index);
269 if (res == index) {
270 _base[index] = ptr;
271 // Note that we don't maintain this atomically. We could, but it
272 // doesn't seem necessary.
273 NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
274 return;
275 }
276 // Otherwise, we need to try again.
277 }
278 }
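// Illustrative usage only: workers push overflowed references without a
// lock; the CAS on _index claims a slot and only the winning thread
// writes it. The overflow() accessor is assumed from concurrentMark.hpp.
#if 0
CMMarkStack* stack = ...;  // shared global mark stack
oop obj = ...;             // a grey object that spilled a local queue
stack->par_push(obj);
if (stack->overflow()) {
  // the push was dropped; marking will notice the overflow and
  // restart with a (possibly expanded) stack
}
#endif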
280 void CMMarkStack::par_adjoin_arr(oop* ptr_arr, int n) {
281 while (true) {
282 if (isFull()) {
283 _overflow = true;
284 return;
285 }
286 // Otherwise...
287 jint index = _index;
288 jint next_index = index + n;
289 if (next_index > _capacity) {
290 _overflow = true;
291 return;
292 }
293 jint res = Atomic::cmpxchg(next_index, &_index, index);
294 if (res == index) {
295 for (int i = 0; i < n; i++) {
296 int ind = index + i;
297 assert(ind < _capacity, "By overflow test above.");
298 _base[ind] = ptr_arr[i];
299 }
300 NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
301 return;
302 }
303 // Otherwise, we need to try again.
304 }
305 }
307 void CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
308 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
309 jint start = _index;
310 jint next_index = start + n;
311 if (next_index > _capacity) {
312 _overflow = true;
313 return;
314 }
315 // Otherwise.
316 _index = next_index;
317 for (int i = 0; i < n; i++) {
318 int ind = start + i;
319 assert(ind < _capacity, "By overflow test above.");
320 _base[ind] = ptr_arr[i];
321 }
322 NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
323 }
325 bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
326 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
327 jint index = _index;
328 if (index == 0) {
329 *n = 0;
330 return false;
331 } else {
332 int k = MIN2(max, index);
333 jint new_ind = index - k;
334 for (int j = 0; j < k; j++) {
335 ptr_arr[j] = _base[new_ind + j];
336 }
337 _index = new_ind;
338 *n = k;
339 return true;
340 }
341 }
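// A minimal usage sketch for the bulk operations above (illustrative
// only; local_buf, k, buffer and process() are hypothetical):
#if 0
stack->par_push_arr(local_buf, k);  // spill k entries, serialized on
                                    // ParGCRareEvent_lock
oop buffer[64];
int n;
if (stack->par_pop_arr(buffer, 64, &n)) {
  for (int i = 0; i < n; i++) {
    process(buffer[i]);             // hypothetical per-oop work
  }
}
#endif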
343 template<class OopClosureClass>
344 bool CMMarkStack::drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after) {
345 assert(!_drain_in_progress || !_drain_in_progress_yields || yield_after
346 || SafepointSynchronize::is_at_safepoint(),
347 "Drain recursion must be yield-safe.");
348 bool res = true;
349 debug_only(_drain_in_progress = true);
350 debug_only(_drain_in_progress_yields = yield_after);
351 while (!isEmpty()) {
352 oop newOop = pop();
353 assert(G1CollectedHeap::heap()->is_in_reserved(newOop), "Bad pop");
354 assert(newOop->is_oop(), "Expected an oop");
355 assert(bm == NULL || bm->isMarked((HeapWord*)newOop),
356 "only grey objects on this stack");
357 newOop->oop_iterate(cl);
358 if (yield_after && _cm->do_yield_check()) {
359 res = false;
360 break;
361 }
362 }
363 debug_only(_drain_in_progress = false);
364 return res;
365 }
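// Illustrative usage only: draining the stack with an oop closure. The
// closure type here is hypothetical; any OopClosureClass with do_oop()
// works, and each popped object has its fields iterated with it.
#if 0
SomeOopClosure cl;  // hypothetical closure type
if (!stack->drain(&cl, nextMarkBitMap, true /* yield_after */)) {
  // we yielded to a pending safepoint before emptying the stack
}
#endif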
367 void CMMarkStack::note_start_of_gc() {
368 assert(_saved_index == -1,
369 "note_start_of_gc()/end_of_gc() bracketed incorrectly");
370 _saved_index = _index;
371 }
373 void CMMarkStack::note_end_of_gc() {
374 // This is intentionally a guarantee, instead of an assert. If we
375 // accidentally add something to the mark stack during GC, it
376 // will be a correctness issue, so it's better if we crash. We'll
377 // only check this once per GC anyway, so it won't be a performance
378 // issue in any way.
379 guarantee(_saved_index == _index,
380 err_msg("saved index: %d index: %d", _saved_index, _index));
381 _saved_index = -1;
382 }
384 void CMMarkStack::oops_do(OopClosure* f) {
385 assert(_saved_index == _index,
386 err_msg("saved index: %d index: %d", _saved_index, _index));
387 for (int i = 0; i < _index; i += 1) {
388 f->do_oop(&_base[i]);
389 }
390 }
392 bool ConcurrentMark::not_yet_marked(oop obj) const {
393 return _g1h->is_obj_ill(obj);
394 }
396 CMRootRegions::CMRootRegions() :
397 _young_list(NULL), _cm(NULL), _scan_in_progress(false),
398 _should_abort(false), _next_survivor(NULL) { }
400 void CMRootRegions::init(G1CollectedHeap* g1h, ConcurrentMark* cm) {
401 _young_list = g1h->young_list();
402 _cm = cm;
403 }
405 void CMRootRegions::prepare_for_scan() {
406 assert(!scan_in_progress(), "pre-condition");
408 // Currently, only survivors can be root regions.
409 assert(_next_survivor == NULL, "pre-condition");
410 _next_survivor = _young_list->first_survivor_region();
411 _scan_in_progress = (_next_survivor != NULL);
412 _should_abort = false;
413 }
415 HeapRegion* CMRootRegions::claim_next() {
416 if (_should_abort) {
417 // If someone has set the should_abort flag, we return NULL to
418 // force the caller to bail out of their loop.
419 return NULL;
420 }
422 // Currently, only survivors can be root regions.
423 HeapRegion* res = _next_survivor;
424 if (res != NULL) {
425 MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
426 // Read it again in case it changed while we were waiting for the lock.
427 res = _next_survivor;
428 if (res != NULL) {
429 if (res == _young_list->last_survivor_region()) {
430 // We just claimed the last survivor so store NULL to indicate
431 // that we're done.
432 _next_survivor = NULL;
433 } else {
434 _next_survivor = res->get_next_young_region();
435 }
436 } else {
437 // Someone else claimed the last survivor while we were trying
438 // to take the lock, so there is nothing else to do.
439 }
440 }
441 assert(res == NULL || res->is_survivor(), "post-condition");
443 return res;
444 }
446 void CMRootRegions::scan_finished() {
447 assert(scan_in_progress(), "pre-condition");
449 // Currently, only survivors can be root regions.
450 if (!_should_abort) {
451 assert(_next_survivor == NULL, "we should have claimed all survivors");
452 }
453 _next_survivor = NULL;
455 {
456 MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
457 _scan_in_progress = false;
458 RootRegionScan_lock->notify_all();
459 }
460 }
462 bool CMRootRegions::wait_until_scan_finished() {
463 if (!scan_in_progress()) return false;
465 {
466 MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
467 while (scan_in_progress()) {
468 RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
469 }
470 }
471 return true;
472 }
474 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
475 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
476 #endif // _MSC_VER
478 uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
479 return MAX2((n_par_threads + 2) / 4, 1U);
480 }
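// Worked examples of the scaling above (illustrative only): roughly one
// marking thread per four parallel GC threads, but never fewer than one.
#if 0
//   n_par_threads:  1  2  4  6  8  12  16
//   result:         1  1  1  2  2   3   4
#endif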
482 ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) :
483 _g1h(g1h),
484 _markBitMap1(log2_intptr(MinObjAlignment)),
485 _markBitMap2(log2_intptr(MinObjAlignment)),
486 _parallel_marking_threads(0),
487 _max_parallel_marking_threads(0),
488 _sleep_factor(0.0),
489 _marking_task_overhead(1.0),
490 _cleanup_sleep_factor(0.0),
491 _cleanup_task_overhead(1.0),
492 _cleanup_list("Cleanup List"),
493 _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/),
494 _card_bm((heap_rs.size() + CardTableModRefBS::card_size - 1) >>
495 CardTableModRefBS::card_shift,
496 false /* in_resource_area*/),
498 _prevMarkBitMap(&_markBitMap1),
499 _nextMarkBitMap(&_markBitMap2),
501 _markStack(this),
502 // _finger set in set_non_marking_state
504 _max_worker_id(MAX2((uint)ParallelGCThreads, 1U)),
505 // _active_tasks set in set_non_marking_state
506 // _tasks set inside the constructor
507 _task_queues(new CMTaskQueueSet((int) _max_worker_id)),
508 _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),
510 _has_overflown(false),
511 _concurrent(false),
512 _has_aborted(false),
513 _restart_for_overflow(false),
514 _concurrent_marking_in_progress(false),
516 // _verbose_level set below
518 _init_times(),
519 _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
520 _cleanup_times(),
521 _total_counting_time(0.0),
522 _total_rs_scrub_time(0.0),
524 _parallel_workers(NULL),
526 _count_card_bitmaps(NULL),
527 _count_marked_bytes(NULL),
528 _completed_initialization(false) {
529 CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
530 if (verbose_level < no_verbose) {
531 verbose_level = no_verbose;
532 }
533 if (verbose_level > high_verbose) {
534 verbose_level = high_verbose;
535 }
536 _verbose_level = verbose_level;
538 if (verbose_low()) {
539 gclog_or_tty->print_cr("[global] init, heap start = "PTR_FORMAT", "
540 "heap end = "PTR_FORMAT, _heap_start, _heap_end);
541 }
543 if (!_markBitMap1.allocate(heap_rs)) {
544 warning("Failed to allocate first CM bit map");
545 return;
546 }
547 if (!_markBitMap2.allocate(heap_rs)) {
548 warning("Failed to allocate second CM bit map");
549 return;
550 }
552 // Create & start a ConcurrentMark thread.
553 _cmThread = new ConcurrentMarkThread(this);
554 assert(cmThread() != NULL, "CM Thread should have been created");
555 assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
556 if (_cmThread->osthread() == NULL) {
557 vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
558 }
560 assert(CGC_lock != NULL, "Where's the CGC_lock?");
561 assert(_markBitMap1.covers(heap_rs), "_markBitMap1 inconsistency");
562 assert(_markBitMap2.covers(heap_rs), "_markBitMap2 inconsistency");
564 SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
565 satb_qs.set_buffer_size(G1SATBBufferSize);
567 _root_regions.init(_g1h, this);
569 if (ConcGCThreads > ParallelGCThreads) {
570 warning("Can't have more ConcGCThreads (" UINT32_FORMAT ") "
571 "than ParallelGCThreads (" UINT32_FORMAT ").",
572 ConcGCThreads, ParallelGCThreads);
573 return;
574 }
575 if (ParallelGCThreads == 0) {
576 // if we are not running with any parallel GC threads we will not
577 // spawn any marking threads either
578 _parallel_marking_threads = 0;
579 _max_parallel_marking_threads = 0;
580 _sleep_factor = 0.0;
581 _marking_task_overhead = 1.0;
582 } else {
583 if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) {
584 // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent
585 // if both are set
586 _sleep_factor = 0.0;
587 _marking_task_overhead = 1.0;
588 } else if (G1MarkingOverheadPercent > 0) {
589 // We will calculate the number of parallel marking threads based
590 // on a target overhead with respect to the soft real-time goal
591 double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
592 double overall_cm_overhead =
593 (double) MaxGCPauseMillis * marking_overhead /
594 (double) GCPauseIntervalMillis;
595 double cpu_ratio = 1.0 / (double) os::processor_count();
596 double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
597 double marking_task_overhead =
598 overall_cm_overhead / marking_thread_num *
599 (double) os::processor_count();
600 double sleep_factor =
601 (1.0 - marking_task_overhead) / marking_task_overhead;
603 FLAG_SET_ERGO(uintx, ConcGCThreads, (uint) marking_thread_num);
604 _sleep_factor = sleep_factor;
605 _marking_task_overhead = marking_task_overhead;
606 } else {
607 // Calculate the number of parallel marking threads by scaling
608 // the number of parallel GC threads.
609 uint marking_thread_num = scale_parallel_threads((uint) ParallelGCThreads);
610 FLAG_SET_ERGO(uintx, ConcGCThreads, marking_thread_num);
611 _sleep_factor = 0.0;
612 _marking_task_overhead = 1.0;
613 }
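// A worked instance of the overhead calculation above (illustrative
// only): with G1MarkingOverheadPercent = 10, MaxGCPauseMillis = 200,
// GCPauseIntervalMillis = 1000 and 8 processors:
#if 0
//   marking_overhead      = 10 / 100.0           = 0.10
//   overall_cm_overhead   = 200 * 0.10 / 1000    = 0.02
//   cpu_ratio             = 1.0 / 8              = 0.125
//   marking_thread_num    = ceil(0.02 / 0.125)   = 1
//   marking_task_overhead = 0.02 / 1 * 8         = 0.16
//   sleep_factor          = (1 - 0.16) / 0.16    = 5.25
// i.e. one marking thread that sleeps 5.25 ms for every 1 ms it works.
#endif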
615 assert(ConcGCThreads > 0, "Should have been set");
616 _parallel_marking_threads = (uint) ConcGCThreads;
617 _max_parallel_marking_threads = _parallel_marking_threads;
619 if (parallel_marking_threads() > 1) {
620 _cleanup_task_overhead = 1.0;
621 } else {
622 _cleanup_task_overhead = marking_task_overhead();
623 }
624 _cleanup_sleep_factor =
625 (1.0 - cleanup_task_overhead()) / cleanup_task_overhead();
627 #if 0
628 gclog_or_tty->print_cr("Marking Threads %d", parallel_marking_threads());
629 gclog_or_tty->print_cr("CM Marking Task Overhead %1.4lf", marking_task_overhead());
630 gclog_or_tty->print_cr("CM Sleep Factor %1.4lf", sleep_factor());
631 gclog_or_tty->print_cr("CL Marking Task Overhead %1.4lf", cleanup_task_overhead());
632 gclog_or_tty->print_cr("CL Sleep Factor %1.4lf", cleanup_sleep_factor());
633 #endif
635 guarantee(parallel_marking_threads() > 0, "peace of mind");
636 _parallel_workers = new FlexibleWorkGang("G1 Parallel Marking Threads",
637 _max_parallel_marking_threads, false, true);
638 if (_parallel_workers == NULL) {
639 vm_exit_during_initialization("Failed necessary allocation.");
640 } else {
641 _parallel_workers->initialize_workers();
642 }
643 }
645 if (FLAG_IS_DEFAULT(MarkStackSize)) {
646 uintx mark_stack_size =
647 MIN2(MarkStackSizeMax,
648 MAX2(MarkStackSize, (uintx) (parallel_marking_threads() * TASKQUEUE_SIZE)));
649 // Verify that the calculated value for MarkStackSize is in range.
650 // It would be nice to use the private utility routine from Arguments.
651 if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
652 warning("Invalid value calculated for MarkStackSize (" UINTX_FORMAT "): "
653 "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
654 mark_stack_size, (uintx) 1, MarkStackSizeMax);
655 return;
656 }
657 FLAG_SET_ERGO(uintx, MarkStackSize, mark_stack_size);
658 } else {
659 // Verify MarkStackSize is in range.
660 if (FLAG_IS_CMDLINE(MarkStackSize)) {
661 if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
662 if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
663 warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT "): "
664 "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
665 MarkStackSize, (uintx) 1, MarkStackSizeMax);
666 return;
667 }
668 } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
669 if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
670 warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT ")"
671 " or for MarkStackSizeMax (" UINTX_FORMAT ")",
672 MarkStackSize, MarkStackSizeMax);
673 return;
674 }
675 }
676 }
677 }
679 if (!_markStack.allocate(MarkStackSize)) {
680 warning("Failed to allocate CM marking stack");
681 return;
682 }
684 _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_worker_id, mtGC);
685 _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);
687 _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap, _max_worker_id, mtGC);
688 _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_worker_id, mtGC);
690 BitMap::idx_t card_bm_size = _card_bm.size();
692 // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
693 _active_tasks = _max_worker_id;
695 size_t max_regions = (size_t) _g1h->max_regions();
696 for (uint i = 0; i < _max_worker_id; ++i) {
697 CMTaskQueue* task_queue = new CMTaskQueue();
698 task_queue->initialize();
699 _task_queues->register_queue(i, task_queue);
701 _count_card_bitmaps[i] = BitMap(card_bm_size, false);
702 _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC);
704 _tasks[i] = new CMTask(i, this,
705 _count_marked_bytes[i],
706 &_count_card_bitmaps[i],
707 task_queue, _task_queues);
709 _accum_task_vtime[i] = 0.0;
710 }
712 // Calculate the card number for the bottom of the heap. Used
713 // in biasing indexes into the accounting card bitmaps.
714 _heap_bottom_card_num =
715 intptr_t(uintptr_t(_g1h->reserved_region().start()) >>
716 CardTableModRefBS::card_shift);
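// Illustrative only: how the bias computed above is applied when mapping
// an address to an index in the counting card bitmaps (the real helper
// is assumed to live in concurrentMark.inline.hpp):
#if 0
//   card_num(addr) = uintptr_t(addr) >> CardTableModRefBS::card_shift
//   bitmap index   = card_num(addr) - _heap_bottom_card_num
// so the card covering the bottom of the heap maps to bit 0.
#endif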
718 // Clear all the liveness counting data
719 clear_all_count_data();
721 // so that the call below can read a sensible value
722 _heap_start = (HeapWord*) heap_rs.base();
723 set_non_marking_state();
724 _completed_initialization = true;
725 }
727 void ConcurrentMark::update_g1_committed(bool force) {
728 // If concurrent marking is not in progress, then we do not need to
729 // update _heap_end.
730 if (!concurrent_marking_in_progress() && !force) return;
732 MemRegion committed = _g1h->g1_committed();
733 assert(committed.start() == _heap_start, "start shouldn't change");
734 HeapWord* new_end = committed.end();
735 if (new_end > _heap_end) {
736 // The heap has been expanded.
738 _heap_end = new_end;
739 }
740 // Notice that the heap can also shrink. However, this only happens
741 // during a Full GC (at least currently) and the entire marking
742 // phase will bail out and the task will not be restarted. So, let's
743 // do nothing.
744 }
746 void ConcurrentMark::reset() {
747 // Starting values for these two. This should be called in a STW
748 // phase. CM will be notified of any future g1_committed expansions;
749 // these will happen at the end of evacuation pauses, when tasks are
750 // inactive.
751 MemRegion committed = _g1h->g1_committed();
752 _heap_start = committed.start();
753 _heap_end = committed.end();
755 // Separated the asserts so that we know which one fires.
756 assert(_heap_start != NULL, "heap bounds should look ok");
757 assert(_heap_end != NULL, "heap bounds should look ok");
758 assert(_heap_start < _heap_end, "heap bounds should look ok");
760 // Reset all the marking data structures and any necessary flags
761 reset_marking_state();
763 if (verbose_low()) {
764 gclog_or_tty->print_cr("[global] resetting");
765 }
767 // We do reset all of them, since different phases will use a
768 // different number of active threads. So, it's easiest to have all
769 // of them ready.
770 for (uint i = 0; i < _max_worker_id; ++i) {
771 _tasks[i]->reset(_nextMarkBitMap);
772 }
774 // We need this to make sure that the flag is on during the evacuation
775 // pause that has the initial mark piggy-backed on it.
776 set_concurrent_marking_in_progress();
777 }
780 void ConcurrentMark::reset_marking_state(bool clear_overflow) {
781 _markStack.set_should_expand();
782 _markStack.setEmpty(); // Also clears the _markStack overflow flag
783 if (clear_overflow) {
784 clear_has_overflown();
785 } else {
786 assert(has_overflown(), "pre-condition");
787 }
788 _finger = _heap_start;
790 for (uint i = 0; i < _max_worker_id; ++i) {
791 CMTaskQueue* queue = _task_queues->queue(i);
792 queue->set_empty();
793 }
794 }
796 void ConcurrentMark::set_concurrency(uint active_tasks) {
797 assert(active_tasks <= _max_worker_id, "we should not have more");
799 _active_tasks = active_tasks;
800 // Need to update the three data structures below according to the
801 // number of active threads for this phase.
802 _terminator = ParallelTaskTerminator((int) active_tasks, _task_queues);
803 _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
804 _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
805 }
807 void ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
808 set_concurrency(active_tasks);
810 _concurrent = concurrent;
811 // We propagate this to all tasks, not just the active ones.
812 for (uint i = 0; i < _max_worker_id; ++i)
813 _tasks[i]->set_concurrent(concurrent);
815 if (concurrent) {
816 set_concurrent_marking_in_progress();
817 } else {
818 // We currently assume that the concurrent flag has been set to
819 // false before we start remark. At this point we should also be
820 // in a STW phase.
821 assert(!concurrent_marking_in_progress(), "invariant");
822 assert(_finger == _heap_end,
823 err_msg("only way to get here: _finger: "PTR_FORMAT", _heap_end: "PTR_FORMAT,
824 _finger, _heap_end));
825 update_g1_committed(true);
826 }
827 }
829 void ConcurrentMark::set_non_marking_state() {
830 // We set the global marking state to some default values when we're
831 // not doing marking.
832 reset_marking_state();
833 _active_tasks = 0;
834 clear_concurrent_marking_in_progress();
835 }
837 ConcurrentMark::~ConcurrentMark() {
838 // The ConcurrentMark instance is never freed.
839 ShouldNotReachHere();
840 }
842 void ConcurrentMark::clearNextBitmap() {
843 G1CollectedHeap* g1h = G1CollectedHeap::heap();
844 G1CollectorPolicy* g1p = g1h->g1_policy();
846 // Make sure that the concurrent mark thread still appears to be in
847 // the current cycle.
848 guarantee(cmThread()->during_cycle(), "invariant");
850 // We are finishing up the current cycle by clearing the next
851 // marking bitmap and getting it ready for the next cycle. During
852 // this time no other cycle can start. So, let's make sure that this
853 // is the case.
854 guarantee(!g1h->mark_in_progress(), "invariant");
856 // clear the mark bitmap (no grey objects to start with).
857 // We need to do this in chunks and offer to yield in between
858 // each chunk.
859 HeapWord* start = _nextMarkBitMap->startWord();
860 HeapWord* end = _nextMarkBitMap->endWord();
861 HeapWord* cur = start;
862 size_t chunkSize = M;
863 while (cur < end) {
864 HeapWord* next = cur + chunkSize;
865 if (next > end) {
866 next = end;
867 }
868 MemRegion mr(cur,next);
869 _nextMarkBitMap->clearRange(mr);
870 cur = next;
871 do_yield_check();
873 // Repeat the asserts from above. We'll do them as asserts here to
874 // minimize their overhead on the product. However, we'll have
875 // them as guarantees at the beginning / end of the bitmap
876 // clearing to get some checking in the product.
877 assert(cmThread()->during_cycle(), "invariant");
878 assert(!g1h->mark_in_progress(), "invariant");
879 }
881 // Clear the liveness counting data
882 clear_all_count_data();
884 // Repeat the asserts from above.
885 guarantee(cmThread()->during_cycle(), "invariant");
886 guarantee(!g1h->mark_in_progress(), "invariant");
887 }
889 class NoteStartOfMarkHRClosure: public HeapRegionClosure {
890 public:
891 bool doHeapRegion(HeapRegion* r) {
892 if (!r->continuesHumongous()) {
893 r->note_start_of_marking();
894 }
895 return false;
896 }
897 };
899 void ConcurrentMark::checkpointRootsInitialPre() {
900 G1CollectedHeap* g1h = G1CollectedHeap::heap();
901 G1CollectorPolicy* g1p = g1h->g1_policy();
903 _has_aborted = false;
905 #ifndef PRODUCT
906 if (G1PrintReachableAtInitialMark) {
907 print_reachable("at-cycle-start",
908 VerifyOption_G1UsePrevMarking, true /* all */);
909 }
910 #endif
912 // Initialise marking structures. This has to be done in a STW phase.
913 reset();
915 // For each region note start of marking.
916 NoteStartOfMarkHRClosure startcl;
917 g1h->heap_region_iterate(&startcl);
918 }
921 void ConcurrentMark::checkpointRootsInitialPost() {
922 G1CollectedHeap* g1h = G1CollectedHeap::heap();
924 // If we force an overflow during remark, the remark operation will
925 // actually abort and we'll restart concurrent marking. If we always
926 // force an overflow during remark we'll never actually complete the
927 // marking phase. So, we initialize this here, at the start of the
928 // cycle, so that the remaining overflow count will decrease at
929 // every remark and we'll eventually not need to cause one.
930 force_overflow_stw()->init();
932 // Start Concurrent Marking weak-reference discovery.
933 ReferenceProcessor* rp = g1h->ref_processor_cm();
934 // enable ("weak") refs discovery
935 rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
936 rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
938 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
939 // This is the start of the marking cycle; we expect all
940 // threads to have SATB queues with active set to false.
941 satb_mq_set.set_active_all_threads(true, /* new active value */
942 false /* expected_active */);
944 _root_regions.prepare_for_scan();
946 // update_g1_committed() will be called at the end of an evac pause
947 // when marking is on. So, it's also called at the end of the
948 // initial-mark pause to update the heap end, if the heap expands
949 // during it. No need to call it here.
950 }
952 /*
953 * Notice that in the next two methods, we actually leave the STS
954 * during the barrier sync and join it immediately afterwards. If we
955 * do not do this, the following deadlock can occur: one thread could
956 * be in the barrier sync code, waiting for the other thread to also
957 * sync up, whereas another one could be trying to yield, while also
958 * waiting for the other threads to sync up too.
959 *
960 * Note, however, that this code is also used during remark and in
961 * this case we should not attempt to leave / enter the STS, otherwise
962 * we'll either hit an assert (debug / fastdebug) or deadlock
963 * (product). So we should only leave / enter the STS if we are
964 * operating concurrently.
965 *
966 * Because the thread that does the sync barrier has left the STS, it
967 * is possible for it to be suspended for a Full GC or for an
968 * evacuation pause to occur. This is actually safe, since entering the
969 * sync barrier is one of the last things do_marking_step() does, and it
970 * doesn't manipulate any data structures afterwards.
971 */
973 void ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
974 if (verbose_low()) {
975 gclog_or_tty->print_cr("[%u] entering first barrier", worker_id);
976 }
978 if (concurrent()) {
979 ConcurrentGCThread::stsLeave();
980 }
981 _first_overflow_barrier_sync.enter();
982 if (concurrent()) {
983 ConcurrentGCThread::stsJoin();
984 }
985 // at this point everyone should have synced up and not be doing any
986 // more work
988 if (verbose_low()) {
989 gclog_or_tty->print_cr("[%u] leaving first barrier", worker_id);
990 }
992 // If we're executing the concurrent phase of marking, reset the marking
993 // state; otherwise the marking state is reset after reference processing,
994 // during the remark pause.
995 // If we reset here as a result of an overflow during the remark we will
996 // see assertion failures from any subsequent set_concurrency_and_phase()
997 // calls.
998 if (concurrent()) {
999 // let the task associated with worker 0 do this
1000 if (worker_id == 0) {
1001 // task 0 is responsible for clearing the global data structures
1002 // We should be here because of an overflow. During STW we should
1003 // not clear the overflow flag since we rely on it being true when
1004 // we exit this method to abort the pause and restart concurrent
1005 // marking.
1006 reset_marking_state(true /* clear_overflow */);
1007 force_overflow()->update();
1009 if (G1Log::fine()) {
1010 gclog_or_tty->date_stamp(PrintGCDateStamps);
1011 gclog_or_tty->stamp(PrintGCTimeStamps);
1012 gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
1013 }
1014 }
1015 }
1017 // after this, each task should reset its own data structures and
1018 // then go into the second barrier
1019 }
1021 void ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
1022 if (verbose_low()) {
1023 gclog_or_tty->print_cr("[%u] entering second barrier", worker_id);
1024 }
1026 if (concurrent()) {
1027 ConcurrentGCThread::stsLeave();
1028 }
1029 _second_overflow_barrier_sync.enter();
1030 if (concurrent()) {
1031 ConcurrentGCThread::stsJoin();
1032 }
1033 // at this point everything should be re-initialized and ready to go
1035 if (verbose_low()) {
1036 gclog_or_tty->print_cr("[%u] leaving second barrier", worker_id);
1037 }
1038 }
1040 #ifndef PRODUCT
1041 void ForceOverflowSettings::init() {
1042 _num_remaining = G1ConcMarkForceOverflow;
1043 _force = false;
1044 update();
1045 }
1047 void ForceOverflowSettings::update() {
1048 if (_num_remaining > 0) {
1049 _num_remaining -= 1;
1050 _force = true;
1051 } else {
1052 _force = false;
1053 }
1054 }
1056 bool ForceOverflowSettings::should_force() {
1057 if (_force) {
1058 _force = false;
1059 return true;
1060 } else {
1061 return false;
1062 }
1063 }
1064 #endif // !PRODUCT
1066 class CMConcurrentMarkingTask: public AbstractGangTask {
1067 private:
1068 ConcurrentMark* _cm;
1069 ConcurrentMarkThread* _cmt;
1071 public:
1072 void work(uint worker_id) {
1073 assert(Thread::current()->is_ConcurrentGC_thread(),
1074 "this should only be done by a conc GC thread");
1075 ResourceMark rm;
1077 double start_vtime = os::elapsedVTime();
1079 ConcurrentGCThread::stsJoin();
1081 assert(worker_id < _cm->active_tasks(), "invariant");
1082 CMTask* the_task = _cm->task(worker_id);
1083 the_task->record_start_time();
1084 if (!_cm->has_aborted()) {
1085 do {
1086 double start_vtime_sec = os::elapsedVTime();
1087 double start_time_sec = os::elapsedTime();
1088 double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
1090 the_task->do_marking_step(mark_step_duration_ms,
1091 true /* do_termination */,
1092 false /* is_serial*/);
1094 double end_time_sec = os::elapsedTime();
1095 double end_vtime_sec = os::elapsedVTime();
1096 double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
1097 double elapsed_time_sec = end_time_sec - start_time_sec;
1098 _cm->clear_has_overflown();
1100 bool ret = _cm->do_yield_check(worker_id);
1102 jlong sleep_time_ms;
1103 if (!_cm->has_aborted() && the_task->has_aborted()) {
1104 sleep_time_ms =
1105 (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
1106 ConcurrentGCThread::stsLeave();
1107 os::sleep(Thread::current(), sleep_time_ms, false);
1108 ConcurrentGCThread::stsJoin();
1109 }
1110 double end_time2_sec = os::elapsedTime();
1111 double elapsed_time2_sec = end_time2_sec - start_time_sec;
1113 #if 0
1114 gclog_or_tty->print_cr("CM: elapsed %1.4lf ms, sleep %1.4lf ms, "
1115 "overhead %1.4lf",
1116 elapsed_vtime_sec * 1000.0, (double) sleep_time_ms,
1117 the_task->conc_overhead(os::elapsedTime()) * 8.0);
1118 gclog_or_tty->print_cr("elapsed time %1.4lf ms, time 2: %1.4lf ms",
1119 elapsed_time_sec * 1000.0, elapsed_time2_sec * 1000.0);
1120 #endif
1121 } while (!_cm->has_aborted() && the_task->has_aborted());
1122 }
1123 the_task->record_end_time();
1124 guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");
1126 ConcurrentGCThread::stsLeave();
1128 double end_vtime = os::elapsedVTime();
1129 _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
1130 }
1132 CMConcurrentMarkingTask(ConcurrentMark* cm,
1133 ConcurrentMarkThread* cmt) :
1134 AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }
1136 ~CMConcurrentMarkingTask() { }
1137 };
1139 // Calculates the number of active workers for a concurrent
1140 // phase.
1141 uint ConcurrentMark::calc_parallel_marking_threads() {
1142 if (G1CollectedHeap::use_parallel_gc_threads()) {
1143 uint n_conc_workers = 0;
1144 if (!UseDynamicNumberOfGCThreads ||
1145 (!FLAG_IS_DEFAULT(ConcGCThreads) &&
1146 !ForceDynamicNumberOfGCThreads)) {
1147 n_conc_workers = max_parallel_marking_threads();
1148 } else {
1149 n_conc_workers =
1150 AdaptiveSizePolicy::calc_default_active_workers(
1151 max_parallel_marking_threads(),
1152 1, /* Minimum workers */
1153 parallel_marking_threads(),
1154 Threads::number_of_non_daemon_threads());
1155 // Don't scale down "n_conc_workers" by scale_parallel_threads() because
1156 // that scaling has already gone into "_max_parallel_marking_threads".
1157 }
1158 assert(n_conc_workers > 0, "Always need at least 1");
1159 return n_conc_workers;
1160 }
1161 // If we are not running with any parallel GC threads we will not
1162 // have spawned any marking threads either. Hence the number of
1163 // concurrent workers should be 0.
1164 return 0;
1165 }
1167 void ConcurrentMark::scanRootRegion(HeapRegion* hr, uint worker_id) {
1168 // Currently, only survivors can be root regions.
1169 assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
1170 G1RootRegionScanClosure cl(_g1h, this, worker_id);
1172 const uintx interval = PrefetchScanIntervalInBytes;
1173 HeapWord* curr = hr->bottom();
1174 const HeapWord* end = hr->top();
1175 while (curr < end) {
1176 Prefetch::read(curr, interval);
1177 oop obj = oop(curr);
1178 int size = obj->oop_iterate(&cl);
1179 assert(size == obj->size(), "sanity");
1180 curr += size;
1181 }
1182 }
1184 class CMRootRegionScanTask : public AbstractGangTask {
1185 private:
1186 ConcurrentMark* _cm;
1188 public:
1189 CMRootRegionScanTask(ConcurrentMark* cm) :
1190 AbstractGangTask("Root Region Scan"), _cm(cm) { }
1192 void work(uint worker_id) {
1193 assert(Thread::current()->is_ConcurrentGC_thread(),
1194 "this should only be done by a conc GC thread");
1196 CMRootRegions* root_regions = _cm->root_regions();
1197 HeapRegion* hr = root_regions->claim_next();
1198 while (hr != NULL) {
1199 _cm->scanRootRegion(hr, worker_id);
1200 hr = root_regions->claim_next();
1201 }
1202 }
1203 };
1205 void ConcurrentMark::scanRootRegions() {
1206 // scan_in_progress() will have been set to true only if there was
1207 // at least one root region to scan. So, if it's false, we
1208 // should not attempt to do any further work.
1209 if (root_regions()->scan_in_progress()) {
1210 _parallel_marking_threads = calc_parallel_marking_threads();
1211 assert(parallel_marking_threads() <= max_parallel_marking_threads(),
1212 "Maximum number of marking threads exceeded");
1213 uint active_workers = MAX2(1U, parallel_marking_threads());
1215 CMRootRegionScanTask task(this);
1216 if (use_parallel_marking_threads()) {
1217 _parallel_workers->set_active_workers((int) active_workers);
1218 _parallel_workers->run_task(&task);
1219 } else {
1220 task.work(0);
1221 }
1223 // It's possible that has_aborted() is true here without actually
1224 // aborting the survivor scan earlier. This is OK as it's
1225 // mainly used for sanity checking.
1226 root_regions()->scan_finished();
1227 }
1228 }
1230 void ConcurrentMark::markFromRoots() {
1231 // we might be tempted to assert that:
1232 // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
1233 // "inconsistent argument?");
1234 // However that wouldn't be right, because it's possible that
1235 // a safepoint is indeed in progress as a younger generation
1236 // stop-the-world GC happens even as we mark in this generation.
1238 _restart_for_overflow = false;
1239 force_overflow_conc()->init();
1241 // _g1h has _n_par_threads
1242 _parallel_marking_threads = calc_parallel_marking_threads();
1243 assert(parallel_marking_threads() <= max_parallel_marking_threads(),
1244 "Maximum number of marking threads exceeded");
1246 uint active_workers = MAX2(1U, parallel_marking_threads());
1248 // Parallel task terminator is set in "set_concurrency_and_phase()"
1249 set_concurrency_and_phase(active_workers, true /* concurrent */);
1251 CMConcurrentMarkingTask markingTask(this, cmThread());
1252 if (use_parallel_marking_threads()) {
1253 _parallel_workers->set_active_workers((int)active_workers);
1254 // Don't set _n_par_threads because it affects MT in process_strong_roots()
1255 // and the decisions on that MT processing are made elsewhere.
1256 assert(_parallel_workers->active_workers() > 0, "Should have been set");
1257 _parallel_workers->run_task(&markingTask);
1258 } else {
1259 markingTask.work(0);
1260 }
1261 print_stats();
1262 }
1264 void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
1265 // world is stopped at this checkpoint
1266 assert(SafepointSynchronize::is_at_safepoint(),
1267 "world should be stopped");
1269 G1CollectedHeap* g1h = G1CollectedHeap::heap();
1271 // If a full collection has happened, we shouldn't do this.
1272 if (has_aborted()) {
1273 g1h->set_marking_complete(); // So bitmap clearing isn't confused
1274 return;
1275 }
1277 SvcGCMarker sgcm(SvcGCMarker::OTHER);
1279 if (VerifyDuringGC) {
1280 HandleMark hm; // handle scope
1281 Universe::heap()->prepare_for_verify();
1282 Universe::verify(VerifyOption_G1UsePrevMarking,
1283 " VerifyDuringGC:(before)");
1284 }
1286 G1CollectorPolicy* g1p = g1h->g1_policy();
1287 g1p->record_concurrent_mark_remark_start();
1289 double start = os::elapsedTime();
1291 checkpointRootsFinalWork();
1293 double mark_work_end = os::elapsedTime();
1295 weakRefsWork(clear_all_soft_refs);
1297 if (has_overflown()) {
1298 // Oops. We overflowed. Restart concurrent marking.
1299 _restart_for_overflow = true;
1300 if (G1TraceMarkStackOverflow) {
1301 gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
1302 }
1304 // Verify the heap w.r.t. the previous marking bitmap.
1305 if (VerifyDuringGC) {
1306 HandleMark hm; // handle scope
1307 Universe::heap()->prepare_for_verify();
1308 Universe::verify(VerifyOption_G1UsePrevMarking,
1309 " VerifyDuringGC:(overflow)");
1310 }
1312 // Clear the marking state because we will be restarting
1313 // marking due to overflowing the global mark stack.
1314 reset_marking_state();
1315 } else {
1316 // Aggregate the per-task counting data that we have accumulated
1317 // while marking.
1318 aggregate_count_data();
1320 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
1321 // We're done with marking.
1322 // This is the end of the marking cycle; we expect all
1323 // threads to have SATB queues with active set to true.
1324 satb_mq_set.set_active_all_threads(false, /* new active value */
1325 true /* expected_active */);
1327 if (VerifyDuringGC) {
1328 HandleMark hm; // handle scope
1329 Universe::heap()->prepare_for_verify();
1330 Universe::verify(VerifyOption_G1UseNextMarking,
1331 " VerifyDuringGC:(after)");
1332 }
1333 assert(!restart_for_overflow(), "sanity");
1334 // Completely reset the marking state since marking completed
1335 set_non_marking_state();
1336 }
1338 // Expand the marking stack, if we have to and if we can.
1339 if (_markStack.should_expand()) {
1340 _markStack.expand();
1341 }
1343 // Statistics
1344 double now = os::elapsedTime();
1345 _remark_mark_times.add((mark_work_end - start) * 1000.0);
1346 _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
1347 _remark_times.add((now - start) * 1000.0);
1349 g1p->record_concurrent_mark_remark_end();
1351 G1CMIsAliveClosure is_alive(g1h);
1352 g1h->gc_tracer_cm()->report_object_count_after_gc(&is_alive);
1353 }
1355 // Base class of the closures that finalize and verify the
1356 // liveness counting data.
1357 class CMCountDataClosureBase: public HeapRegionClosure {
1358 protected:
1359 G1CollectedHeap* _g1h;
1360 ConcurrentMark* _cm;
1361 CardTableModRefBS* _ct_bs;
1363 BitMap* _region_bm;
1364 BitMap* _card_bm;
1366 // Takes a region that's not empty (i.e., it has at least one
1367 // live object in it) and sets its corresponding bit on the region
1368 // bitmap to 1. If the region is "starts humongous" it will also set
1369 // to 1 the bits on the region bitmap that correspond to its
1370 // associated "continues humongous" regions.
1371 void set_bit_for_region(HeapRegion* hr) {
1372 assert(!hr->continuesHumongous(), "should have filtered those out");
1374 BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();
1375 if (!hr->startsHumongous()) {
1376 // Normal (non-humongous) case: just set the bit.
1377 _region_bm->par_at_put(index, true);
1378 } else {
1379 // Starts humongous case: calculate how many regions are part of
1380 // this humongous region and then set the bit range.
1381 BitMap::idx_t end_index = (BitMap::idx_t) hr->last_hc_index();
1382 _region_bm->par_at_put_range(index, end_index, true);
1383 }
1384 }
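// Illustrative only, assuming last_hc_index() returns one past the index
// of the last "continues humongous" region: a humongous object spanning
// regions 10..12 has index == 10 and end_index == 13, so the bit range
// [10, 13) is set; a normal region just sets its own bit.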
1386 public:
1387 CMCountDataClosureBase(G1CollectedHeap* g1h,
1388 BitMap* region_bm, BitMap* card_bm):
1389 _g1h(g1h), _cm(g1h->concurrent_mark()),
1390 _ct_bs((CardTableModRefBS*) (g1h->barrier_set())),
1391 _region_bm(region_bm), _card_bm(card_bm) { }
1392 };
1394 // Closure that calculates the # live objects per region. Used
1395 // for verification purposes during the cleanup pause.
1396 class CalcLiveObjectsClosure: public CMCountDataClosureBase {
1397 CMBitMapRO* _bm;
1398 size_t _region_marked_bytes;
1400 public:
1401 CalcLiveObjectsClosure(CMBitMapRO *bm, G1CollectedHeap* g1h,
1402 BitMap* region_bm, BitMap* card_bm) :
1403 CMCountDataClosureBase(g1h, region_bm, card_bm),
1404 _bm(bm), _region_marked_bytes(0) { }
1406 bool doHeapRegion(HeapRegion* hr) {
1408 if (hr->continuesHumongous()) {
1409 // We will ignore these here and process them when their
1410 // associated "starts humongous" region is processed (see
1411 // set_bit_for_region()). Note that we cannot rely on their
1412 // associated "starts humongous" region to have its bit set to
1413 // 1 since, due to the region chunking in the parallel region
1414 // iteration, a "continues humongous" region might be visited
1415 // before its associated "starts humongous".
1416 return false;
1417 }
1419 HeapWord* ntams = hr->next_top_at_mark_start();
1420 HeapWord* start = hr->bottom();
1422 assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
1423 err_msg("Preconditions not met - "
1424 "start: "PTR_FORMAT", ntams: "PTR_FORMAT", end: "PTR_FORMAT,
1425 start, ntams, hr->end()));
1427 // Find the first marked object at or after "start".
1428 start = _bm->getNextMarkedWordAddress(start, ntams);
1430 size_t marked_bytes = 0;
1432 while (start < ntams) {
1433 oop obj = oop(start);
1434 int obj_sz = obj->size();
1435 HeapWord* obj_end = start + obj_sz;
1437 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
1438 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end);
1440 // Note: if we're looking at the last region in heap - obj_end
1441 // could be actually just beyond the end of the heap; end_idx
1442 // will then correspond to a (non-existent) card that is also
1443 // just beyond the heap.
1444 if (_g1h->is_in_g1_reserved(obj_end) && !_ct_bs->is_card_aligned(obj_end)) {
1445 // end of object is not card aligned - increment to cover
1446 // all the cards spanned by the object
1447 end_idx += 1;
1448 }
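// Worked example (illustrative only), assuming 512-byte cards: an
// object spanning bytes [1000, 2000) has start_idx = 1000/512 = 1 and
// end_idx = 2000/512 = 3; 2000 is not card aligned, so end_idx becomes
// 4 and the half-open range [1, 4) covers cards 1, 2 and 3.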
1450 // Set the bits in the card BM for the cards spanned by this object.
1451 _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1453 // Add the size of this object to the number of marked bytes.
1454 marked_bytes += (size_t)obj_sz * HeapWordSize;
1456 // Find the next marked object after this one.
1457 start = _bm->getNextMarkedWordAddress(obj_end, ntams);
1458 }
1460 // Mark the allocated-since-marking portion...
1461 HeapWord* top = hr->top();
1462 if (ntams < top) {
1463 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
1464 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
1466 // Note: if we're looking at the last region in heap - top
1467 // could be actually just beyond the end of the heap; end_idx
1468 // will then correspond to a (non-existent) card that is also
1469 // just beyond the heap.
1470 if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
1471 // end of object is not card aligned - increment to cover
1472 // all the cards spanned by the object
1473 end_idx += 1;
1474 }
1475 _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1477 // This definitely means the region has live objects.
1478 set_bit_for_region(hr);
1479 }
1481 // Update the live region bitmap.
1482 if (marked_bytes > 0) {
1483 set_bit_for_region(hr);
1484 }
1486 // Set the marked bytes for the current region so that
1487 // it can be queried by a calling verificiation routine
1488 _region_marked_bytes = marked_bytes;
1490 return false;
1491 }
1493 size_t region_marked_bytes() const { return _region_marked_bytes; }
1494 };
1496 // Heap region closure used for verifying the counting data
1497 // that was accumulated concurrently and aggregated during
1498 // the remark pause. This closure is applied to the heap
1499 // regions during the STW cleanup pause.
1501 class VerifyLiveObjectDataHRClosure: public HeapRegionClosure {
1502 G1CollectedHeap* _g1h;
1503 ConcurrentMark* _cm;
1504 CalcLiveObjectsClosure _calc_cl;
1505 BitMap* _region_bm; // Region BM to be verified
1506 BitMap* _card_bm; // Card BM to be verified
1507 bool _verbose; // verbose output?
1509 BitMap* _exp_region_bm; // Expected Region BM values
1510 BitMap* _exp_card_bm; // Expected card BM values
1512 int _failures;
1514 public:
1515 VerifyLiveObjectDataHRClosure(G1CollectedHeap* g1h,
1516 BitMap* region_bm,
1517 BitMap* card_bm,
1518 BitMap* exp_region_bm,
1519 BitMap* exp_card_bm,
1520 bool verbose) :
1521 _g1h(g1h), _cm(g1h->concurrent_mark()),
1522 _calc_cl(_cm->nextMarkBitMap(), g1h, exp_region_bm, exp_card_bm),
1523 _region_bm(region_bm), _card_bm(card_bm), _verbose(verbose),
1524 _exp_region_bm(exp_region_bm), _exp_card_bm(exp_card_bm),
1525 _failures(0) { }
1527 int failures() const { return _failures; }
1529 bool doHeapRegion(HeapRegion* hr) {
1530 if (hr->continuesHumongous()) {
1531 // We will ignore these here and process them when their
1532 // associated "starts humongous" region is processed (see
1533 // set_bit_for_region()). Note that we cannot rely on their
1534 // associated "starts humongous" region to have its bit set to
1535 // 1 since, due to the region chunking in the parallel region
1536 // iteration, a "continues humongous" region might be visited
1537 // before its associated "starts humongous".
1538 return false;
1539 }
1541 int failures = 0;
1543 // Call the CalcLiveObjectsClosure to walk the marking bitmap for
1544 // this region and set the corresponding bits in the expected region
1545 // and card bitmaps.
1546 bool res = _calc_cl.doHeapRegion(hr);
1547 assert(res == false, "should be continuing");
1549 MutexLockerEx x((_verbose ? ParGCRareEvent_lock : NULL),
1550 Mutex::_no_safepoint_check_flag);
1552 // Verify the marked bytes for this region.
1553 size_t exp_marked_bytes = _calc_cl.region_marked_bytes();
1554 size_t act_marked_bytes = hr->next_marked_bytes();
1556 // We're not OK if expected marked bytes > actual marked bytes. It means
1557 // we have missed accounting for some objects during the actual marking.
1558 if (exp_marked_bytes > act_marked_bytes) {
1559 if (_verbose) {
1560 gclog_or_tty->print_cr("Region %u: marked bytes mismatch: "
1561 "expected: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
1562 hr->hrs_index(), exp_marked_bytes, act_marked_bytes);
1563 }
1564 failures += 1;
1565 }
1567 // Verify the bit, for this region, in the actual and expected
1568 // (which was just calculated) region bit maps.
1569 // We're not OK if the bit in the calculated expected region
1570 // bitmap is set and the bit in the actual region bitmap is not.
1571 BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();
1573 bool expected = _exp_region_bm->at(index);
1574 bool actual = _region_bm->at(index);
1575 if (expected && !actual) {
1576 if (_verbose) {
1577 gclog_or_tty->print_cr("Region %u: region bitmap mismatch: "
1578 "expected: %s, actual: %s",
1579 hr->hrs_index(),
1580 BOOL_TO_STR(expected), BOOL_TO_STR(actual));
1581 }
1582 failures += 1;
1583 }
1585 // Verify that the card bit maps for the cards spanned by the current
1586 // region match. We have an error if we have a set bit in the expected
1587 // bit map and the corresponding bit in the actual bitmap is not set.
1589 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(hr->bottom());
1590 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(hr->top());
1592 for (BitMap::idx_t i = start_idx; i < end_idx; i+=1) {
1593 expected = _exp_card_bm->at(i);
1594 actual = _card_bm->at(i);
1596 if (expected && !actual) {
1597 if (_verbose) {
1598 gclog_or_tty->print_cr("Region %u: card bitmap mismatch at " SIZE_FORMAT ": "
1599 "expected: %s, actual: %s",
1600 hr->hrs_index(), i,
1601 BOOL_TO_STR(expected), BOOL_TO_STR(actual));
1602 }
1603 failures += 1;
1604 }
1605 }
1607 if (failures > 0 && _verbose) {
1608 gclog_or_tty->print_cr("Region " HR_FORMAT ", ntams: " PTR_FORMAT ", "
1609 "marked_bytes: calc/actual " SIZE_FORMAT "/" SIZE_FORMAT,
1610 HR_FORMAT_PARAMS(hr), hr->next_top_at_mark_start(),
1611 _calc_cl.region_marked_bytes(), hr->next_marked_bytes());
1612 }
1614 _failures += failures;
1616 // We could stop iteration over the heap when we
1617 // find the first violating region by returning true.
1618 return false;
1619 }
1620 };
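// The check above is deliberately one-directional: a bit set in the
// recomputed ("expected") bitmap must also be set in the actual bitmap,
// but extra bits in the actual bitmap are tolerated. A minimal standalone
// sketch of that rule, using std::vector<bool> in place of HotSpot's
// BitMap (all names here are illustrative, not HotSpot APIs):

#include <cstdio>
#include <vector>

static int count_bitmap_mismatches(const std::vector<bool>& expected,
                                   const std::vector<bool>& actual) {
  int failures = 0;
  for (size_t i = 0; i < expected.size() && i < actual.size(); ++i) {
    if (expected[i] && !actual[i]) {
      // Expected liveness information is missing from the actual data.
      std::printf("bit %zu: expected set, actual clear\n", i);
      failures += 1;
    }
  }
  return failures;
}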
1622 class G1ParVerifyFinalCountTask: public AbstractGangTask {
1623 protected:
1624 G1CollectedHeap* _g1h;
1625 ConcurrentMark* _cm;
1626 BitMap* _actual_region_bm;
1627 BitMap* _actual_card_bm;
1629 uint _n_workers;
1631 BitMap* _expected_region_bm;
1632 BitMap* _expected_card_bm;
1634 int _failures;
1635 bool _verbose;
1637 public:
1638 G1ParVerifyFinalCountTask(G1CollectedHeap* g1h,
1639 BitMap* region_bm, BitMap* card_bm,
1640 BitMap* expected_region_bm, BitMap* expected_card_bm)
1641 : AbstractGangTask("G1 verify final counting"),
1642 _g1h(g1h), _cm(_g1h->concurrent_mark()),
1643 _actual_region_bm(region_bm), _actual_card_bm(card_bm),
1644 _expected_region_bm(expected_region_bm), _expected_card_bm(expected_card_bm),
1645 _failures(0), _verbose(false),
1646 _n_workers(0) {
1647 assert(VerifyDuringGC, "don't call this otherwise");
1649 // Use the value already set as the number of active threads
1650 // in the call to run_task().
1651 if (G1CollectedHeap::use_parallel_gc_threads()) {
1652 assert( _g1h->workers()->active_workers() > 0,
1653 "Should have been previously set");
1654 _n_workers = _g1h->workers()->active_workers();
1655 } else {
1656 _n_workers = 1;
1657 }
1659 assert(_expected_card_bm->size() == _actual_card_bm->size(), "sanity");
1660 assert(_expected_region_bm->size() == _actual_region_bm->size(), "sanity");
1662 _verbose = _cm->verbose_medium();
1663 }
1665 void work(uint worker_id) {
1666 assert(worker_id < _n_workers, "invariant");
1668 VerifyLiveObjectDataHRClosure verify_cl(_g1h,
1669 _actual_region_bm, _actual_card_bm,
1670 _expected_region_bm,
1671 _expected_card_bm,
1672 _verbose);
1674 if (G1CollectedHeap::use_parallel_gc_threads()) {
1675 _g1h->heap_region_par_iterate_chunked(&verify_cl,
1676 worker_id,
1677 _n_workers,
1678 HeapRegion::VerifyCountClaimValue);
1679 } else {
1680 _g1h->heap_region_iterate(&verify_cl);
1681 }
1683 Atomic::add(verify_cl.failures(), &_failures);
1684 }
1686 int failures() const { return _failures; }
1687 };
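// Each worker above tallies failures privately and publishes the total
// exactly once via Atomic::add, avoiding per-check contention. A minimal
// standalone sketch of the same pattern with std::atomic and std::thread
// (the worker body is a placeholder, not the real verification):

#include <atomic>
#include <thread>
#include <vector>

static std::atomic<int> g_failures(0);

static void verify_worker(int local_failures) {
  // ... per-region verification would accumulate into local_failures ...
  g_failures.fetch_add(local_failures, std::memory_order_relaxed);
}

static int run_verification(unsigned n_workers) {
  std::vector<std::thread> gang;
  for (unsigned i = 0; i < n_workers; ++i) {
    gang.push_back(std::thread(verify_worker, 0));
  }
  for (size_t i = 0; i < gang.size(); ++i) {
    gang[i].join();
  }
  return g_failures.load();  // aggregated across all workers
}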
1689 // Closure that finalizes the liveness counting data.
1690 // Used during the cleanup pause.
1691 // Sets the bits corresponding to the interval [NTAMS, top]
1692 // (which contains the implicitly live objects) in the
1693 // card liveness bitmap. Also sets the bit for each region
1694 // containing live data in the region liveness bitmap.
1696 class FinalCountDataUpdateClosure: public CMCountDataClosureBase {
1697 public:
1698 FinalCountDataUpdateClosure(G1CollectedHeap* g1h,
1699 BitMap* region_bm,
1700 BitMap* card_bm) :
1701 CMCountDataClosureBase(g1h, region_bm, card_bm) { }
1703 bool doHeapRegion(HeapRegion* hr) {
1705 if (hr->continuesHumongous()) {
1706 // We will ignore these here and process them when their
1707 // associated "starts humongous" region is processed (see
1708 // set_bit_for_heap_region()). Note that we cannot rely on their
1709 // associated "starts humongous" region to have its bit set to
1710 // 1 since, due to the region chunking in the parallel region
1711 // iteration, a "continues humongous" region might be visited
1712 // before its associated "starts humongous".
1713 return false;
1714 }
1716 HeapWord* ntams = hr->next_top_at_mark_start();
1717 HeapWord* top = hr->top();
1719 assert(hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions.");
1721 // Mark the allocated-since-marking portion...
1722 if (ntams < top) {
1723 // This definitely means the region has live objects.
1724 set_bit_for_region(hr);
1726 // Now set the bits in the card bitmap for [ntams, top)
1727 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
1728 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
1730 // Note: if we're looking at the last region in the heap, top
1731 // could actually be just beyond the end of the heap; end_idx
1732 // will then correspond to a (non-existent) card that is also
1733 // just beyond the heap.
1734 if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
1735 // end of object is not card aligned - increment to cover
1736 // all the cards spanned by the object
1737 end_idx += 1;
1738 }
1740 assert(end_idx <= _card_bm->size(),
1741 err_msg("oob: end_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT,
1742 end_idx, _card_bm->size()));
1743 assert(start_idx < _card_bm->size(),
1744 err_msg("oob: start_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT,
1745 start_idx, _card_bm->size()));
1747 _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1748 }
1750 // Set the bit for the region if it contains live data
1751 if (hr->next_marked_bytes() > 0) {
1752 set_bit_for_region(hr);
1753 }
1755 return false;
1756 }
1757 };
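// The [ntams, top) interval above is translated to card indices by a
// shift, and a non-card-aligned top bumps the end index so the partly
// covered last card is included. A standalone sketch of that arithmetic;
// the 512-byte card size matches G1's default, all other names are
// illustrative:

#include <cstdint>

static const uintptr_t kCardShift = 9;                    // 512-byte cards
static const uintptr_t kCardSize  = uintptr_t(1) << kCardShift;

// Map [start, end) in the heap to the half-open card index range covering it.
static void card_range(uintptr_t heap_base, uintptr_t start, uintptr_t end,
                       uintptr_t* start_idx, uintptr_t* end_idx) {
  *start_idx = (start - heap_base) >> kCardShift;
  *end_idx   = (end   - heap_base) >> kCardShift;
  if ((end & (kCardSize - 1)) != 0) {
    *end_idx += 1;  // cover the partially filled last card
  }
}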
1759 class G1ParFinalCountTask: public AbstractGangTask {
1760 protected:
1761 G1CollectedHeap* _g1h;
1762 ConcurrentMark* _cm;
1763 BitMap* _actual_region_bm;
1764 BitMap* _actual_card_bm;
1766 uint _n_workers;
1768 public:
1769 G1ParFinalCountTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm)
1770 : AbstractGangTask("G1 final counting"),
1771 _g1h(g1h), _cm(_g1h->concurrent_mark()),
1772 _actual_region_bm(region_bm), _actual_card_bm(card_bm),
1773 _n_workers(0) {
1774 // Use the value already set as the number of active threads
1775 // in the call to run_task().
1776 if (G1CollectedHeap::use_parallel_gc_threads()) {
1777 assert( _g1h->workers()->active_workers() > 0,
1778 "Should have been previously set");
1779 _n_workers = _g1h->workers()->active_workers();
1780 } else {
1781 _n_workers = 1;
1782 }
1783 }
1785 void work(uint worker_id) {
1786 assert(worker_id < _n_workers, "invariant");
1788 FinalCountDataUpdateClosure final_update_cl(_g1h,
1789 _actual_region_bm,
1790 _actual_card_bm);
1792 if (G1CollectedHeap::use_parallel_gc_threads()) {
1793 _g1h->heap_region_par_iterate_chunked(&final_update_cl,
1794 worker_id,
1795 _n_workers,
1796 HeapRegion::FinalCountClaimValue);
1797 } else {
1798 _g1h->heap_region_iterate(&final_update_cl);
1799 }
1800 }
1801 };
1803 class G1ParNoteEndTask;
1805 class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
1806 G1CollectedHeap* _g1;
1807 int _worker_num;
1808 size_t _max_live_bytes;
1809 uint _regions_claimed;
1810 size_t _freed_bytes;
1811 FreeRegionList* _local_cleanup_list;
1812 HeapRegionSetCount _old_regions_removed;
1813 HeapRegionSetCount _humongous_regions_removed;
1814 HRRSCleanupTask* _hrrs_cleanup_task;
1815 double _claimed_region_time;
1816 double _max_region_time;
1818 public:
1819 G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
1820 int worker_num,
1821 FreeRegionList* local_cleanup_list,
1822 HRRSCleanupTask* hrrs_cleanup_task) :
1823 _g1(g1), _worker_num(worker_num),
1824 _max_live_bytes(0), _regions_claimed(0),
1825 _freed_bytes(0),
1826 _claimed_region_time(0.0), _max_region_time(0.0),
1827 _local_cleanup_list(local_cleanup_list),
1828 _old_regions_removed(),
1829 _humongous_regions_removed(),
1830 _hrrs_cleanup_task(hrrs_cleanup_task) { }
1832 size_t freed_bytes() { return _freed_bytes; }
1833 const HeapRegionSetCount& old_regions_removed() { return _old_regions_removed; }
1834 const HeapRegionSetCount& humongous_regions_removed() { return _humongous_regions_removed; }
1836 bool doHeapRegion(HeapRegion *hr) {
1837 if (hr->continuesHumongous()) {
1838 return false;
1839 }
1840 // We use a claim value of zero here because all regions
1841 // were claimed with value 1 in the FinalCount task.
1842 _g1->reset_gc_time_stamps(hr);
1843 double start = os::elapsedTime();
1844 _regions_claimed++;
1845 hr->note_end_of_marking();
1846 _max_live_bytes += hr->max_live_bytes();
1848 if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
1849 _freed_bytes += hr->used();
1850 hr->set_containing_set(NULL);
1851 if (hr->isHumongous()) {
1852 assert(hr->startsHumongous(), "we should only see starts humongous");
1853 _humongous_regions_removed.increment(1u, hr->capacity());
1854 _g1->free_humongous_region(hr, _local_cleanup_list, true);
1855 } else {
1856 _old_regions_removed.increment(1u, hr->capacity());
1857 _g1->free_region(hr, _local_cleanup_list, true);
1858 }
1859 } else {
1860 hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
1861 }
1863 double region_time = (os::elapsedTime() - start);
1864 _claimed_region_time += region_time;
1865 if (region_time > _max_region_time) {
1866 _max_region_time = region_time;
1867 }
1868 return false;
1869 }
1871 size_t max_live_bytes() { return _max_live_bytes; }
1872 uint regions_claimed() { return _regions_claimed; }
1873 double claimed_region_time_sec() { return _claimed_region_time; }
1874 double max_region_time_sec() { return _max_region_time; }
1875 };
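// The reclamation test above reduces to a simple predicate: the region is
// in use, marking found no live data in it, and it is not young (young
// regions are reclaimed by evacuation, not cleanup). A standalone sketch;
// the struct is a stand-in, not HeapRegion's real interface:

#include <cstddef>

struct RegionInfo {
  size_t used_bytes;      // bytes allocated in the region
  size_t max_live_bytes;  // upper bound on live bytes after marking
  bool   is_young;        // young regions are handled elsewhere
};

static bool is_completely_garbage(const RegionInfo& r) {
  return r.used_bytes > 0 && r.max_live_bytes == 0 && !r.is_young;
}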
1877 class G1ParNoteEndTask: public AbstractGangTask {
1878 friend class G1NoteEndOfConcMarkClosure;
1880 protected:
1881 G1CollectedHeap* _g1h;
1882 size_t _max_live_bytes;
1883 size_t _freed_bytes;
1884 FreeRegionList* _cleanup_list;
1886 public:
1887 G1ParNoteEndTask(G1CollectedHeap* g1h,
1888 FreeRegionList* cleanup_list) :
1889 AbstractGangTask("G1 note end"), _g1h(g1h),
1890 _max_live_bytes(0), _freed_bytes(0), _cleanup_list(cleanup_list) { }
1892 void work(uint worker_id) {
1893 double start = os::elapsedTime();
1894 FreeRegionList local_cleanup_list("Local Cleanup List");
1895 HRRSCleanupTask hrrs_cleanup_task;
1896 G1NoteEndOfConcMarkClosure g1_note_end(_g1h, worker_id, &local_cleanup_list,
1897 &hrrs_cleanup_task);
1898 if (G1CollectedHeap::use_parallel_gc_threads()) {
1899 _g1h->heap_region_par_iterate_chunked(&g1_note_end, worker_id,
1900 _g1h->workers()->active_workers(),
1901 HeapRegion::NoteEndClaimValue);
1902 } else {
1903 _g1h->heap_region_iterate(&g1_note_end);
1904 }
1905 assert(g1_note_end.complete(), "Shouldn't have yielded!");
1907 // Now update the lists
1908 _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed());
1909 {
1910 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
1911 _g1h->decrement_summary_bytes(g1_note_end.freed_bytes());
1912 _max_live_bytes += g1_note_end.max_live_bytes();
1913 _freed_bytes += g1_note_end.freed_bytes();
1915 // If we iterate over the global cleanup list at the end of
1916 // cleanup to do this printing we will not guarantee to only
1917 // generate output for the newly-reclaimed regions (the list
1918 // might not be empty at the beginning of cleanup; we might
1919 // still be working on its previous contents). So we do the
1920 // printing here, before we append the new regions to the global
1921 // cleanup list.
1923 G1HRPrinter* hr_printer = _g1h->hr_printer();
1924 if (hr_printer->is_active()) {
1925 FreeRegionListIterator iter(&local_cleanup_list);
1926 while (iter.more_available()) {
1927 HeapRegion* hr = iter.get_next();
1928 hr_printer->cleanup(hr);
1929 }
1930 }
1932 _cleanup_list->add_as_tail(&local_cleanup_list);
1933 assert(local_cleanup_list.is_empty(), "post-condition");
1935 HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
1936 }
1937 }
1938 size_t max_live_bytes() { return _max_live_bytes; }
1939 size_t freed_bytes() { return _freed_bytes; }
1940 };
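// Each worker above frees regions into a private list and splices it into
// the shared cleanup list once, so the lock is taken once per worker
// rather than once per region. A standalone sketch of that pattern with
// std::list::splice (all names illustrative):

#include <list>
#include <mutex>

static std::list<int> g_cleanup_list;  // shared list of reclaimed regions
static std::mutex g_cleanup_lock;

static void publish_local_cleanup_list(std::list<int>& local) {
  std::lock_guard<std::mutex> guard(g_cleanup_lock);
  // splice() is O(1) and leaves 'local' empty, like add_as_tail() above.
  g_cleanup_list.splice(g_cleanup_list.end(), local);
}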
1942 class G1ParScrubRemSetTask: public AbstractGangTask {
1943 protected:
1944 G1RemSet* _g1rs;
1945 BitMap* _region_bm;
1946 BitMap* _card_bm;
1947 public:
1948 G1ParScrubRemSetTask(G1CollectedHeap* g1h,
1949 BitMap* region_bm, BitMap* card_bm) :
1950 AbstractGangTask("G1 ScrubRS"), _g1rs(g1h->g1_rem_set()),
1951 _region_bm(region_bm), _card_bm(card_bm) { }
1953 void work(uint worker_id) {
1954 if (G1CollectedHeap::use_parallel_gc_threads()) {
1955 _g1rs->scrub_par(_region_bm, _card_bm, worker_id,
1956 HeapRegion::ScrubRemSetClaimValue);
1957 } else {
1958 _g1rs->scrub(_region_bm, _card_bm);
1959 }
1960 }
1962 };
1964 void ConcurrentMark::cleanup() {
1965 // world is stopped at this checkpoint
1966 assert(SafepointSynchronize::is_at_safepoint(),
1967 "world should be stopped");
1968 G1CollectedHeap* g1h = G1CollectedHeap::heap();
1970 // If a full collection has happened, we shouldn't do this.
1971 if (has_aborted()) {
1972 g1h->set_marking_complete(); // So bitmap clearing isn't confused
1973 return;
1974 }
1976 g1h->verify_region_sets_optional();
1978 if (VerifyDuringGC) {
1979 HandleMark hm; // handle scope
1980 Universe::heap()->prepare_for_verify();
1981 Universe::verify(VerifyOption_G1UsePrevMarking,
1982 " VerifyDuringGC:(before)");
1983 }
1985 G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
1986 g1p->record_concurrent_mark_cleanup_start();
1988 double start = os::elapsedTime();
1990 HeapRegionRemSet::reset_for_cleanup_tasks();
1992 uint n_workers;
1994 // Do counting once more with the world stopped for good measure.
1995 G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm);
1997 if (G1CollectedHeap::use_parallel_gc_threads()) {
1998 assert(g1h->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
1999 "sanity check");
2001 g1h->set_par_threads();
2002 n_workers = g1h->n_par_threads();
2003 assert(g1h->n_par_threads() == n_workers,
2004 "Should not have been reset");
2005 g1h->workers()->run_task(&g1_par_count_task);
2006 // Done with the parallel phase so reset to 0.
2007 g1h->set_par_threads(0);
2009 assert(g1h->check_heap_region_claim_values(HeapRegion::FinalCountClaimValue),
2010 "sanity check");
2011 } else {
2012 n_workers = 1;
2013 g1_par_count_task.work(0);
2014 }
2016 if (VerifyDuringGC) {
2017 // Verify that the counting data accumulated during marking matches
2018 // that calculated by walking the marking bitmap.
2020 // Bitmaps to hold expected values
2021 BitMap expected_region_bm(_region_bm.size(), false);
2022 BitMap expected_card_bm(_card_bm.size(), false);
2024 G1ParVerifyFinalCountTask g1_par_verify_task(g1h,
2025 &_region_bm,
2026 &_card_bm,
2027 &expected_region_bm,
2028 &expected_card_bm);
2030 if (G1CollectedHeap::use_parallel_gc_threads()) {
2031 g1h->set_par_threads((int)n_workers);
2032 g1h->workers()->run_task(&g1_par_verify_task);
2033 // Done with the parallel phase so reset to 0.
2034 g1h->set_par_threads(0);
2036 assert(g1h->check_heap_region_claim_values(HeapRegion::VerifyCountClaimValue),
2037 "sanity check");
2038 } else {
2039 g1_par_verify_task.work(0);
2040 }
2042 guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures");
2043 }
2045 size_t start_used_bytes = g1h->used();
2046 g1h->set_marking_complete();
2048 double count_end = os::elapsedTime();
2049 double this_final_counting_time = (count_end - start);
2050 _total_counting_time += this_final_counting_time;
2052 if (G1PrintRegionLivenessInfo) {
2053 G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking");
2054 _g1h->heap_region_iterate(&cl);
2055 }
2057 // Install newly created mark bitMap as "prev".
2058 swapMarkBitMaps();
2060 g1h->reset_gc_time_stamp();
2062 // Note end of marking in all heap regions.
2063 G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list);
2064 if (G1CollectedHeap::use_parallel_gc_threads()) {
2065 g1h->set_par_threads((int)n_workers);
2066 g1h->workers()->run_task(&g1_par_note_end_task);
2067 g1h->set_par_threads(0);
2069 assert(g1h->check_heap_region_claim_values(HeapRegion::NoteEndClaimValue),
2070 "sanity check");
2071 } else {
2072 g1_par_note_end_task.work(0);
2073 }
2074 g1h->check_gc_time_stamps();
2076 if (!cleanup_list_is_empty()) {
2077 // The cleanup list is not empty, so we'll have to process it
2078 // concurrently. Notify anyone else that might be wanting free
2079 // regions that there will be more free regions coming soon.
2080 g1h->set_free_regions_coming();
2081 }
2083 // Note: rem set scrubbing must happen before the record_concurrent_mark_cleanup_end()
2084 // call below, since it affects the metric by which we sort the heap regions.
2085 if (G1ScrubRemSets) {
2086 double rs_scrub_start = os::elapsedTime();
2087 G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm);
2088 if (G1CollectedHeap::use_parallel_gc_threads()) {
2089 g1h->set_par_threads((int)n_workers);
2090 g1h->workers()->run_task(&g1_par_scrub_rs_task);
2091 g1h->set_par_threads(0);
2093 assert(g1h->check_heap_region_claim_values(
2094 HeapRegion::ScrubRemSetClaimValue),
2095 "sanity check");
2096 } else {
2097 g1_par_scrub_rs_task.work(0);
2098 }
2100 double rs_scrub_end = os::elapsedTime();
2101 double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start);
2102 _total_rs_scrub_time += this_rs_scrub_time;
2103 }
2105 // this will also free any regions totally full of garbage objects,
2106 // and sort the regions.
2107 g1h->g1_policy()->record_concurrent_mark_cleanup_end((int)n_workers);
2109 // Statistics.
2110 double end = os::elapsedTime();
2111 _cleanup_times.add((end - start) * 1000.0);
2113 if (G1Log::fine()) {
2114 g1h->print_size_transition(gclog_or_tty,
2115 start_used_bytes,
2116 g1h->used(),
2117 g1h->capacity());
2118 }
2120 // Clean up will have freed any regions completely full of garbage.
2121 // Update the soft reference policy with the new heap occupancy.
2122 Universe::update_heap_info_at_gc();
2124 // We need to make this be a "collection" so any collection pause that
2125 // races with it goes around and waits for completeCleanup to finish.
2126 g1h->increment_total_collections();
2128 // We reclaimed old regions so we should calculate the sizes to make
2129 // sure we update the old gen/space data.
2130 g1h->g1mm()->update_sizes();
2132 if (VerifyDuringGC) {
2133 HandleMark hm; // handle scope
2134 Universe::heap()->prepare_for_verify();
2135 Universe::verify(VerifyOption_G1UsePrevMarking,
2136 " VerifyDuringGC:(after)");
2137 }
2139 g1h->verify_region_sets_optional();
2140 g1h->trace_heap_after_concurrent_cycle();
2141 }
2143 void ConcurrentMark::completeCleanup() {
2144 if (has_aborted()) return;
2146 G1CollectedHeap* g1h = G1CollectedHeap::heap();
2148 _cleanup_list.verify_list();
2149 FreeRegionList tmp_free_list("Tmp Free List");
2151 if (G1ConcRegionFreeingVerbose) {
2152 gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
2153 "cleanup list has %u entries",
2154 _cleanup_list.length());
2155 }
2157 // No one else should be accessing the _cleanup_list at this point,
2158 // so it's not necessary to take any locks.
2159 while (!_cleanup_list.is_empty()) {
2160 HeapRegion* hr = _cleanup_list.remove_head();
2161 assert(hr != NULL, "the list was not empty");
2162 hr->par_clear();
2163 tmp_free_list.add_as_tail(hr);
2165 // Instead of adding one region at a time to the secondary_free_list,
2166 // we accumulate them in the local list and move them a few at a
2167 // time. This also cuts down on the number of notify_all() calls
2168 // we do during this process. We'll also append the local list when
2169 // _cleanup_list is empty (which means we just removed the last
2170 // region from the _cleanup_list).
2171 if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
2172 _cleanup_list.is_empty()) {
2173 if (G1ConcRegionFreeingVerbose) {
2174 gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
2175 "appending %u entries to the secondary_free_list, "
2176 "cleanup list still has %u entries",
2177 tmp_free_list.length(),
2178 _cleanup_list.length());
2179 }
2181 {
2182 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
2183 g1h->secondary_free_list_add_as_tail(&tmp_free_list);
2184 SecondaryFreeList_lock->notify_all();
2185 }
2187 if (G1StressConcRegionFreeing) {
2188 for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
2189 os::sleep(Thread::current(), (jlong) 1, false);
2190 }
2191 }
2192 }
2193 }
2194 assert(tmp_free_list.is_empty(), "post-condition");
2195 }
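// completeCleanup() hands regions to the secondary free list in batches,
// waking waiters a few times rather than once per region. A standalone
// sketch of that batching rhythm; the batch size plays the role of
// G1SecondaryFreeListAppendLength and everything else is illustrative:

#include <condition_variable>
#include <cstddef>
#include <deque>
#include <mutex>

static std::deque<int> g_secondary_free;
static std::mutex g_free_lock;
static std::condition_variable g_free_cv;
static const size_t kAppendBatch = 5;

static void drain_cleanup_list(std::deque<int>& cleanup_list) {
  std::deque<int> batch;
  while (!cleanup_list.empty()) {
    batch.push_back(cleanup_list.front());
    cleanup_list.pop_front();
    // Publish a full batch, plus whatever is left once the source is empty.
    if (batch.size() == kAppendBatch || cleanup_list.empty()) {
      std::lock_guard<std::mutex> guard(g_free_lock);
      while (!batch.empty()) {
        g_secondary_free.push_back(batch.front());
        batch.pop_front();
      }
      g_free_cv.notify_all();  // wake any allocator waiting for free regions
    }
  }
}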
2197 // Supporting Object and Oop closures for reference discovery
2198 // and processing during marking.
2200 bool G1CMIsAliveClosure::do_object_b(oop obj) {
2201 HeapWord* addr = (HeapWord*)obj;
2202 return addr != NULL &&
2203 (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
2204 }
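// The liveness predicate above: an address outside the G1 reserved heap is
// always treated as alive, and an address inside is alive unless the
// marking information says the object is dead. A standalone sketch of that
// shape; is_dead() is a stub standing in for is_obj_ill():

#include <cstdint>

struct HeapSpan { uintptr_t base; uintptr_t limit; };

static bool in_reserved(const HeapSpan& h, uintptr_t addr) {
  return addr >= h.base && addr < h.limit;
}

static bool is_dead(uintptr_t /*addr*/) { return false; }  // stub

static bool is_alive(const HeapSpan& heap, uintptr_t addr) {
  return addr != 0 && (!in_reserved(heap, addr) || !is_dead(addr));
}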
2206 // 'Keep Alive' oop closure used by both serial and parallel reference processing.
2207 // Uses the CMTask associated with a worker thread (for serial reference
2208 // processing the CMTask for worker 0 is used) to preserve (mark) and
2209 // trace referent objects.
2210 //
2211 // Using the CMTask and embedded local queues avoids having the worker
2212 // threads operating on the global mark stack. This reduces the risk
2213 // of overflowing the stack - which we would rather avoid at this late
2214 // stage. Also, using the tasks' local queues removes the potential
2215 // for the workers to interfere with each other, which could occur if
2216 // they operated on the global stack.
2218 class G1CMKeepAliveAndDrainClosure: public OopClosure {
2219 ConcurrentMark* _cm;
2220 CMTask* _task;
2221 int _ref_counter_limit;
2222 int _ref_counter;
2223 bool _is_serial;
2224 public:
2225 G1CMKeepAliveAndDrainClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
2226 _cm(cm), _task(task), _is_serial(is_serial),
2227 _ref_counter_limit(G1RefProcDrainInterval) {
2228 assert(_ref_counter_limit > 0, "sanity");
2229 assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
2230 _ref_counter = _ref_counter_limit;
2231 }
2233 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
2234 virtual void do_oop( oop* p) { do_oop_work(p); }
2236 template <class T> void do_oop_work(T* p) {
2237 if (!_cm->has_overflown()) {
2238 oop obj = oopDesc::load_decode_heap_oop(p);
2239 if (_cm->verbose_high()) {
2240 gclog_or_tty->print_cr("\t[%u] we're looking at location "
2241 "*"PTR_FORMAT" = "PTR_FORMAT,
2242 _task->worker_id(), p, (void*) obj);
2243 }
2245 _task->deal_with_reference(obj);
2246 _ref_counter--;
2248 if (_ref_counter == 0) {
2249 // We have dealt with _ref_counter_limit references, pushing them
2250 // and objects reachable from them on to the local stack (and
2251 // possibly the global stack). Call CMTask::do_marking_step() to
2252 // process these entries.
2253 //
2254 // We call CMTask::do_marking_step() in a loop, which we'll exit if
2255 // there's nothing more to do (i.e. we're done with the entries that
2256 // were pushed as a result of the CMTask::deal_with_reference() calls
2257 // above) or we overflow.
2258 //
2259 // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
2260 // flag while there may still be some work to do. (See the comment at
2261 // the beginning of CMTask::do_marking_step() for those conditions -
2262 // one of which is reaching the specified time target.) It is only
2263 // when CMTask::do_marking_step() returns without setting the
2264 // has_aborted() flag that the marking step has completed.
2265 do {
2266 double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
2267 _task->do_marking_step(mark_step_duration_ms,
2268 false /* do_termination */,
2269 _is_serial);
2270 } while (_task->has_aborted() && !_cm->has_overflown());
2271 _ref_counter = _ref_counter_limit;
2272 }
2273 } else {
2274 if (_cm->verbose_high()) {
2275 gclog_or_tty->print_cr("\t[%u] CM Overflow", _task->worker_id());
2276 }
2277 }
2278 }
2279 };
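// The closure above works in bursts: after every G1RefProcDrainInterval
// references it pauses to drain its local queues before continuing. A
// standalone sketch of that cadence; process_one() and drain() are
// illustrative stand-ins:

#include <functional>

static void process_refs_with_drain(int n_refs, int drain_interval,
                                    const std::function<void(int)>& process_one,
                                    const std::function<void()>& drain) {
  int budget = drain_interval;
  for (int i = 0; i < n_refs; ++i) {
    process_one(i);     // plays the role of deal_with_reference()
    if (--budget == 0) {
      drain();          // bounded flush, like the do_marking_step() loop
      budget = drain_interval;
    }
  }
}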
2281 // 'Drain' oop closure used by both serial and parallel reference processing.
2282 // Uses the CMTask associated with a given worker thread (for serial
2283 // reference processing the CMTask for worker 0 is used). Calls the
2284 // do_marking_step routine, with an unbelievably large timeout value,
2285 // to drain the marking data structures of the remaining entries
2286 // added by the 'keep alive' oop closure above.
2288 class G1CMDrainMarkingStackClosure: public VoidClosure {
2289 ConcurrentMark* _cm;
2290 CMTask* _task;
2291 bool _is_serial;
2292 public:
2293 G1CMDrainMarkingStackClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
2294 _cm(cm), _task(task), _is_serial(is_serial) {
2295 assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
2296 }
2298 void do_void() {
2299 do {
2300 if (_cm->verbose_high()) {
2301 gclog_or_tty->print_cr("\t[%u] Drain: Calling do_marking_step - serial: %s",
2302 _task->worker_id(), BOOL_TO_STR(_is_serial));
2303 }
2305 // We call CMTask::do_marking_step() to completely drain the local
2306 // and global marking stacks of entries pushed by the 'keep alive'
2307 // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
2308 //
2309 // CMTask::do_marking_step() is called in a loop, which we'll exit
2310 // if there's nothing more to do (i.e. we've completely drained the
2311 // entries that were pushed as a result of applying the 'keep alive'
2312 // closure to the entries on the discovered ref lists) or we overflow
2313 // the global marking stack.
2314 //
2315 // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
2316 // flag while there may still be some work to do. (See the comment at
2317 // the beginning of CMTask::do_marking_step() for those conditions -
2318 // one of which is reaching the specified time target.) It is only
2319 // when CMTask::do_marking_step() returns without setting the
2320 // has_aborted() flag that the marking step has completed.
2322 _task->do_marking_step(1000000000.0 /* something very large */,
2323 true /* do_termination */,
2324 _is_serial);
2325 } while (_task->has_aborted() && !_cm->has_overflown());
2326 }
2327 };
2329 // Implementation of AbstractRefProcTaskExecutor for parallel
2330 // reference processing at the end of G1 concurrent marking
2332 class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
2333 private:
2334 G1CollectedHeap* _g1h;
2335 ConcurrentMark* _cm;
2336 WorkGang* _workers;
2337 int _active_workers;
2339 public:
2340 G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
2341 ConcurrentMark* cm,
2342 WorkGang* workers,
2343 int n_workers) :
2344 _g1h(g1h), _cm(cm),
2345 _workers(workers), _active_workers(n_workers) { }
2347 // Executes the given task using concurrent marking worker threads.
2348 virtual void execute(ProcessTask& task);
2349 virtual void execute(EnqueueTask& task);
2350 };
2352 class G1CMRefProcTaskProxy: public AbstractGangTask {
2353 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
2354 ProcessTask& _proc_task;
2355 G1CollectedHeap* _g1h;
2356 ConcurrentMark* _cm;
2358 public:
2359 G1CMRefProcTaskProxy(ProcessTask& proc_task,
2360 G1CollectedHeap* g1h,
2361 ConcurrentMark* cm) :
2362 AbstractGangTask("Process reference objects in parallel"),
2363 _proc_task(proc_task), _g1h(g1h), _cm(cm) {
2364 ReferenceProcessor* rp = _g1h->ref_processor_cm();
2365 assert(rp->processing_is_mt(), "shouldn't be here otherwise");
2366 }
2368 virtual void work(uint worker_id) {
2369 CMTask* task = _cm->task(worker_id);
2370 G1CMIsAliveClosure g1_is_alive(_g1h);
2371 G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
2372 G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */);
2374 _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
2375 }
2376 };
2378 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
2379 assert(_workers != NULL, "Need parallel worker threads.");
2380 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
2382 G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);
2384 // We need to reset the concurrency level before each
2385 // proxy task execution, so that the termination protocol
2386 // and overflow handling in CMTask::do_marking_step() know
2387 // how many workers to wait for.
2388 _cm->set_concurrency(_active_workers);
2389 _g1h->set_par_threads(_active_workers);
2390 _workers->run_task(&proc_task_proxy);
2391 _g1h->set_par_threads(0);
2392 }
2394 class G1CMRefEnqueueTaskProxy: public AbstractGangTask {
2395 typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
2396 EnqueueTask& _enq_task;
2398 public:
2399 G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) :
2400 AbstractGangTask("Enqueue reference objects in parallel"),
2401 _enq_task(enq_task) { }
2403 virtual void work(uint worker_id) {
2404 _enq_task.work(worker_id);
2405 }
2406 };
2408 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
2409 assert(_workers != NULL, "Need parallel worker threads.");
2410 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
2412 G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);
2414 // Not strictly necessary but...
2415 //
2416 // We need to reset the concurrency level before each
2417 // proxy task execution, so that the termination protocol
2418 // and overflow handling in CMTask::do_marking_step() know
2419 // how many workers to wait for.
2420 _cm->set_concurrency(_active_workers);
2421 _g1h->set_par_threads(_active_workers);
2422 _workers->run_task(&enq_task_proxy);
2423 _g1h->set_par_threads(0);
2424 }
2426 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
2427 if (has_overflown()) {
2428 // Skip processing the discovered references if we have
2429 // overflown the global marking stack. Reference objects
2430 // only get discovered once so it is OK to not
2431 // de-populate the discovered reference lists. We could have done so,
2432 // but the only benefit would be that, when marking restarts,
2433 // fewer reference objects would be discovered.
2434 return;
2435 }
2437 ResourceMark rm;
2438 HandleMark hm;
2440 G1CollectedHeap* g1h = G1CollectedHeap::heap();
2442 // Is alive closure.
2443 G1CMIsAliveClosure g1_is_alive(g1h);
2445 // Inner scope to exclude the cleaning of the string and symbol
2446 // tables from the displayed time.
2447 {
2448 if (G1Log::finer()) {
2449 gclog_or_tty->put(' ');
2450 }
2451 GCTraceTime t("GC ref-proc", G1Log::finer(), false, g1h->gc_timer_cm());
2453 ReferenceProcessor* rp = g1h->ref_processor_cm();
2455 // See the comment in G1CollectedHeap::ref_processing_init()
2456 // about how reference processing currently works in G1.
2458 // Set the soft reference policy
2459 rp->setup_policy(clear_all_soft_refs);
2460 assert(_markStack.isEmpty(), "mark stack should be empty");
2462 // Instances of the 'Keep Alive' and 'Complete GC' closures used
2463 // in serial reference processing. Note these closures are also
2464 // used for serially processing (by the current thread) the
2465 // JNI references during parallel reference processing.
2466 //
2467 // These closures do not need to synchronize with the worker
2468 // threads involved in parallel reference processing as these
2469 // instances are executed serially by the current thread (e.g.
2470 // reference processing is not multi-threaded and is thus
2471 // performed by the current thread instead of a gang worker).
2472 //
2473 // The gang tasks involved in parallel reference processing create
2474 // their own instances of these closures, which do their own
2475 // synchronization among themselves.
2476 G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
2477 G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);
2479 // We need at least one active thread. If reference processing
2480 // is not multi-threaded we use the current (VMThread) thread,
2481 // otherwise we use the work gang from the G1CollectedHeap and
2482 // we utilize all the worker threads we can.
2483 bool processing_is_mt = rp->processing_is_mt() && g1h->workers() != NULL;
2484 uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U);
2485 active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);
2487 // Parallel processing task executor.
2488 G1CMRefProcTaskExecutor par_task_executor(g1h, this,
2489 g1h->workers(), active_workers);
2490 AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
2492 // Set the concurrency level. The phase was already set prior to
2493 // executing the remark task.
2494 set_concurrency(active_workers);
2496 // Set the degree of MT processing here. If the discovery was done MT,
2497 // the number of threads involved during discovery could differ from
2498 // the number of active workers. This is OK as long as the discovered
2499 // Reference lists are balanced (see balance_all_queues() and balance_queues()).
2500 rp->set_active_mt_degree(active_workers);
2502 // Process the weak references.
2503 const ReferenceProcessorStats& stats =
2504 rp->process_discovered_references(&g1_is_alive,
2505 &g1_keep_alive,
2506 &g1_drain_mark_stack,
2507 executor,
2508 g1h->gc_timer_cm());
2509 g1h->gc_tracer_cm()->report_gc_reference_stats(stats);
2511 // The do_oop work routines of the keep_alive and drain_marking_stack
2512 // oop closures will set the has_overflown flag if we overflow the
2513 // global marking stack.
2515 assert(_markStack.overflow() || _markStack.isEmpty(),
2516 "mark stack should be empty (unless it overflowed)");
2518 if (_markStack.overflow()) {
2519 // This should have been done already when we tried to push an
2520 // entry on to the global mark stack. But let's do it again.
2521 set_has_overflown();
2522 }
2524 assert(rp->num_q() == active_workers, "why not");
2526 rp->enqueue_discovered_references(executor);
2528 rp->verify_no_references_recorded();
2529 assert(!rp->discovery_enabled(), "Post condition");
2530 }
2532 if (has_overflown()) {
2533 // We can not trust g1_is_alive if the marking stack overflowed
2534 return;
2535 }
2537 g1h->unlink_string_and_symbol_table(&g1_is_alive,
2538 /* process_strings */ false, // currently strings are always roots
2539 /* process_symbols */ true);
2540 }
2542 void ConcurrentMark::swapMarkBitMaps() {
2543 CMBitMapRO* temp = _prevMarkBitMap;
2544 _prevMarkBitMap = (CMBitMapRO*)_nextMarkBitMap;
2545 _nextMarkBitMap = (CMBitMap*) temp;
2546 }
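// swapMarkBitMaps() is plain double buffering: the just-completed "next"
// bitmap becomes the stable "prev" view, and the old storage is recycled
// for the following cycle. A standalone sketch (the struct is illustrative):

#include <utility>
#include <vector>

struct MarkBitmaps {
  std::vector<bool> prev;  // results of the last completed marking
  std::vector<bool> next;  // built up during the current marking
};

static void swap_mark_bitmaps(MarkBitmaps& bm) {
  // The old 'prev' storage becomes 'next' and is cleared before reuse.
  std::swap(bm.prev, bm.next);
}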
2548 class CMRemarkTask: public AbstractGangTask {
2549 private:
2550 ConcurrentMark* _cm;
2551 bool _is_serial;
2552 public:
2553 void work(uint worker_id) {
2554 // Since all available tasks are actually started, we should
2555 // only proceed if we're supposed to be active.
2556 if (worker_id < _cm->active_tasks()) {
2557 CMTask* task = _cm->task(worker_id);
2558 task->record_start_time();
2559 do {
2560 task->do_marking_step(1000000000.0 /* something very large */,
2561 true /* do_termination */,
2562 _is_serial);
2563 } while (task->has_aborted() && !_cm->has_overflown());
2564 // If we overflow, then we do not want to restart. We instead
2565 // want to abort remark and do concurrent marking again.
2566 task->record_end_time();
2567 }
2568 }
2570 CMRemarkTask(ConcurrentMark* cm, int active_workers, bool is_serial) :
2571 AbstractGangTask("Par Remark"), _cm(cm), _is_serial(is_serial) {
2572 _cm->terminator()->reset_for_reuse(active_workers);
2573 }
2574 };
2576 void ConcurrentMark::checkpointRootsFinalWork() {
2577 ResourceMark rm;
2578 HandleMark hm;
2579 G1CollectedHeap* g1h = G1CollectedHeap::heap();
2581 g1h->ensure_parsability(false);
2583 if (G1CollectedHeap::use_parallel_gc_threads()) {
2584 G1CollectedHeap::StrongRootsScope srs(g1h);
2585 // this is remark, so we'll use up all active threads
2586 uint active_workers = g1h->workers()->active_workers();
2587 if (active_workers == 0) {
2588 assert(active_workers > 0, "Should have been set earlier");
2589 active_workers = (uint) ParallelGCThreads;
2590 g1h->workers()->set_active_workers(active_workers);
2591 }
2592 set_concurrency_and_phase(active_workers, false /* concurrent */);
2593 // Leave _parallel_marking_threads at its
2594 // value originally calculated in the ConcurrentMark
2595 // constructor and pass values of the active workers
2596 // through the gang in the task.
2598 CMRemarkTask remarkTask(this, active_workers, false /* is_serial */);
2599 // We will start all available threads, even if we decide that the
2600 // active_workers will be fewer. The extra ones will just bail out
2601 // immediately.
2602 g1h->set_par_threads(active_workers);
2603 g1h->workers()->run_task(&remarkTask);
2604 g1h->set_par_threads(0);
2605 } else {
2606 G1CollectedHeap::StrongRootsScope srs(g1h);
2607 uint active_workers = 1;
2608 set_concurrency_and_phase(active_workers, false /* concurrent */);
2610 // Note - if there's no work gang then the VMThread will be
2611 // the thread to execute the remark - serially. We have
2612 // to pass true for the is_serial parameter so that
2613 // CMTask::do_marking_step() doesn't enter the sync
2614 // barriers in the event of an overflow. Doing so will
2615 // cause an assert that the current thread is not a
2616 // concurrent GC thread.
2617 CMRemarkTask remarkTask(this, active_workers, true /* is_serial*/);
2618 remarkTask.work(0);
2619 }
2620 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2621 guarantee(has_overflown() ||
2622 satb_mq_set.completed_buffers_num() == 0,
2623 err_msg("Invariant: has_overflown = %s, num buffers = %d",
2624 BOOL_TO_STR(has_overflown()),
2625 satb_mq_set.completed_buffers_num()));
2627 print_stats();
2628 }
2630 #ifndef PRODUCT
2632 class PrintReachableOopClosure: public OopClosure {
2633 private:
2634 G1CollectedHeap* _g1h;
2635 outputStream* _out;
2636 VerifyOption _vo;
2637 bool _all;
2639 public:
2640 PrintReachableOopClosure(outputStream* out,
2641 VerifyOption vo,
2642 bool all) :
2643 _g1h(G1CollectedHeap::heap()),
2644 _out(out), _vo(vo), _all(all) { }
2646 void do_oop(narrowOop* p) { do_oop_work(p); }
2647 void do_oop( oop* p) { do_oop_work(p); }
2649 template <class T> void do_oop_work(T* p) {
2650 oop obj = oopDesc::load_decode_heap_oop(p);
2651 const char* str = NULL;
2652 const char* str2 = "";
2654 if (obj == NULL) {
2655 str = "";
2656 } else if (!_g1h->is_in_g1_reserved(obj)) {
2657 str = " O";
2658 } else {
2659 HeapRegion* hr = _g1h->heap_region_containing(obj);
2660 guarantee(hr != NULL, "invariant");
2661 bool over_tams = _g1h->allocated_since_marking(obj, hr, _vo);
2662 bool marked = _g1h->is_marked(obj, _vo);
2664 if (over_tams) {
2665 str = " >";
2666 if (marked) {
2667 str2 = " AND MARKED";
2668 }
2669 } else if (marked) {
2670 str = " M";
2671 } else {
2672 str = " NOT";
2673 }
2674 }
2676 _out->print_cr(" "PTR_FORMAT": "PTR_FORMAT"%s%s",
2677 p, (void*) obj, str, str2);
2678 }
2679 };
2681 class PrintReachableObjectClosure : public ObjectClosure {
2682 private:
2683 G1CollectedHeap* _g1h;
2684 outputStream* _out;
2685 VerifyOption _vo;
2686 bool _all;
2687 HeapRegion* _hr;
2689 public:
2690 PrintReachableObjectClosure(outputStream* out,
2691 VerifyOption vo,
2692 bool all,
2693 HeapRegion* hr) :
2694 _g1h(G1CollectedHeap::heap()),
2695 _out(out), _vo(vo), _all(all), _hr(hr) { }
2697 void do_object(oop o) {
2698 bool over_tams = _g1h->allocated_since_marking(o, _hr, _vo);
2699 bool marked = _g1h->is_marked(o, _vo);
2700 bool print_it = _all || over_tams || marked;
2702 if (print_it) {
2703 _out->print_cr(" "PTR_FORMAT"%s",
2704 (void *)o, (over_tams) ? " >" : (marked) ? " M" : "");
2705 PrintReachableOopClosure oopCl(_out, _vo, _all);
2706 o->oop_iterate_no_header(&oopCl);
2707 }
2708 }
2709 };
2711 class PrintReachableRegionClosure : public HeapRegionClosure {
2712 private:
2713 G1CollectedHeap* _g1h;
2714 outputStream* _out;
2715 VerifyOption _vo;
2716 bool _all;
2718 public:
2719 bool doHeapRegion(HeapRegion* hr) {
2720 HeapWord* b = hr->bottom();
2721 HeapWord* e = hr->end();
2722 HeapWord* t = hr->top();
2723 HeapWord* p = _g1h->top_at_mark_start(hr, _vo);
2724 _out->print_cr("** ["PTR_FORMAT", "PTR_FORMAT"] top: "PTR_FORMAT" "
2725 "TAMS: "PTR_FORMAT, b, e, t, p);
2726 _out->cr();
2728 HeapWord* from = b;
2729 HeapWord* to = t;
2731 if (to > from) {
2732 _out->print_cr("Objects in ["PTR_FORMAT", "PTR_FORMAT"]", from, to);
2733 _out->cr();
2734 PrintReachableObjectClosure ocl(_out, _vo, _all, hr);
2735 hr->object_iterate_mem_careful(MemRegion(from, to), &ocl);
2736 _out->cr();
2737 }
2739 return false;
2740 }
2742 PrintReachableRegionClosure(outputStream* out,
2743 VerifyOption vo,
2744 bool all) :
2745 _g1h(G1CollectedHeap::heap()), _out(out), _vo(vo), _all(all) { }
2746 };
2748 void ConcurrentMark::print_reachable(const char* str,
2749 VerifyOption vo,
2750 bool all) {
2751 gclog_or_tty->cr();
2752 gclog_or_tty->print_cr("== Doing heap dump... ");
2754 if (G1PrintReachableBaseFile == NULL) {
2755 gclog_or_tty->print_cr(" #### error: no base file defined");
2756 return;
2757 }
2759 if (strlen(G1PrintReachableBaseFile) + 1 + strlen(str) >
2760 (JVM_MAXPATHLEN - 1)) {
2761 gclog_or_tty->print_cr(" #### error: file name too long");
2762 return;
2763 }
2765 char file_name[JVM_MAXPATHLEN];
2766 sprintf(file_name, "%s.%s", G1PrintReachableBaseFile, str);
2767 gclog_or_tty->print_cr(" dumping to file %s", file_name);
2769 fileStream fout(file_name);
2770 if (!fout.is_open()) {
2771 gclog_or_tty->print_cr(" #### error: could not open file");
2772 return;
2773 }
2775 outputStream* out = &fout;
2776 out->print_cr("-- USING %s", _g1h->top_at_mark_start_str(vo));
2777 out->cr();
2779 out->print_cr("--- ITERATING OVER REGIONS");
2780 out->cr();
2781 PrintReachableRegionClosure rcl(out, vo, all);
2782 _g1h->heap_region_iterate(&rcl);
2783 out->cr();
2785 gclog_or_tty->print_cr(" done");
2786 gclog_or_tty->flush();
2787 }
2789 #endif // PRODUCT
2791 void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
2792 // Note we are overriding the read-only view of the prev map here, via
2793 // the cast.
2794 ((CMBitMap*)_prevMarkBitMap)->clearRange(mr);
2795 }
2797 void ConcurrentMark::clearRangeNextBitmap(MemRegion mr) {
2798 _nextMarkBitMap->clearRange(mr);
2799 }
2801 void ConcurrentMark::clearRangeBothBitmaps(MemRegion mr) {
2802 clearRangePrevBitmap(mr);
2803 clearRangeNextBitmap(mr);
2804 }
2806 HeapRegion*
2807 ConcurrentMark::claim_region(uint worker_id) {
2808 // "checkpoint" the finger
2809 HeapWord* finger = _finger;
2811 // _heap_end will not change underneath our feet; it only changes at
2812 // yield points.
2813 while (finger < _heap_end) {
2814 assert(_g1h->is_in_g1_reserved(finger), "invariant");
2816 // Note on how this code handles humongous regions. In the
2817 // normal case the finger will reach the start of a "starts
2818 // humongous" (SH) region. Its end will either be the end of the
2819 // last "continues humongous" (CH) region in the sequence, or the
2820 // standard end of the SH region (if the SH is the only region in
2821 // the sequence). That way claim_region() will skip over the CH
2822 // regions. However, there is a subtle race between a CM thread
2823 // executing this method and a mutator thread doing a humongous
2824 // object allocation. The two are not mutually exclusive as the CM
2825 // thread does not need to hold the Heap_lock when it gets
2826 // here. So there is a chance that claim_region() will come across
2827 // a free region that's in the process of becoming a SH or a CH
2828 // region. In the former case, it will either
2829 // a) Miss the update to the region's end, in which case it will
2830 // visit every subsequent CH region, will find their bitmaps
2831 // empty, and do nothing, or
2832 // b) Will observe the update of the region's end (in which case
2833 // it will skip the subsequent CH regions).
2834 // If it comes across a region that suddenly becomes CH, the
2835 // scenario will be similar to b). So, the race between
2836 // claim_region() and a humongous object allocation might force us
2837 // to do a bit of unnecessary work (due to some unnecessary bitmap
2838 // iterations) but it should not introduce any correctness issues.
2839 HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger);
2840 HeapWord* bottom = curr_region->bottom();
2841 HeapWord* end = curr_region->end();
2842 HeapWord* limit = curr_region->next_top_at_mark_start();
2844 if (verbose_low()) {
2845 gclog_or_tty->print_cr("[%u] curr_region = "PTR_FORMAT" "
2846 "["PTR_FORMAT", "PTR_FORMAT"), "
2847 "limit = "PTR_FORMAT,
2848 worker_id, curr_region, bottom, end, limit);
2849 }
2851 // Is the gap between reading the finger and doing the CAS too long?
2852 HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
2853 if (res == finger) {
2854 // we succeeded
2856 // notice that _finger == end cannot be guaranteed here since
2857 // someone else might have moved the finger even further
2858 assert(_finger >= end, "the finger should have moved forward");
2860 if (verbose_low()) {
2861 gclog_or_tty->print_cr("[%u] we were successful with region = "
2862 PTR_FORMAT, worker_id, curr_region);
2863 }
2865 if (limit > bottom) {
2866 if (verbose_low()) {
2867 gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is not empty, "
2868 "returning it ", worker_id, curr_region);
2869 }
2870 return curr_region;
2871 } else {
2872 assert(limit == bottom,
2873 "the region limit should be at bottom");
2874 if (verbose_low()) {
2875 gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is empty, "
2876 "returning NULL", worker_id, curr_region);
2877 }
2878 // we return NULL and the caller should try calling
2879 // claim_region() again.
2880 return NULL;
2881 }
2882 } else {
2883 assert(_finger > finger, "the finger should have moved forward");
2884 if (verbose_low()) {
2885 gclog_or_tty->print_cr("[%u] somebody else moved the finger, "
2886 "global finger = "PTR_FORMAT", "
2887 "our finger = "PTR_FORMAT,
2888 worker_id, _finger, finger);
2889 }
2891 // read it again
2892 finger = _finger;
2893 }
2894 }
2896 return NULL;
2897 }
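// claim_region() above is a CAS loop over a shared finger: read it,
// compute the end of the region it points at, and try to advance the
// finger to that end; the thread whose CAS succeeds owns the region. A
// standalone sketch over fixed-size regions (sizes and names illustrative):

#include <atomic>
#include <cstddef>

static const size_t kRegionWords = 1024;
static const size_t kHeapWords   = 64 * kRegionWords;
static std::atomic<size_t> g_finger(0);

// Returns the start of a claimed region, or kHeapWords when the heap is
// exhausted. A failed CAS reloads the finger and simply retries.
static size_t claim_region_sketch() {
  size_t finger = g_finger.load();
  while (finger < kHeapWords) {
    size_t end = finger + kRegionWords;  // end of the region at 'finger'
    if (g_finger.compare_exchange_weak(finger, end)) {
      return finger;                     // we now own [finger, end)
    }
    // On failure 'finger' holds the current value; loop and try again.
  }
  return kHeapWords;
}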
2899 #ifndef PRODUCT
2900 enum VerifyNoCSetOopsPhase {
2901 VerifyNoCSetOopsStack,
2902 VerifyNoCSetOopsQueues,
2903 VerifyNoCSetOopsSATBCompleted,
2904 VerifyNoCSetOopsSATBThread
2905 };
2907 class VerifyNoCSetOopsClosure : public OopClosure, public ObjectClosure {
2908 private:
2909 G1CollectedHeap* _g1h;
2910 VerifyNoCSetOopsPhase _phase;
2911 int _info;
2913 const char* phase_str() {
2914 switch (_phase) {
2915 case VerifyNoCSetOopsStack: return "Stack";
2916 case VerifyNoCSetOopsQueues: return "Queue";
2917 case VerifyNoCSetOopsSATBCompleted: return "Completed SATB Buffers";
2918 case VerifyNoCSetOopsSATBThread: return "Thread SATB Buffers";
2919 default: ShouldNotReachHere();
2920 }
2921 return NULL;
2922 }
2924 void do_object_work(oop obj) {
2925 guarantee(!_g1h->obj_in_cs(obj),
2926 err_msg("obj: "PTR_FORMAT" in CSet, phase: %s, info: %d",
2927 (void*) obj, phase_str(), _info));
2928 }
2930 public:
2931 VerifyNoCSetOopsClosure() : _g1h(G1CollectedHeap::heap()) { }
2933 void set_phase(VerifyNoCSetOopsPhase phase, int info = -1) {
2934 _phase = phase;
2935 _info = info;
2936 }
2938 virtual void do_oop(oop* p) {
2939 oop obj = oopDesc::load_decode_heap_oop(p);
2940 do_object_work(obj);
2941 }
2943 virtual void do_oop(narrowOop* p) {
2944 // We should not come across narrow oops while scanning marking
2945 // stacks and SATB buffers.
2946 ShouldNotReachHere();
2947 }
2949 virtual void do_object(oop obj) {
2950 do_object_work(obj);
2951 }
2952 };
2954 void ConcurrentMark::verify_no_cset_oops(bool verify_stacks,
2955 bool verify_enqueued_buffers,
2956 bool verify_thread_buffers,
2957 bool verify_fingers) {
2958 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
2959 if (!G1CollectedHeap::heap()->mark_in_progress()) {
2960 return;
2961 }
2963 VerifyNoCSetOopsClosure cl;
2965 if (verify_stacks) {
2966 // Verify entries on the global mark stack
2967 cl.set_phase(VerifyNoCSetOopsStack);
2968 _markStack.oops_do(&cl);
2970 // Verify entries on the task queues
2971 for (uint i = 0; i < _max_worker_id; i += 1) {
2972 cl.set_phase(VerifyNoCSetOopsQueues, i);
2973 CMTaskQueue* queue = _task_queues->queue(i);
2974 queue->oops_do(&cl);
2975 }
2976 }
2978 SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
2980 // Verify entries on the enqueued SATB buffers
2981 if (verify_enqueued_buffers) {
2982 cl.set_phase(VerifyNoCSetOopsSATBCompleted);
2983 satb_qs.iterate_completed_buffers_read_only(&cl);
2984 }
2986 // Verify entries on the per-thread SATB buffers
2987 if (verify_thread_buffers) {
2988 cl.set_phase(VerifyNoCSetOopsSATBThread);
2989 satb_qs.iterate_thread_buffers_read_only(&cl);
2990 }
2992 if (verify_fingers) {
2993 // Verify the global finger
2994 HeapWord* global_finger = finger();
2995 if (global_finger != NULL && global_finger < _heap_end) {
2996 // The global finger always points to a heap region boundary. We
2997 // use heap_region_containing_raw() to get the containing region
2998 // given that the global finger could be pointing to a free region
2999 // which subsequently becomes "continues humongous". If that
3000 // happens, heap_region_containing() will return the bottom of the
3001 // corresponding starts humongous region and the check below will
3002 // not hold any more.
3003 HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger);
3004 guarantee(global_finger == global_hr->bottom(),
3005 err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT,
3006 global_finger, HR_FORMAT_PARAMS(global_hr)));
3007 }
3009 // Verify the task fingers
3010 assert(parallel_marking_threads() <= _max_worker_id, "sanity");
3011 for (int i = 0; i < (int) parallel_marking_threads(); i += 1) {
3012 CMTask* task = _tasks[i];
3013 HeapWord* task_finger = task->finger();
3014 if (task_finger != NULL && task_finger < _heap_end) {
3015 // See above note on the global finger verification.
3016 HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger);
3017 guarantee(task_finger == task_hr->bottom() ||
3018 !task_hr->in_collection_set(),
3019 err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT,
3020 task_finger, HR_FORMAT_PARAMS(task_hr)));
3021 }
3022 }
3023 }
3024 }
3025 #endif // PRODUCT
3027 // Aggregate the counting data that was constructed concurrently
3028 // with marking.
3029 class AggregateCountDataHRClosure: public HeapRegionClosure {
3030 G1CollectedHeap* _g1h;
3031 ConcurrentMark* _cm;
3032 CardTableModRefBS* _ct_bs;
3033 BitMap* _cm_card_bm;
3034 uint _max_worker_id;
3036 public:
3037 AggregateCountDataHRClosure(G1CollectedHeap* g1h,
3038 BitMap* cm_card_bm,
3039 uint max_worker_id) :
3040 _g1h(g1h), _cm(g1h->concurrent_mark()),
3041 _ct_bs((CardTableModRefBS*) (g1h->barrier_set())),
3042 _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { }
3044 bool doHeapRegion(HeapRegion* hr) {
3045 if (hr->continuesHumongous()) {
3046 // We will ignore these here and process them when their
3047 // associated "starts humongous" region is processed.
3048 // Note that we cannot rely on their associated
3049 // "starts humongous" region to have their bit set to 1
3050 // since, due to the region chunking in the parallel region
3051 // iteration, a "continues humongous" region might be visited
3052 // before its associated "starts humongous".
3053 return false;
3054 }
3056 HeapWord* start = hr->bottom();
3057 HeapWord* limit = hr->next_top_at_mark_start();
3058 HeapWord* end = hr->end();
3060 assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(),
3061 err_msg("Preconditions not met - "
3062 "start: "PTR_FORMAT", limit: "PTR_FORMAT", "
3063 "top: "PTR_FORMAT", end: "PTR_FORMAT,
3064 start, limit, hr->top(), hr->end()));
3066 assert(hr->next_marked_bytes() == 0, "Precondition");
3068 if (start == limit) {
3069 // NTAMS of this region has not been set so nothing to do.
3070 return false;
3071 }
3073 // 'start' should be in the heap.
3074 assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity");
3075 // 'end' *may* be just beyond the end of the heap (if hr is the last region).
3076 assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity");
3078 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
3079 BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit);
3080 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(end);
3082 // If ntams is not card aligned then we bump card bitmap index
3083 // for limit so that we get all the cards spanned by
3084 // the object ending at ntams.
3085 // Note: if this is the last region in the heap then ntams
3086 // could actually be just beyond the end of the heap;
3087 // limit_idx will then correspond to a (non-existent) card
3088 // that is also outside the heap.
3089 if (_g1h->is_in_g1_reserved(limit) && !_ct_bs->is_card_aligned(limit)) {
3090 limit_idx += 1;
3091 }
3093 assert(limit_idx <= end_idx, "or else use atomics");
3095 // Aggregate the "stripe" in the count data associated with hr.
3096 uint hrs_index = hr->hrs_index();
3097 size_t marked_bytes = 0;
3099 for (uint i = 0; i < _max_worker_id; i += 1) {
3100 size_t* marked_bytes_array = _cm->count_marked_bytes_array_for(i);
3101 BitMap* task_card_bm = _cm->count_card_bitmap_for(i);
3103 // Fetch the marked_bytes in this region for task i and
3104 // add it to the running total for this region.
3105 marked_bytes += marked_bytes_array[hrs_index];
3107 // Now union the bitmaps[0,max_worker_id)[start_idx..limit_idx)
3108 // into the global card bitmap.
3109 BitMap::idx_t scan_idx = task_card_bm->get_next_one_offset(start_idx, limit_idx);
3111 while (scan_idx < limit_idx) {
3112 assert(task_card_bm->at(scan_idx) == true, "should be");
3113 _cm_card_bm->set_bit(scan_idx);
3114 assert(_cm_card_bm->at(scan_idx) == true, "should be");
3116 // BitMap::get_next_one_offset() can handle the case when
3117 // its left_offset parameter is greater than its right_offset
3118 // parameter. It does, however, have an early exit if
3119 // left_offset == right_offset. So let's limit the value
3120 // passed in for left offset here.
3121 BitMap::idx_t next_idx = MIN2(scan_idx + 1, limit_idx);
3122 scan_idx = task_card_bm->get_next_one_offset(next_idx, limit_idx);
3123 }
3124 }
3126 // Update the marked bytes for this region.
3127 hr->add_to_marked_bytes(marked_bytes);
3129 // Next heap region
3130 return false;
3131 }
3132 };
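// Aggregation above sums each worker's marked-bytes slot for the region
// and ORs each worker's card bitmap stripe into the global card bitmap.
// A standalone sketch of that fold (all names illustrative):

#include <cstddef>
#include <vector>

struct WorkerCounts {
  std::vector<size_t> marked_bytes;  // indexed by region
  std::vector<bool>   card_bm;       // indexed by card
};

// Fold all workers' data for one region into the global card bitmap and
// return the region's total marked bytes.
static size_t aggregate_region(const std::vector<WorkerCounts>& workers,
                               size_t region_idx,
                               size_t start_card, size_t limit_card,
                               std::vector<bool>& global_card_bm) {
  size_t marked_bytes = 0;
  for (size_t w = 0; w < workers.size(); ++w) {
    marked_bytes += workers[w].marked_bytes[region_idx];
    for (size_t c = start_card; c < limit_card; ++c) {
      if (workers[w].card_bm[c]) {
        global_card_bm[c] = true;  // union: bits are only ever set here
      }
    }
  }
  return marked_bytes;
}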
3134 class G1AggregateCountDataTask: public AbstractGangTask {
3135 protected:
3136 G1CollectedHeap* _g1h;
3137 ConcurrentMark* _cm;
3138 BitMap* _cm_card_bm;
3139 uint _max_worker_id;
3140 int _active_workers;
3142 public:
3143 G1AggregateCountDataTask(G1CollectedHeap* g1h,
3144 ConcurrentMark* cm,
3145 BitMap* cm_card_bm,
3146 uint max_worker_id,
3147 int n_workers) :
3148 AbstractGangTask("Count Aggregation"),
3149 _g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm),
3150 _max_worker_id(max_worker_id),
3151 _active_workers(n_workers) { }
3153 void work(uint worker_id) {
3154 AggregateCountDataHRClosure cl(_g1h, _cm_card_bm, _max_worker_id);
3156 if (G1CollectedHeap::use_parallel_gc_threads()) {
3157 _g1h->heap_region_par_iterate_chunked(&cl, worker_id,
3158 _active_workers,
3159 HeapRegion::AggregateCountClaimValue);
3160 } else {
3161 _g1h->heap_region_iterate(&cl);
3162 }
3163 }
3164 };
3167 void ConcurrentMark::aggregate_count_data() {
3168 int n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
3169 _g1h->workers()->active_workers() :
3170 1);
3172 G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm,
3173 _max_worker_id, n_workers);
3175 if (G1CollectedHeap::use_parallel_gc_threads()) {
3176 assert(_g1h->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
3177 "sanity check");
3178 _g1h->set_par_threads(n_workers);
3179 _g1h->workers()->run_task(&g1_par_agg_task);
3180 _g1h->set_par_threads(0);
3182 assert(_g1h->check_heap_region_claim_values(HeapRegion::AggregateCountClaimValue),
3183 "sanity check");
3184 _g1h->reset_heap_region_claim_values();
3185 } else {
3186 g1_par_agg_task.work(0);
3187 }
3188 }
3190 // Clear the per-worker arrays used to store the per-region counting data
3191 void ConcurrentMark::clear_all_count_data() {
3192 // Clear the global card bitmap - it will be filled during
3193 // liveness count aggregation (during remark) and the
3194 // final counting task.
3195 _card_bm.clear();
3197 // Clear the global region bitmap - it will be filled as part
3198 // of the final counting task.
3199 _region_bm.clear();
3201 uint max_regions = _g1h->max_regions();
3202 assert(_max_worker_id > 0, "uninitialized");
3204 for (uint i = 0; i < _max_worker_id; i += 1) {
3205 BitMap* task_card_bm = count_card_bitmap_for(i);
3206 size_t* marked_bytes_array = count_marked_bytes_array_for(i);
3208 assert(task_card_bm->size() == _card_bm.size(), "size mismatch");
3209 assert(marked_bytes_array != NULL, "uninitialized");
3211 memset(marked_bytes_array, 0, (size_t) max_regions * sizeof(size_t));
3212 task_card_bm->clear();
3213 }
3214 }
3216 void ConcurrentMark::print_stats() {
3217 if (verbose_stats()) {
3218 gclog_or_tty->print_cr("---------------------------------------------------------------------");
3219 for (size_t i = 0; i < _active_tasks; ++i) {
3220 _tasks[i]->print_stats();
3221 gclog_or_tty->print_cr("---------------------------------------------------------------------");
3222 }
3223 }
3224 }
3226 // abandon current marking iteration due to a Full GC
3227 void ConcurrentMark::abort() {
3228 // Clear all marks to force marking thread to do nothing
3229 _nextMarkBitMap->clearAll();
3230 // Clear the liveness counting data
3231 clear_all_count_data();
3232 // Empty mark stack
3233 reset_marking_state();
3234 for (uint i = 0; i < _max_worker_id; ++i) {
3235 _tasks[i]->clear_region_fields();
3236 }
3237 _has_aborted = true;
3239 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3240 satb_mq_set.abandon_partial_marking();
3241 // This can be called either during or outside marking; we'll read
3242 // the expected_active value from the SATB queue set.
3243 satb_mq_set.set_active_all_threads(
3244 false, /* new active value */
3245 satb_mq_set.is_active() /* expected_active */);
3247 _g1h->trace_heap_after_concurrent_cycle();
3248 _g1h->register_concurrent_cycle_end();
3249 }
3251 static void print_ms_time_info(const char* prefix, const char* name,
3252 NumberSeq& ns) {
3253 gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
3254 prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
3255 if (ns.num() > 0) {
3256 gclog_or_tty->print_cr("%s [std. dev = %8.2f ms, max = %8.2f ms]",
3257 prefix, ns.sd(), ns.maximum());
3258 }
3259 }
3261 void ConcurrentMark::print_summary_info() {
3262 gclog_or_tty->print_cr(" Concurrent marking:");
3263 print_ms_time_info(" ", "init marks", _init_times);
3264 print_ms_time_info(" ", "remarks", _remark_times);
3265 {
3266 print_ms_time_info(" ", "final marks", _remark_mark_times);
3267 print_ms_time_info(" ", "weak refs", _remark_weak_ref_times);
3269 }
3270 print_ms_time_info(" ", "cleanups", _cleanup_times);
3271 gclog_or_tty->print_cr(" Final counting total time = %8.2f s (avg = %8.2f ms).",
3272 _total_counting_time,
3273 (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 /
3274 (double)_cleanup_times.num()
3275 : 0.0));
3276 if (G1ScrubRemSets) {
3277 gclog_or_tty->print_cr(" RS scrub total time = %8.2f s (avg = %8.2f ms).",
3278 _total_rs_scrub_time,
3279 (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 /
3280 (double)_cleanup_times.num()
3281 : 0.0));
3282 }
3283 gclog_or_tty->print_cr(" Total stop_world time = %8.2f s.",
3284 (_init_times.sum() + _remark_times.sum() +
3285 _cleanup_times.sum())/1000.0);
3286 gclog_or_tty->print_cr(" Total concurrent time = %8.2f s "
3287 "(%8.2f s marking).",
3288 cmThread()->vtime_accum(),
3289 cmThread()->vtime_mark_accum());
3290 }
3292 void ConcurrentMark::print_worker_threads_on(outputStream* st) const {
3293 if (use_parallel_marking_threads()) {
3294 _parallel_workers->print_worker_threads_on(st);
3295 }
3296 }
3298 void ConcurrentMark::print_on_error(outputStream* st) const {
3299 st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT,
3300 _prevMarkBitMap, _nextMarkBitMap);
3301 _prevMarkBitMap->print_on_error(st, " Prev Bits: ");
3302 _nextMarkBitMap->print_on_error(st, " Next Bits: ");
3303 }
3305 // We take a break if someone is trying to stop the world.
3306 bool ConcurrentMark::do_yield_check(uint worker_id) {
3307 if (should_yield()) {
3308 if (worker_id == 0) {
3309 _g1h->g1_policy()->record_concurrent_pause();
3310 }
3311 cmThread()->yield();
3312 return true;
3313 } else {
3314 return false;
3315 }
3316 }
3318 bool ConcurrentMark::should_yield() {
3319 return cmThread()->should_yield();
3320 }
3322 bool ConcurrentMark::containing_card_is_marked(void* p) {
3323 size_t offset = pointer_delta(p, _g1h->reserved_region().start(), 1);
3324 return _card_bm.at(offset >> CardTableModRefBS::card_shift);
3325 }
3327 bool ConcurrentMark::containing_cards_are_marked(void* start,
3328 void* last) {
3329 return containing_card_is_marked(start) &&
3330 containing_card_is_marked(last);
3331 }
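// A minimal sketch of the address-to-card-bit mapping used by
// containing_card_is_marked() above (a card shift of 9, i.e. 512-byte
// cards, is the CardTableModRefBS default):
//
//   void* p = ...;                                    // address in the heap
//   size_t offset = pointer_delta(p, heap_start, 1);  // byte offset (scale 1)
//   size_t card_idx = offset >> CardTableModRefBS::card_shift; // offset / 512
//   bool marked = _card_bm.at(card_idx);              // one bit per card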
3333 #ifndef PRODUCT
3334 // for debugging purposes
3335 void ConcurrentMark::print_finger() {
3336 gclog_or_tty->print_cr("heap ["PTR_FORMAT", "PTR_FORMAT"), global finger = "PTR_FORMAT,
3337 _heap_start, _heap_end, _finger);
3338 for (uint i = 0; i < _max_worker_id; ++i) {
3339 gclog_or_tty->print(" %u: "PTR_FORMAT, i, _tasks[i]->finger());
3340 }
3341 gclog_or_tty->print_cr("");
3342 }
3343 #endif
3345 void CMTask::scan_object(oop obj) {
3346 assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant");
3348 if (_cm->verbose_high()) {
3349 gclog_or_tty->print_cr("[%u] we're scanning object "PTR_FORMAT,
3350 _worker_id, (void*) obj);
3351 }
3353 size_t obj_size = obj->size();
3354 _words_scanned += obj_size;
3356 obj->oop_iterate(_cm_oop_closure);
3357 statsOnly( ++_objs_scanned );
3358 check_limits();
3359 }
3361 // Closure for iteration over bitmaps
3362 class CMBitMapClosure : public BitMapClosure {
3363 private:
3364 // the bitmap that is being iterated over
3365 CMBitMap* _nextMarkBitMap;
3366 ConcurrentMark* _cm;
3367 CMTask* _task;
3369 public:
3370 CMBitMapClosure(CMTask* task, ConcurrentMark* cm, CMBitMap* nextMarkBitMap) :
3371 _nextMarkBitMap(nextMarkBitMap), _cm(cm), _task(task) { }
3373 bool do_bit(size_t offset) {
3374 HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset);
3375 assert(_nextMarkBitMap->isMarked(addr), "invariant");
3376 assert(addr < _cm->finger(), "invariant");
3378 statsOnly( _task->increase_objs_found_on_bitmap() );
3379 assert(addr >= _task->finger(), "invariant");
3381 // We move the task's local finger along.
3382 _task->move_finger_to(addr);
3384 _task->scan_object(oop(addr));
3385 // we only partially drain the local queue and global stack
3386 _task->drain_local_queue(true);
3387 _task->drain_global_stack(true);
3389 // if the has_aborted flag has been raised, we need to bail out of
3390 // the iteration
3391 return !_task->has_aborted();
3392 }
3393 };
3395 // Closure for iterating over objects, currently only used for
3396 // processing SATB buffers.
3397 class CMObjectClosure : public ObjectClosure {
3398 private:
3399 CMTask* _task;
3401 public:
3402 void do_object(oop obj) {
3403 _task->deal_with_reference(obj);
3404 }
3406 CMObjectClosure(CMTask* task) : _task(task) { }
3407 };
3409 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
3410 ConcurrentMark* cm,
3411 CMTask* task)
3412 : _g1h(g1h), _cm(cm), _task(task) {
3413 assert(_ref_processor == NULL, "should be initialized to NULL");
3415 if (G1UseConcMarkReferenceProcessing) {
3416 _ref_processor = g1h->ref_processor_cm();
3417 assert(_ref_processor != NULL, "should not be NULL");
3418 }
3419 }
3421 void CMTask::setup_for_region(HeapRegion* hr) {
3422 // Separated the asserts so that we know which one fires.
3423 assert(hr != NULL,
3424 "claim_region() should have filtered out NULL regions");
3425 assert(!hr->continuesHumongous(),
3426 "claim_region() should have filtered out continues humongous regions");
3428 if (_cm->verbose_low()) {
3429 gclog_or_tty->print_cr("[%u] setting up for region "PTR_FORMAT,
3430 _worker_id, hr);
3431 }
3433 _curr_region = hr;
3434 _finger = hr->bottom();
3435 update_region_limit();
3436 }
3438 void CMTask::update_region_limit() {
3439 HeapRegion* hr = _curr_region;
3440 HeapWord* bottom = hr->bottom();
3441 HeapWord* limit = hr->next_top_at_mark_start();
3443 if (limit == bottom) {
3444 if (_cm->verbose_low()) {
3445 gclog_or_tty->print_cr("[%u] found an empty region "
3446 "["PTR_FORMAT", "PTR_FORMAT")",
3447 _worker_id, bottom, limit);
3448 }
3449 // The region was collected underneath our feet.
3450 // We set the finger to bottom to ensure that the bitmap
3451 // iteration that will follow this will not do anything.
3452 // (this is not a condition that holds when we set the region up,
3453 // as the region is not supposed to be empty in the first place)
3454 _finger = bottom;
3455 } else if (limit >= _region_limit) {
3456 assert(limit >= _finger, "peace of mind");
3457 } else {
3458 assert(limit < _region_limit, "only way to get here");
3459 // This can happen under some pretty unusual circumstances. An
3460 // evacuation pause empties the region underneath our feet (NTAMS
3461 // at bottom). We then do some allocation in the region (NTAMS
3462 // stays at bottom), followed by the region being used as a GC
3463 // alloc region (NTAMS will move to top() and the objects
3464 // originally below it will be grayed). All objects now marked in
3465 // the region are explicitly grayed, if below the global finger,
3466 // and in fact we do not need to scan anything else. So, we simply
3467 // set _finger to be limit to ensure that the bitmap iteration
3468 // doesn't do anything.
3469 _finger = limit;
3470 }
3472 _region_limit = limit;
3473 }
3475 void CMTask::giveup_current_region() {
3476 assert(_curr_region != NULL, "invariant");
3477 if (_cm->verbose_low()) {
3478 gclog_or_tty->print_cr("[%u] giving up region "PTR_FORMAT,
3479 _worker_id, _curr_region);
3480 }
3481 clear_region_fields();
3482 }
3484 void CMTask::clear_region_fields() {
3485 // Values for these three fields that indicate that we're not
3486 // holding on to a region.
3487 _curr_region = NULL;
3488 _finger = NULL;
3489 _region_limit = NULL;
3490 }
3492 void CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
3493 if (cm_oop_closure == NULL) {
3494 assert(_cm_oop_closure != NULL, "invariant");
3495 } else {
3496 assert(_cm_oop_closure == NULL, "invariant");
3497 }
3498 _cm_oop_closure = cm_oop_closure;
3499 }
3501 void CMTask::reset(CMBitMap* nextMarkBitMap) {
3502 guarantee(nextMarkBitMap != NULL, "invariant");
3504 if (_cm->verbose_low()) {
3505 gclog_or_tty->print_cr("[%u] resetting", _worker_id);
3506 }
3508 _nextMarkBitMap = nextMarkBitMap;
3509 clear_region_fields();
3511 _calls = 0;
3512 _elapsed_time_ms = 0.0;
3513 _termination_time_ms = 0.0;
3514 _termination_start_time_ms = 0.0;
3516 #if _MARKING_STATS_
3517 _local_pushes = 0;
3518 _local_pops = 0;
3519 _local_max_size = 0;
3520 _objs_scanned = 0;
3521 _global_pushes = 0;
3522 _global_pops = 0;
3523 _global_max_size = 0;
3524 _global_transfers_to = 0;
3525 _global_transfers_from = 0;
3526 _regions_claimed = 0;
3527 _objs_found_on_bitmap = 0;
3528 _satb_buffers_processed = 0;
3529 _steal_attempts = 0;
3530 _steals = 0;
3531 _aborted = 0;
3532 _aborted_overflow = 0;
3533 _aborted_cm_aborted = 0;
3534 _aborted_yield = 0;
3535 _aborted_timed_out = 0;
3536 _aborted_satb = 0;
3537 _aborted_termination = 0;
3538 #endif // _MARKING_STATS_
3539 }
3541 bool CMTask::should_exit_termination() {
3542 regular_clock_call();
3543 // This is called when we are in the termination protocol. We should
3544 // quit if, for some reason, this task wants to abort or the global
3545 // stack is not empty (this means that we can get work from it).
3546 return !_cm->mark_stack_empty() || has_aborted();
3547 }
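// Roughly, and only as a sketch of the contract (the names below are
// hypothetical, not the actual ParallelTaskTerminator implementation):
// offer_termination() parks tasks that have run out of work and
// periodically calls back into should_exit_termination(); a true result
// makes the task leave the protocol to pick up new work or to abort.
//
//   while (!all_tasks_offered_termination()) {  // hypothetical predicate
//     if (task->should_exit_termination()) {
//       return false;  // leave the protocol; more work (or an abort) awaits
//     }
//     spin_or_yield();                          // hypothetical back-off
//   }
//   return true;       // every task terminated: the marking phase is done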
3549 void CMTask::reached_limit() {
3550 assert(_words_scanned >= _words_scanned_limit ||
3551 _refs_reached >= _refs_reached_limit,
3552 "shouldn't have been called otherwise");
3553 regular_clock_call();
3554 }
3556 void CMTask::regular_clock_call() {
3557 if (has_aborted()) return;
3559 // First, we need to recalculate the words scanned and refs reached
3560 // limits for the next clock call.
3561 recalculate_limits();
3563 // During the regular clock call we do the following
3565 // (1) If an overflow has been flagged, then we abort.
3566 if (_cm->has_overflown()) {
3567 set_has_aborted();
3568 return;
3569 }
3571 // If we are not concurrent (i.e. we're doing remark) we don't need
3572 // to check anything else. The other steps are only needed during
3573 // the concurrent marking phase.
3574 if (!concurrent()) return;
3576 // (2) If marking has been aborted for Full GC, then we also abort.
3577 if (_cm->has_aborted()) {
3578 set_has_aborted();
3579 statsOnly( ++_aborted_cm_aborted );
3580 return;
3581 }
3583 double curr_time_ms = os::elapsedVTime() * 1000.0;
3585 // (3) If marking stats are enabled, then we update the clock statistics.
3586 #if _MARKING_STATS_
3587 if (_words_scanned >= _words_scanned_limit) {
3588 ++_clock_due_to_scanning;
3589 }
3590 if (_refs_reached >= _refs_reached_limit) {
3591 ++_clock_due_to_marking;
3592 }
3594 double last_interval_ms = curr_time_ms - _interval_start_time_ms;
3595 _interval_start_time_ms = curr_time_ms;
3596 _all_clock_intervals_ms.add(last_interval_ms);
3598 if (_cm->verbose_medium()) {
3599 gclog_or_tty->print_cr("[%u] regular clock, interval = %1.2lfms, "
3600 "scanned = %d%s, refs reached = %d%s",
3601 _worker_id, last_interval_ms,
3602 _words_scanned,
3603 (_words_scanned >= _words_scanned_limit) ? " (*)" : "",
3604 _refs_reached,
3605 (_refs_reached >= _refs_reached_limit) ? " (*)" : "");
3606 }
3607 #endif // _MARKING_STATS_
3609 // (4) We check whether we should yield. If we have to, then we abort.
3610 if (_cm->should_yield()) {
3611 // We should yield. To do this we abort the task. The caller is
3612 // responsible for yielding.
3613 set_has_aborted();
3614 statsOnly( ++_aborted_yield );
3615 return;
3616 }
3618 // (5) We check whether we've reached our time quota. If we have,
3619 // then we abort.
3620 double elapsed_time_ms = curr_time_ms - _start_time_ms;
3621 if (elapsed_time_ms > _time_target_ms) {
3622 set_has_aborted();
3623 _has_timed_out = true;
3624 statsOnly( ++_aborted_timed_out );
3625 return;
3626 }
3628 // (6) Finally, we check whether there are enough completed SATB
3629 // buffers available for processing. If there are, we abort.
3630 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3631 if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
3632 if (_cm->verbose_low()) {
3633 gclog_or_tty->print_cr("[%u] aborting to deal with pending SATB buffers",
3634 _worker_id);
3635 }
3636 // we do need to process SATB buffers, we'll abort and restart
3637 // the marking task to do so
3638 set_has_aborted();
3639 statsOnly( ++_aborted_satb );
3640 return;
3641 }
3642 }
3644 void CMTask::recalculate_limits() {
3645 _real_words_scanned_limit = _words_scanned + words_scanned_period;
3646 _words_scanned_limit = _real_words_scanned_limit;
3648 _real_refs_reached_limit = _refs_reached + refs_reached_period;
3649 _refs_reached_limit = _real_refs_reached_limit;
3650 }
3652 void CMTask::decrease_limits() {
3653 // This is called when we believe that we're going to do an infrequent
3654 // operation which will increase the per byte scanned cost (i.e. move
3655 // entries to/from the global stack). It basically tries to decrease the
3656 // scanning limit so that the clock is called earlier.
3658 if (_cm->verbose_medium()) {
3659 gclog_or_tty->print_cr("[%u] decreasing limits", _worker_id);
3660 }
3662 _words_scanned_limit = _real_words_scanned_limit -
3663 3 * words_scanned_period / 4;
3664 _refs_reached_limit = _real_refs_reached_limit -
3665 3 * refs_reached_period / 4;
3666 }
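// To illustrate the arithmetic (assuming a words_scanned_period of
// 12*1024 words): recalculate_limits() places the next clock call one
// full period ahead, and decrease_limits() pulls it back by 3/4 of a
// period, leaving only a quarter period of scanning before the clock
// fires again.
//
//   const size_t period = 12 * 1024;                   // assumed value
//   size_t real_limit  = words_scanned + period;       // recalculate_limits()
//   size_t eager_limit = real_limit - 3 * period / 4;  // decrease_limits()
//   // eager_limit - words_scanned == period / 4 words left to scan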
3668 void CMTask::move_entries_to_global_stack() {
3669 // local array where we'll store the entries that will be popped
3670 // from the local queue
3671 oop buffer[global_stack_transfer_size];
3673 int n = 0;
3674 oop obj;
3675 while (n < global_stack_transfer_size && _task_queue->pop_local(obj)) {
3676 buffer[n] = obj;
3677 ++n;
3678 }
3680 if (n > 0) {
3681 // we popped at least one entry from the local queue
3683 statsOnly( ++_global_transfers_to; _local_pops += n );
3685 if (!_cm->mark_stack_push(buffer, n)) {
3686 if (_cm->verbose_low()) {
3687 gclog_or_tty->print_cr("[%u] aborting due to global stack overflow",
3688 _worker_id);
3689 }
3690 set_has_aborted();
3691 } else {
3692 // the transfer was successful
3694 if (_cm->verbose_medium()) {
3695 gclog_or_tty->print_cr("[%u] pushed %d entries to the global stack",
3696 _worker_id, n);
3697 }
3698 statsOnly( int tmp_size = _cm->mark_stack_size();
3699 if (tmp_size > _global_max_size) {
3700 _global_max_size = tmp_size;
3701 }
3702 _global_pushes += n );
3703 }
3704 }
3706 // this operation was quite expensive, so decrease the limits
3707 decrease_limits();
3708 }
3710 void CMTask::get_entries_from_global_stack() {
3711 // local array where we'll store the entries that will be popped
3712 // from the global stack.
3713 oop buffer[global_stack_transfer_size];
3714 int n;
3715 _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n);
3716 assert(n <= global_stack_transfer_size,
3717 "we should not pop more than the given limit");
3718 if (n > 0) {
3719 // yes, we did actually pop at least one entry
3721 statsOnly( ++_global_transfers_from; _global_pops += n );
3722 if (_cm->verbose_medium()) {
3723 gclog_or_tty->print_cr("[%u] popped %d entries from the global stack",
3724 _worker_id, n);
3725 }
3726 for (int i = 0; i < n; ++i) {
3727 bool success = _task_queue->push(buffer[i]);
3728 // We only call this when the local queue is empty or under a
3729 // given target limit. So, we do not expect this push to fail.
3730 assert(success, "invariant");
3731 }
3733 statsOnly( int tmp_size = _task_queue->size();
3734 if (tmp_size > _local_max_size) {
3735 _local_max_size = tmp_size;
3736 }
3737 _local_pushes += n );
3738 }
3740 // this operation was quite expensive, so decrease the limits
3741 decrease_limits();
3742 }
3744 void CMTask::drain_local_queue(bool partially) {
3745 if (has_aborted()) return;
3747 // Decide what the target size is, depending whether we're going to
3748 // drain it partially (so that other tasks can steal if they run out
3749 // of things to do) or totally (at the very end).
3750 size_t target_size;
3751 if (partially) {
3752 target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
3753 } else {
3754 target_size = 0;
3755 }
3757 if (_task_queue->size() > target_size) {
3758 if (_cm->verbose_high()) {
3759 gclog_or_tty->print_cr("[%u] draining local queue, target size = %d",
3760 _worker_id, target_size);
3761 }
3763 oop obj;
3764 bool ret = _task_queue->pop_local(obj);
3765 while (ret) {
3766 statsOnly( ++_local_pops );
3768 if (_cm->verbose_high()) {
3769 gclog_or_tty->print_cr("[%u] popped "PTR_FORMAT, _worker_id,
3770 (void*) obj);
3771 }
3773 assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" );
3774 assert(!_g1h->is_on_master_free_list(
3775 _g1h->heap_region_containing((HeapWord*) obj)), "invariant");
3777 scan_object(obj);
3779 if (_task_queue->size() <= target_size || has_aborted()) {
3780 ret = false;
3781 } else {
3782 ret = _task_queue->pop_local(obj);
3783 }
3784 }
3786 if (_cm->verbose_high()) {
3787 gclog_or_tty->print_cr("[%u] drained local queue, size = %d",
3788 _worker_id, _task_queue->size());
3789 }
3790 }
3791 }
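// Under the default GCDrainStackTargetSize of 64 (an assumption; the
// flag is tunable) and a task queue with, say, 16K max elements, a
// partial drain stops at MIN2(16*K/3, 64) == 64 entries, deliberately
// leaving work behind for other tasks to steal, while a total drain
// empties the queue:
//
//   size_t target = partially
//       ? MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize)
//       : 0;  // total drain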
3793 void CMTask::drain_global_stack(bool partially) {
3794 if (has_aborted()) return;
3796 // We have a policy to drain the local queue before we attempt to
3797 // drain the global stack.
3798 assert(partially || _task_queue->size() == 0, "invariant");
3800 // Decide what the target size is, depending whether we're going to
3801 // drain it partially (so that other tasks can steal if they run out
3802 // of things to do) or totally (at the very end). Notice that,
3803 // because we move entries from the global stack in chunks or
3804 // because another task might be doing the same, we might in fact
3805 // drop below the target. But, this is not a problem.
3806 size_t target_size;
3807 if (partially) {
3808 target_size = _cm->partial_mark_stack_size_target();
3809 } else {
3810 target_size = 0;
3811 }
3813 if (_cm->mark_stack_size() > target_size) {
3814 if (_cm->verbose_low()) {
3815 gclog_or_tty->print_cr("[%u] draining global_stack, target size %d",
3816 _worker_id, target_size);
3817 }
3819 while (!has_aborted() && _cm->mark_stack_size() > target_size) {
3820 get_entries_from_global_stack();
3821 drain_local_queue(partially);
3822 }
3824 if (_cm->verbose_low()) {
3825 gclog_or_tty->print_cr("[%u] drained global stack, size = %d",
3826 _worker_id, _cm->mark_stack_size());
3827 }
3828 }
3829 }
3831 // The SATB queue code makes several assumptions about whether to call
3832 // the par or non-par versions of the methods; this is why some of the
3833 // code is replicated. We should really get rid of the single-threaded
3834 // version of the code to simplify things.
3835 void CMTask::drain_satb_buffers() {
3836 if (has_aborted()) return;
3838 // We set this so that the regular clock knows that we're in the
3839 // middle of draining buffers and doesn't set the abort flag when it
3840 // notices that SATB buffers are available for draining. It'd be
3841 // very counter productive if it did that. :-)
3842 _draining_satb_buffers = true;
3844 CMObjectClosure oc(this);
3845 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3846 if (G1CollectedHeap::use_parallel_gc_threads()) {
3847 satb_mq_set.set_par_closure(_worker_id, &oc);
3848 } else {
3849 satb_mq_set.set_closure(&oc);
3850 }
3852 // This keeps claiming and applying the closure to completed buffers
3853 // until we run out of buffers or we need to abort.
3854 if (G1CollectedHeap::use_parallel_gc_threads()) {
3855 while (!has_aborted() &&
3856 satb_mq_set.par_apply_closure_to_completed_buffer(_worker_id)) {
3857 if (_cm->verbose_medium()) {
3858 gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id);
3859 }
3860 statsOnly( ++_satb_buffers_processed );
3861 regular_clock_call();
3862 }
3863 } else {
3864 while (!has_aborted() &&
3865 satb_mq_set.apply_closure_to_completed_buffer()) {
3866 if (_cm->verbose_medium()) {
3867 gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id);
3868 }
3869 statsOnly( ++_satb_buffers_processed );
3870 regular_clock_call();
3871 }
3872 }
3874 if (!concurrent() && !has_aborted()) {
3875 // We should only do this during remark.
3876 if (G1CollectedHeap::use_parallel_gc_threads()) {
3877 satb_mq_set.par_iterate_closure_all_threads(_worker_id);
3878 } else {
3879 satb_mq_set.iterate_closure_all_threads();
3880 }
3881 }
3883 _draining_satb_buffers = false;
3885 assert(has_aborted() ||
3886 concurrent() ||
3887 satb_mq_set.completed_buffers_num() == 0, "invariant");
3889 if (G1CollectedHeap::use_parallel_gc_threads()) {
3890 satb_mq_set.set_par_closure(_worker_id, NULL);
3891 } else {
3892 satb_mq_set.set_closure(NULL);
3893 }
3895 // again, this was a potentially expensive operation, decrease the
3896 // limits to get the regular clock call early
3897 decrease_limits();
3898 }
3900 void CMTask::print_stats() {
3901 gclog_or_tty->print_cr("Marking Stats, task = %u, calls = %d",
3902 _worker_id, _calls);
3903 gclog_or_tty->print_cr(" Elapsed time = %1.2lfms, Termination time = %1.2lfms",
3904 _elapsed_time_ms, _termination_time_ms);
3905 gclog_or_tty->print_cr(" Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
3906 _step_times_ms.num(), _step_times_ms.avg(),
3907 _step_times_ms.sd());
3908 gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms",
3909 _step_times_ms.maximum(), _step_times_ms.sum());
3911 #if _MARKING_STATS_
3912 gclog_or_tty->print_cr(" Clock Intervals (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
3913 _all_clock_intervals_ms.num(), _all_clock_intervals_ms.avg(),
3914 _all_clock_intervals_ms.sd());
3915 gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms",
3916 _all_clock_intervals_ms.maximum(),
3917 _all_clock_intervals_ms.sum());
3918 gclog_or_tty->print_cr(" Clock Causes (cum): scanning = %d, marking = %d",
3919 _clock_due_to_scanning, _clock_due_to_marking);
3920 gclog_or_tty->print_cr(" Objects: scanned = %d, found on the bitmap = %d",
3921 _objs_scanned, _objs_found_on_bitmap);
3922 gclog_or_tty->print_cr(" Local Queue: pushes = %d, pops = %d, max size = %d",
3923 _local_pushes, _local_pops, _local_max_size);
3924 gclog_or_tty->print_cr(" Global Stack: pushes = %d, pops = %d, max size = %d",
3925 _global_pushes, _global_pops, _global_max_size);
3926 gclog_or_tty->print_cr(" transfers to = %d, transfers from = %d",
3927 _global_transfers_to,_global_transfers_from);
3928 gclog_or_tty->print_cr(" Regions: claimed = %d", _regions_claimed);
3929 gclog_or_tty->print_cr(" SATB buffers: processed = %d", _satb_buffers_processed);
3930 gclog_or_tty->print_cr(" Steals: attempts = %d, successes = %d",
3931 _steal_attempts, _steals);
3932 gclog_or_tty->print_cr(" Aborted: %d, due to", _aborted);
3933 gclog_or_tty->print_cr(" overflow: %d, global abort: %d, yield: %d",
3934 _aborted_overflow, _aborted_cm_aborted, _aborted_yield);
3935 gclog_or_tty->print_cr(" time out: %d, SATB: %d, termination: %d",
3936 _aborted_timed_out, _aborted_satb, _aborted_termination);
3937 #endif // _MARKING_STATS_
3938 }
3940 /*****************************************************************************
3942 The do_marking_step(time_target_ms, ...) method is the building
3943 block of the parallel marking framework. It can be called in parallel
3944 with other invocations of do_marking_step() on different tasks
3945 (but only one per task, obviously) and concurrently with the
3946 mutator threads, or during remark, hence it eliminates the need
3947 for two versions of the code. When called during remark, it will
3948 pick up from where the task left off during the concurrent marking
3949 phase. Interestingly, tasks are also claimable during evacuation
3950 pauses, since do_marking_step() ensures that it aborts before
3951 it needs to yield.
3953 The data structures that it uses to do marking work are the
3954 following:
3956 (1) Marking Bitmap. If there are gray objects that appear only
3957 on the bitmap (this happens either when dealing with an overflow
3958 or when the initial marking phase has simply marked the roots
3959 and didn't push them on the stack), then tasks claim heap
3960 regions whose bitmap they then scan to find gray objects. A
3961 global finger indicates where the end of the last claimed region
3962 is. A local finger indicates how far into the region a task has
3963 scanned. The two fingers are used to determine how to gray an
3964 object (i.e. whether simply marking it is OK, as it will be
3965 visited by a task in the future, or whether it needs to be also
3966 pushed on a stack).
3968 (2) Local Queue. The local queue of the task which is accessed
3969 reasonably efficiently by the task. Other tasks can steal from
3970 it when they run out of work. Throughout the marking phase, a
3971 task attempts to keep its local queue short but not totally
3972 empty, so that entries are available for stealing by other
3973 tasks. Only when there is no more work will a task totally
3974 drain its local queue.
3976 (3) Global Mark Stack. This handles local queue overflow. During
3977 marking only sets of entries are moved between it and the local
3978 queues, as access to it requires a mutex and such fine-grained
3979 interaction with it might cause contention. If it
3980 overflows, then the marking phase should restart and iterate
3981 over the bitmap to identify gray objects. Throughout the marking
3982 phase, tasks attempt to keep the global mark stack at a small
3983 length but not totally empty, so that entries are available for
3984 popping by other tasks. Only when there is no more work will
3985 tasks totally drain the global mark stack.
3987 (4) SATB Buffer Queue. This is where completed SATB buffers are
3988 made available. Buffers are regularly removed from this queue
3989 and scanned for roots, so that the queue doesn't get too
3990 long. During remark, all completed buffers are processed, as
3991 well as the filled-in parts of any uncompleted buffers.
3993 The do_marking_step() method tries to abort when the time target
3994 has been reached. There are a few other cases when the
3995 do_marking_step() method also aborts:
3997 (1) When the marking phase has been aborted (after a Full GC).
3999 (2) When a global overflow (on the global stack) has been
4000 triggered. Before the task aborts, it will actually sync up with
4001 the other tasks to ensure that all the marking data structures
4002 (local queues, stacks, fingers etc.) are re-initialized so that
4003 when do_marking_step() completes, the marking phase can
4004 immediately restart.
4006 (3) When enough completed SATB buffers are available. The
4007 do_marking_step() method only tries to drain SATB buffers right
4008 at the beginning. So, if enough buffers are available, the
4009 marking step aborts and the SATB buffers are processed at
4010 the beginning of the next invocation.
4012 (4) To yield. When we have to yield, we abort and yield
4013 right at the end of do_marking_step(). This saves us from a lot
4014 of hassle as, by yielding, we might allow a Full GC. If this
4015 happens then objects will be compacted underneath our feet, the
4016 heap might shrink, etc. We avoid checking for all this by just
4017 aborting and doing the yield right at the end.
4019 From the above it follows that the do_marking_step() method should
4020 be called in a loop (or, otherwise, regularly) until it completes.
4022 If a marking step completes without its has_aborted() flag being
4023 true, it means it has completed the current marking phase (and
4024 also all other marking tasks have done so and have all synced up).
4026 A method called regular_clock_call() is invoked "regularly" (in
4027 sub ms intervals) throughout marking. It is this clock method that
4028 checks all the abort conditions which were mentioned above and
4029 decides when the task should abort. A work-based scheme is used to
4030 trigger this clock method: when the number of object words the
4031 marking phase has scanned or the number of references the marking
4032 phase has visited reaches a given limit. Additional invocations
4033 of the clock method have been planted in a few other strategic
4034 places too. The initial reason for the clock method was to avoid
4035 calling vtime too frequently, as it is quite expensive. So, once it was in
4036 place, it was natural to piggy-back all the other conditions on it
4037 too and not constantly check them throughout the code.
4039 If do_termination is true then do_marking_step will enter its
4040 termination protocol.
4042 The value of is_serial must be true when do_marking_step is being
4043 called serially (i.e. by the VMThread) and do_marking_step should
4044 skip any synchronization in the termination and overflow code.
4045 Examples include the serial remark code and the serial reference
4046 processing closures.
4048 The value of is_serial must be false when do_marking_step is
4049 being called by any of the worker threads in a work gang.
4050 Examples include the concurrent marking code (CMMarkingTask),
4051 the MT remark code, and the MT reference processing closures.
4053 *****************************************************************************/
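// A hedged sketch of the calling convention described above, modeled on
// the concurrent marking task (the 10ms time target is illustrative):
// do_marking_step() is invoked in a loop, yielding between aborted
// steps, until a step finishes with its has_aborted() flag clear.
//
//   do {
//     task->do_marking_step(10.0 /* illustrative target in ms */,
//                           true  /* do_termination */,
//                           false /* is_serial */);
//     if (task->has_aborted()) {
//       // The step aborted (yield request, overflow, SATB buffers,
//       // time quota, ...); let the world stop if it needs to.
//       cm->do_yield_check(worker_id);
//     }
//   } while (!cm->has_aborted() && task->has_aborted());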
4055 void CMTask::do_marking_step(double time_target_ms,
4056 bool do_termination,
4057 bool is_serial) {
4058 assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
4059 assert(concurrent() == _cm->concurrent(), "they should be the same");
4061 G1CollectorPolicy* g1_policy = _g1h->g1_policy();
4062 assert(_task_queues != NULL, "invariant");
4063 assert(_task_queue != NULL, "invariant");
4064 assert(_task_queues->queue(_worker_id) == _task_queue, "invariant");
4066 assert(!_claimed,
4067 "only one thread should claim this task at any one time");
4069 // OK, this doesn't safeguard against all possible scenarios, as it is
4070 // possible for two threads to set the _claimed flag at the same
4071 // time. But it is only for debugging purposes anyway and it will
4072 // catch most problems.
4073 _claimed = true;
4075 _start_time_ms = os::elapsedVTime() * 1000.0;
4076 statsOnly( _interval_start_time_ms = _start_time_ms );
4078 // If do_stealing is true then do_marking_step will attempt to
4079 // steal work from the other CMTasks. It only makes sense to
4080 // enable stealing when the termination protocol is enabled
4081 // and do_marking_step() is not being called serially.
4082 bool do_stealing = do_termination && !is_serial;
4084 double diff_prediction_ms =
4085 g1_policy->get_new_prediction(&_marking_step_diffs_ms);
4086 _time_target_ms = time_target_ms - diff_prediction_ms;
4088 // set up the variables that are used in the work-based scheme to
4089 // call the regular clock method
4090 _words_scanned = 0;
4091 _refs_reached = 0;
4092 recalculate_limits();
4094 // clear all flags
4095 clear_has_aborted();
4096 _has_timed_out = false;
4097 _draining_satb_buffers = false;
4099 ++_calls;
4101 if (_cm->verbose_low()) {
4102 gclog_or_tty->print_cr("[%u] >>>>>>>>>> START, call = %d, "
4103 "target = %1.2lfms >>>>>>>>>>",
4104 _worker_id, _calls, _time_target_ms);
4105 }
4107 // Set up the bitmap and oop closures. Anything that uses them is
4108 // eventually called from this method, so it is OK to allocate these
4109 // statically.
4110 CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap);
4111 G1CMOopClosure cm_oop_closure(_g1h, _cm, this);
4112 set_cm_oop_closure(&cm_oop_closure);
4114 if (_cm->has_overflown()) {
4115 // This can happen if the mark stack overflows during a GC pause
4116 // and this task, after a yield point, restarts. We have to abort
4117 // as we need to get into the overflow protocol which happens
4118 // right at the end of this task.
4119 set_has_aborted();
4120 }
4122 // First drain any available SATB buffers. After this, we will not
4123 // look at SATB buffers before the next invocation of this method.
4124 // If enough completed SATB buffers are queued up, the regular clock
4125 // will abort this task so that it restarts.
4126 drain_satb_buffers();
4127 // ...then partially drain the local queue and the global stack
4128 drain_local_queue(true);
4129 drain_global_stack(true);
4131 do {
4132 if (!has_aborted() && _curr_region != NULL) {
4133 // This means that we're already holding on to a region.
4134 assert(_finger != NULL, "if region is not NULL, then the finger "
4135 "should not be NULL either");
4137 // We might have restarted this task after an evacuation pause
4138 // which might have evacuated the region we're holding on to
4139 // underneath our feet. Let's read its limit again to make sure
4140 // that we do not iterate over a region of the heap that
4141 // contains garbage (update_region_limit() will also move
4142 // _finger to the start of the region if it is found empty).
4143 update_region_limit();
4144 // We will start from _finger not from the start of the region,
4145 // as we might be restarting this task after aborting half-way
4146 // through scanning this region. In this case, _finger points to
4147 // the address where we last found a marked object. If this is a
4148 // fresh region, _finger points to start().
4149 MemRegion mr = MemRegion(_finger, _region_limit);
4151 if (_cm->verbose_low()) {
4152 gclog_or_tty->print_cr("[%u] we're scanning part "
4153 "["PTR_FORMAT", "PTR_FORMAT") "
4154 "of region "HR_FORMAT,
4155 _worker_id, _finger, _region_limit,
4156 HR_FORMAT_PARAMS(_curr_region));
4157 }
4159 assert(!_curr_region->isHumongous() || mr.start() == _curr_region->bottom(),
4160 "humongous regions should go around loop once only");
4162 // Some special cases:
4163 // If the memory region is empty, we can just give up the region.
4164 // If the current region is humongous then we only need to check
4165 // the bitmap for the bit associated with the start of the object,
4166 // scan the object if it's live, and give up the region.
4167 // Otherwise, let's iterate over the bitmap of the part of the region
4168 // that is left.
4169 // If the iteration is successful, give up the region.
4170 if (mr.is_empty()) {
4171 giveup_current_region();
4172 regular_clock_call();
4173 } else if (_curr_region->isHumongous() && mr.start() == _curr_region->bottom()) {
4174 if (_nextMarkBitMap->isMarked(mr.start())) {
4175 // The object is marked - apply the closure
4176 BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start());
4177 bitmap_closure.do_bit(offset);
4178 }
4179 // Even if this task aborted while scanning the humongous object
4180 // we can (and should) give up the current region.
4181 giveup_current_region();
4182 regular_clock_call();
4183 } else if (_nextMarkBitMap->iterate(&bitmap_closure, mr)) {
4184 giveup_current_region();
4185 regular_clock_call();
4186 } else {
4187 assert(has_aborted(), "currently the only way to do so");
4188 // The only way to abort the bitmap iteration is to return
4189 // false from the do_bit() method. However, inside the
4190 // do_bit() method we move the _finger to point to the
4191 // object currently being looked at. So, if we bail out, we
4192 // have definitely set _finger to something non-null.
4193 assert(_finger != NULL, "invariant");
4195 // Region iteration was actually aborted. So now _finger
4196 // points to the address of the object we last scanned. If we
4197 // leave it there, when we restart this task, we will rescan
4198 // the object. It is easy to avoid this. We move the finger by
4199 // enough to point to the next possible object header (the
4200 // bitmap knows by how much we need to move it as it knows its
4201 // granularity).
4202 assert(_finger < _region_limit, "invariant");
4203 HeapWord* new_finger = _nextMarkBitMap->nextObject(_finger);
4204 // Check if bitmap iteration was aborted while scanning the last object
4205 if (new_finger >= _region_limit) {
4206 giveup_current_region();
4207 } else {
4208 move_finger_to(new_finger);
4209 }
4210 }
4211 }
4212 // At this point we have either completed iterating over the
4213 // region we were holding on to, or we have aborted.
4215 // We then partially drain the local queue and the global stack.
4216 // (Do we really need this?)
4217 drain_local_queue(true);
4218 drain_global_stack(true);
4220 // Read the note on the claim_region() method on why it might
4221 // return NULL with potentially more regions available for
4222 // claiming and why we have to check out_of_regions() to determine
4223 // whether we're done or not.
4224 while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
4225 // We are going to try to claim a new region. We should have
4226 // given up on the previous one.
4227 // Separated the asserts so that we know which one fires.
4228 assert(_curr_region == NULL, "invariant");
4229 assert(_finger == NULL, "invariant");
4230 assert(_region_limit == NULL, "invariant");
4231 if (_cm->verbose_low()) {
4232 gclog_or_tty->print_cr("[%u] trying to claim a new region", _worker_id);
4233 }
4234 HeapRegion* claimed_region = _cm->claim_region(_worker_id);
4235 if (claimed_region != NULL) {
4236 // Yes, we managed to claim one
4237 statsOnly( ++_regions_claimed );
4239 if (_cm->verbose_low()) {
4240 gclog_or_tty->print_cr("[%u] we successfully claimed "
4241 "region "PTR_FORMAT,
4242 _worker_id, claimed_region);
4243 }
4245 setup_for_region(claimed_region);
4246 assert(_curr_region == claimed_region, "invariant");
4247 }
4248 // It is important to call the regular clock here. It might take
4249 // a while to claim a region if, for example, we hit a large
4250 // block of empty regions. So we need to call the regular clock
4251 // method once round the loop to make sure it's called
4252 // frequently enough.
4253 regular_clock_call();
4254 }
4256 if (!has_aborted() && _curr_region == NULL) {
4257 assert(_cm->out_of_regions(),
4258 "at this point we should be out of regions");
4259 }
4260 } while ( _curr_region != NULL && !has_aborted());
4262 if (!has_aborted()) {
4263 // We cannot check whether the global stack is empty, since other
4264 // tasks might be pushing objects to it concurrently.
4265 assert(_cm->out_of_regions(),
4266 "at this point we should be out of regions");
4268 if (_cm->verbose_low()) {
4269 gclog_or_tty->print_cr("[%u] all regions claimed", _worker_id);
4270 }
4272 // Try to reduce the number of available SATB buffers so that
4273 // remark has less work to do.
4274 drain_satb_buffers();
4275 }
4277 // Since we've done everything else, we can now totally drain the
4278 // local queue and global stack.
4279 drain_local_queue(false);
4280 drain_global_stack(false);
4282 // Attempt to steal work from other tasks' queues.
4283 if (do_stealing && !has_aborted()) {
4284 // We have not aborted. This means that we have finished all that
4285 // we could. Let's try to do some stealing...
4287 // We cannot check whether the global stack is empty, since other
4288 // tasks might be pushing objects to it concurrently.
4289 assert(_cm->out_of_regions() && _task_queue->size() == 0,
4290 "only way to reach here");
4292 if (_cm->verbose_low()) {
4293 gclog_or_tty->print_cr("[%u] starting to steal", _worker_id);
4294 }
4296 while (!has_aborted()) {
4297 oop obj;
4298 statsOnly( ++_steal_attempts );
4300 if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) {
4301 if (_cm->verbose_medium()) {
4302 gclog_or_tty->print_cr("[%u] stolen "PTR_FORMAT" successfully",
4303 _worker_id, (void*) obj);
4304 }
4306 statsOnly( ++_steals );
4308 assert(_nextMarkBitMap->isMarked((HeapWord*) obj),
4309 "any stolen object should be marked");
4310 scan_object(obj);
4312 // And since we're towards the end, let's totally drain the
4313 // local queue and global stack.
4314 drain_local_queue(false);
4315 drain_global_stack(false);
4316 } else {
4317 break;
4318 }
4319 }
4320 }
4322 // If we are about to wrap up and go into termination, check if we
4323 // should raise the overflow flag.
4324 if (do_termination && !has_aborted()) {
4325 if (_cm->force_overflow()->should_force()) {
4326 _cm->set_has_overflown();
4327 regular_clock_call();
4328 }
4329 }
4331 // We still haven't aborted. Now, let's try to get into the
4332 // termination protocol.
4333 if (do_termination && !has_aborted()) {
4334 // We cannot check whether the global stack is empty, since other
4335 // tasks might be concurrently pushing objects on it.
4336 // Separated the asserts so that we know which one fires.
4337 assert(_cm->out_of_regions(), "only way to reach here");
4338 assert(_task_queue->size() == 0, "only way to reach here");
4340 if (_cm->verbose_low()) {
4341 gclog_or_tty->print_cr("[%u] starting termination protocol", _worker_id);
4342 }
4344 _termination_start_time_ms = os::elapsedVTime() * 1000.0;
4346 // The CMTask class also extends the TerminatorTerminator class,
4347 // hence its should_exit_termination() method will also decide
4348 // whether to exit the termination protocol or not.
4349 bool finished = (is_serial ||
4350 _cm->terminator()->offer_termination(this));
4351 double termination_end_time_ms = os::elapsedVTime() * 1000.0;
4352 _termination_time_ms +=
4353 termination_end_time_ms - _termination_start_time_ms;
4355 if (finished) {
4356 // We're all done.
4358 if (_worker_id == 0) {
4359 // let's allow task 0 to do this
4360 if (concurrent()) {
4361 assert(_cm->concurrent_marking_in_progress(), "invariant");
4362 // we need to set this to false before the next
4363 // safepoint. This way we ensure that the marking phase
4364 // doesn't observe any more heap expansions.
4365 _cm->clear_concurrent_marking_in_progress();
4366 }
4367 }
4369 // We can now guarantee that the global stack is empty, since
4370 // all other tasks have finished. We separated the guarantees so
4371 // that, if a condition is false, we can immediately find out
4372 // which one.
4373 guarantee(_cm->out_of_regions(), "only way to reach here");
4374 guarantee(_cm->mark_stack_empty(), "only way to reach here");
4375 guarantee(_task_queue->size() == 0, "only way to reach here");
4376 guarantee(!_cm->has_overflown(), "only way to reach here");
4377 guarantee(!_cm->mark_stack_overflow(), "only way to reach here");
4379 if (_cm->verbose_low()) {
4380 gclog_or_tty->print_cr("[%u] all tasks terminated", _worker_id);
4381 }
4382 } else {
4383 // Apparently there's more work to do. Let's abort this task. The
4384 // caller will restart it and we can hopefully find more things to do.
4386 if (_cm->verbose_low()) {
4387 gclog_or_tty->print_cr("[%u] apparently there is more work to do",
4388 _worker_id);
4389 }
4391 set_has_aborted();
4392 statsOnly( ++_aborted_termination );
4393 }
4394 }
4396 // Mainly for debugging purposes to make sure that a pointer to the
4397 // closure which was statically allocated in this frame doesn't
4398 // escape it by accident.
4399 set_cm_oop_closure(NULL);
4400 double end_time_ms = os::elapsedVTime() * 1000.0;
4401 double elapsed_time_ms = end_time_ms - _start_time_ms;
4402 // Update the step history.
4403 _step_times_ms.add(elapsed_time_ms);
4405 if (has_aborted()) {
4406 // The task was aborted for some reason.
4408 statsOnly( ++_aborted );
4410 if (_has_timed_out) {
4411 double diff_ms = elapsed_time_ms - _time_target_ms;
4412 // Keep statistics of how well we did with respect to hitting
4413 // our target only if we actually timed out (if we aborted for
4414 // other reasons, then the results might get skewed).
4415 _marking_step_diffs_ms.add(diff_ms);
4416 }
4418 if (_cm->has_overflown()) {
4419 // This is the interesting one. We aborted because a global
4420 // overflow was raised. This means we have to restart the
4421 // marking phase and start iterating over regions. However, in
4422 // order to do this we have to make sure that all tasks stop
4423 // what they are doing and re-initialise in a safe manner. We
4424 // will achieve this with the use of two barrier sync points.
4426 if (_cm->verbose_low()) {
4427 gclog_or_tty->print_cr("[%u] detected overflow", _worker_id);
4428 }
4430 if (!is_serial) {
4431 // We only need to enter the sync barrier if being called
4432 // from a parallel context
4433 _cm->enter_first_sync_barrier(_worker_id);
4435 // When we exit this sync barrier we know that all tasks have
4436 // stopped doing marking work. So, it's now safe to
4437 // re-initialise our data structures. At the end of this method,
4438 // task 0 will clear the global data structures.
4439 }
4441 statsOnly( ++_aborted_overflow );
4443 // We clear the local state of this task...
4444 clear_region_fields();
4446 if (!is_serial) {
4447 // ...and enter the second barrier.
4448 _cm->enter_second_sync_barrier(_worker_id);
4449 }
4450 // At this point, if we're during the concurrent phase of
4451 // marking, everything has been re-initialized and we're
4452 // ready to restart.
4453 }
4455 if (_cm->verbose_low()) {
4456 gclog_or_tty->print_cr("[%u] <<<<<<<<<< ABORTING, target = %1.2lfms, "
4457 "elapsed = %1.2lfms <<<<<<<<<<",
4458 _worker_id, _time_target_ms, elapsed_time_ms);
4459 if (_cm->has_aborted()) {
4460 gclog_or_tty->print_cr("[%u] ========== MARKING ABORTED ==========",
4461 _worker_id);
4462 }
4463 }
4464 } else {
4465 if (_cm->verbose_low()) {
4466 gclog_or_tty->print_cr("[%u] <<<<<<<<<< FINISHED, target = %1.2lfms, "
4467 "elapsed = %1.2lfms <<<<<<<<<<",
4468 _worker_id, _time_target_ms, elapsed_time_ms);
4469 }
4470 }
4472 _claimed = false;
4473 }
4475 CMTask::CMTask(uint worker_id,
4476 ConcurrentMark* cm,
4477 size_t* marked_bytes,
4478 BitMap* card_bm,
4479 CMTaskQueue* task_queue,
4480 CMTaskQueueSet* task_queues)
4481 : _g1h(G1CollectedHeap::heap()),
4482 _worker_id(worker_id), _cm(cm),
4483 _claimed(false),
4484 _nextMarkBitMap(NULL), _hash_seed(17),
4485 _task_queue(task_queue),
4486 _task_queues(task_queues),
4487 _cm_oop_closure(NULL),
4488 _marked_bytes_array(marked_bytes),
4489 _card_bm(card_bm) {
4490 guarantee(task_queue != NULL, "invariant");
4491 guarantee(task_queues != NULL, "invariant");
4493 statsOnly( _clock_due_to_scanning = 0;
4494 _clock_due_to_marking = 0 );
4496 _marking_step_diffs_ms.add(0.5);
4497 }
4499 // These are formatting macros that are used below to ensure
4500 // consistent formatting. The *_H_* versions are used to format the
4501 // header for a particular value and they should be kept consistent
4502 // with the corresponding macro. Also note that most of the macros add
4503 // the necessary white space (as a prefix) which makes them a bit
4504 // easier to compose.
4506 // All the output lines are prefixed with this string to be able to
4507 // identify them easily in a large log file.
4508 #define G1PPRL_LINE_PREFIX "###"
4510 #define G1PPRL_ADDR_BASE_FORMAT " "PTR_FORMAT"-"PTR_FORMAT
4511 #ifdef _LP64
4512 #define G1PPRL_ADDR_BASE_H_FORMAT " %37s"
4513 #else // _LP64
4514 #define G1PPRL_ADDR_BASE_H_FORMAT " %21s"
4515 #endif // _LP64
4517 // For per-region info
4518 #define G1PPRL_TYPE_FORMAT " %-4s"
4519 #define G1PPRL_TYPE_H_FORMAT " %4s"
4520 #define G1PPRL_BYTE_FORMAT " "SIZE_FORMAT_W(9)
4521 #define G1PPRL_BYTE_H_FORMAT " %9s"
4522 #define G1PPRL_DOUBLE_FORMAT " %14.1f"
4523 #define G1PPRL_DOUBLE_H_FORMAT " %14s"
4525 // For summary info
4526 #define G1PPRL_SUM_ADDR_FORMAT(tag) " "tag":"G1PPRL_ADDR_BASE_FORMAT
4527 #define G1PPRL_SUM_BYTE_FORMAT(tag) " "tag": "SIZE_FORMAT
4528 #define G1PPRL_SUM_MB_FORMAT(tag) " "tag": %1.2f MB"
4529 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag)" / %1.2f %%"
4531 G1PrintRegionLivenessInfoClosure::
4532 G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name)
4533 : _out(out),
4534 _total_used_bytes(0), _total_capacity_bytes(0),
4535 _total_prev_live_bytes(0), _total_next_live_bytes(0),
4536 _hum_used_bytes(0), _hum_capacity_bytes(0),
4537 _hum_prev_live_bytes(0), _hum_next_live_bytes(0),
4538 _total_remset_bytes(0), _total_strong_code_roots_bytes(0) {
4539 G1CollectedHeap* g1h = G1CollectedHeap::heap();
4540 MemRegion g1_committed = g1h->g1_committed();
4541 MemRegion g1_reserved = g1h->g1_reserved();
4542 double now = os::elapsedTime();
4544 // Print the header of the output.
4545 _out->cr();
4546 _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
4547 _out->print_cr(G1PPRL_LINE_PREFIX" HEAP"
4548 G1PPRL_SUM_ADDR_FORMAT("committed")
4549 G1PPRL_SUM_ADDR_FORMAT("reserved")
4550 G1PPRL_SUM_BYTE_FORMAT("region-size"),
4551 g1_committed.start(), g1_committed.end(),
4552 g1_reserved.start(), g1_reserved.end(),
4553 HeapRegion::GrainBytes);
4554 _out->print_cr(G1PPRL_LINE_PREFIX);
4555 _out->print_cr(G1PPRL_LINE_PREFIX
4556 G1PPRL_TYPE_H_FORMAT
4557 G1PPRL_ADDR_BASE_H_FORMAT
4558 G1PPRL_BYTE_H_FORMAT
4559 G1PPRL_BYTE_H_FORMAT
4560 G1PPRL_BYTE_H_FORMAT
4561 G1PPRL_DOUBLE_H_FORMAT
4562 G1PPRL_BYTE_H_FORMAT
4563 G1PPRL_BYTE_H_FORMAT,
4564 "type", "address-range",
4565 "used", "prev-live", "next-live", "gc-eff",
4566 "remset", "code-roots");
4567 _out->print_cr(G1PPRL_LINE_PREFIX
4568 G1PPRL_TYPE_H_FORMAT
4569 G1PPRL_ADDR_BASE_H_FORMAT
4570 G1PPRL_BYTE_H_FORMAT
4571 G1PPRL_BYTE_H_FORMAT
4572 G1PPRL_BYTE_H_FORMAT
4573 G1PPRL_DOUBLE_H_FORMAT
4574 G1PPRL_BYTE_H_FORMAT
4575 G1PPRL_BYTE_H_FORMAT,
4576 "", "",
4577 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
4578 "(bytes)", "(bytes)");
4579 }
4581 // It takes as a parameter a reference to one of the _hum_* fields; it
4582 // deduces the corresponding value for a region in a humongous region
4583 // series (either the region size, or what's left if the _hum_* field
4584 // is < the region size), and updates the _hum_* field accordingly.
4585 size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) {
4586 size_t bytes = 0;
4587 // The > 0 check is to deal with the prev and next live bytes which
4588 // could be 0.
4589 if (*hum_bytes > 0) {
4590 bytes = MIN2(HeapRegion::GrainBytes, *hum_bytes);
4591 *hum_bytes -= bytes;
4592 }
4593 return bytes;
4594 }
4596 // It deduces the values for a region in a humongous region series
4597 // from the _hum_* fields and updates those accordingly. It assumes
4599 // that the _hum_* fields have already been set up from the "starts
4599 // humongous" region and we visit the regions in address order.
4600 void G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* used_bytes,
4601 size_t* capacity_bytes,
4602 size_t* prev_live_bytes,
4603 size_t* next_live_bytes) {
4604 assert(_hum_used_bytes > 0 && _hum_capacity_bytes > 0, "pre-condition");
4605 *used_bytes = get_hum_bytes(&_hum_used_bytes);
4606 *capacity_bytes = get_hum_bytes(&_hum_capacity_bytes);
4607 *prev_live_bytes = get_hum_bytes(&_hum_prev_live_bytes);
4608 *next_live_bytes = get_hum_bytes(&_hum_next_live_bytes);
4609 }
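// A worked example (assuming a 1 MB region size, i.e. GrainBytes ==
// 1*M) of how the _hum_* machinery spreads a humongous series' bytes
// over its regions: doHeapRegion() stores the series total in the
// "starts humongous" case, and each call to get_hum_bytes() then hands
// out MIN2(GrainBytes, whatever is left).
//
//   _hum_used_bytes = 2560 * K;            // 2.5 MB series total
//   get_hum_bytes(&_hum_used_bytes);       // returns 1 MB, 1.5 MB left
//   get_hum_bytes(&_hum_used_bytes);       // returns 1 MB, 0.5 MB left
//   get_hum_bytes(&_hum_used_bytes);       // returns 0.5 MB, 0 left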
4611 bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
4612 const char* type = "";
4613 HeapWord* bottom = r->bottom();
4614 HeapWord* end = r->end();
4615 size_t capacity_bytes = r->capacity();
4616 size_t used_bytes = r->used();
4617 size_t prev_live_bytes = r->live_bytes();
4618 size_t next_live_bytes = r->next_live_bytes();
4619 double gc_eff = r->gc_efficiency();
4620 size_t remset_bytes = r->rem_set()->mem_size();
4621 size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();
4623 if (r->used() == 0) {
4624 type = "FREE";
4625 } else if (r->is_survivor()) {
4626 type = "SURV";
4627 } else if (r->is_young()) {
4628 type = "EDEN";
4629 } else if (r->startsHumongous()) {
4630 type = "HUMS";
4632 assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 &&
4633 _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0,
4634 "they should have been zeroed after the last time we used them");
4635 // Set up the _hum_* fields.
4636 _hum_capacity_bytes = capacity_bytes;
4637 _hum_used_bytes = used_bytes;
4638 _hum_prev_live_bytes = prev_live_bytes;
4639 _hum_next_live_bytes = next_live_bytes;
4640 get_hum_bytes(&used_bytes, &capacity_bytes,
4641 &prev_live_bytes, &next_live_bytes);
4642 end = bottom + HeapRegion::GrainWords;
4643 } else if (r->continuesHumongous()) {
4644 type = "HUMC";
4645 get_hum_bytes(&used_bytes, &capacity_bytes,
4646 &prev_live_bytes, &next_live_bytes);
4647 assert(end == bottom + HeapRegion::GrainWords, "invariant");
4648 } else {
4649 type = "OLD";
4650 }
4652 _total_used_bytes += used_bytes;
4653 _total_capacity_bytes += capacity_bytes;
4654 _total_prev_live_bytes += prev_live_bytes;
4655 _total_next_live_bytes += next_live_bytes;
4656 _total_remset_bytes += remset_bytes;
4657 _total_strong_code_roots_bytes += strong_code_roots_bytes;
4659 // Print a line for this particular region.
4660 _out->print_cr(G1PPRL_LINE_PREFIX
4661 G1PPRL_TYPE_FORMAT
4662 G1PPRL_ADDR_BASE_FORMAT
4663 G1PPRL_BYTE_FORMAT
4664 G1PPRL_BYTE_FORMAT
4665 G1PPRL_BYTE_FORMAT
4666 G1PPRL_DOUBLE_FORMAT
4667 G1PPRL_BYTE_FORMAT
4668 G1PPRL_BYTE_FORMAT,
4669 type, bottom, end,
4670 used_bytes, prev_live_bytes, next_live_bytes, gc_eff,
4671 remset_bytes, strong_code_roots_bytes);
4673 return false;
4674 }
4676 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
4677 // Add the static memory usage to the remembered set sizes.
4678 _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size();
4679 // Print the footer of the output.
4680 _out->print_cr(G1PPRL_LINE_PREFIX);
4681 _out->print_cr(G1PPRL_LINE_PREFIX
4682 " SUMMARY"
4683 G1PPRL_SUM_MB_FORMAT("capacity")
4684 G1PPRL_SUM_MB_PERC_FORMAT("used")
4685 G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
4686 G1PPRL_SUM_MB_PERC_FORMAT("next-live")
4687 G1PPRL_SUM_MB_FORMAT("remset")
4688 G1PPRL_SUM_MB_FORMAT("code-roots"),
4689 bytes_to_mb(_total_capacity_bytes),
4690 bytes_to_mb(_total_used_bytes),
4691 perc(_total_used_bytes, _total_capacity_bytes),
4692 bytes_to_mb(_total_prev_live_bytes),
4693 perc(_total_prev_live_bytes, _total_capacity_bytes),
4694 bytes_to_mb(_total_next_live_bytes),
4695 perc(_total_next_live_bytes, _total_capacity_bytes),
4696 bytes_to_mb(_total_remset_bytes),
4697 bytes_to_mb(_total_strong_code_roots_bytes));
4698 _out->cr();
4699 }