Thu, 07 Aug 2014 22:28:53 +0200
8054341: Remove some obsolete code in G1CollectedHeap class
Summary: Remove dead code.
Reviewed-by: stefank, brutisso
/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/g1/concurrentMark.inline.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "memory/allocation.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "services/memTracker.hpp"

// Concurrent marking bit map wrapper

CMBitMapRO::CMBitMapRO(int shifter) :
  _bm(),
  _shifter(shifter) {
  _bmStartWord = 0;
  _bmWordSize = 0;
}
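
// Note on granularity (a reader's sketch, inferred from the uses below):
// each bit in the map covers 2^_shifter heap words, so with the shifter
// set to log2(MinObjAlignment) there is exactly one bit per possible
// object start address. heapWordToOffset()/offsetToHeapWord() convert
// between heap addresses and bit indices using that shift.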

HeapWord* CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
                                               const HeapWord* limit) const {
  // First we must round addr *up* to a possible object boundary.
  addr = (HeapWord*)align_size_up((intptr_t)addr,
                                  HeapWordSize << _shifter);
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_one postcondition");
  assert(nextAddr == limit || isMarked(nextAddr),
         "get_next_one postcondition");
  return nextAddr;
}

HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(const HeapWord* addr,
                                                 const HeapWord* limit) const {
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_zero_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_one postcondition");
  assert(nextAddr == limit || !isMarked(nextAddr),
         "get_next_one postcondition");
  return nextAddr;
}

int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const {
  assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
  return (int) (diff >> _shifter);
}

#ifndef PRODUCT
bool CMBitMapRO::covers(ReservedSpace heap_rs) const {
  // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
  assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
         "size inconsistency");
  return _bmStartWord == (HeapWord*)(heap_rs.base()) &&
         _bmWordSize == heap_rs.size()>>LogHeapWordSize;
}
#endif

void CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
  _bm.print_on_error(st, prefix);
}
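
// Sizing note for allocate() below: the map needs one bit per 2^_shifter
// heap words, i.e. (_bmWordSize >> _shifter) bits; shifting further by
// LogBitsPerByte converts that to bytes, and the "+ 1" guards against
// truncation before the size is rounded up to the allocation alignment.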

bool CMBitMap::allocate(ReservedSpace heap_rs) {
  _bmStartWord = (HeapWord*)(heap_rs.base());
  _bmWordSize = heap_rs.size()/HeapWordSize; // heap_rs.size() is in bytes
  ReservedSpace brs(ReservedSpace::allocation_align_size_up(
                    (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
  if (!brs.is_reserved()) {
    warning("ConcurrentMark marking bit map allocation failure");
    return false;
  }
  MemTracker::record_virtual_memory_type((address)brs.base(), mtGC);
  // For now we'll just commit all of the bit map up front.
  // Later on we'll try to be more parsimonious with swap.
  if (!_virtual_space.initialize(brs, brs.size())) {
    warning("ConcurrentMark marking bit map backing store failure");
    return false;
  }
  assert(_virtual_space.committed_size() == brs.size(),
         "didn't reserve backing store for all of concurrent marking bit map?");
  _bm.set_map((BitMap::bm_word_t*)_virtual_space.low());
  assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
         _bmWordSize, "inconsistency in bit map sizing");
  _bm.set_size(_bmWordSize >> _shifter);
  return true;
}

void CMBitMap::clearAll() {
  _bm.clear();
  return;
}

void CMBitMap::markRange(MemRegion mr) {
  // Clip the range to the part of the heap this bitmap covers. (The
  // original code discarded the result of intersection(), making the
  // clipping a no-op; assigning it back is the evident intent.)
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  assert((offsetToHeapWord(heapWordToOffset(mr.end())) ==
          ((HeapWord *) mr.end())),
         "markRange memory region end is not card aligned");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), true);
}

void CMBitMap::clearRange(MemRegion mr) {
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), false);
}

MemRegion CMBitMap::getAndClearMarkedRegion(HeapWord* addr,
                                            HeapWord* end_addr) {
  HeapWord* start = getNextMarkedWordAddress(addr);
  start = MIN2(start, end_addr);
  HeapWord* end = getNextUnmarkedWordAddress(start);
  end = MIN2(end, end_addr);
  assert(start <= end, "Consistency check");
  MemRegion mr(start, end);
  if (!mr.is_empty()) {
    clearRange(mr);
  }
  return mr;
}

CMMarkStack::CMMarkStack(ConcurrentMark* cm) :
  _base(NULL), _cm(cm)
#ifdef ASSERT
  , _drain_in_progress(false)
  , _drain_in_progress_yields(false)
#endif
{}

bool CMMarkStack::allocate(size_t capacity) {
  // allocate a stack of the requisite depth
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop)));
  if (!rs.is_reserved()) {
    warning("ConcurrentMark MarkStack allocation failure");
    return false;
  }
  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
  if (!_virtual_space.initialize(rs, rs.size())) {
    warning("ConcurrentMark MarkStack backing store failure");
    // Release the virtual memory reserved for the marking stack
    rs.release();
    return false;
  }
  assert(_virtual_space.committed_size() == rs.size(),
         "Didn't reserve backing store for all of ConcurrentMark stack?");
  _base = (oop*) _virtual_space.low();
  setEmpty();
  _capacity = (jint) capacity;
  _saved_index = -1;
  _should_expand = false;
  NOT_PRODUCT(_max_depth = 0);
  return true;
}

void CMMarkStack::expand() {
  // Called during remark if we overflowed the marking stack while marking.
  assert(isEmpty(), "stack should have been emptied while handling overflow");
  assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted");
  // Clear expansion flag
  _should_expand = false;
  if (_capacity == (jint) MarkStackSizeMax) {
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr(" (benign) Can't expand marking stack capacity, at max size limit");
    }
    return;
  }
  // Double capacity if possible
  jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax);
  // Do not give up the existing stack until we have managed to
  // get the doubled capacity that we desired.
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity *
                                                           sizeof(oop)));
  if (rs.is_reserved()) {
    // Release the backing store associated with the old stack
    _virtual_space.release();
    // Reinitialize virtual space for the new stack
    if (!_virtual_space.initialize(rs, rs.size())) {
      fatal("Not enough swap for expanded marking stack capacity");
    }
    _base = (oop*)(_virtual_space.low());
    _index = 0;
    _capacity = new_capacity;
  } else {
    if (PrintGCDetails && Verbose) {
      // Failed to double capacity; continue with the current capacity.
      gclog_or_tty->print(" (benign) Failed to expand marking stack capacity from "
                          SIZE_FORMAT"K to " SIZE_FORMAT"K",
                          _capacity / K, new_capacity / K);
    }
  }
}

void CMMarkStack::set_should_expand() {
  // If we're resetting the marking state because of a
  // marking stack overflow, record that we should, if
  // possible, expand the stack.
  _should_expand = _cm->has_overflown();
}

CMMarkStack::~CMMarkStack() {
  if (_base != NULL) {
    _base = NULL;
    _virtual_space.release();
  }
}
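
// Lock-free push protocol used by par_push() and par_adjoin_arr() below:
// a pushing thread first checks for overflow, then tries to reserve its
// slot(s) by advancing _index with a CAS, retrying from the top on
// contention; the entries themselves are written only after the CAS has
// succeeded.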

void CMMarkStack::par_push(oop ptr) {
  while (true) {
    if (isFull()) {
      _overflow = true;
      return;
    }
    // Otherwise...
    jint index = _index;
    jint next_index = index+1;
    jint res = Atomic::cmpxchg(next_index, &_index, index);
    if (res == index) {
      _base[index] = ptr;
      // Note that we don't maintain this atomically. We could, but it
      // doesn't seem necessary.
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
      return;
    }
    // Otherwise, we need to try again.
  }
}

void CMMarkStack::par_adjoin_arr(oop* ptr_arr, int n) {
  while (true) {
    if (isFull()) {
      _overflow = true;
      return;
    }
    // Otherwise...
    jint index = _index;
    jint next_index = index + n;
    if (next_index > _capacity) {
      _overflow = true;
      return;
    }
    jint res = Atomic::cmpxchg(next_index, &_index, index);
    if (res == index) {
      for (int i = 0; i < n; i++) {
        int ind = index + i;
        assert(ind < _capacity, "By overflow test above.");
        _base[ind] = ptr_arr[i];
      }
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
      return;
    }
    // Otherwise, we need to try again.
  }
}
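
// In contrast to the CAS-based fast paths above, the bulk transfer
// operations below are expected to be rare, so they simply serialize
// on ParGCRareEvent_lock instead of using a lock-free scheme.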

void CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint start = _index;
  jint next_index = start + n;
  if (next_index > _capacity) {
    _overflow = true;
    return;
  }
  // Otherwise.
  _index = next_index;
  for (int i = 0; i < n; i++) {
    int ind = start + i;
    assert(ind < _capacity, "By overflow test above.");
    _base[ind] = ptr_arr[i];
  }
  NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
}

bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint index = _index;
  if (index == 0) {
    *n = 0;
    return false;
  } else {
    int k = MIN2(max, index);
    jint new_ind = index - k;
    for (int j = 0; j < k; j++) {
      ptr_arr[j] = _base[new_ind + j];
    }
    _index = new_ind;
    *n = k;
    return true;
  }
}

template<class OopClosureClass>
bool CMMarkStack::drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after) {
  assert(!_drain_in_progress || !_drain_in_progress_yields || yield_after
         || SafepointSynchronize::is_at_safepoint(),
         "Drain recursion must be yield-safe.");
  bool res = true;
  debug_only(_drain_in_progress = true);
  debug_only(_drain_in_progress_yields = yield_after);
  while (!isEmpty()) {
    oop newOop = pop();
    assert(G1CollectedHeap::heap()->is_in_reserved(newOop), "Bad pop");
    assert(newOop->is_oop(), "Expected an oop");
    assert(bm == NULL || bm->isMarked((HeapWord*)newOop),
           "only grey objects on this stack");
    newOop->oop_iterate(cl);
    if (yield_after && _cm->do_yield_check()) {
      res = false;
      break;
    }
  }
  debug_only(_drain_in_progress = false);
  return res;
}

void CMMarkStack::note_start_of_gc() {
  assert(_saved_index == -1,
         "note_start_of_gc()/end_of_gc() bracketed incorrectly");
  _saved_index = _index;
}

void CMMarkStack::note_end_of_gc() {
  // This is intentionally a guarantee, instead of an assert. If we
  // accidentally add something to the mark stack during GC, it
  // will be a correctness issue so it's better if we crash. We'll
  // only check this once per GC anyway, so it won't be a performance
  // issue in any way.
  guarantee(_saved_index == _index,
            err_msg("saved index: %d index: %d", _saved_index, _index));
  _saved_index = -1;
}

void CMMarkStack::oops_do(OopClosure* f) {
  assert(_saved_index == _index,
         err_msg("saved index: %d index: %d", _saved_index, _index));
  for (int i = 0; i < _index; i += 1) {
    f->do_oop(&_base[i]);
  }
}

bool ConcurrentMark::not_yet_marked(oop obj) const {
  return _g1h->is_obj_ill(obj);
}

CMRootRegions::CMRootRegions() :
  _young_list(NULL), _cm(NULL), _scan_in_progress(false),
  _should_abort(false), _next_survivor(NULL) { }

void CMRootRegions::init(G1CollectedHeap* g1h, ConcurrentMark* cm) {
  _young_list = g1h->young_list();
  _cm = cm;
}

void CMRootRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  assert(_next_survivor == NULL, "pre-condition");
  _next_survivor = _young_list->first_survivor_region();
  _scan_in_progress = (_next_survivor != NULL);
  _should_abort = false;
}
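
// claim_next() uses a double-checked pattern: _next_survivor is first
// read without the lock as a fast path, and then re-read under
// RootRegionScan_lock before the claim is advanced, since another
// worker may have claimed the region in the meantime.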

HeapRegion* CMRootRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  // Currently, only survivors can be root regions.
  HeapRegion* res = _next_survivor;
  if (res != NULL) {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    // Read it again in case it changed while we were waiting for the lock.
    res = _next_survivor;
    if (res != NULL) {
      if (res == _young_list->last_survivor_region()) {
        // We just claimed the last survivor so store NULL to indicate
        // that we're done.
        _next_survivor = NULL;
      } else {
        _next_survivor = res->get_next_young_region();
      }
    } else {
      // Someone else claimed the last survivor while we were trying
      // to take the lock so nothing else to do.
    }
  }
  assert(res == NULL || res->is_survivor(), "post-condition");

  return res;
}

void CMRootRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  if (!_should_abort) {
    assert(_next_survivor == NULL, "we should have claimed all survivors");
  }
  _next_survivor = NULL;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    _scan_in_progress = false;
    RootRegionScan_lock->notify_all();
  }
}

bool CMRootRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) return false;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    while (scan_in_progress()) {
      RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
  return true;
}

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
  return MAX2((n_par_threads + 2) / 4, 1U);
}
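
// This rounds to roughly a quarter of the parallel GC threads, but never
// below one: for example, n_par_threads = 8 gives (8 + 2) / 4 = 2 marking
// threads, while 1 or 2 parallel threads still give 1.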

ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) :
  _g1h(g1h),
  _markBitMap1(log2_intptr(MinObjAlignment)),
  _markBitMap2(log2_intptr(MinObjAlignment)),
  _parallel_marking_threads(0),
  _max_parallel_marking_threads(0),
  _sleep_factor(0.0),
  _marking_task_overhead(1.0),
  _cleanup_sleep_factor(0.0),
  _cleanup_task_overhead(1.0),
  _cleanup_list("Cleanup List"),
  _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/),
  _card_bm((heap_rs.size() + CardTableModRefBS::card_size - 1) >>
            CardTableModRefBS::card_shift,
            false /* in_resource_area*/),

  _prevMarkBitMap(&_markBitMap1),
  _nextMarkBitMap(&_markBitMap2),

  _markStack(this),
  // _finger set in set_non_marking_state

  _max_worker_id(MAX2((uint)ParallelGCThreads, 1U)),
  // _active_tasks set in set_non_marking_state
  // _tasks set inside the constructor
  _task_queues(new CMTaskQueueSet((int) _max_worker_id)),
  _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),

  _has_overflown(false),
  _concurrent(false),
  _has_aborted(false),
  _aborted_gc_id(GCId::undefined()),
  _restart_for_overflow(false),
  _concurrent_marking_in_progress(false),

  // _verbose_level set below

  _init_times(),
  _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
  _cleanup_times(),
  _total_counting_time(0.0),
  _total_rs_scrub_time(0.0),

  _parallel_workers(NULL),

  _count_card_bitmaps(NULL),
  _count_marked_bytes(NULL),
  _completed_initialization(false) {
  CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
  if (verbose_level < no_verbose) {
    verbose_level = no_verbose;
  }
  if (verbose_level > high_verbose) {
    verbose_level = high_verbose;
  }
  _verbose_level = verbose_level;

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] init, heap start = "PTR_FORMAT", "
                           "heap end = " INTPTR_FORMAT, p2i(_heap_start), p2i(_heap_end));
  }

  if (!_markBitMap1.allocate(heap_rs)) {
    warning("Failed to allocate first CM bit map");
    return;
  }
  if (!_markBitMap2.allocate(heap_rs)) {
    warning("Failed to allocate second CM bit map");
    return;
  }

  // Create & start a ConcurrentMark thread.
  _cmThread = new ConcurrentMarkThread(this);
  assert(cmThread() != NULL, "CM Thread should have been created");
  assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
  if (_cmThread->osthread() == NULL) {
    vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
  }

  assert(CGC_lock != NULL, "Where's the CGC_lock?");
  assert(_markBitMap1.covers(heap_rs), "_markBitMap1 inconsistency");
  assert(_markBitMap2.covers(heap_rs), "_markBitMap2 inconsistency");

  SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
  satb_qs.set_buffer_size(G1SATBBufferSize);

  _root_regions.init(_g1h, this);

  if (ConcGCThreads > ParallelGCThreads) {
    warning("Can't have more ConcGCThreads (" UINTX_FORMAT ") "
            "than ParallelGCThreads (" UINTX_FORMAT ").",
            ConcGCThreads, ParallelGCThreads);
    return;
  }
  if (ParallelGCThreads == 0) {
    // if we are not running with any parallel GC threads we will not
    // spawn any marking threads either
    _parallel_marking_threads = 0;
    _max_parallel_marking_threads = 0;
    _sleep_factor = 0.0;
    _marking_task_overhead = 1.0;
  } else {
    if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) {
      // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent
      // if both are set
      _sleep_factor = 0.0;
      _marking_task_overhead = 1.0;
    } else if (G1MarkingOverheadPercent > 0) {
      // We will calculate the number of parallel marking threads based
      // on a target overhead with respect to the soft real-time goal
      double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
      double overall_cm_overhead =
        (double) MaxGCPauseMillis * marking_overhead /
        (double) GCPauseIntervalMillis;
      double cpu_ratio = 1.0 / (double) os::processor_count();
      double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
      double marking_task_overhead =
        overall_cm_overhead / marking_thread_num *
        (double) os::processor_count();
      double sleep_factor =
        (1.0 - marking_task_overhead) / marking_task_overhead;
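
      // In other words, the target concurrent marking overhead is spread
      // over ceil(overall_cm_overhead * #cpus) threads, and each thread
      // alternates between working for some time t and sleeping for
      // t * sleep_factor, which makes its duty cycle equal to
      // marking_task_overhead.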

      FLAG_SET_ERGO(uintx, ConcGCThreads, (uint) marking_thread_num);
      _sleep_factor = sleep_factor;
      _marking_task_overhead = marking_task_overhead;
    } else {
      // Calculate the number of parallel marking threads by scaling
      // the number of parallel GC threads.
      uint marking_thread_num = scale_parallel_threads((uint) ParallelGCThreads);
      FLAG_SET_ERGO(uintx, ConcGCThreads, marking_thread_num);
      _sleep_factor = 0.0;
      _marking_task_overhead = 1.0;
    }

    assert(ConcGCThreads > 0, "Should have been set");
    _parallel_marking_threads = (uint) ConcGCThreads;
    _max_parallel_marking_threads = _parallel_marking_threads;

    if (parallel_marking_threads() > 1) {
      _cleanup_task_overhead = 1.0;
    } else {
      _cleanup_task_overhead = marking_task_overhead();
    }
    _cleanup_sleep_factor =
      (1.0 - cleanup_task_overhead()) / cleanup_task_overhead();

#if 0
    gclog_or_tty->print_cr("Marking Threads %d", parallel_marking_threads());
    gclog_or_tty->print_cr("CM Marking Task Overhead %1.4lf", marking_task_overhead());
    gclog_or_tty->print_cr("CM Sleep Factor %1.4lf", sleep_factor());
    gclog_or_tty->print_cr("CL Marking Task Overhead %1.4lf", cleanup_task_overhead());
    gclog_or_tty->print_cr("CL Sleep Factor %1.4lf", cleanup_sleep_factor());
#endif

    guarantee(parallel_marking_threads() > 0, "peace of mind");
    _parallel_workers = new FlexibleWorkGang("G1 Parallel Marking Threads",
         _max_parallel_marking_threads, false, true);
    if (_parallel_workers == NULL) {
      vm_exit_during_initialization("Failed necessary allocation.");
    } else {
      _parallel_workers->initialize_workers();
    }
  }

  if (FLAG_IS_DEFAULT(MarkStackSize)) {
    uintx mark_stack_size =
      MIN2(MarkStackSizeMax,
           MAX2(MarkStackSize, (uintx) (parallel_marking_threads() * TASKQUEUE_SIZE)));
    // Verify that the calculated value for MarkStackSize is in range.
    // It would be nice to use the private utility routine from Arguments.
    if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
      warning("Invalid value calculated for MarkStackSize (" UINTX_FORMAT "): "
              "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
              mark_stack_size, (uintx) 1, MarkStackSizeMax);
      return;
    }
    FLAG_SET_ERGO(uintx, MarkStackSize, mark_stack_size);
  } else {
    // Verify MarkStackSize is in range.
    if (FLAG_IS_CMDLINE(MarkStackSize)) {
      if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT "): "
                  "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
                  MarkStackSize, (uintx) 1, MarkStackSizeMax);
          return;
        }
      } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT ")"
                  " or for MarkStackSizeMax (" UINTX_FORMAT ")",
                  MarkStackSize, MarkStackSizeMax);
          return;
        }
      }
    }
  }

  if (!_markStack.allocate(MarkStackSize)) {
    warning("Failed to allocate CM marking stack");
    return;
  }

  _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_worker_id, mtGC);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);

  _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap, _max_worker_id, mtGC);
  _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_worker_id, mtGC);

  BitMap::idx_t card_bm_size = _card_bm.size();

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _active_tasks = _max_worker_id;

  size_t max_regions = (size_t) _g1h->max_regions();
  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* task_queue = new CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _count_card_bitmaps[i] = BitMap(card_bm_size, false);
    _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC);

    _tasks[i] = new CMTask(i, this,
                           _count_marked_bytes[i],
                           &_count_card_bitmaps[i],
                           task_queue, _task_queues);

    _accum_task_vtime[i] = 0.0;
  }

  // Calculate the card number for the bottom of the heap. Used
  // in biasing indexes into the accounting card bitmaps.
  _heap_bottom_card_num =
    intptr_t(uintptr_t(_g1h->reserved_region().start()) >>
             CardTableModRefBS::card_shift);

  // Clear all the liveness counting data
  clear_all_count_data();

  // so that the call below can read a sensible value
  _heap_start = (HeapWord*) heap_rs.base();
  set_non_marking_state();
  _completed_initialization = true;
}

void ConcurrentMark::update_g1_committed(bool force) {
  // If concurrent marking is not in progress, then we do not need to
  // update _heap_end.
  if (!concurrent_marking_in_progress() && !force) return;

  MemRegion committed = _g1h->g1_committed();
  assert(committed.start() == _heap_start, "start shouldn't change");
  HeapWord* new_end = committed.end();
  if (new_end > _heap_end) {
    // The heap has been expanded.
    _heap_end = new_end;
  }
  // Notice that the heap can also shrink. However, this only happens
  // during a Full GC (at least currently) and the entire marking
  // phase will bail out and the task will not be restarted. So, let's
  // do nothing.
}

void ConcurrentMark::reset() {
  // Starting values for these two. This should be called in a STW
  // phase. CM will be notified of any future g1_committed expansions;
  // these will happen at the end of evacuation pauses, when the tasks
  // are inactive.
  MemRegion committed = _g1h->g1_committed();
  _heap_start = committed.start();
  _heap_end = committed.end();

  // Separated the asserts so that we know which one fires.
  assert(_heap_start != NULL, "heap bounds should look ok");
  assert(_heap_end != NULL, "heap bounds should look ok");
  assert(_heap_start < _heap_end, "heap bounds should look ok");

  // Reset all the marking data structures and any necessary flags
  reset_marking_state();

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] resetting");
  }

  // We do reset all of them, since different phases will use
  // different numbers of active threads. So, it's easiest to have all
  // of them ready.
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->reset(_nextMarkBitMap);
  }

  // We need this to make sure that the flag is on during the evac
  // pause with the initial mark piggy-backed on it.
  set_concurrent_marking_in_progress();
}

void ConcurrentMark::reset_marking_state(bool clear_overflow) {
  _markStack.set_should_expand();
  _markStack.setEmpty(); // Also clears the _markStack overflow flag
  if (clear_overflow) {
    clear_has_overflown();
  } else {
    assert(has_overflown(), "pre-condition");
  }
  _finger = _heap_start;

  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* queue = _task_queues->queue(i);
    queue->set_empty();
  }
}

void ConcurrentMark::set_concurrency(uint active_tasks) {
  assert(active_tasks <= _max_worker_id, "we should not have more");

  _active_tasks = active_tasks;
  // Need to update the three data structures below according to the
  // number of active threads for this phase.
  _terminator = ParallelTaskTerminator((int) active_tasks, _task_queues);
  _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
  _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
}
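
// Note: the terminator and the two overflow barriers must all be sized to
// the number of tasks that will actually run in the upcoming phase; if a
// barrier were set up for more workers than ever arrive, the workers that
// do arrive would wait in the sync forever.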

void ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
  set_concurrency(active_tasks);

  _concurrent = concurrent;
  // We propagate this to all tasks, not just the active ones.
  for (uint i = 0; i < _max_worker_id; ++i)
    _tasks[i]->set_concurrent(concurrent);

  if (concurrent) {
    set_concurrent_marking_in_progress();
  } else {
    // We currently assume that the concurrent flag has been set to
    // false before we start remark. At this point we should also be
    // in a STW phase.
    assert(!concurrent_marking_in_progress(), "invariant");
    assert(out_of_regions(),
           err_msg("only way to get here: _finger: "PTR_FORMAT", _heap_end: "PTR_FORMAT,
                   p2i(_finger), p2i(_heap_end)));
    update_g1_committed(true);
  }
}

void ConcurrentMark::set_non_marking_state() {
  // We set the global marking state to some default values when we're
  // not doing marking.
  reset_marking_state();
  _active_tasks = 0;
  clear_concurrent_marking_in_progress();
}

ConcurrentMark::~ConcurrentMark() {
  // The ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}

void ConcurrentMark::clearNextBitmap() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  // Make sure that the concurrent mark thread looks to still be in
  // the current cycle.
  guarantee(cmThread()->during_cycle(), "invariant");

  // We are finishing up the current cycle by clearing the next
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
  guarantee(!g1h->mark_in_progress(), "invariant");

  // clear the mark bitmap (no grey objects to start with).
  // We need to do this in chunks and offer to yield in between
  // each chunk.
  HeapWord* start = _nextMarkBitMap->startWord();
  HeapWord* end = _nextMarkBitMap->endWord();
  HeapWord* cur = start;
  size_t chunkSize = M;
  while (cur < end) {
    HeapWord* next = cur + chunkSize;
    if (next > end) {
      next = end;
    }
    MemRegion mr(cur, next);
    _nextMarkBitMap->clearRange(mr);
    cur = next;
    do_yield_check();

    // Repeat the asserts from above. We'll do them as asserts here to
    // minimize their overhead on the product. However, we'll have
    // them as guarantees at the beginning / end of the bitmap
    // clearing to get some checking in the product.
    assert(cmThread()->during_cycle(), "invariant");
    assert(!g1h->mark_in_progress(), "invariant");
  }

  // Clear the liveness counting data
  clear_all_count_data();

  // Repeat the asserts from above.
  guarantee(cmThread()->during_cycle(), "invariant");
  guarantee(!g1h->mark_in_progress(), "invariant");
}

bool ConcurrentMark::nextMarkBitmapIsClear() {
  return _nextMarkBitMap->getNextMarkedWordAddress(_heap_start, _heap_end) == _heap_end;
}

class NoteStartOfMarkHRClosure: public HeapRegionClosure {
 public:
  bool doHeapRegion(HeapRegion* r) {
    if (!r->continuesHumongous()) {
      r->note_start_of_marking();
    }
    return false;
  }
};

void ConcurrentMark::checkpointRootsInitialPre() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  _has_aborted = false;

#ifndef PRODUCT
  if (G1PrintReachableAtInitialMark) {
    print_reachable("at-cycle-start",
                    VerifyOption_G1UsePrevMarking, true /* all */);
  }
#endif

  // Initialise marking structures. This has to be done in a STW phase.
  reset();

  // For each region note start of marking.
  NoteStartOfMarkHRClosure startcl;
  g1h->heap_region_iterate(&startcl);
}

void ConcurrentMark::checkpointRootsInitialPost() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If we force an overflow during remark, the remark operation will
  // actually abort and we'll restart concurrent marking. If we always
  // force an overflow during remark we'll never actually complete the
  // marking phase. So, we initialize this here, at the start of the
  // cycle, so that the remaining overflow number will decrease at
  // every remark and we'll eventually not need to cause one.
  force_overflow_stw()->init();

  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  // This is the start of the marking cycle; we expect all
  // threads to have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);

  _root_regions.prepare_for_scan();

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on. So, it's also called at the end of the
  // initial-mark pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}

/*
 * Notice that in the next two methods, we actually leave the STS
 * during the barrier sync and join it immediately afterwards. If we
 * do not do this, the following deadlock can occur: one thread could
 * be in the barrier sync code, waiting for the other thread to also
 * sync up, whereas another one could be trying to yield, while also
 * waiting for the other threads to sync up too.
 *
 * Note, however, that this code is also used during remark and in
 * this case we should not attempt to leave / enter the STS, otherwise
 * we'll either hit an assert (debug / fastdebug) or deadlock
 * (product). So we should only leave / enter the STS if we are
 * operating concurrently.
 *
 * Because the thread that does the sync barrier has left the STS, it
 * is possible that a Full GC or an evacuation pause could occur while
 * it is suspended. This is actually safe, since entering the sync
 * barrier is one of the last things do_marking_step() does, and it
 * doesn't manipulate any data structures afterwards.
 */

void ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] entering first barrier", worker_id);
  }

  if (concurrent()) {
    SuspendibleThreadSet::leave();
  }

  bool barrier_aborted = !_first_overflow_barrier_sync.enter();

  if (concurrent()) {
    SuspendibleThreadSet::join();
  }
  // at this point everyone should have synced up and not be doing any
  // more work

  if (verbose_low()) {
    if (barrier_aborted) {
      gclog_or_tty->print_cr("[%u] aborted first barrier", worker_id);
    } else {
      gclog_or_tty->print_cr("[%u] leaving first barrier", worker_id);
    }
  }

  if (barrier_aborted) {
    // If the barrier aborted we ignore the overflow condition and
    // just abort the whole marking phase as quickly as possible.
    return;
  }

  // If we're executing the concurrent phase of marking, reset the marking
  // state; otherwise the marking state is reset after reference processing,
  // during the remark pause.
  // If we reset here as a result of an overflow during the remark we will
  // see assertion failures from any subsequent set_concurrency_and_phase()
  // calls.
  if (concurrent()) {
    // let the task associated with worker 0 do this
    if (worker_id == 0) {
      // task 0 is responsible for clearing the global data structures
      // We should be here because of an overflow. During STW we should
      // not clear the overflow flag since we rely on it being true when
      // we exit this method to abort the pause and restart concurrent
      // marking.
      reset_marking_state(true /* clear_overflow */);
      force_overflow()->update();

      if (G1Log::fine()) {
        gclog_or_tty->gclog_stamp(concurrent_gc_id());
        gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
      }
    }
  }

  // after this, each task should reset its own data structures and
  // then go into the second barrier
}

void ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] entering second barrier", worker_id);
  }

  if (concurrent()) {
    SuspendibleThreadSet::leave();
  }

  bool barrier_aborted = !_second_overflow_barrier_sync.enter();

  if (concurrent()) {
    SuspendibleThreadSet::join();
  }
  // at this point everything should be re-initialized and ready to go

  if (verbose_low()) {
    if (barrier_aborted) {
      gclog_or_tty->print_cr("[%u] aborted second barrier", worker_id);
    } else {
      gclog_or_tty->print_cr("[%u] leaving second barrier", worker_id);
    }
  }
}

#ifndef PRODUCT
void ForceOverflowSettings::init() {
  _num_remaining = G1ConcMarkForceOverflow;
  _force = false;
  update();
}

void ForceOverflowSettings::update() {
  if (_num_remaining > 0) {
    _num_remaining -= 1;
    _force = true;
  } else {
    _force = false;
  }
}

bool ForceOverflowSettings::should_force() {
  if (_force) {
    _force = false;
    return true;
  } else {
    return false;
  }
}
#endif // !PRODUCT
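
// ForceOverflowSettings is a debug-only testing knob: with
// -XX:G1ConcMarkForceOverflow=N, marking artificially forces a mark
// stack overflow up to N times (update() re-arms _force while
// _num_remaining > 0) so that the overflow/restart machinery above
// gets exercised.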

class CMConcurrentMarkingTask: public AbstractGangTask {
 private:
  ConcurrentMark*       _cm;
  ConcurrentMarkThread* _cmt;

 public:
  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");
    ResourceMark rm;

    double start_vtime = os::elapsedVTime();

    SuspendibleThreadSet::join();

    assert(worker_id < _cm->active_tasks(), "invariant");
    CMTask* the_task = _cm->task(worker_id);
    the_task->record_start_time();
    if (!_cm->has_aborted()) {
      do {
        double start_vtime_sec = os::elapsedVTime();
        double start_time_sec = os::elapsedTime();
        double mark_step_duration_ms = G1ConcMarkStepDurationMillis;

        the_task->do_marking_step(mark_step_duration_ms,
                                  true  /* do_termination */,
                                  false /* is_serial*/);

        double end_time_sec = os::elapsedTime();
        double end_vtime_sec = os::elapsedVTime();
        double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
        double elapsed_time_sec = end_time_sec - start_time_sec;
        _cm->clear_has_overflown();

        bool ret = _cm->do_yield_check(worker_id);

        jlong sleep_time_ms;
        if (!_cm->has_aborted() && the_task->has_aborted()) {
          sleep_time_ms =
            (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
          SuspendibleThreadSet::leave();
          os::sleep(Thread::current(), sleep_time_ms, false);
          SuspendibleThreadSet::join();
        }
        double end_time2_sec = os::elapsedTime();
        double elapsed_time2_sec = end_time2_sec - start_time_sec;

#if 0
        gclog_or_tty->print_cr("CM: elapsed %1.4lf ms, sleep %1.4lf ms, "
                               "overhead %1.4lf",
                               elapsed_vtime_sec * 1000.0, (double) sleep_time_ms,
                               the_task->conc_overhead(os::elapsedTime()) * 8.0);
        gclog_or_tty->print_cr("elapsed time %1.4lf ms, time 2: %1.4lf ms",
                               elapsed_time_sec * 1000.0, elapsed_time2_sec * 1000.0);
#endif
      } while (!_cm->has_aborted() && the_task->has_aborted());
    }
    the_task->record_end_time();
    guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");

    SuspendibleThreadSet::leave();

    double end_vtime = os::elapsedVTime();
    _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
  }

  CMConcurrentMarkingTask(ConcurrentMark* cm,
                          ConcurrentMarkThread* cmt) :
      AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }

  ~CMConcurrentMarkingTask() { }
};

// Calculates the number of active workers for a concurrent
// phase.
uint ConcurrentMark::calc_parallel_marking_threads() {
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    uint n_conc_workers = 0;
    if (!UseDynamicNumberOfGCThreads ||
        (!FLAG_IS_DEFAULT(ConcGCThreads) &&
         !ForceDynamicNumberOfGCThreads)) {
      n_conc_workers = max_parallel_marking_threads();
    } else {
      n_conc_workers =
        AdaptiveSizePolicy::calc_default_active_workers(
                                     max_parallel_marking_threads(),
                                     1, /* Minimum workers */
                                     parallel_marking_threads(),
                                     Threads::number_of_non_daemon_threads());
      // Don't scale down "n_conc_workers" by scale_parallel_threads() because
      // that scaling has already gone into "_max_parallel_marking_threads".
    }
    assert(n_conc_workers > 0, "Always need at least 1");
    return n_conc_workers;
  }
  // If we are not running with any parallel GC threads we will not
  // have spawned any marking threads either. Hence the number of
  // concurrent workers should be 0.
  return 0;
}

void ConcurrentMark::scanRootRegion(HeapRegion* hr, uint worker_id) {
  // Currently, only survivors can be root regions.
  assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
  G1RootRegionScanClosure cl(_g1h, this, worker_id);

  const uintx interval = PrefetchScanIntervalInBytes;
  HeapWord* curr = hr->bottom();
  const HeapWord* end = hr->top();
  while (curr < end) {
    Prefetch::read(curr, interval);
    oop obj = oop(curr);
    int size = obj->oop_iterate(&cl);
    assert(size == obj->size(), "sanity");
    curr += size;
  }
}
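
// Root regions (currently only the survivor regions of the initial-mark
// pause) hold objects that must be treated as live for this marking
// cycle, so they are scanned up front; an evacuation pause will wait
// for this scan to finish before it is allowed to move their contents.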

class CMRootRegionScanTask : public AbstractGangTask {
 private:
  ConcurrentMark* _cm;

 public:
  CMRootRegionScanTask(ConcurrentMark* cm) :
    AbstractGangTask("Root Region Scan"), _cm(cm) { }

  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");

    CMRootRegions* root_regions = _cm->root_regions();
    HeapRegion* hr = root_regions->claim_next();
    while (hr != NULL) {
      _cm->scanRootRegion(hr, worker_id);
      hr = root_regions->claim_next();
    }
  }
};

void ConcurrentMark::scanRootRegions() {
  // Start of concurrent marking.
  ClassLoaderDataGraph::clear_claimed_marks();

  // scan_in_progress() will have been set to true only if there was
  // at least one root region to scan. So, if it's false, we
  // should not attempt to do any further work.
  if (root_regions()->scan_in_progress()) {
    _parallel_marking_threads = calc_parallel_marking_threads();
    assert(parallel_marking_threads() <= max_parallel_marking_threads(),
           "Maximum number of marking threads exceeded");
    uint active_workers = MAX2(1U, parallel_marking_threads());

    CMRootRegionScanTask task(this);
    if (use_parallel_marking_threads()) {
      _parallel_workers->set_active_workers((int) active_workers);
      _parallel_workers->run_task(&task);
    } else {
      task.work(0);
    }

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
    root_regions()->scan_finished();
  }
}

void ConcurrentMark::markFromRoots() {
  // we might be tempted to assert that:
  // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
  //        "inconsistent argument?");
  // However that wouldn't be right, because it's possible that
  // a safepoint is indeed in progress as a younger generation
  // stop-the-world GC happens even as we mark in this generation.

  _restart_for_overflow = false;
  force_overflow_conc()->init();

  // _g1h has _n_par_threads
  _parallel_marking_threads = calc_parallel_marking_threads();
  assert(parallel_marking_threads() <= max_parallel_marking_threads(),
         "Maximum number of marking threads exceeded");

  uint active_workers = MAX2(1U, parallel_marking_threads());

  // Parallel task terminator is set in "set_concurrency_and_phase()"
  set_concurrency_and_phase(active_workers, true /* concurrent */);

  CMConcurrentMarkingTask markingTask(this, cmThread());
  if (use_parallel_marking_threads()) {
    _parallel_workers->set_active_workers((int)active_workers);
    // Don't set _n_par_threads because it affects MT in process_roots()
    // and the decisions on that MT processing is made elsewhere.
    assert(_parallel_workers->active_workers() > 0, "Should have been set");
    _parallel_workers->run_task(&markingTask);
  } else {
    markingTask.work(0);
  }
  print_stats();
}

void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->set_marking_complete(); // So bitmap clearing isn't confused
    return;
  }

  SvcGCMarker sgcm(SvcGCMarker::OTHER);

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    Universe::heap()->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking,
                     " VerifyDuringGC:(before)");
  }
  g1h->check_bitmaps("Remark Start");

  G1CollectorPolicy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_remark_start();

  double start = os::elapsedTime();

  checkpointRootsFinalWork();

  double mark_work_end = os::elapsedTime();

  weakRefsWork(clear_all_soft_refs);

  if (has_overflown()) {
    // Oops. We overflowed. Restart concurrent marking.
    _restart_for_overflow = true;
    if (G1TraceMarkStackOverflow) {
      gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
    }

    // Verify the heap w.r.t. the previous marking bitmap.
    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      Universe::heap()->prepare_for_verify();
      Universe::verify(VerifyOption_G1UsePrevMarking,
                       " VerifyDuringGC:(overflow)");
    }

    // Clear the marking state because we will be restarting
    // marking due to overflowing the global mark stack.
    reset_marking_state();
  } else {
    // Aggregate the per-task counting data that we have accumulated
    // while marking.
    aggregate_count_data();

    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
    // We're done with marking.
    // This is the end of the marking cycle; we expect all
    // threads to have SATB queues with active set to true.
    satb_mq_set.set_active_all_threads(false, /* new active value */
                                       true /* expected_active */);

    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      Universe::heap()->prepare_for_verify();
      Universe::verify(VerifyOption_G1UseNextMarking,
                       " VerifyDuringGC:(after)");
    }
    g1h->check_bitmaps("Remark End");
    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed
    set_non_marking_state();
  }

  // Expand the marking stack, if we have to and if we can.
  if (_markStack.should_expand()) {
    _markStack.expand();
  }

  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);

  g1p->record_concurrent_mark_remark_end();

  G1CMIsAliveClosure is_alive(g1h);
  g1h->gc_tracer_cm()->report_object_count_after_gc(&is_alive);
}

// Base class of the closures that finalize and verify the
// liveness counting data.
class CMCountDataClosureBase: public HeapRegionClosure {
 protected:
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CardTableModRefBS* _ct_bs;

  BitMap* _region_bm;
  BitMap* _card_bm;

  // Takes a region that's not empty (i.e., it has at least one
  // live object in it) and sets its corresponding bit on the region
  // bitmap to 1. If the region is "starts humongous" it will also set
  // to 1 the bits on the region bitmap that correspond to its
  // associated "continues humongous" regions.
  void set_bit_for_region(HeapRegion* hr) {
    assert(!hr->continuesHumongous(), "should have filtered those out");

    BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();
    if (!hr->startsHumongous()) {
      // Normal (non-humongous) case: just set the bit.
      _region_bm->par_at_put(index, true);
    } else {
      // Starts humongous case: calculate how many regions are part of
      // this humongous region and then set the bit range.
      BitMap::idx_t end_index = (BitMap::idx_t) hr->last_hc_index();
      _region_bm->par_at_put_range(index, end_index, true);
    }
  }

 public:
  CMCountDataClosureBase(G1CollectedHeap* g1h,
                         BitMap* region_bm, BitMap* card_bm):
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _ct_bs((CardTableModRefBS*) (g1h->barrier_set())),
    _region_bm(region_bm), _card_bm(card_bm) { }
};

// Closure that calculates the # live objects per region. Used
// for verification purposes during the cleanup pause.
class CalcLiveObjectsClosure: public CMCountDataClosureBase {
  CMBitMapRO* _bm;
  size_t _region_marked_bytes;

 public:
  CalcLiveObjectsClosure(CMBitMapRO *bm, G1CollectedHeap* g1h,
                         BitMap* region_bm, BitMap* card_bm) :
    CMCountDataClosureBase(g1h, region_bm, card_bm),
    _bm(bm), _region_marked_bytes(0) { }

  bool doHeapRegion(HeapRegion* hr) {

    if (hr->continuesHumongous()) {
      // We will ignore these here and process them when their
      // associated "starts humongous" region is processed (see
      // set_bit_for_heap_region()). Note that we cannot rely on their
      // associated "starts humongous" region to have their bit set to
      // 1 since, due to the region chunking in the parallel region
      // iteration, a "continues humongous" region might be visited
      // before its associated "starts humongous".
      return false;
    }

    HeapWord* ntams = hr->next_top_at_mark_start();
    HeapWord* start = hr->bottom();

    assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
           err_msg("Preconditions not met - "
                   "start: "PTR_FORMAT", ntams: "PTR_FORMAT", end: "PTR_FORMAT,
                   p2i(start), p2i(ntams), p2i(hr->end())));

    // Find the first marked object at or after "start".
    start = _bm->getNextMarkedWordAddress(start, ntams);

    size_t marked_bytes = 0;

    while (start < ntams) {
      oop obj = oop(start);
      int obj_sz = obj->size();
      HeapWord* obj_end = start + obj_sz;

      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end);

      // Note: if we're looking at the last region in heap - obj_end
      // could be actually just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
      if (_g1h->is_in_g1_reserved(obj_end) && !_ct_bs->is_card_aligned(obj_end)) {
        // end of object is not card aligned - increment to cover
        // all the cards spanned by the object
        end_idx += 1;
      }

      // Set the bits in the card BM for the cards spanned by this object.
      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);

      // Add the size of this object to the number of marked bytes.
      marked_bytes += (size_t)obj_sz * HeapWordSize;

      // Find the next marked object after this one.
      start = _bm->getNextMarkedWordAddress(obj_end, ntams);
    }

    // Mark the allocated-since-marking portion...
    HeapWord* top = hr->top();
    if (ntams < top) {
      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);

      // Note: if we're looking at the last region in heap - top
      // could be actually just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
      if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
        // end of object is not card aligned - increment to cover
        // all the cards spanned by the object
        end_idx += 1;
      }
      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);

      // This definitely means the region has live objects.
      set_bit_for_region(hr);
    }

    // Update the live region bitmap.
    if (marked_bytes > 0) {
      set_bit_for_region(hr);
    }

    // Set the marked bytes for the current region so that
    // it can be queried by a calling verification routine
    _region_marked_bytes = marked_bytes;

    return false;
  }

  size_t region_marked_bytes() const { return _region_marked_bytes; }
};

// Heap region closure used for verifying the counting data
// that was accumulated concurrently and aggregated during
// the remark pause. This closure is applied to the heap
// regions during the STW cleanup pause.

class VerifyLiveObjectDataHRClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CalcLiveObjectsClosure _calc_cl;
  BitMap* _region_bm;   // Region BM to be verified
  BitMap* _card_bm;     // Card BM to be verified
  bool _verbose;        // verbose output?

  BitMap* _exp_region_bm; // Expected Region BM values
  BitMap* _exp_card_bm;   // Expected card BM values

  int _failures;

 public:
  VerifyLiveObjectDataHRClosure(G1CollectedHeap* g1h,
                                BitMap* region_bm,
                                BitMap* card_bm,
                                BitMap* exp_region_bm,
                                BitMap* exp_card_bm,
                                bool verbose) :
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _calc_cl(_cm->nextMarkBitMap(), g1h, exp_region_bm, exp_card_bm),
    _region_bm(region_bm), _card_bm(card_bm), _verbose(verbose),
    _exp_region_bm(exp_region_bm), _exp_card_bm(exp_card_bm),
    _failures(0) { }

  int failures() const { return _failures; }

  bool doHeapRegion(HeapRegion* hr) {
    if (hr->continuesHumongous()) {
      // We will ignore these here and process them when their
      // associated "starts humongous" region is processed (see
      // set_bit_for_heap_region()). Note that we cannot rely on their
      // associated "starts humongous" region to have their bit set to
      // 1 since, due to the region chunking in the parallel region
      // iteration, a "continues humongous" region might be visited
      // before its associated "starts humongous".
      return false;
    }

    int failures = 0;

    // Call the CalcLiveObjectsClosure to walk the marking bitmap for
    // this region and set the corresponding bits in the expected region
    // and card bitmaps.
    bool res = _calc_cl.doHeapRegion(hr);
    assert(res == false, "should be continuing");

    MutexLockerEx x((_verbose ? ParGCRareEvent_lock : NULL),
                    Mutex::_no_safepoint_check_flag);

    // Verify the marked bytes for this region.
    size_t exp_marked_bytes = _calc_cl.region_marked_bytes();
    size_t act_marked_bytes = hr->next_marked_bytes();

    // We're not OK if expected marked bytes > actual marked bytes. It means
    // we have missed accounting some objects during the actual marking.
    if (exp_marked_bytes > act_marked_bytes) {
      if (_verbose) {
        gclog_or_tty->print_cr("Region %u: marked bytes mismatch: "
                               "expected: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
                               hr->hrs_index(), exp_marked_bytes, act_marked_bytes);
      }
      failures += 1;
    }

    // Verify the bit, for this region, in the actual and expected
    // (which was just calculated) region bit maps.
    // We're not OK if the bit in the calculated expected region
    // bitmap is set and the bit in the actual region bitmap is not.
    BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();

    bool expected = _exp_region_bm->at(index);
    bool actual = _region_bm->at(index);
    if (expected && !actual) {
      if (_verbose) {
        gclog_or_tty->print_cr("Region %u: region bitmap mismatch: "
                               "expected: %s, actual: %s",
                               hr->hrs_index(),
                               BOOL_TO_STR(expected), BOOL_TO_STR(actual));
      }
      failures += 1;
    }

    // Verify that the card bit maps for the cards spanned by the current
    // region match. We have an error if we have a set bit in the expected
    // bit map and the corresponding bit in the actual bitmap is not set.

    BitMap::idx_t start_idx = _cm->card_bitmap_index_for(hr->bottom());
    BitMap::idx_t end_idx = _cm->card_bitmap_index_for(hr->top());

    for (BitMap::idx_t i = start_idx; i < end_idx; i+=1) {
      expected = _exp_card_bm->at(i);
      actual = _card_bm->at(i);

      if (expected && !actual) {
        if (_verbose) {
          gclog_or_tty->print_cr("Region %u: card bitmap mismatch at " SIZE_FORMAT ": "
                                 "expected: %s, actual: %s",
                                 hr->hrs_index(), i,
                                 BOOL_TO_STR(expected), BOOL_TO_STR(actual));
        }
        failures += 1;
      }
    }

    if (failures > 0 && _verbose) {
      gclog_or_tty->print_cr("Region " HR_FORMAT ", ntams: " PTR_FORMAT ", "
                             "marked_bytes: calc/actual " SIZE_FORMAT "/" SIZE_FORMAT,
                             HR_FORMAT_PARAMS(hr), p2i(hr->next_top_at_mark_start()),
                             _calc_cl.region_marked_bytes(), hr->next_marked_bytes());
    }

    _failures += failures;

    // We could stop iteration over the heap when we
    // find the first violating region by returning true.
    return false;
  }
};
1652 class G1ParVerifyFinalCountTask: public AbstractGangTask {
1653 protected:
1654 G1CollectedHeap* _g1h;
1655 ConcurrentMark* _cm;
1656 BitMap* _actual_region_bm;
1657 BitMap* _actual_card_bm;
1659 uint _n_workers;
1661 BitMap* _expected_region_bm;
1662 BitMap* _expected_card_bm;
1664 int _failures;
1665 bool _verbose;
1667 public:
1668 G1ParVerifyFinalCountTask(G1CollectedHeap* g1h,
1669 BitMap* region_bm, BitMap* card_bm,
1670 BitMap* expected_region_bm, BitMap* expected_card_bm)
1671 : AbstractGangTask("G1 verify final counting"),
1672 _g1h(g1h), _cm(_g1h->concurrent_mark()),
1673 _actual_region_bm(region_bm), _actual_card_bm(card_bm),
1674 _expected_region_bm(expected_region_bm), _expected_card_bm(expected_card_bm),
1675 _failures(0), _verbose(false),
1676 _n_workers(0) {
1677 assert(VerifyDuringGC, "don't call this otherwise");
1679 // Use the value already set as the number of active threads
1680 // in the call to run_task().
1681 if (G1CollectedHeap::use_parallel_gc_threads()) {
1682 assert( _g1h->workers()->active_workers() > 0,
1683 "Should have been previously set");
1684 _n_workers = _g1h->workers()->active_workers();
1685 } else {
1686 _n_workers = 1;
1687 }
1689 assert(_expected_card_bm->size() == _actual_card_bm->size(), "sanity");
1690 assert(_expected_region_bm->size() == _actual_region_bm->size(), "sanity");
1692 _verbose = _cm->verbose_medium();
1693 }
1695 void work(uint worker_id) {
1696 assert(worker_id < _n_workers, "invariant");
1698 VerifyLiveObjectDataHRClosure verify_cl(_g1h,
1699 _actual_region_bm, _actual_card_bm,
1700 _expected_region_bm,
1701 _expected_card_bm,
1702 _verbose);
1704 if (G1CollectedHeap::use_parallel_gc_threads()) {
1705 _g1h->heap_region_par_iterate_chunked(&verify_cl,
1706 worker_id,
1707 _n_workers,
1708 HeapRegion::VerifyCountClaimValue);
1709 } else {
1710 _g1h->heap_region_iterate(&verify_cl);
1711 }
1713 Atomic::add(verify_cl.failures(), &_failures);
1714 }
1716 int failures() const { return _failures; }
1717 };
1719 // Closure that finalizes the liveness counting data.
1720 // Used during the cleanup pause.
1721 // Sets the bits corresponding to the interval [NTAMS, top]
1722 // (which contains the implicitly live objects) in the
1723 // card liveness bitmap. Also sets the bit for each region,
1724 // containing live data, in the region liveness bitmap.
1726 class FinalCountDataUpdateClosure: public CMCountDataClosureBase {
1727 public:
1728 FinalCountDataUpdateClosure(G1CollectedHeap* g1h,
1729 BitMap* region_bm,
1730 BitMap* card_bm) :
1731 CMCountDataClosureBase(g1h, region_bm, card_bm) { }
1733 bool doHeapRegion(HeapRegion* hr) {
1735 if (hr->continuesHumongous()) {
1736 // We will ignore these here and process them when their
1737 // associated "starts humongous" region is processed (see
1738 // set_bit_for_heap_region()). Note that we cannot rely on their
1739 // associated "starts humongous" region to have their bit set to
1740 // 1 since, due to the region chunking in the parallel region
1741 // iteration, a "continues humongous" region might be visited
1742 // before its associated "starts humongous".
1743 return false;
1744 }
1746 HeapWord* ntams = hr->next_top_at_mark_start();
1747 HeapWord* top = hr->top();
1749 assert(hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions.");
1751 // Mark the allocated-since-marking portion...
1752 if (ntams < top) {
1753 // This definitely means the region has live objects.
1754 set_bit_for_region(hr);
1756 // Now set the bits in the card bitmap for [ntams, top)
1757 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
1758 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
1760 // Note: if we're looking at the last region in the heap, top
1761 // could actually be just beyond the end of the heap; end_idx
1762 // will then correspond to a (non-existent) card that is also
1763 // just beyond the heap.
1764 if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
1765 // end of object is not card aligned - increment to cover
1766 // all the cards spanned by the object
1767 end_idx += 1;
1768 }
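// (Illustrative example, not in the original source, assuming the usual
// 512-byte cards) If ntams = 0x1000 and top = 0x1300, then
// card_bitmap_index_for() rounds down: start_idx maps to the card at
// 0x1000 and end_idx to the card at 0x1200. Without the increment above,
// the half-open range [start_idx, end_idx) would exclude the partially
// covered card [0x1200, 0x1400) that holds the tail of the object.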
1770 assert(end_idx <= _card_bm->size(),
1771 err_msg("oob: end_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT,
1772 end_idx, _card_bm->size()));
1773 assert(start_idx < _card_bm->size(),
1774 err_msg("oob: start_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT,
1775 start_idx, _card_bm->size()));
1777 _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1778 }
1780 // Set the bit for the region if it contains live data
1781 if (hr->next_marked_bytes() > 0) {
1782 set_bit_for_region(hr);
1783 }
1785 return false;
1786 }
1787 };
1789 class G1ParFinalCountTask: public AbstractGangTask {
1790 protected:
1791 G1CollectedHeap* _g1h;
1792 ConcurrentMark* _cm;
1793 BitMap* _actual_region_bm;
1794 BitMap* _actual_card_bm;
1796 uint _n_workers;
1798 public:
1799 G1ParFinalCountTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm)
1800 : AbstractGangTask("G1 final counting"),
1801 _g1h(g1h), _cm(_g1h->concurrent_mark()),
1802 _actual_region_bm(region_bm), _actual_card_bm(card_bm),
1803 _n_workers(0) {
1804 // Use the value already set as the number of active threads
1805 // in the call to run_task().
1806 if (G1CollectedHeap::use_parallel_gc_threads()) {
1807 assert( _g1h->workers()->active_workers() > 0,
1808 "Should have been previously set");
1809 _n_workers = _g1h->workers()->active_workers();
1810 } else {
1811 _n_workers = 1;
1812 }
1813 }
1815 void work(uint worker_id) {
1816 assert(worker_id < _n_workers, "invariant");
1818 FinalCountDataUpdateClosure final_update_cl(_g1h,
1819 _actual_region_bm,
1820 _actual_card_bm);
1822 if (G1CollectedHeap::use_parallel_gc_threads()) {
1823 _g1h->heap_region_par_iterate_chunked(&final_update_cl,
1824 worker_id,
1825 _n_workers,
1826 HeapRegion::FinalCountClaimValue);
1827 } else {
1828 _g1h->heap_region_iterate(&final_update_cl);
1829 }
1830 }
1831 };
1833 class G1ParNoteEndTask;
1835 class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
1836 G1CollectedHeap* _g1;
1837 size_t _max_live_bytes;
1838 uint _regions_claimed;
1839 size_t _freed_bytes;
1840 FreeRegionList* _local_cleanup_list;
1841 HeapRegionSetCount _old_regions_removed;
1842 HeapRegionSetCount _humongous_regions_removed;
1843 HRRSCleanupTask* _hrrs_cleanup_task;
1844 double _claimed_region_time;
1845 double _max_region_time;
1847 public:
1848 G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
1849 FreeRegionList* local_cleanup_list,
1850 HRRSCleanupTask* hrrs_cleanup_task) :
1851 _g1(g1),
1852 _max_live_bytes(0), _regions_claimed(0),
1853 _freed_bytes(0),
1854 _claimed_region_time(0.0), _max_region_time(0.0),
1855 _local_cleanup_list(local_cleanup_list),
1856 _old_regions_removed(),
1857 _humongous_regions_removed(),
1858 _hrrs_cleanup_task(hrrs_cleanup_task) { }
1860 size_t freed_bytes() { return _freed_bytes; }
1861 const HeapRegionSetCount& old_regions_removed() { return _old_regions_removed; }
1862 const HeapRegionSetCount& humongous_regions_removed() { return _humongous_regions_removed; }
1864 bool doHeapRegion(HeapRegion *hr) {
1865 if (hr->continuesHumongous()) {
1866 return false;
1867 }
1868 // We use a claim value of zero here because all regions
1869 // were claimed with value 1 in the FinalCount task.
1870 _g1->reset_gc_time_stamps(hr);
1871 double start = os::elapsedTime();
1872 _regions_claimed++;
1873 hr->note_end_of_marking();
1874 _max_live_bytes += hr->max_live_bytes();
1876 if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
1877 _freed_bytes += hr->used();
1878 hr->set_containing_set(NULL);
1879 if (hr->isHumongous()) {
1880 assert(hr->startsHumongous(), "we should only see starts humongous");
1881 _humongous_regions_removed.increment(1u, hr->capacity());
1882 _g1->free_humongous_region(hr, _local_cleanup_list, true);
1883 } else {
1884 _old_regions_removed.increment(1u, hr->capacity());
1885 _g1->free_region(hr, _local_cleanup_list, true);
1886 }
1887 } else {
1888 hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
1889 }
1891 double region_time = (os::elapsedTime() - start);
1892 _claimed_region_time += region_time;
1893 if (region_time > _max_region_time) {
1894 _max_region_time = region_time;
1895 }
1896 return false;
1897 }
1899 size_t max_live_bytes() { return _max_live_bytes; }
1900 uint regions_claimed() { return _regions_claimed; }
1901 double claimed_region_time_sec() { return _claimed_region_time; }
1902 double max_region_time_sec() { return _max_region_time; }
1903 };
1905 class G1ParNoteEndTask: public AbstractGangTask {
1906 friend class G1NoteEndOfConcMarkClosure;
1908 protected:
1909 G1CollectedHeap* _g1h;
1910 size_t _max_live_bytes;
1911 size_t _freed_bytes;
1912 FreeRegionList* _cleanup_list;
1914 public:
1915 G1ParNoteEndTask(G1CollectedHeap* g1h,
1916 FreeRegionList* cleanup_list) :
1917 AbstractGangTask("G1 note end"), _g1h(g1h),
1918 _max_live_bytes(0), _freed_bytes(0), _cleanup_list(cleanup_list) { }
1920 void work(uint worker_id) {
1921 double start = os::elapsedTime();
1922 FreeRegionList local_cleanup_list("Local Cleanup List");
1923 HRRSCleanupTask hrrs_cleanup_task;
1924 G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list,
1925 &hrrs_cleanup_task);
1926 if (G1CollectedHeap::use_parallel_gc_threads()) {
1927 _g1h->heap_region_par_iterate_chunked(&g1_note_end, worker_id,
1928 _g1h->workers()->active_workers(),
1929 HeapRegion::NoteEndClaimValue);
1930 } else {
1931 _g1h->heap_region_iterate(&g1_note_end);
1932 }
1933 assert(g1_note_end.complete(), "Shouldn't have yielded!");
1935 // Now update the lists
1936 _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed());
1937 {
1938 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
1939 _g1h->decrement_summary_bytes(g1_note_end.freed_bytes());
1940 _max_live_bytes += g1_note_end.max_live_bytes();
1941 _freed_bytes += g1_note_end.freed_bytes();
1943 // If we iterate over the global cleanup list at the end of
1944 // cleanup to do this printing we will not guarantee to only
1945 // generate output for the newly-reclaimed regions (the list
1946 // might not be empty at the beginning of cleanup; we might
1947 // still be working on its previous contents). So we do the
1948 // printing here, before we append the new regions to the global
1949 // cleanup list.
1951 G1HRPrinter* hr_printer = _g1h->hr_printer();
1952 if (hr_printer->is_active()) {
1953 FreeRegionListIterator iter(&local_cleanup_list);
1954 while (iter.more_available()) {
1955 HeapRegion* hr = iter.get_next();
1956 hr_printer->cleanup(hr);
1957 }
1958 }
1960 _cleanup_list->add_ordered(&local_cleanup_list);
1961 assert(local_cleanup_list.is_empty(), "post-condition");
1963 HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
1964 }
1965 }
1966 size_t max_live_bytes() { return _max_live_bytes; }
1967 size_t freed_bytes() { return _freed_bytes; }
1968 };
1970 class G1ParScrubRemSetTask: public AbstractGangTask {
1971 protected:
1972 G1RemSet* _g1rs;
1973 BitMap* _region_bm;
1974 BitMap* _card_bm;
1975 public:
1976 G1ParScrubRemSetTask(G1CollectedHeap* g1h,
1977 BitMap* region_bm, BitMap* card_bm) :
1978 AbstractGangTask("G1 ScrubRS"), _g1rs(g1h->g1_rem_set()),
1979 _region_bm(region_bm), _card_bm(card_bm) { }
1981 void work(uint worker_id) {
1982 if (G1CollectedHeap::use_parallel_gc_threads()) {
1983 _g1rs->scrub_par(_region_bm, _card_bm, worker_id,
1984 HeapRegion::ScrubRemSetClaimValue);
1985 } else {
1986 _g1rs->scrub(_region_bm, _card_bm);
1987 }
1988 }
1990 };
1992 void ConcurrentMark::cleanup() {
1993 // world is stopped at this checkpoint
1994 assert(SafepointSynchronize::is_at_safepoint(),
1995 "world should be stopped");
1996 G1CollectedHeap* g1h = G1CollectedHeap::heap();
1998 // If a full collection has happened, we shouldn't do this.
1999 if (has_aborted()) {
2000 g1h->set_marking_complete(); // So bitmap clearing isn't confused
2001 return;
2002 }
2004 g1h->verify_region_sets_optional();
2006 if (VerifyDuringGC) {
2007 HandleMark hm; // handle scope
2008 Universe::heap()->prepare_for_verify();
2009 Universe::verify(VerifyOption_G1UsePrevMarking,
2010 " VerifyDuringGC:(before)");
2011 }
2012 g1h->check_bitmaps("Cleanup Start");
2014 G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
2015 g1p->record_concurrent_mark_cleanup_start();
2017 double start = os::elapsedTime();
2019 HeapRegionRemSet::reset_for_cleanup_tasks();
2021 uint n_workers;
2023 // Do counting once more with the world stopped for good measure.
2024 G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm);
2026 if (G1CollectedHeap::use_parallel_gc_threads()) {
2027 assert(g1h->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
2028 "sanity check");
2030 g1h->set_par_threads();
2031 n_workers = g1h->n_par_threads();
2032 assert(g1h->n_par_threads() == n_workers,
2033 "Should not have been reset");
2034 g1h->workers()->run_task(&g1_par_count_task);
2035 // Done with the parallel phase so reset to 0.
2036 g1h->set_par_threads(0);
2038 assert(g1h->check_heap_region_claim_values(HeapRegion::FinalCountClaimValue),
2039 "sanity check");
2040 } else {
2041 n_workers = 1;
2042 g1_par_count_task.work(0);
2043 }
2045 if (VerifyDuringGC) {
2046 // Verify that the counting data accumulated during marking matches
2047 // that calculated by walking the marking bitmap.
2049 // Bitmaps to hold expected values
2050 BitMap expected_region_bm(_region_bm.size(), true);
2051 BitMap expected_card_bm(_card_bm.size(), true);
2053 G1ParVerifyFinalCountTask g1_par_verify_task(g1h,
2054 &_region_bm,
2055 &_card_bm,
2056 &expected_region_bm,
2057 &expected_card_bm);
2059 if (G1CollectedHeap::use_parallel_gc_threads()) {
2060 g1h->set_par_threads((int)n_workers);
2061 g1h->workers()->run_task(&g1_par_verify_task);
2062 // Done with the parallel phase so reset to 0.
2063 g1h->set_par_threads(0);
2065 assert(g1h->check_heap_region_claim_values(HeapRegion::VerifyCountClaimValue),
2066 "sanity check");
2067 } else {
2068 g1_par_verify_task.work(0);
2069 }
2071 guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures");
2072 }
2074 size_t start_used_bytes = g1h->used();
2075 g1h->set_marking_complete();
2077 double count_end = os::elapsedTime();
2078 double this_final_counting_time = (count_end - start);
2079 _total_counting_time += this_final_counting_time;
2081 if (G1PrintRegionLivenessInfo) {
2082 G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking");
2083 _g1h->heap_region_iterate(&cl);
2084 }
2086 // Install newly created mark bitMap as "prev".
2087 swapMarkBitMaps();
2089 g1h->reset_gc_time_stamp();
2091 // Note end of marking in all heap regions.
2092 G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list);
2093 if (G1CollectedHeap::use_parallel_gc_threads()) {
2094 g1h->set_par_threads((int)n_workers);
2095 g1h->workers()->run_task(&g1_par_note_end_task);
2096 g1h->set_par_threads(0);
2098 assert(g1h->check_heap_region_claim_values(HeapRegion::NoteEndClaimValue),
2099 "sanity check");
2100 } else {
2101 g1_par_note_end_task.work(0);
2102 }
2103 g1h->check_gc_time_stamps();
2105 if (!cleanup_list_is_empty()) {
2106 // The cleanup list is not empty, so we'll have to process it
2107 // concurrently. Notify anyone else that might be wanting free
2108 // regions that there will be more free regions coming soon.
2109 g1h->set_free_regions_coming();
2110 }
2112 // Scrub the rem sets now, before the record_concurrent_mark_cleanup_end()
2113 // call below, since scrubbing affects the metric by which we sort the heap regions.
2114 if (G1ScrubRemSets) {
2115 double rs_scrub_start = os::elapsedTime();
2116 G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm);
2117 if (G1CollectedHeap::use_parallel_gc_threads()) {
2118 g1h->set_par_threads((int)n_workers);
2119 g1h->workers()->run_task(&g1_par_scrub_rs_task);
2120 g1h->set_par_threads(0);
2122 assert(g1h->check_heap_region_claim_values(
2123 HeapRegion::ScrubRemSetClaimValue),
2124 "sanity check");
2125 } else {
2126 g1_par_scrub_rs_task.work(0);
2127 }
2129 double rs_scrub_end = os::elapsedTime();
2130 double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start);
2131 _total_rs_scrub_time += this_rs_scrub_time;
2132 }
2134 // this will also free any regions totally full of garbage objects,
2135 // and sort the regions.
2136 g1h->g1_policy()->record_concurrent_mark_cleanup_end((int)n_workers);
2138 // Statistics.
2139 double end = os::elapsedTime();
2140 _cleanup_times.add((end - start) * 1000.0);
2142 if (G1Log::fine()) {
2143 g1h->print_size_transition(gclog_or_tty,
2144 start_used_bytes,
2145 g1h->used(),
2146 g1h->capacity());
2147 }
2149 // Clean up will have freed any regions completely full of garbage.
2150 // Update the soft reference policy with the new heap occupancy.
2151 Universe::update_heap_info_at_gc();
2153 if (VerifyDuringGC) {
2154 HandleMark hm; // handle scope
2155 Universe::heap()->prepare_for_verify();
2156 Universe::verify(VerifyOption_G1UsePrevMarking,
2157 " VerifyDuringGC:(after)");
2158 }
2159 g1h->check_bitmaps("Cleanup End");
2161 g1h->verify_region_sets_optional();
2163 // We need to make this count as a "collection" so that any collection pause that
2164 // races with it goes around and waits for completeCleanup to finish.
2165 g1h->increment_total_collections();
2167 // Clean out dead classes and update Metaspace sizes.
2168 if (ClassUnloadingWithConcurrentMark) {
2169 ClassLoaderDataGraph::purge();
2170 }
2171 MetaspaceGC::compute_new_size();
2173 // We reclaimed old regions so we should calculate the sizes to make
2174 // sure we update the old gen/space data.
2175 g1h->g1mm()->update_sizes();
2177 g1h->trace_heap_after_concurrent_cycle();
2178 }
2180 void ConcurrentMark::completeCleanup() {
2181 if (has_aborted()) return;
2183 G1CollectedHeap* g1h = G1CollectedHeap::heap();
2185 _cleanup_list.verify_optional();
2186 FreeRegionList tmp_free_list("Tmp Free List");
2188 if (G1ConcRegionFreeingVerbose) {
2189 gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
2190 "cleanup list has %u entries",
2191 _cleanup_list.length());
2192 }
2194 // No one else should be accessing the _cleanup_list at this point,
2195 // so it's not necessary to take any locks
2196 while (!_cleanup_list.is_empty()) {
2197 HeapRegion* hr = _cleanup_list.remove_head();
2198 assert(hr != NULL, "Got NULL from a non-empty list");
2199 hr->par_clear();
2200 tmp_free_list.add_ordered(hr);
2202 // Instead of adding one region at a time to the secondary_free_list,
2203 // we accumulate them in the local list and move them a few at a
2204 // time. This also cuts down on the number of notify_all() calls
2205 // we do during this process. We'll also append the local list when
2206 // _cleanup_list is empty (which means we just removed the last
2207 // region from the _cleanup_list).
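// For example, if G1SecondaryFreeListAppendLength were 10 (a
// hypothetical value), a 57-entry cleanup list would be flushed to
// the secondary_free_list in five batches of 10 plus a final batch
// of 7 when _cleanup_list becomes empty.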
2208 if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
2209 _cleanup_list.is_empty()) {
2210 if (G1ConcRegionFreeingVerbose) {
2211 gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
2212 "appending %u entries to the secondary_free_list, "
2213 "cleanup list still has %u entries",
2214 tmp_free_list.length(),
2215 _cleanup_list.length());
2216 }
2218 {
2219 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
2220 g1h->secondary_free_list_add(&tmp_free_list);
2221 SecondaryFreeList_lock->notify_all();
2222 }
2224 if (G1StressConcRegionFreeing) {
2225 for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
2226 os::sleep(Thread::current(), (jlong) 1, false);
2227 }
2228 }
2229 }
2230 }
2231 assert(tmp_free_list.is_empty(), "post-condition");
2232 }
2234 // Supporting Object and Oop closures for reference discovery
2235 // and processing during marking
2237 bool G1CMIsAliveClosure::do_object_b(oop obj) {
2238 HeapWord* addr = (HeapWord*)obj;
2239 return addr != NULL &&
2240 (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
2241 }
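// (Reader's summary of do_object_b above) An object is treated as live
// if it lies outside the G1 reserved space, or if it is inside it and
// is_obj_ill() does not report it dead with respect to the in-progress
// marking information.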
2243 // 'Keep Alive' oop closure used by both serial and parallel reference processing.
2244 // Uses the CMTask associated with a worker thread (for serial reference
2245 // processing the CMTask for worker 0 is used) to preserve (mark) and
2246 // trace referent objects.
2247 //
2248 // Using the CMTask and embedded local queues avoids having the worker
2249 // threads operating on the global mark stack. This reduces the risk
2250 // of overflowing the stack - which we would rather avoid at this late
2251 // stage. Also, using the tasks' local queues removes the potential
2252 // for the workers to interfere with each other, which could occur if
2253 // they operated on the global stack.
2255 class G1CMKeepAliveAndDrainClosure: public OopClosure {
2256 ConcurrentMark* _cm;
2257 CMTask* _task;
2258 int _ref_counter_limit;
2259 int _ref_counter;
2260 bool _is_serial;
2261 public:
2262 G1CMKeepAliveAndDrainClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
2263 _cm(cm), _task(task), _is_serial(is_serial),
2264 _ref_counter_limit(G1RefProcDrainInterval) {
2265 assert(_ref_counter_limit > 0, "sanity");
2266 assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
2267 _ref_counter = _ref_counter_limit;
2268 }
2270 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
2271 virtual void do_oop( oop* p) { do_oop_work(p); }
2273 template <class T> void do_oop_work(T* p) {
2274 if (!_cm->has_overflown()) {
2275 oop obj = oopDesc::load_decode_heap_oop(p);
2276 if (_cm->verbose_high()) {
2277 gclog_or_tty->print_cr("\t[%u] we're looking at location "
2278 "*"PTR_FORMAT" = "PTR_FORMAT,
2279 _task->worker_id(), p2i(p), p2i((void*) obj));
2280 }
2282 _task->deal_with_reference(obj);
2283 _ref_counter--;
2285 if (_ref_counter == 0) {
2286 // We have dealt with _ref_counter_limit references, pushing them
2287 // and objects reachable from them on to the local stack (and
2288 // possibly the global stack). Call CMTask::do_marking_step() to
2289 // process these entries.
2290 //
2291 // We call CMTask::do_marking_step() in a loop, which we'll exit if
2292 // there's nothing more to do (i.e. we're done with the entries that
2293 // were pushed as a result of the CMTask::deal_with_reference() calls
2294 // above) or we overflow.
2295 //
2296 // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
2297 // flag while there may still be some work to do. (See the comment at
2298 // the beginning of CMTask::do_marking_step() for those conditions -
2299 // one of which is reaching the specified time target.) It is only
2300 // when CMTask::do_marking_step() returns without setting the
2301 // has_aborted() flag that the marking step has completed.
2302 do {
2303 double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
2304 _task->do_marking_step(mark_step_duration_ms,
2305 false /* do_termination */,
2306 _is_serial);
2307 } while (_task->has_aborted() && !_cm->has_overflown());
2308 _ref_counter = _ref_counter_limit;
2309 }
2310 } else {
2311 if (_cm->verbose_high()) {
2312 gclog_or_tty->print_cr("\t[%u] CM Overflow", _task->worker_id());
2313 }
2314 }
2315 }
2316 };
2318 // 'Drain' closure used by both serial and parallel reference processing.
2319 // Uses the CMTask associated with a given worker thread (for serial
2320 // reference processing the CMTask for worker 0 is used). Calls the
2321 // do_marking_step routine, with an unbelievably large timeout value,
2322 // to drain the marking data structures of the remaining entries
2323 // added by the 'keep alive' oop closure above.
2325 class G1CMDrainMarkingStackClosure: public VoidClosure {
2326 ConcurrentMark* _cm;
2327 CMTask* _task;
2328 bool _is_serial;
2329 public:
2330 G1CMDrainMarkingStackClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
2331 _cm(cm), _task(task), _is_serial(is_serial) {
2332 assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
2333 }
2335 void do_void() {
2336 do {
2337 if (_cm->verbose_high()) {
2338 gclog_or_tty->print_cr("\t[%u] Drain: Calling do_marking_step - serial: %s",
2339 _task->worker_id(), BOOL_TO_STR(_is_serial));
2340 }
2342 // We call CMTask::do_marking_step() to completely drain the local
2343 // and global marking stacks of entries pushed by the 'keep alive'
2344 // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
2345 //
2346 // CMTask::do_marking_step() is called in a loop, which we'll exit
2347 // if there's nothing more to do (i.e. we've completely drained the
2348 // entries that were pushed as a result of applying the 'keep alive'
2349 // closure to the entries on the discovered ref lists) or we overflow
2350 // the global marking stack.
2351 //
2352 // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
2353 // flag while there may still be some work to do. (See the comment at
2354 // the beginning of CMTask::do_marking_step() for those conditions -
2355 // one of which is reaching the specified time target.) It is only
2356 // when CMTask::do_marking_step() returns without setting the
2357 // has_aborted() flag that the marking step has completed.
2359 _task->do_marking_step(1000000000.0 /* something very large */,
2360 true /* do_termination */,
2361 _is_serial);
2362 } while (_task->has_aborted() && !_cm->has_overflown());
2363 }
2364 };
2366 // Implementation of AbstractRefProcTaskExecutor for parallel
2367 // reference processing at the end of G1 concurrent marking
2369 class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
2370 private:
2371 G1CollectedHeap* _g1h;
2372 ConcurrentMark* _cm;
2373 WorkGang* _workers;
2374 int _active_workers;
2376 public:
2377 G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
2378 ConcurrentMark* cm,
2379 WorkGang* workers,
2380 int n_workers) :
2381 _g1h(g1h), _cm(cm),
2382 _workers(workers), _active_workers(n_workers) { }
2384 // Executes the given task using concurrent marking worker threads.
2385 virtual void execute(ProcessTask& task);
2386 virtual void execute(EnqueueTask& task);
2387 };
2389 class G1CMRefProcTaskProxy: public AbstractGangTask {
2390 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
2391 ProcessTask& _proc_task;
2392 G1CollectedHeap* _g1h;
2393 ConcurrentMark* _cm;
2395 public:
2396 G1CMRefProcTaskProxy(ProcessTask& proc_task,
2397 G1CollectedHeap* g1h,
2398 ConcurrentMark* cm) :
2399 AbstractGangTask("Process reference objects in parallel"),
2400 _proc_task(proc_task), _g1h(g1h), _cm(cm) {
2401 ReferenceProcessor* rp = _g1h->ref_processor_cm();
2402 assert(rp->processing_is_mt(), "shouldn't be here otherwise");
2403 }
2405 virtual void work(uint worker_id) {
2406 ResourceMark rm;
2407 HandleMark hm;
2408 CMTask* task = _cm->task(worker_id);
2409 G1CMIsAliveClosure g1_is_alive(_g1h);
2410 G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
2411 G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */);
2413 _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
2414 }
2415 };
2417 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
2418 assert(_workers != NULL, "Need parallel worker threads.");
2419 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
2421 G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);
2423 // We need to reset the concurrency level before each
2424 // proxy task execution, so that the termination protocol
2425 // and overflow handling in CMTask::do_marking_step() knows
2426 // how many workers to wait for.
2427 _cm->set_concurrency(_active_workers);
2428 _g1h->set_par_threads(_active_workers);
2429 _workers->run_task(&proc_task_proxy);
2430 _g1h->set_par_threads(0);
2431 }
2433 class G1CMRefEnqueueTaskProxy: public AbstractGangTask {
2434 typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
2435 EnqueueTask& _enq_task;
2437 public:
2438 G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) :
2439 AbstractGangTask("Enqueue reference objects in parallel"),
2440 _enq_task(enq_task) { }
2442 virtual void work(uint worker_id) {
2443 _enq_task.work(worker_id);
2444 }
2445 };
2447 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
2448 assert(_workers != NULL, "Need parallel worker threads.");
2449 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
2451 G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);
2453 // Not strictly necessary but...
2454 //
2455 // We need to reset the concurrency level before each
2456 // proxy task execution, so that the termination protocol
2457 // and overflow handling in CMTask::do_marking_step() knows
2458 // how many workers to wait for.
2459 _cm->set_concurrency(_active_workers);
2460 _g1h->set_par_threads(_active_workers);
2461 _workers->run_task(&enq_task_proxy);
2462 _g1h->set_par_threads(0);
2463 }
2465 void ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) {
2466 G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes);
2467 }
2469 // Helper class to get rid of some boilerplate code.
2470 class G1RemarkGCTraceTime : public GCTraceTime {
2471 static bool doit_and_prepend(bool doit) {
2472 if (doit) {
2473 gclog_or_tty->put(' ');
2474 }
2475 return doit;
2476 }
2478 public:
2479 G1RemarkGCTraceTime(const char* title, bool doit)
2480 : GCTraceTime(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm(),
2481 G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id()) {
2482 }
2483 };
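// A typical use, as seen later in this file:
//   G1RemarkGCTraceTime trace("Finalize Marking", G1Log::finer());
// The leading space emitted by doit_and_prepend() is presumably there
// to keep the nested timer output aligned under the enclosing remark
// log line, matching the manual gclog_or_tty->put(' ') done for the
// "GC ref-proc" trace below.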
2485 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
2486 if (has_overflown()) {
2487 // Skip processing the discovered references if we have
2488 // overflown the global marking stack. Reference objects
2489 // only get discovered once so it is OK to not
2490 // de-populate the discovered reference lists. We could have,
2491 // but the only benefit would be that, when marking restarts,
2492 // fewer reference objects would be discovered.
2493 return;
2494 }
2496 ResourceMark rm;
2497 HandleMark hm;
2499 G1CollectedHeap* g1h = G1CollectedHeap::heap();
2501 // Is alive closure.
2502 G1CMIsAliveClosure g1_is_alive(g1h);
2504 // Inner scope to exclude the cleaning of the string and symbol
2505 // tables from the displayed time.
2506 {
2507 if (G1Log::finer()) {
2508 gclog_or_tty->put(' ');
2509 }
2510 GCTraceTime t("GC ref-proc", G1Log::finer(), false, g1h->gc_timer_cm(), concurrent_gc_id());
2512 ReferenceProcessor* rp = g1h->ref_processor_cm();
2514 // See the comment in G1CollectedHeap::ref_processing_init()
2515 // about how reference processing currently works in G1.
2517 // Set the soft reference policy
2518 rp->setup_policy(clear_all_soft_refs);
2519 assert(_markStack.isEmpty(), "mark stack should be empty");
2521 // Instances of the 'Keep Alive' and 'Complete GC' closures used
2522 // in serial reference processing. Note these closures are also
2523 // used for serially processing (by the current thread) the
2524 // JNI references during parallel reference processing.
2525 //
2526 // These closures do not need to synchronize with the worker
2527 // threads involved in parallel reference processing as these
2528 // instances are executed serially by the current thread (i.e.
2529 // reference processing is not multi-threaded and is thus
2530 // performed by the current thread instead of a gang worker).
2531 //
2532 // The gang tasks involved in parallel reference processing create
2533 // their own instances of these closures, which do their own
2534 // synchronization among themselves.
2535 G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
2536 G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);
2538 // We need at least one active thread. If reference processing
2539 // is not multi-threaded we use the current (VMThread) thread,
2540 // otherwise we use the work gang from the G1CollectedHeap and
2541 // we utilize all the worker threads we can.
2542 bool processing_is_mt = rp->processing_is_mt() && g1h->workers() != NULL;
2543 uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U);
2544 active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);
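// (Illustrative, with hypothetical numbers) With an 8-thread work gang
// but _max_worker_id == 4, the clamp above yields
//   MAX2(MIN2(8U, 4U), 1U) == 4
// workers; when processing_is_mt is false it yields the minimum of 1.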
2546 // Parallel processing task executor.
2547 G1CMRefProcTaskExecutor par_task_executor(g1h, this,
2548 g1h->workers(), active_workers);
2549 AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
2551 // Set the concurrency level. The phase was already set prior to
2552 // executing the remark task.
2553 set_concurrency(active_workers);
2555 // Set the degree of MT processing here. If the discovery was done MT,
2556 // the number of threads involved during discovery could differ from
2557 // the number of active workers. This is OK as long as the discovered
2558 // Reference lists are balanced (see balance_all_queues() and balance_queues()).
2559 rp->set_active_mt_degree(active_workers);
2561 // Process the weak references.
2562 const ReferenceProcessorStats& stats =
2563 rp->process_discovered_references(&g1_is_alive,
2564 &g1_keep_alive,
2565 &g1_drain_mark_stack,
2566 executor,
2567 g1h->gc_timer_cm(),
2568 concurrent_gc_id());
2569 g1h->gc_tracer_cm()->report_gc_reference_stats(stats);
2571 // The do_oop work routines of the keep_alive and drain_marking_stack
2572 // oop closures will set the has_overflown flag if we overflow the
2573 // global marking stack.
2575 assert(_markStack.overflow() || _markStack.isEmpty(),
2576 "mark stack should be empty (unless it overflowed)");
2578 if (_markStack.overflow()) {
2579 // This should have been done already when we tried to push an
2580 // entry on to the global mark stack. But let's do it again.
2581 set_has_overflown();
2582 }
2584 assert(rp->num_q() == active_workers, "why not");
2586 rp->enqueue_discovered_references(executor);
2588 rp->verify_no_references_recorded();
2589 assert(!rp->discovery_enabled(), "Post condition");
2590 }
2592 if (has_overflown()) {
2593 // We cannot trust g1_is_alive if the marking stack overflowed
2594 return;
2595 }
2597 assert(_markStack.isEmpty(), "Marking should have completed");
2599 // Unload Klasses, String, Symbols, Code Cache, etc.
2600 {
2601 G1RemarkGCTraceTime trace("Unloading", G1Log::finer());
2603 if (ClassUnloadingWithConcurrentMark) {
2604 bool purged_classes;
2606 {
2607 G1RemarkGCTraceTime trace("System Dictionary Unloading", G1Log::finest());
2608 purged_classes = SystemDictionary::do_unloading(&g1_is_alive);
2609 }
2611 {
2612 G1RemarkGCTraceTime trace("Parallel Unloading", G1Log::finest());
2613 weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
2614 }
2615 }
2617 if (G1StringDedup::is_enabled()) {
2618 G1RemarkGCTraceTime trace("String Deduplication Unlink", G1Log::finest());
2619 G1StringDedup::unlink(&g1_is_alive);
2620 }
2621 }
2622 }
2624 void ConcurrentMark::swapMarkBitMaps() {
2625 CMBitMapRO* temp = _prevMarkBitMap;
2626 _prevMarkBitMap = (CMBitMapRO*)_nextMarkBitMap;
2627 _nextMarkBitMap = (CMBitMap*) temp;
2628 }
2630 class CMObjectClosure;
2632 // Closure for iterating over objects, currently only used for
2633 // processing SATB buffers.
2634 class CMObjectClosure : public ObjectClosure {
2635 private:
2636 CMTask* _task;
2638 public:
2639 void do_object(oop obj) {
2640 _task->deal_with_reference(obj);
2641 }
2643 CMObjectClosure(CMTask* task) : _task(task) { }
2644 };
2646 class G1RemarkThreadsClosure : public ThreadClosure {
2647 CMObjectClosure _cm_obj;
2648 G1CMOopClosure _cm_cl;
2649 MarkingCodeBlobClosure _code_cl;
2650 int _thread_parity;
2651 bool _is_par;
2653 public:
2654 G1RemarkThreadsClosure(G1CollectedHeap* g1h, CMTask* task, bool is_par) :
2655 _cm_obj(task), _cm_cl(g1h, g1h->concurrent_mark(), task), _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
2656 _thread_parity(SharedHeap::heap()->strong_roots_parity()), _is_par(is_par) {}
2658 void do_thread(Thread* thread) {
2659 if (thread->is_Java_thread()) {
2660 if (thread->claim_oops_do(_is_par, _thread_parity)) {
2661 JavaThread* jt = (JavaThread*)thread;
2663 // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking;
2664 // however, the oops reachable from nmethods have very complex lifecycles:
2665 // * Alive if on the stack of an executing method
2666 // * Weakly reachable otherwise
2667 // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver, should be
2668 // kept live by the SATB invariant, but other oops recorded in nmethods may behave differently.
2669 jt->nmethods_do(&_code_cl);
2671 jt->satb_mark_queue().apply_closure_and_empty(&_cm_obj);
2672 }
2673 } else if (thread->is_VM_thread()) {
2674 if (thread->claim_oops_do(_is_par, _thread_parity)) {
2675 JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_obj);
2676 }
2677 }
2678 }
2679 };
2681 class CMRemarkTask: public AbstractGangTask {
2682 private:
2683 ConcurrentMark* _cm;
2684 bool _is_serial;
2685 public:
2686 void work(uint worker_id) {
2687 // Since all available tasks are actually started, we should
2688 // only proceed if we're supposed to be active.
2689 if (worker_id < _cm->active_tasks()) {
2690 CMTask* task = _cm->task(worker_id);
2691 task->record_start_time();
2692 {
2693 ResourceMark rm;
2694 HandleMark hm;
2696 G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task, !_is_serial);
2697 Threads::threads_do(&threads_f);
2698 }
2700 do {
2701 task->do_marking_step(1000000000.0 /* something very large */,
2702 true /* do_termination */,
2703 _is_serial);
2704 } while (task->has_aborted() && !_cm->has_overflown());
2705 // If we overflow, then we do not want to restart. We instead
2706 // want to abort remark and do concurrent marking again.
2707 task->record_end_time();
2708 }
2709 }
2711 CMRemarkTask(ConcurrentMark* cm, int active_workers, bool is_serial) :
2712 AbstractGangTask("Par Remark"), _cm(cm), _is_serial(is_serial) {
2713 _cm->terminator()->reset_for_reuse(active_workers);
2714 }
2715 };
2717 void ConcurrentMark::checkpointRootsFinalWork() {
2718 ResourceMark rm;
2719 HandleMark hm;
2720 G1CollectedHeap* g1h = G1CollectedHeap::heap();
2722 G1RemarkGCTraceTime trace("Finalize Marking", G1Log::finer());
2724 g1h->ensure_parsability(false);
2726 if (G1CollectedHeap::use_parallel_gc_threads()) {
2727 G1CollectedHeap::StrongRootsScope srs(g1h);
2728 // this is remark, so we'll use up all active threads
2729 uint active_workers = g1h->workers()->active_workers();
2730 if (active_workers == 0) {
2731 assert(active_workers > 0, "Should have been set earlier");
2732 active_workers = (uint) ParallelGCThreads;
2733 g1h->workers()->set_active_workers(active_workers);
2734 }
2735 set_concurrency_and_phase(active_workers, false /* concurrent */);
2736 // Leave _parallel_marking_threads at its
2737 // value originally calculated in the ConcurrentMark
2738 // constructor and pass values of the active workers
2739 // through the gang in the task.
2741 CMRemarkTask remarkTask(this, active_workers, false /* is_serial */);
2742 // We will start all available threads, even if we decide that the
2743 // active_workers will be fewer. The extra ones will just bail out
2744 // immediately.
2745 g1h->set_par_threads(active_workers);
2746 g1h->workers()->run_task(&remarkTask);
2747 g1h->set_par_threads(0);
2748 } else {
2749 G1CollectedHeap::StrongRootsScope srs(g1h);
2750 uint active_workers = 1;
2751 set_concurrency_and_phase(active_workers, false /* concurrent */);
2753 // Note - if there's no work gang then the VMThread will be
2754 // the thread to execute the remark - serially. We have
2755 // to pass true for the is_serial parameter so that
2756 // CMTask::do_marking_step() doesn't enter the sync
2757 // barriers in the event of an overflow. Doing so will
2758 // cause an assert that the current thread is not a
2759 // concurrent GC thread.
2760 CMRemarkTask remarkTask(this, active_workers, true /* is_serial*/);
2761 remarkTask.work(0);
2762 }
2763 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2764 guarantee(has_overflown() ||
2765 satb_mq_set.completed_buffers_num() == 0,
2766 err_msg("Invariant: has_overflown = %s, num buffers = %d",
2767 BOOL_TO_STR(has_overflown()),
2768 satb_mq_set.completed_buffers_num()));
2770 print_stats();
2771 }
2773 #ifndef PRODUCT
2775 class PrintReachableOopClosure: public OopClosure {
2776 private:
2777 G1CollectedHeap* _g1h;
2778 outputStream* _out;
2779 VerifyOption _vo;
2780 bool _all;
2782 public:
2783 PrintReachableOopClosure(outputStream* out,
2784 VerifyOption vo,
2785 bool all) :
2786 _g1h(G1CollectedHeap::heap()),
2787 _out(out), _vo(vo), _all(all) { }
2789 void do_oop(narrowOop* p) { do_oop_work(p); }
2790 void do_oop( oop* p) { do_oop_work(p); }
2792 template <class T> void do_oop_work(T* p) {
2793 oop obj = oopDesc::load_decode_heap_oop(p);
2794 const char* str = NULL;
2795 const char* str2 = "";
2797 if (obj == NULL) {
2798 str = "";
2799 } else if (!_g1h->is_in_g1_reserved(obj)) {
2800 str = " O";
2801 } else {
2802 HeapRegion* hr = _g1h->heap_region_containing(obj);
2803 guarantee(hr != NULL, "invariant");
2804 bool over_tams = _g1h->allocated_since_marking(obj, hr, _vo);
2805 bool marked = _g1h->is_marked(obj, _vo);
2807 if (over_tams) {
2808 str = " >";
2809 if (marked) {
2810 str2 = " AND MARKED";
2811 }
2812 } else if (marked) {
2813 str = " M";
2814 } else {
2815 str = " NOT";
2816 }
2817 }
2819 _out->print_cr(" "PTR_FORMAT": "PTR_FORMAT"%s%s",
2820 p2i(p), p2i((void*) obj), str, str2);
2821 }
2822 };
2824 class PrintReachableObjectClosure : public ObjectClosure {
2825 private:
2826 G1CollectedHeap* _g1h;
2827 outputStream* _out;
2828 VerifyOption _vo;
2829 bool _all;
2830 HeapRegion* _hr;
2832 public:
2833 PrintReachableObjectClosure(outputStream* out,
2834 VerifyOption vo,
2835 bool all,
2836 HeapRegion* hr) :
2837 _g1h(G1CollectedHeap::heap()),
2838 _out(out), _vo(vo), _all(all), _hr(hr) { }
2840 void do_object(oop o) {
2841 bool over_tams = _g1h->allocated_since_marking(o, _hr, _vo);
2842 bool marked = _g1h->is_marked(o, _vo);
2843 bool print_it = _all || over_tams || marked;
2845 if (print_it) {
2846 _out->print_cr(" "PTR_FORMAT"%s",
2847 p2i((void *)o), (over_tams) ? " >" : (marked) ? " M" : "");
2848 PrintReachableOopClosure oopCl(_out, _vo, _all);
2849 o->oop_iterate_no_header(&oopCl);
2850 }
2851 }
2852 };
2854 class PrintReachableRegionClosure : public HeapRegionClosure {
2855 private:
2856 G1CollectedHeap* _g1h;
2857 outputStream* _out;
2858 VerifyOption _vo;
2859 bool _all;
2861 public:
2862 bool doHeapRegion(HeapRegion* hr) {
2863 HeapWord* b = hr->bottom();
2864 HeapWord* e = hr->end();
2865 HeapWord* t = hr->top();
2866 HeapWord* p = _g1h->top_at_mark_start(hr, _vo);
2867 _out->print_cr("** ["PTR_FORMAT", "PTR_FORMAT"] top: "PTR_FORMAT" "
2868 "TAMS: " PTR_FORMAT, p2i(b), p2i(e), p2i(t), p2i(p));
2869 _out->cr();
2871 HeapWord* from = b;
2872 HeapWord* to = t;
2874 if (to > from) {
2875 _out->print_cr("Objects in [" PTR_FORMAT ", " PTR_FORMAT "]", p2i(from), p2i(to));
2876 _out->cr();
2877 PrintReachableObjectClosure ocl(_out, _vo, _all, hr);
2878 hr->object_iterate_mem_careful(MemRegion(from, to), &ocl);
2879 _out->cr();
2880 }
2882 return false;
2883 }
2885 PrintReachableRegionClosure(outputStream* out,
2886 VerifyOption vo,
2887 bool all) :
2888 _g1h(G1CollectedHeap::heap()), _out(out), _vo(vo), _all(all) { }
2889 };
2891 void ConcurrentMark::print_reachable(const char* str,
2892 VerifyOption vo,
2893 bool all) {
2894 gclog_or_tty->cr();
2895 gclog_or_tty->print_cr("== Doing heap dump... ");
2897 if (G1PrintReachableBaseFile == NULL) {
2898 gclog_or_tty->print_cr(" #### error: no base file defined");
2899 return;
2900 }
2902 if (strlen(G1PrintReachableBaseFile) + 1 + strlen(str) >
2903 (JVM_MAXPATHLEN - 1)) {
2904 gclog_or_tty->print_cr(" #### error: file name too long");
2905 return;
2906 }
2908 char file_name[JVM_MAXPATHLEN];
2909 sprintf(file_name, "%s.%s", G1PrintReachableBaseFile, str);
2910 gclog_or_tty->print_cr(" dumping to file %s", file_name);
2912 fileStream fout(file_name);
2913 if (!fout.is_open()) {
2914 gclog_or_tty->print_cr(" #### error: could not open file");
2915 return;
2916 }
2918 outputStream* out = &fout;
2919 out->print_cr("-- USING %s", _g1h->top_at_mark_start_str(vo));
2920 out->cr();
2922 out->print_cr("--- ITERATING OVER REGIONS");
2923 out->cr();
2924 PrintReachableRegionClosure rcl(out, vo, all);
2925 _g1h->heap_region_iterate(&rcl);
2926 out->cr();
2928 gclog_or_tty->print_cr(" done");
2929 gclog_or_tty->flush();
2930 }
2932 #endif // PRODUCT
2934 void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
2935 // Note we are overriding the read-only view of the prev map here, via
2936 // the cast.
2937 ((CMBitMap*)_prevMarkBitMap)->clearRange(mr);
2938 }
2940 void ConcurrentMark::clearRangeNextBitmap(MemRegion mr) {
2941 _nextMarkBitMap->clearRange(mr);
2942 }
2944 void ConcurrentMark::clearRangeBothBitmaps(MemRegion mr) {
2945 clearRangePrevBitmap(mr);
2946 clearRangeNextBitmap(mr);
2947 }
2949 HeapRegion*
2950 ConcurrentMark::claim_region(uint worker_id) {
2951 // "checkpoint" the finger
2952 HeapWord* finger = _finger;
2954 // _heap_end will not change underneath our feet; it only changes at
2955 // yield points.
2956 while (finger < _heap_end) {
2957 assert(_g1h->is_in_g1_reserved(finger), "invariant");
2959 // Note on how this code handles humongous regions. In the
2960 // normal case the finger will reach the start of a "starts
2961 // humongous" (SH) region. Its end will either be the end of the
2962 // last "continues humongous" (CH) region in the sequence, or the
2963 // standard end of the SH region (if the SH is the only region in
2964 // the sequence). That way claim_region() will skip over the CH
2965 // regions. However, there is a subtle race between a CM thread
2966 // executing this method and a mutator thread doing a humongous
2967 // object allocation. The two are not mutually exclusive as the CM
2968 // thread does not need to hold the Heap_lock when it gets
2969 // here. So there is a chance that claim_region() will come across
2970 // a free region that's in the progress of becoming a SH or a CH
2971 // region. In the former case, it will either
2972 // a) Miss the update to the region's end, in which case it will
2973 // visit every subsequent CH region, will find their bitmaps
2974 // empty, and do nothing, or
2975 // b) Will observe the update of the region's end (in which case
2976 // it will skip the subsequent CH regions).
2977 // If it comes across a region that suddenly becomes CH, the
2978 // scenario will be similar to b). So, the race between
2979 // claim_region() and a humongous object allocation might force us
2980 // to do a bit of unnecessary work (due to some unnecessary bitmap
2981 // iterations) but it should not introduce any correctness issues.
2982 HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger);
2983 HeapWord* bottom = curr_region->bottom();
2984 HeapWord* end = curr_region->end();
2985 HeapWord* limit = curr_region->next_top_at_mark_start();
2987 if (verbose_low()) {
2988 gclog_or_tty->print_cr("[%u] curr_region = "PTR_FORMAT" "
2989 "["PTR_FORMAT", "PTR_FORMAT"), "
2990 "limit = "PTR_FORMAT,
2991 worker_id, p2i(curr_region), p2i(bottom), p2i(end), p2i(limit));
2992 }
2994 // Is the gap between reading the finger and doing the CAS too long?
2995 HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
2996 if (res == finger) {
2997 // we succeeded
2999 // notice that _finger == end cannot be guaranteed here since
3000 // someone else might have moved the finger even further
3001 assert(_finger >= end, "the finger should have moved forward");
3003 if (verbose_low()) {
3004 gclog_or_tty->print_cr("[%u] we were successful with region = "
3005 PTR_FORMAT, worker_id, p2i(curr_region));
3006 }
3008 if (limit > bottom) {
3009 if (verbose_low()) {
3010 gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is not empty, "
3011 "returning it ", worker_id, p2i(curr_region));
3012 }
3013 return curr_region;
3014 } else {
3015 assert(limit == bottom,
3016 "the region limit should be at bottom");
3017 if (verbose_low()) {
3018 gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is empty, "
3019 "returning NULL", worker_id, p2i(curr_region));
3020 }
3021 // we return NULL and the caller should try calling
3022 // claim_region() again.
3023 return NULL;
3024 }
3025 } else {
3026 assert(_finger > finger, "the finger should have moved forward");
3027 if (verbose_low()) {
3028 gclog_or_tty->print_cr("[%u] somebody else moved the finger, "
3029 "global finger = "PTR_FORMAT", "
3030 "our finger = "PTR_FORMAT,
3031 worker_id, p2i(_finger), p2i(finger));
3032 }
3034 // read it again
3035 finger = _finger;
3036 }
3037 }
3039 return NULL;
3040 }
3042 #ifndef PRODUCT
3043 enum VerifyNoCSetOopsPhase {
3044 VerifyNoCSetOopsStack,
3045 VerifyNoCSetOopsQueues,
3046 VerifyNoCSetOopsSATBCompleted,
3047 VerifyNoCSetOopsSATBThread
3048 };
3050 class VerifyNoCSetOopsClosure : public OopClosure, public ObjectClosure {
3051 private:
3052 G1CollectedHeap* _g1h;
3053 VerifyNoCSetOopsPhase _phase;
3054 int _info;
3056 const char* phase_str() {
3057 switch (_phase) {
3058 case VerifyNoCSetOopsStack: return "Stack";
3059 case VerifyNoCSetOopsQueues: return "Queue";
3060 case VerifyNoCSetOopsSATBCompleted: return "Completed SATB Buffers";
3061 case VerifyNoCSetOopsSATBThread: return "Thread SATB Buffers";
3062 default: ShouldNotReachHere();
3063 }
3064 return NULL;
3065 }
3067 void do_object_work(oop obj) {
3068 guarantee(!_g1h->obj_in_cs(obj),
3069 err_msg("obj: "PTR_FORMAT" in CSet, phase: %s, info: %d",
3070 p2i((void*) obj), phase_str(), _info));
3071 }
3073 public:
3074 VerifyNoCSetOopsClosure() : _g1h(G1CollectedHeap::heap()) { }
3076 void set_phase(VerifyNoCSetOopsPhase phase, int info = -1) {
3077 _phase = phase;
3078 _info = info;
3079 }
3081 virtual void do_oop(oop* p) {
3082 oop obj = oopDesc::load_decode_heap_oop(p);
3083 do_object_work(obj);
3084 }
3086 virtual void do_oop(narrowOop* p) {
3087 // We should not come across narrow oops while scanning marking
3088 // stacks and SATB buffers.
3089 ShouldNotReachHere();
3090 }
3092 virtual void do_object(oop obj) {
3093 do_object_work(obj);
3094 }
3095 };
3097 void ConcurrentMark::verify_no_cset_oops(bool verify_stacks,
3098 bool verify_enqueued_buffers,
3099 bool verify_thread_buffers,
3100 bool verify_fingers) {
3101 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
3102 if (!G1CollectedHeap::heap()->mark_in_progress()) {
3103 return;
3104 }
3106 VerifyNoCSetOopsClosure cl;
3108 if (verify_stacks) {
3109 // Verify entries on the global mark stack
3110 cl.set_phase(VerifyNoCSetOopsStack);
3111 _markStack.oops_do(&cl);
3113 // Verify entries on the task queues
3114 for (uint i = 0; i < _max_worker_id; i += 1) {
3115 cl.set_phase(VerifyNoCSetOopsQueues, i);
3116 CMTaskQueue* queue = _task_queues->queue(i);
3117 queue->oops_do(&cl);
3118 }
3119 }
3121 SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
3123 // Verify entries on the enqueued SATB buffers
3124 if (verify_enqueued_buffers) {
3125 cl.set_phase(VerifyNoCSetOopsSATBCompleted);
3126 satb_qs.iterate_completed_buffers_read_only(&cl);
3127 }
3129 // Verify entries on the per-thread SATB buffers
3130 if (verify_thread_buffers) {
3131 cl.set_phase(VerifyNoCSetOopsSATBThread);
3132 satb_qs.iterate_thread_buffers_read_only(&cl);
3133 }
3135 if (verify_fingers) {
3136 // Verify the global finger
3137 HeapWord* global_finger = finger();
3138 if (global_finger != NULL && global_finger < _heap_end) {
3139 // The global finger always points to a heap region boundary. We
3140 // use heap_region_containing_raw() to get the containing region
3141 // given that the global finger could be pointing to a free region
3142 // which subsequently becomes a continues humongous region. If that
3143 // happens, heap_region_containing() will return the bottom of the
3144 // corresponding starts humongous region and the check below will
3145 // not hold any more.
3146 HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger);
3147 guarantee(global_finger == global_hr->bottom(),
3148 err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT,
3149 p2i(global_finger), HR_FORMAT_PARAMS(global_hr)));
3150 }
3152 // Verify the task fingers
3153 assert(parallel_marking_threads() <= _max_worker_id, "sanity");
3154 for (int i = 0; i < (int) parallel_marking_threads(); i += 1) {
3155 CMTask* task = _tasks[i];
3156 HeapWord* task_finger = task->finger();
3157 if (task_finger != NULL && task_finger < _heap_end) {
3158 // See above note on the global finger verification.
3159 HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger);
3160 guarantee(task_finger == task_hr->bottom() ||
3161 !task_hr->in_collection_set(),
3162 err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT,
3163 p2i(task_finger), HR_FORMAT_PARAMS(task_hr)));
3164 }
3165 }
3166 }
3167 }
3168 #endif // PRODUCT
3170 // Aggregate the counting data that was constructed concurrently
3171 // with marking.
3172 class AggregateCountDataHRClosure: public HeapRegionClosure {
3173 G1CollectedHeap* _g1h;
3174 ConcurrentMark* _cm;
3175 CardTableModRefBS* _ct_bs;
3176 BitMap* _cm_card_bm;
3177 uint _max_worker_id;
3179 public:
3180 AggregateCountDataHRClosure(G1CollectedHeap* g1h,
3181 BitMap* cm_card_bm,
3182 uint max_worker_id) :
3183 _g1h(g1h), _cm(g1h->concurrent_mark()),
3184 _ct_bs((CardTableModRefBS*) (g1h->barrier_set())),
3185 _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { }
3187 bool doHeapRegion(HeapRegion* hr) {
3188 if (hr->continuesHumongous()) {
3189 // We will ignore these here and process them when their
3190 // associated "starts humongous" region is processed.
3191 // Note that we cannot rely on their associated
3192 // "starts humongous" region to have their bit set to 1
3193 // since, due to the region chunking in the parallel region
3194 // iteration, a "continues humongous" region might be visited
3195 // before its associated "starts humongous".
3196 return false;
3197 }
3199 HeapWord* start = hr->bottom();
3200 HeapWord* limit = hr->next_top_at_mark_start();
3201 HeapWord* end = hr->end();
3203 assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(),
3204 err_msg("Preconditions not met - "
3205 "start: "PTR_FORMAT", limit: "PTR_FORMAT", "
3206 "top: "PTR_FORMAT", end: "PTR_FORMAT,
3207 p2i(start), p2i(limit), p2i(hr->top()), p2i(hr->end())));
3209 assert(hr->next_marked_bytes() == 0, "Precondition");
3211 if (start == limit) {
3212 // NTAMS of this region has not been set so nothing to do.
3213 return false;
3214 }
3216 // 'start' should be in the heap.
3217 assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity");
3218 // 'end' *may* be just beyond the end of the heap (if hr is the last region)
3219 assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity");
3221 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
3222 BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit);
3223 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(end);
3225 // If ntams is not card aligned then we bump card bitmap index
3226 // for limit so that we get all the cards spanned by
3227 // the object ending at ntams.
3228 // Note: if this is the last region in the heap then ntams
3229 // could actually be just beyond the end of the heap;
3230 // limit_idx will then correspond to a (non-existent) card
3231 // that is also outside the heap.
3232 if (_g1h->is_in_g1_reserved(limit) && !_ct_bs->is_card_aligned(limit)) {
3233 limit_idx += 1;
3234 }
3236 assert(limit_idx <= end_idx, "or else use atomics");
3238 // Aggregate the "stripe" in the count data associated with hr.
3239 uint hrs_index = hr->hrs_index();
3240 size_t marked_bytes = 0;
3242 for (uint i = 0; i < _max_worker_id; i += 1) {
3243 size_t* marked_bytes_array = _cm->count_marked_bytes_array_for(i);
3244 BitMap* task_card_bm = _cm->count_card_bitmap_for(i);
3246 // Fetch the marked_bytes in this region for task i and
3247 // add it to the running total for this region.
3248 marked_bytes += marked_bytes_array[hrs_index];
3250 // Now union the bitmaps[0,max_worker_id)[start_idx..limit_idx)
3251 // into the global card bitmap.
3252 BitMap::idx_t scan_idx = task_card_bm->get_next_one_offset(start_idx, limit_idx);
3254 while (scan_idx < limit_idx) {
3255 assert(task_card_bm->at(scan_idx) == true, "should be");
3256 _cm_card_bm->set_bit(scan_idx);
3257 assert(_cm_card_bm->at(scan_idx) == true, "should be");
3259 // BitMap::get_next_one_offset() can handle the case when
3260 // its left_offset parameter is greater than its right_offset
3261 // parameter. It does, however, have an early exit if
3262 // left_offset == right_offset. So let's limit the value
3263 // passed in for left offset here.
3264 BitMap::idx_t next_idx = MIN2(scan_idx + 1, limit_idx);
3265 scan_idx = task_card_bm->get_next_one_offset(next_idx, limit_idx);
3266 }
3267 }
3269 // Update the marked bytes for this region.
3270 hr->add_to_marked_bytes(marked_bytes);
3272 // Next heap region
3273 return false;
3274 }
3275 };
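// A minimal standalone sketch (not part of this file) of the union step in
// AggregateCountDataHRClosure::doHeapRegion() above, using hypothetical
// local names (task_bm, global_bm), showing how one worker bitmap's bits in
// [start_idx, limit_idx) are OR-ed into the global card bitmap:
//
//   BitMap::idx_t idx = task_bm->get_next_one_offset(start_idx, limit_idx);
//   while (idx < limit_idx) {
//     global_bm->set_bit(idx);
//     // Clamp to limit_idx so get_next_one_offset() never sees
//     // left_offset > right_offset.
//     idx = task_bm->get_next_one_offset(MIN2(idx + 1, limit_idx), limit_idx);
//   }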
3277 class G1AggregateCountDataTask: public AbstractGangTask {
3278 protected:
3279 G1CollectedHeap* _g1h;
3280 ConcurrentMark* _cm;
3281 BitMap* _cm_card_bm;
3282 uint _max_worker_id;
3283 int _active_workers;
3285 public:
3286 G1AggregateCountDataTask(G1CollectedHeap* g1h,
3287 ConcurrentMark* cm,
3288 BitMap* cm_card_bm,
3289 uint max_worker_id,
3290 int n_workers) :
3291 AbstractGangTask("Count Aggregation"),
3292 _g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm),
3293 _max_worker_id(max_worker_id),
3294 _active_workers(n_workers) { }
3296 void work(uint worker_id) {
3297 AggregateCountDataHRClosure cl(_g1h, _cm_card_bm, _max_worker_id);
3299 if (G1CollectedHeap::use_parallel_gc_threads()) {
3300 _g1h->heap_region_par_iterate_chunked(&cl, worker_id,
3301 _active_workers,
3302 HeapRegion::AggregateCountClaimValue);
3303 } else {
3304 _g1h->heap_region_iterate(&cl);
3305 }
3306 }
3307 };
3310 void ConcurrentMark::aggregate_count_data() {
3311 int n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
3312 _g1h->workers()->active_workers() :
3313 1);
3315 G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm,
3316 _max_worker_id, n_workers);
3318 if (G1CollectedHeap::use_parallel_gc_threads()) {
3319 assert(_g1h->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
3320 "sanity check");
3321 _g1h->set_par_threads(n_workers);
3322 _g1h->workers()->run_task(&g1_par_agg_task);
3323 _g1h->set_par_threads(0);
3325 assert(_g1h->check_heap_region_claim_values(HeapRegion::AggregateCountClaimValue),
3326 "sanity check");
3327 _g1h->reset_heap_region_claim_values();
3328 } else {
3329 g1_par_agg_task.work(0);
3330 }
3331 }
3333 // Clear the per-worker arrays used to store the per-region counting data
3334 void ConcurrentMark::clear_all_count_data() {
3335 // Clear the global card bitmap - it will be filled during
3336 // liveness count aggregation (during remark) and the
3337 // final counting task.
3338 _card_bm.clear();
3340 // Clear the global region bitmap - it will be filled as part
3341 // of the final counting task.
3342 _region_bm.clear();
3344 uint max_regions = _g1h->max_regions();
3345 assert(_max_worker_id > 0, "uninitialized");
3347 for (uint i = 0; i < _max_worker_id; i += 1) {
3348 BitMap* task_card_bm = count_card_bitmap_for(i);
3349 size_t* marked_bytes_array = count_marked_bytes_array_for(i);
3351 assert(task_card_bm->size() == _card_bm.size(), "size mismatch");
3352 assert(marked_bytes_array != NULL, "uninitialized");
3354 memset(marked_bytes_array, 0, (size_t) max_regions * sizeof(size_t));
3355 task_card_bm->clear();
3356 }
3357 }
3359 void ConcurrentMark::print_stats() {
3360 if (verbose_stats()) {
3361 gclog_or_tty->print_cr("---------------------------------------------------------------------");
3362 for (size_t i = 0; i < _active_tasks; ++i) {
3363 _tasks[i]->print_stats();
3364 gclog_or_tty->print_cr("---------------------------------------------------------------------");
3365 }
3366 }
3367 }
3369 // abandon current marking iteration due to a Full GC
3370 void ConcurrentMark::abort() {
3371 // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
3372 // concurrent bitmap clearing.
3373 _nextMarkBitMap->clearAll();
3375 // Note we cannot clear the previous marking bitmap here
3376 // since VerifyDuringGC verifies the objects marked during
3377 // a full GC against the previous bitmap.
3379 // Clear the liveness counting data
3380 clear_all_count_data();
3381 // Empty mark stack
3382 reset_marking_state();
3383 for (uint i = 0; i < _max_worker_id; ++i) {
3384 _tasks[i]->clear_region_fields();
3385 }
3386 _first_overflow_barrier_sync.abort();
3387 _second_overflow_barrier_sync.abort();
3388 const GCId& gc_id = _g1h->gc_tracer_cm()->gc_id();
3389 if (!gc_id.is_undefined()) {
3390 // We can do multiple full GCs before ConcurrentMarkThread::run() gets a chance
3391 // to detect that it was aborted. Only keep track of the first GC id that we aborted.
3392 _aborted_gc_id = gc_id;
3393 }
3394 _has_aborted = true;
3396 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3397 satb_mq_set.abandon_partial_marking();
3398 // This can be called either during or outside marking, we'll read
3399 // the expected_active value from the SATB queue set.
3400 satb_mq_set.set_active_all_threads(
3401 false, /* new active value */
3402 satb_mq_set.is_active() /* expected_active */);
3404 _g1h->trace_heap_after_concurrent_cycle();
3405 _g1h->register_concurrent_cycle_end();
3406 }
3408 const GCId& ConcurrentMark::concurrent_gc_id() {
3409 if (has_aborted()) {
3410 return _aborted_gc_id;
3411 }
3412 return _g1h->gc_tracer_cm()->gc_id();
3413 }
3415 static void print_ms_time_info(const char* prefix, const char* name,
3416 NumberSeq& ns) {
3417 gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
3418 prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
3419 if (ns.num() > 0) {
3420 gclog_or_tty->print_cr("%s [std. dev = %8.2f ms, max = %8.2f ms]",
3421 prefix, ns.sd(), ns.maximum());
3422 }
3423 }
3425 void ConcurrentMark::print_summary_info() {
3426 gclog_or_tty->print_cr(" Concurrent marking:");
3427 print_ms_time_info(" ", "init marks", _init_times);
3428 print_ms_time_info(" ", "remarks", _remark_times);
3429 {
3430 print_ms_time_info(" ", "final marks", _remark_mark_times);
3431 print_ms_time_info(" ", "weak refs", _remark_weak_ref_times);
3433 }
3434 print_ms_time_info(" ", "cleanups", _cleanup_times);
3435 gclog_or_tty->print_cr(" Final counting total time = %8.2f s (avg = %8.2f ms).",
3436 _total_counting_time,
3437 (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 /
3438 (double)_cleanup_times.num()
3439 : 0.0));
3440 if (G1ScrubRemSets) {
3441 gclog_or_tty->print_cr(" RS scrub total time = %8.2f s (avg = %8.2f ms).",
3442 _total_rs_scrub_time,
3443 (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 /
3444 (double)_cleanup_times.num()
3445 : 0.0));
3446 }
3447 gclog_or_tty->print_cr(" Total stop_world time = %8.2f s.",
3448 (_init_times.sum() + _remark_times.sum() +
3449 _cleanup_times.sum())/1000.0);
3450 gclog_or_tty->print_cr(" Total concurrent time = %8.2f s "
3451 "(%8.2f s marking).",
3452 cmThread()->vtime_accum(),
3453 cmThread()->vtime_mark_accum());
3454 }
3456 void ConcurrentMark::print_worker_threads_on(outputStream* st) const {
3457 if (use_parallel_marking_threads()) {
3458 _parallel_workers->print_worker_threads_on(st);
3459 }
3460 }
3462 void ConcurrentMark::print_on_error(outputStream* st) const {
3463 st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT,
3464 p2i(_prevMarkBitMap), p2i(_nextMarkBitMap));
3465 _prevMarkBitMap->print_on_error(st, " Prev Bits: ");
3466 _nextMarkBitMap->print_on_error(st, " Next Bits: ");
3467 }
3469 // We take a break if someone is trying to stop the world.
3470 bool ConcurrentMark::do_yield_check(uint worker_id) {
3471 if (SuspendibleThreadSet::should_yield()) {
3472 if (worker_id == 0) {
3473 _g1h->g1_policy()->record_concurrent_pause();
3474 }
3475 SuspendibleThreadSet::yield();
3476 return true;
3477 } else {
3478 return false;
3479 }
3480 }
3482 bool ConcurrentMark::containing_card_is_marked(void* p) {
3483 size_t offset = pointer_delta(p, _g1h->reserved_region().start(), 1);
3484 return _card_bm.at(offset >> CardTableModRefBS::card_shift);
3485 }
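// Illustration (assuming the usual 512-byte cards, i.e. card_shift == 9):
// a pointer 0x1a00 bytes past the start of the reserved region gives
// offset == 6656, and 6656 >> 9 == 13, so bit 13 of _card_bm is consulted.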
3487 bool ConcurrentMark::containing_cards_are_marked(void* start,
3488 void* last) {
3489 return containing_card_is_marked(start) &&
3490 containing_card_is_marked(last);
3491 }
3493 #ifndef PRODUCT
3494 // for debugging purposes
3495 void ConcurrentMark::print_finger() {
3496 gclog_or_tty->print_cr("heap ["PTR_FORMAT", "PTR_FORMAT"), global finger = "PTR_FORMAT,
3497 p2i(_heap_start), p2i(_heap_end), p2i(_finger));
3498 for (uint i = 0; i < _max_worker_id; ++i) {
3499 gclog_or_tty->print(" %u: " PTR_FORMAT, i, p2i(_tasks[i]->finger()));
3500 }
3501 gclog_or_tty->cr();
3502 }
3503 #endif
3505 void CMTask::scan_object(oop obj) {
3506 assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant");
3508 if (_cm->verbose_high()) {
3509 gclog_or_tty->print_cr("[%u] we're scanning object "PTR_FORMAT,
3510 _worker_id, p2i((void*) obj));
3511 }
3513 size_t obj_size = obj->size();
3514 _words_scanned += obj_size;
3516 obj->oop_iterate(_cm_oop_closure);
3517 statsOnly( ++_objs_scanned );
3518 check_limits();
3519 }
3521 // Closure for iteration over bitmaps
3522 class CMBitMapClosure : public BitMapClosure {
3523 private:
3524 // the bitmap that is being iterated over
3525 CMBitMap* _nextMarkBitMap;
3526 ConcurrentMark* _cm;
3527 CMTask* _task;
3529 public:
3530 CMBitMapClosure(CMTask *task, ConcurrentMark* cm, CMBitMap* nextMarkBitMap) :
3531 _task(task), _cm(cm), _nextMarkBitMap(nextMarkBitMap) { }
3533 bool do_bit(size_t offset) {
3534 HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset);
3535 assert(_nextMarkBitMap->isMarked(addr), "invariant");
3536 assert(addr < _cm->finger(), "invariant");
3538 statsOnly( _task->increase_objs_found_on_bitmap() );
3539 assert(addr >= _task->finger(), "invariant");
3541 // We move that task's local finger along.
3542 _task->move_finger_to(addr);
3544 _task->scan_object(oop(addr));
3545 // we only partially drain the local queue and global stack
3546 _task->drain_local_queue(true);
3547 _task->drain_global_stack(true);
3549 // if the has_aborted flag has been raised, we need to bail out of
3550 // the iteration
3551 return !_task->has_aborted();
3552 }
3553 };
3555 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
3556 ConcurrentMark* cm,
3557 CMTask* task)
3558 : _g1h(g1h), _cm(cm), _task(task) {
3559 assert(_ref_processor == NULL, "should be initialized to NULL");
3561 if (G1UseConcMarkReferenceProcessing) {
3562 _ref_processor = g1h->ref_processor_cm();
3563 assert(_ref_processor != NULL, "should not be NULL");
3564 }
3565 }
3567 void CMTask::setup_for_region(HeapRegion* hr) {
3568 // Separated the asserts so that we know which one fires.
3569 assert(hr != NULL,
3570 "claim_region() should have filtered out continues humongous regions");
3571 assert(!hr->continuesHumongous(),
3572 "claim_region() should have filtered out continues humongous regions");
3574 if (_cm->verbose_low()) {
3575 gclog_or_tty->print_cr("[%u] setting up for region "PTR_FORMAT,
3576 _worker_id, p2i(hr));
3577 }
3579 _curr_region = hr;
3580 _finger = hr->bottom();
3581 update_region_limit();
3582 }
3584 void CMTask::update_region_limit() {
3585 HeapRegion* hr = _curr_region;
3586 HeapWord* bottom = hr->bottom();
3587 HeapWord* limit = hr->next_top_at_mark_start();
3589 if (limit == bottom) {
3590 if (_cm->verbose_low()) {
3591 gclog_or_tty->print_cr("[%u] found an empty region "
3592 "["PTR_FORMAT", "PTR_FORMAT")",
3593 _worker_id, p2i(bottom), p2i(limit));
3594 }
3595 // The region was collected underneath our feet.
3596 // We set the finger to bottom to ensure that the bitmap
3597 // iteration that will follow this will not do anything.
3598 // (this is not a condition that holds when we set the region up,
3599 // as the region is not supposed to be empty in the first place)
3600 _finger = bottom;
3601 } else if (limit >= _region_limit) {
3602 assert(limit >= _finger, "peace of mind");
3603 } else {
3604 assert(limit < _region_limit, "only way to get here");
3605 // This can happen under some pretty unusual circumstances. An
3606 // evacuation pause empties the region underneath our feet (NTAMS
3607 // at bottom). We then do some allocation in the region (NTAMS
3608 // stays at bottom), followed by the region being used as a GC
3609 // alloc region (NTAMS will move to top() and the objects
3610 // originally below it will be grayed). All objects now marked in
3611 // the region are explicitly grayed, if below the global finger,
3612 // and we do not need in fact to scan anything else. So, we simply
3613 // set _finger to be limit to ensure that the bitmap iteration
3614 // doesn't do anything.
3615 _finger = limit;
3616 }
3618 _region_limit = limit;
3619 }
3621 void CMTask::giveup_current_region() {
3622 assert(_curr_region != NULL, "invariant");
3623 if (_cm->verbose_low()) {
3624 gclog_or_tty->print_cr("[%u] giving up region "PTR_FORMAT,
3625 _worker_id, p2i(_curr_region));
3626 }
3627 clear_region_fields();
3628 }
3630 void CMTask::clear_region_fields() {
3631 // Values for these three fields that indicate that we're not
3632 // holding on to a region.
3633 _curr_region = NULL;
3634 _finger = NULL;
3635 _region_limit = NULL;
3636 }
3638 void CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
3639 if (cm_oop_closure == NULL) {
3640 assert(_cm_oop_closure != NULL, "invariant");
3641 } else {
3642 assert(_cm_oop_closure == NULL, "invariant");
3643 }
3644 _cm_oop_closure = cm_oop_closure;
3645 }
3647 void CMTask::reset(CMBitMap* nextMarkBitMap) {
3648 guarantee(nextMarkBitMap != NULL, "invariant");
3650 if (_cm->verbose_low()) {
3651 gclog_or_tty->print_cr("[%u] resetting", _worker_id);
3652 }
3654 _nextMarkBitMap = nextMarkBitMap;
3655 clear_region_fields();
3657 _calls = 0;
3658 _elapsed_time_ms = 0.0;
3659 _termination_time_ms = 0.0;
3660 _termination_start_time_ms = 0.0;
3662 #if _MARKING_STATS_
3663 _local_pushes = 0;
3664 _local_pops = 0;
3665 _local_max_size = 0;
3666 _objs_scanned = 0;
3667 _global_pushes = 0;
3668 _global_pops = 0;
3669 _global_max_size = 0;
3670 _global_transfers_to = 0;
3671 _global_transfers_from = 0;
3672 _regions_claimed = 0;
3673 _objs_found_on_bitmap = 0;
3674 _satb_buffers_processed = 0;
3675 _steal_attempts = 0;
3676 _steals = 0;
3677 _aborted = 0;
3678 _aborted_overflow = 0;
3679 _aborted_cm_aborted = 0;
3680 _aborted_yield = 0;
3681 _aborted_timed_out = 0;
3682 _aborted_satb = 0;
3683 _aborted_termination = 0;
3684 #endif // _MARKING_STATS_
3685 }
3687 bool CMTask::should_exit_termination() {
3688 regular_clock_call();
3689 // This is called when we are in the termination protocol. We should
3690 // quit if, for some reason, this task wants to abort or the global
3691 // stack is not empty (this means that we can get work from it).
3692 return !_cm->mark_stack_empty() || has_aborted();
3693 }
3695 void CMTask::reached_limit() {
3696 assert(_words_scanned >= _words_scanned_limit ||
3697 _refs_reached >= _refs_reached_limit,
3698 "shouldn't have been called otherwise");
3699 regular_clock_call();
3700 }
3702 void CMTask::regular_clock_call() {
3703 if (has_aborted()) return;
3705 // First, we need to recalculate the words scanned and refs reached
3706 // limits for the next clock call.
3707 recalculate_limits();
3709 // During the regular clock call we do the following:
3711 // (1) If an overflow has been flagged, then we abort.
3712 if (_cm->has_overflown()) {
3713 set_has_aborted();
3714 return;
3715 }
3717 // If we are not concurrent (i.e. we're doing remark) we don't need
3718 // to check anything else. The other steps are only needed during
3719 // the concurrent marking phase.
3720 if (!concurrent()) return;
3722 // (2) If marking has been aborted for Full GC, then we also abort.
3723 if (_cm->has_aborted()) {
3724 set_has_aborted();
3725 statsOnly( ++_aborted_cm_aborted );
3726 return;
3727 }
3729 double curr_time_ms = os::elapsedVTime() * 1000.0;
3731 // (3) If marking stats are enabled, then we update the step history.
3732 #if _MARKING_STATS_
3733 if (_words_scanned >= _words_scanned_limit) {
3734 ++_clock_due_to_scanning;
3735 }
3736 if (_refs_reached >= _refs_reached_limit) {
3737 ++_clock_due_to_marking;
3738 }
3740 double last_interval_ms = curr_time_ms - _interval_start_time_ms;
3741 _interval_start_time_ms = curr_time_ms;
3742 _all_clock_intervals_ms.add(last_interval_ms);
3744 if (_cm->verbose_medium()) {
3745 gclog_or_tty->print_cr("[%u] regular clock, interval = %1.2lfms, "
3746 "scanned = %d%s, refs reached = %d%s",
3747 _worker_id, last_interval_ms,
3748 _words_scanned,
3749 (_words_scanned >= _words_scanned_limit) ? " (*)" : "",
3750 _refs_reached,
3751 (_refs_reached >= _refs_reached_limit) ? " (*)" : "");
3752 }
3753 #endif // _MARKING_STATS_
3755 // (4) We check whether we should yield. If we have to, then we abort.
3756 if (SuspendibleThreadSet::should_yield()) {
3757 // We should yield. To do this we abort the task. The caller is
3758 // responsible for yielding.
3759 set_has_aborted();
3760 statsOnly( ++_aborted_yield );
3761 return;
3762 }
3764 // (5) We check whether we've reached our time quota. If we have,
3765 // then we abort.
3766 double elapsed_time_ms = curr_time_ms - _start_time_ms;
3767 if (elapsed_time_ms > _time_target_ms) {
3768 set_has_aborted();
3769 _has_timed_out = true;
3770 statsOnly( ++_aborted_timed_out );
3771 return;
3772 }
3774 // (6) Finally, we check whether there are enough completed SATB
3775 // buffers available for processing. If there are, we abort.
3776 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3777 if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
3778 if (_cm->verbose_low()) {
3779 gclog_or_tty->print_cr("[%u] aborting to deal with pending SATB buffers",
3780 _worker_id);
3781 }
3782 // we do need to process SATB buffers; we'll abort and restart
3783 // the marking task to do so
3784 set_has_aborted();
3785 statsOnly( ++_aborted_satb );
3786 return;
3787 }
3788 }
3790 void CMTask::recalculate_limits() {
3791 _real_words_scanned_limit = _words_scanned + words_scanned_period;
3792 _words_scanned_limit = _real_words_scanned_limit;
3794 _real_refs_reached_limit = _refs_reached + refs_reached_period;
3795 _refs_reached_limit = _real_refs_reached_limit;
3796 }
3798 void CMTask::decrease_limits() {
3799 // This is called when we believe that we're going to do an infrequent
3800 // operation which will increase the per byte scanned cost (i.e. move
3801 // entries to/from the global stack). It basically tries to decrease the
3802 // scanning limit so that the clock is called earlier.
3804 if (_cm->verbose_medium()) {
3805 gclog_or_tty->print_cr("[%u] decreasing limits", _worker_id);
3806 }
3808 _words_scanned_limit = _real_words_scanned_limit -
3809 3 * words_scanned_period / 4;
3810 _refs_reached_limit = _real_refs_reached_limit -
3811 3 * refs_reached_period / 4;
3812 }
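// Worked example with a hypothetical period: if words_scanned_period were
// 16384 words, the assignment above would set _words_scanned_limit to
// _real_words_scanned_limit - 12288, leaving only a quarter of the period's
// budget before the next regular_clock_call() is triggered.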
3814 void CMTask::move_entries_to_global_stack() {
3815 // local array where we'll store the entries that will be popped
3816 // from the local queue
3817 oop buffer[global_stack_transfer_size];
3819 int n = 0;
3820 oop obj;
3821 while (n < global_stack_transfer_size && _task_queue->pop_local(obj)) {
3822 buffer[n] = obj;
3823 ++n;
3824 }
3826 if (n > 0) {
3827 // we popped at least one entry from the local queue
3829 statsOnly( ++_global_transfers_to; _local_pops += n );
3831 if (!_cm->mark_stack_push(buffer, n)) {
3832 if (_cm->verbose_low()) {
3833 gclog_or_tty->print_cr("[%u] aborting due to global stack overflow",
3834 _worker_id);
3835 }
3836 set_has_aborted();
3837 } else {
3838 // the transfer was successful
3840 if (_cm->verbose_medium()) {
3841 gclog_or_tty->print_cr("[%u] pushed %d entries to the global stack",
3842 _worker_id, n);
3843 }
3844 statsOnly( int tmp_size = _cm->mark_stack_size();
3845 if (tmp_size > _global_max_size) {
3846 _global_max_size = tmp_size;
3847 }
3848 _global_pushes += n );
3849 }
3850 }
3852 // this operation was quite expensive, so decrease the limits
3853 decrease_limits();
3854 }
3856 void CMTask::get_entries_from_global_stack() {
3857 // local array where we'll store the entries that will be popped
3858 // from the global stack.
3859 oop buffer[global_stack_transfer_size];
3860 int n;
3861 _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n);
3862 assert(n <= global_stack_transfer_size,
3863 "we should not pop more than the given limit");
3864 if (n > 0) {
3865 // yes, we did actually pop at least one entry
3867 statsOnly( ++_global_transfers_from; _global_pops += n );
3868 if (_cm->verbose_medium()) {
3869 gclog_or_tty->print_cr("[%u] popped %d entries from the global stack",
3870 _worker_id, n);
3871 }
3872 for (int i = 0; i < n; ++i) {
3873 bool success = _task_queue->push(buffer[i]);
3874 // We only call this when the local queue is empty or under a
3875 // given target limit. So, we do not expect this push to fail.
3876 assert(success, "invariant");
3877 }
3879 statsOnly( int tmp_size = _task_queue->size();
3880 if (tmp_size > _local_max_size) {
3881 _local_max_size = tmp_size;
3882 }
3883 _local_pushes += n );
3884 }
3886 // this operation was quite expensive, so decrease the limits
3887 decrease_limits();
3888 }
3890 void CMTask::drain_local_queue(bool partially) {
3891 if (has_aborted()) return;
3893 // Decide what the target size is, depending on whether we're going to
3894 // drain it partially (so that other tasks can steal if they run out
3895 // of things to do) or totally (at the very end).
3896 size_t target_size;
3897 if (partially) {
3898 target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
3899 } else {
3900 target_size = 0;
3901 }
3903 if (_task_queue->size() > target_size) {
3904 if (_cm->verbose_high()) {
3905 gclog_or_tty->print_cr("[%u] draining local queue, target size = " SIZE_FORMAT,
3906 _worker_id, target_size);
3907 }
3909 oop obj;
3910 bool ret = _task_queue->pop_local(obj);
3911 while (ret) {
3912 statsOnly( ++_local_pops );
3914 if (_cm->verbose_high()) {
3915 gclog_or_tty->print_cr("[%u] popped "PTR_FORMAT, _worker_id,
3916 p2i((void*) obj));
3917 }
3919 assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" );
3920 assert(!_g1h->is_on_master_free_list(
3921 _g1h->heap_region_containing((HeapWord*) obj)), "invariant");
3923 scan_object(obj);
3925 if (_task_queue->size() <= target_size || has_aborted()) {
3926 ret = false;
3927 } else {
3928 ret = _task_queue->pop_local(obj);
3929 }
3930 }
3932 if (_cm->verbose_high()) {
3933 gclog_or_tty->print_cr("[%u] drained local queue, size = %d",
3934 _worker_id, _task_queue->size());
3935 }
3936 }
3937 }
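// Illustration of the partial-drain target (sizes are made up): with a task
// queue whose max_elems is 16384 and a GCDrainStackTargetSize of 64, the
// loop above pops entries until at most MIN2(16384 / 3, 64) == 64 remain,
// keeping some entries available for other tasks to steal.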
3939 void CMTask::drain_global_stack(bool partially) {
3940 if (has_aborted()) return;
3942 // We have a policy to drain the local queue before we attempt to
3943 // drain the global stack.
3944 assert(partially || _task_queue->size() == 0, "invariant");
3946 // Decide what the target size is, depending on whether we're going to
3947 // drain it partially (so that other tasks can steal if they run out
3948 // of things to do) or totally (at the very end). Notice that,
3949 // because we move entries from the global stack in chunks or
3950 // because another task might be doing the same, we might in fact
3951 // drop below the target. But this is not a problem.
3952 size_t target_size;
3953 if (partially) {
3954 target_size = _cm->partial_mark_stack_size_target();
3955 } else {
3956 target_size = 0;
3957 }
3959 if (_cm->mark_stack_size() > target_size) {
3960 if (_cm->verbose_low()) {
3961 gclog_or_tty->print_cr("[%u] draining global_stack, target size " SIZE_FORMAT,
3962 _worker_id, target_size);
3963 }
3965 while (!has_aborted() && _cm->mark_stack_size() > target_size) {
3966 get_entries_from_global_stack();
3967 drain_local_queue(partially);
3968 }
3970 if (_cm->verbose_low()) {
3971 gclog_or_tty->print_cr("[%u] drained global stack, size = " SIZE_FORMAT,
3972 _worker_id, _cm->mark_stack_size());
3973 }
3974 }
3975 }
3977 // SATB Queue has several assumptions on whether to call the par or
3978 // non-par versions of the methods. This is why some of the code is
3979 // replicated. We should really get rid of the single-threaded version
3980 // of the code to simplify things.
3981 void CMTask::drain_satb_buffers() {
3982 if (has_aborted()) return;
3984 // We set this so that the regular clock knows that we're in the
3985 // middle of draining buffers and doesn't set the abort flag when it
3986 // notices that SATB buffers are available for draining. It'd be
3987 // very counterproductive if it did that. :-)
3988 _draining_satb_buffers = true;
3990 CMObjectClosure oc(this);
3991 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3992 if (G1CollectedHeap::use_parallel_gc_threads()) {
3993 satb_mq_set.set_par_closure(_worker_id, &oc);
3994 } else {
3995 satb_mq_set.set_closure(&oc);
3996 }
3998 // This keeps claiming and applying the closure to completed buffers
3999 // until we run out of buffers or we need to abort.
4000 if (G1CollectedHeap::use_parallel_gc_threads()) {
4001 while (!has_aborted() &&
4002 satb_mq_set.par_apply_closure_to_completed_buffer(_worker_id)) {
4003 if (_cm->verbose_medium()) {
4004 gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id);
4005 }
4006 statsOnly( ++_satb_buffers_processed );
4007 regular_clock_call();
4008 }
4009 } else {
4010 while (!has_aborted() &&
4011 satb_mq_set.apply_closure_to_completed_buffer()) {
4012 if (_cm->verbose_medium()) {
4013 gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id);
4014 }
4015 statsOnly( ++_satb_buffers_processed );
4016 regular_clock_call();
4017 }
4018 }
4020 _draining_satb_buffers = false;
4022 assert(has_aborted() ||
4023 concurrent() ||
4024 satb_mq_set.completed_buffers_num() == 0, "invariant");
4026 if (G1CollectedHeap::use_parallel_gc_threads()) {
4027 satb_mq_set.set_par_closure(_worker_id, NULL);
4028 } else {
4029 satb_mq_set.set_closure(NULL);
4030 }
4032 // again, this was a potentially expensive operation, decrease the
4033 // limits to get the regular clock call early
4034 decrease_limits();
4035 }
4037 void CMTask::print_stats() {
4038 gclog_or_tty->print_cr("Marking Stats, task = %u, calls = %d",
4039 _worker_id, _calls);
4040 gclog_or_tty->print_cr(" Elapsed time = %1.2lfms, Termination time = %1.2lfms",
4041 _elapsed_time_ms, _termination_time_ms);
4042 gclog_or_tty->print_cr(" Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
4043 _step_times_ms.num(), _step_times_ms.avg(),
4044 _step_times_ms.sd());
4045 gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms",
4046 _step_times_ms.maximum(), _step_times_ms.sum());
4048 #if _MARKING_STATS_
4049 gclog_or_tty->print_cr(" Clock Intervals (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
4050 _all_clock_intervals_ms.num(), _all_clock_intervals_ms.avg(),
4051 _all_clock_intervals_ms.sd());
4052 gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms",
4053 _all_clock_intervals_ms.maximum(),
4054 _all_clock_intervals_ms.sum());
4055 gclog_or_tty->print_cr(" Clock Causes (cum): scanning = %d, marking = %d",
4056 _clock_due_to_scanning, _clock_due_to_marking);
4057 gclog_or_tty->print_cr(" Objects: scanned = %d, found on the bitmap = %d",
4058 _objs_scanned, _objs_found_on_bitmap);
4059 gclog_or_tty->print_cr(" Local Queue: pushes = %d, pops = %d, max size = %d",
4060 _local_pushes, _local_pops, _local_max_size);
4061 gclog_or_tty->print_cr(" Global Stack: pushes = %d, pops = %d, max size = %d",
4062 _global_pushes, _global_pops, _global_max_size);
4063 gclog_or_tty->print_cr(" transfers to = %d, transfers from = %d",
4064 _global_transfers_to,_global_transfers_from);
4065 gclog_or_tty->print_cr(" Regions: claimed = %d", _regions_claimed);
4066 gclog_or_tty->print_cr(" SATB buffers: processed = %d", _satb_buffers_processed);
4067 gclog_or_tty->print_cr(" Steals: attempts = %d, successes = %d",
4068 _steal_attempts, _steals);
4069 gclog_or_tty->print_cr(" Aborted: %d, due to", _aborted);
4070 gclog_or_tty->print_cr(" overflow: %d, global abort: %d, yield: %d",
4071 _aborted_overflow, _aborted_cm_aborted, _aborted_yield);
4072 gclog_or_tty->print_cr(" time out: %d, SATB: %d, termination: %d",
4073 _aborted_timed_out, _aborted_satb, _aborted_termination);
4074 #endif // _MARKING_STATS_
4075 }
4077 /*****************************************************************************
4079 The do_marking_step(time_target_ms, ...) method is the building
4080 block of the parallel marking framework. It can be called in parallel
4081 with other invocations of do_marking_step() on different tasks
4082 (but only one per task, obviously) and concurrently with the
4083 mutator threads, or during remark, hence it eliminates the need
4084 for two versions of the code. When called during remark, it will
4085 pick up from where the task left off during the concurrent marking
4086 phase. Interestingly, tasks are also claimable during evacuation
4087 pauses, since do_marking_step() ensures that it aborts before
4088 it needs to yield.
4090 The data structures that it uses to do marking work are the
4091 following:
4093 (1) Marking Bitmap. If there are gray objects that appear only
4094 on the bitmap (this happens either when dealing with an overflow
4095 or when the initial marking phase has simply marked the roots
4096 and didn't push them on the stack), then tasks claim heap
4097 regions whose bitmap they then scan to find gray objects. A
4098 global finger indicates where the end of the last claimed region
4099 is. A local finger indicates how far into the region a task has
4100 scanned. The two fingers are used to determine how to gray an
4101 object (i.e. whether simply marking it is OK, as it will be
4102 visited by a task in the future, or whether it also needs to be
4103 pushed on a stack).
4105 (2) Local Queue. The local queue of the task which is accessed
4106 reasonably efficiently by the task. Other tasks can steal from
4107 it when they run out of work. Throughout the marking phase, a
4108 task attempts to keep its local queue short but not totally
4109 empty, so that entries are available for stealing by other
4110 tasks. Only when there is no more work will a task totally
4111 drain its local queue.
4113 (3) Global Mark Stack. This handles local queue overflow. During
4114 marking only sets of entries are moved between it and the local
4115 queues, as access to it requires a mutex and more fine-grained
4116 interaction, which might cause contention. If it
4117 overflows, then the marking phase should restart and iterate
4118 over the bitmap to identify gray objects. Throughout the marking
4119 phase, tasks attempt to keep the global mark stack at a small
4120 length but not totally empty, so that entries are available for
4121 popping by other tasks. Only when there is no more work will
4122 tasks totally drain the global mark stack.
4124 (4) SATB Buffer Queue. This is where completed SATB buffers are
4125 made available. Buffers are regularly removed from this queue
4126 and scanned for roots, so that the queue doesn't get too
4127 long. During remark, all completed buffers are processed, as
4128 well as the filled in parts of any uncompleted buffers.
4130 The do_marking_step() method tries to abort when the time target
4131 has been reached. There are a few other cases when the
4132 do_marking_step() method also aborts:
4134 (1) When the marking phase has been aborted (after a Full GC).
4136 (2) When a global overflow (on the global stack) has been
4137 triggered. Before the task aborts, it will actually sync up with
4138 the other tasks to ensure that all the marking data structures
4139 (local queues, stacks, fingers etc.) are re-initialized so that
4140 when do_marking_step() completes, the marking phase can
4141 immediately restart.
4143 (3) When enough completed SATB buffers are available. The
4144 do_marking_step() method only tries to drain SATB buffers right
4145 at the beginning. So, if enough buffers are available, the
4146 marking step aborts and the SATB buffers are processed at
4147 the beginning of the next invocation.
4149 (4) To yield. When we have to yield, we abort and yield
4150 right at the end of do_marking_step(). This saves us from a lot
4151 of hassle as, by yielding, we might allow a Full GC. If this
4152 happens then objects will be compacted underneath our feet, the
4153 heap might shrink, etc. We save checking for this by just
4154 aborting and doing the yield right at the end.
4156 From the above it follows that the do_marking_step() method should
4157 be called in a loop (or, otherwise, regularly) until it completes.
4159 If a marking step completes without its has_aborted() flag being
4160 true, it means it has completed the current marking phase (and
4161 also all other marking tasks have done so and have all synced up).
4163 A method called regular_clock_call() is invoked "regularly" (in
4164 sub ms intervals) throughout marking. It is this clock method that
4165 checks all the abort conditions which were mentioned above and
4166 decides when the task should abort. A work-based scheme is used to
4167 trigger this clock method: when the number of object words the
4168 marking phase has scanned or the number of references the marking
4169 phase has visited reach a given limit. Additional invocations to
4170 the clock method have been planted in a few other strategic places
4171 too. The initial reason for the clock method was to avoid calling
4172 vtime too regularly, as it is quite expensive. So, once it was in
4173 place, it was natural to piggy-back all the other conditions on it
4174 too and not constantly check them throughout the code.
4176 If do_termination is true then do_marking_step will enter its
4177 termination protocol.
4179 The value of is_serial must be true when do_marking_step is being
4180 called serially (i.e. by the VMThread) and do_marking_step should
4181 skip any synchronization in the termination and overflow code.
4182 Examples include the serial remark code and the serial reference
4183 processing closures.
4185 The value of is_serial must be false when do_marking_step is
4186 being called by any of the worker threads in a work gang.
4187 Examples include the concurrent marking code (CMMarkingTask),
4188 the MT remark code, and the MT reference processing closures.
4190 *****************************************************************************/
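// A minimal sketch (not the actual caller) of the driver loop that the
// comment above prescribes; the 10ms time target and the yield handling
// are illustrative assumptions:
//
//   do {
//     task->do_marking_step(10.0 /* time_target_ms */,
//                           true /* do_termination */,
//                           false /* is_serial */);
//     // Handle any pending yield request here before looping, since
//     // do_marking_step() aborts instead of yielding itself.
//   } while (task->has_aborted() && !cm->has_aborted());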
4192 void CMTask::do_marking_step(double time_target_ms,
4193 bool do_termination,
4194 bool is_serial) {
4195 assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
4196 assert(concurrent() == _cm->concurrent(), "they should be the same");
4198 G1CollectorPolicy* g1_policy = _g1h->g1_policy();
4199 assert(_task_queues != NULL, "invariant");
4200 assert(_task_queue != NULL, "invariant");
4201 assert(_task_queues->queue(_worker_id) == _task_queue, "invariant");
4203 assert(!_claimed,
4204 "only one thread should claim this task at any one time");
4206 // OK, this doesn't safeguard against all possible scenarios, as it is
4207 // possible for two threads to set the _claimed flag at the same
4208 // time. But it is only for debugging purposes anyway and it will
4209 // catch most problems.
4210 _claimed = true;
4212 _start_time_ms = os::elapsedVTime() * 1000.0;
4213 statsOnly( _interval_start_time_ms = _start_time_ms );
4215 // If do_stealing is true then do_marking_step will attempt to
4216 // steal work from the other CMTasks. It only makes sense to
4217 // enable stealing when the termination protocol is enabled
4218 // and do_marking_step() is not being called serially.
4219 bool do_stealing = do_termination && !is_serial;
4221 double diff_prediction_ms =
4222 g1_policy->get_new_prediction(&_marking_step_diffs_ms);
4223 _time_target_ms = time_target_ms - diff_prediction_ms;
4225 // set up the variables that are used in the work-based scheme to
4226 // call the regular clock method
4227 _words_scanned = 0;
4228 _refs_reached = 0;
4229 recalculate_limits();
4231 // clear all flags
4232 clear_has_aborted();
4233 _has_timed_out = false;
4234 _draining_satb_buffers = false;
4236 ++_calls;
4238 if (_cm->verbose_low()) {
4239 gclog_or_tty->print_cr("[%u] >>>>>>>>>> START, call = %d, "
4240 "target = %1.2lfms >>>>>>>>>>",
4241 _worker_id, _calls, _time_target_ms);
4242 }
4244 // Set up the bitmap and oop closures. Anything that uses them is
4245 // eventually called from this method, so it is OK to allocate these
4246 // statically.
4247 CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap);
4248 G1CMOopClosure cm_oop_closure(_g1h, _cm, this);
4249 set_cm_oop_closure(&cm_oop_closure);
4251 if (_cm->has_overflown()) {
4252 // This can happen if the mark stack overflows during a GC pause
4253 // and this task, after a yield point, restarts. We have to abort
4254 // as we need to get into the overflow protocol which happens
4255 // right at the end of this task.
4256 set_has_aborted();
4257 }
4259 // First drain any available SATB buffers. After this, we will not
4260 // look at SATB buffers before the next invocation of this method.
4261 // If enough completed SATB buffers are queued up, the regular clock
4262 // will abort this task so that it restarts.
4263 drain_satb_buffers();
4264 // ...then partially drain the local queue and the global stack
4265 drain_local_queue(true);
4266 drain_global_stack(true);
4268 do {
4269 if (!has_aborted() && _curr_region != NULL) {
4270 // This means that we're already holding on to a region.
4271 assert(_finger != NULL, "if region is not NULL, then the finger "
4272 "should not be NULL either");
4274 // We might have restarted this task after an evacuation pause
4275 // which might have evacuated the region we're holding on to
4276 // underneath our feet. Let's read its limit again to make sure
4277 // that we do not iterate over a region of the heap that
4278 // contains garbage (update_region_limit() will also move
4279 // _finger to the start of the region if it is found empty).
4280 update_region_limit();
4281 // We will start from _finger not from the start of the region,
4282 // as we might be restarting this task after aborting half-way
4283 // through scanning this region. In this case, _finger points to
4284 // the address where we last found a marked object. If this is a
4285 // fresh region, _finger points to start().
4286 MemRegion mr = MemRegion(_finger, _region_limit);
4288 if (_cm->verbose_low()) {
4289 gclog_or_tty->print_cr("[%u] we're scanning part "
4290 "["PTR_FORMAT", "PTR_FORMAT") "
4291 "of region "HR_FORMAT,
4292 _worker_id, p2i(_finger), p2i(_region_limit),
4293 HR_FORMAT_PARAMS(_curr_region));
4294 }
4296 assert(!_curr_region->isHumongous() || mr.start() == _curr_region->bottom(),
4297 "humongous regions should go around loop once only");
4299 // Some special cases:
4300 // If the memory region is empty, we can just give up the region.
4301 // If the current region is humongous then we only need to check
4302 // the bitmap for the bit associated with the start of the object,
4303 // scan the object if it's live, and give up the region.
4304 // Otherwise, let's iterate over the bitmap of the part of the region
4305 // that is left.
4306 // If the iteration is successful, give up the region.
4307 if (mr.is_empty()) {
4308 giveup_current_region();
4309 regular_clock_call();
4310 } else if (_curr_region->isHumongous() && mr.start() == _curr_region->bottom()) {
4311 if (_nextMarkBitMap->isMarked(mr.start())) {
4312 // The object is marked - apply the closure
4313 BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start());
4314 bitmap_closure.do_bit(offset);
4315 }
4316 // Even if this task aborted while scanning the humongous object
4317 // we can (and should) give up the current region.
4318 giveup_current_region();
4319 regular_clock_call();
4320 } else if (_nextMarkBitMap->iterate(&bitmap_closure, mr)) {
4321 giveup_current_region();
4322 regular_clock_call();
4323 } else {
4324 assert(has_aborted(), "currently the only way to do so");
4325 // The only way to abort the bitmap iteration is to return
4326 // false from the do_bit() method. However, inside the
4327 // do_bit() method we move the _finger to point to the
4328 // object currently being looked at. So, if we bail out, we
4329 // have definitely set _finger to something non-null.
4330 assert(_finger != NULL, "invariant");
4332 // Region iteration was actually aborted. So now _finger
4333 // points to the address of the object we last scanned. If we
4334 // leave it there, when we restart this task, we will rescan
4335 // the object. It is easy to avoid this. We move the finger by
4336 // enough to point to the next possible object header (the
4337 // bitmap knows by how much we need to move it as it knows its
4338 // granularity).
4339 assert(_finger < _region_limit, "invariant");
4340 HeapWord* new_finger = _nextMarkBitMap->nextObject(_finger);
4341 // Check if bitmap iteration was aborted while scanning the last object
4342 if (new_finger >= _region_limit) {
4343 giveup_current_region();
4344 } else {
4345 move_finger_to(new_finger);
4346 }
4347 }
4348 }
4349 // At this point we have either completed iterating over the
4350 // region we were holding on to, or we have aborted.
4352 // We then partially drain the local queue and the global stack.
4353 // (Do we really need this?)
4354 drain_local_queue(true);
4355 drain_global_stack(true);
4357 // Read the note on the claim_region() method on why it might
4358 // return NULL with potentially more regions available for
4359 // claiming and why we have to check out_of_regions() to determine
4360 // whether we're done or not.
4361 while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
4362 // We are going to try to claim a new region. We should have
4363 // given up on the previous one.
4364 // Separated the asserts so that we know which one fires.
4365 assert(_curr_region == NULL, "invariant");
4366 assert(_finger == NULL, "invariant");
4367 assert(_region_limit == NULL, "invariant");
4368 if (_cm->verbose_low()) {
4369 gclog_or_tty->print_cr("[%u] trying to claim a new region", _worker_id);
4370 }
4371 HeapRegion* claimed_region = _cm->claim_region(_worker_id);
4372 if (claimed_region != NULL) {
4373 // Yes, we managed to claim one
4374 statsOnly( ++_regions_claimed );
4376 if (_cm->verbose_low()) {
4377 gclog_or_tty->print_cr("[%u] we successfully claimed "
4378 "region "PTR_FORMAT,
4379 _worker_id, p2i(claimed_region));
4380 }
4382 setup_for_region(claimed_region);
4383 assert(_curr_region == claimed_region, "invariant");
4384 }
4385 // It is important to call the regular clock here. It might take
4386 // a while to claim a region if, for example, we hit a large
4387 // block of empty regions. So we need to call the regular clock
4388 // method once round the loop to make sure it's called
4389 // frequently enough.
4390 regular_clock_call();
4391 }
4393 if (!has_aborted() && _curr_region == NULL) {
4394 assert(_cm->out_of_regions(),
4395 "at this point we should be out of regions");
4396 }
4397 } while ( _curr_region != NULL && !has_aborted());
4399 if (!has_aborted()) {
4400 // We cannot check whether the global stack is empty, since other
4401 // tasks might be pushing objects to it concurrently.
4402 assert(_cm->out_of_regions(),
4403 "at this point we should be out of regions");
4405 if (_cm->verbose_low()) {
4406 gclog_or_tty->print_cr("[%u] all regions claimed", _worker_id);
4407 }
4409 // Try to reduce the number of available SATB buffers so that
4410 // remark has less work to do.
4411 drain_satb_buffers();
4412 }
4414 // Since we've done everything else, we can now totally drain the
4415 // local queue and global stack.
4416 drain_local_queue(false);
4417 drain_global_stack(false);
4419 // Attempt at work stealing from other tasks' queues.
4420 if (do_stealing && !has_aborted()) {
4421 // We have not aborted. This means that we have finished all that
4422 // we could. Let's try to do some stealing...
4424 // We cannot check whether the global stack is empty, since other
4425 // tasks might be pushing objects to it concurrently.
4426 assert(_cm->out_of_regions() && _task_queue->size() == 0,
4427 "only way to reach here");
4429 if (_cm->verbose_low()) {
4430 gclog_or_tty->print_cr("[%u] starting to steal", _worker_id);
4431 }
4433 while (!has_aborted()) {
4434 oop obj;
4435 statsOnly( ++_steal_attempts );
4437 if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) {
4438 if (_cm->verbose_medium()) {
4439 gclog_or_tty->print_cr("[%u] stolen "PTR_FORMAT" successfully",
4440 _worker_id, p2i((void*) obj));
4441 }
4443 statsOnly( ++_steals );
4445 assert(_nextMarkBitMap->isMarked((HeapWord*) obj),
4446 "any stolen object should be marked");
4447 scan_object(obj);
4449 // And since we're towards the end, let's totally drain the
4450 // local queue and global stack.
4451 drain_local_queue(false);
4452 drain_global_stack(false);
4453 } else {
4454 break;
4455 }
4456 }
4457 }
4459 // If we are about to wrap up and go into termination, check if we
4460 // should raise the overflow flag.
4461 if (do_termination && !has_aborted()) {
4462 if (_cm->force_overflow()->should_force()) {
4463 _cm->set_has_overflown();
4464 regular_clock_call();
4465 }
4466 }
4468 // We still haven't aborted. Now, let's try to get into the
4469 // termination protocol.
4470 if (do_termination && !has_aborted()) {
4471 // We cannot check whether the global stack is empty, since other
4472 // tasks might be concurrently pushing objects on it.
4473 // Separated the asserts so that we know which one fires.
4474 assert(_cm->out_of_regions(), "only way to reach here");
4475 assert(_task_queue->size() == 0, "only way to reach here");
4477 if (_cm->verbose_low()) {
4478 gclog_or_tty->print_cr("[%u] starting termination protocol", _worker_id);
4479 }
4481 _termination_start_time_ms = os::elapsedVTime() * 1000.0;
4483 // The CMTask class also extends the TerminatorTerminator class,
4484 // hence its should_exit_termination() method will also decide
4485 // whether to exit the termination protocol or not.
4486 bool finished = (is_serial ||
4487 _cm->terminator()->offer_termination(this));
4488 double termination_end_time_ms = os::elapsedVTime() * 1000.0;
4489 _termination_time_ms +=
4490 termination_end_time_ms - _termination_start_time_ms;
4492 if (finished) {
4493 // We're all done.
4495 if (_worker_id == 0) {
4496 // let's allow task 0 to do this
4497 if (concurrent()) {
4498 assert(_cm->concurrent_marking_in_progress(), "invariant");
4499 // we need to set this to false before the next
4500 // safepoint. This way we ensure that the marking phase
4501 // doesn't observe any more heap expansions.
4502 _cm->clear_concurrent_marking_in_progress();
4503 }
4504 }
4506 // We can now guarantee that the global stack is empty, since
4507 // all other tasks have finished. We separated the guarantees so
4508 // that, if a condition is false, we can immediately find out
4509 // which one.
4510 guarantee(_cm->out_of_regions(), "only way to reach here");
4511 guarantee(_cm->mark_stack_empty(), "only way to reach here");
4512 guarantee(_task_queue->size() == 0, "only way to reach here");
4513 guarantee(!_cm->has_overflown(), "only way to reach here");
4514 guarantee(!_cm->mark_stack_overflow(), "only way to reach here");
4516 if (_cm->verbose_low()) {
4517 gclog_or_tty->print_cr("[%u] all tasks terminated", _worker_id);
4518 }
4519 } else {
4520 // Apparently there's more work to do. Let's abort this task. The
4521 // caller will restart it and we can hopefully find more things to do.
4523 if (_cm->verbose_low()) {
4524 gclog_or_tty->print_cr("[%u] apparently there is more work to do",
4525 _worker_id);
4526 }
4528 set_has_aborted();
4529 statsOnly( ++_aborted_termination );
4530 }
4531 }
4533 // Mainly for debugging purposes to make sure that a pointer to the
4534 // closure which was statically allocated in this frame doesn't
4535 // escape it by accident.
4536 set_cm_oop_closure(NULL);
4537 double end_time_ms = os::elapsedVTime() * 1000.0;
4538 double elapsed_time_ms = end_time_ms - _start_time_ms;
4539 // Update the step history.
4540 _step_times_ms.add(elapsed_time_ms);
4542 if (has_aborted()) {
4543 // The task was aborted for some reason.
4545 statsOnly( ++_aborted );
4547 if (_has_timed_out) {
4548 double diff_ms = elapsed_time_ms - _time_target_ms;
4549 // Keep statistics of how well we did with respect to hitting
4550 // our target only if we actually timed out (if we aborted for
4551 // other reasons, then the results might get skewed).
4552 _marking_step_diffs_ms.add(diff_ms);
4553 }
4555 if (_cm->has_overflown()) {
4556 // This is the interesting one. We aborted because a global
4557 // overflow was raised. This means we have to restart the
4558 // marking phase and start iterating over regions. However, in
4559 // order to do this we have to make sure that all tasks stop
4560 // what they are doing and re-initialise in a safe manner. We
4561 // will achieve this with the use of two barrier sync points.
4563 if (_cm->verbose_low()) {
4564 gclog_or_tty->print_cr("[%u] detected overflow", _worker_id);
4565 }
4567 if (!is_serial) {
4568 // We only need to enter the sync barrier if being called
4569 // from a parallel context
4570 _cm->enter_first_sync_barrier(_worker_id);
4572 // When we exit this sync barrier we know that all tasks have
4573 // stopped doing marking work. So, it's now safe to
4574 // re-initialise our data structures. At the end of this method,
4575 // task 0 will clear the global data structures.
4576 }
4578 statsOnly( ++_aborted_overflow );
4580 // We clear the local state of this task...
4581 clear_region_fields();
4583 if (!is_serial) {
4584 // ...and enter the second barrier.
4585 _cm->enter_second_sync_barrier(_worker_id);
4586 }
4587 // At this point, if we're during the concurrent phase of
4588 // marking, everything has been re-initialized and we're
4589 // ready to restart.
4590 }
4592 if (_cm->verbose_low()) {
4593 gclog_or_tty->print_cr("[%u] <<<<<<<<<< ABORTING, target = %1.2lfms, "
4594 "elapsed = %1.2lfms <<<<<<<<<<",
4595 _worker_id, _time_target_ms, elapsed_time_ms);
4596 if (_cm->has_aborted()) {
4597 gclog_or_tty->print_cr("[%u] ========== MARKING ABORTED ==========",
4598 _worker_id);
4599 }
4600 }
4601 } else {
4602 if (_cm->verbose_low()) {
4603 gclog_or_tty->print_cr("[%u] <<<<<<<<<< FINISHED, target = %1.2lfms, "
4604 "elapsed = %1.2lfms <<<<<<<<<<",
4605 _worker_id, _time_target_ms, elapsed_time_ms);
4606 }
4607 }
4609 _claimed = false;
4610 }
4612 CMTask::CMTask(uint worker_id,
4613 ConcurrentMark* cm,
4614 size_t* marked_bytes,
4615 BitMap* card_bm,
4616 CMTaskQueue* task_queue,
4617 CMTaskQueueSet* task_queues)
4618 : _g1h(G1CollectedHeap::heap()),
4619 _worker_id(worker_id), _cm(cm),
4620 _claimed(false),
4621 _nextMarkBitMap(NULL), _hash_seed(17),
4622 _task_queue(task_queue),
4623 _task_queues(task_queues),
4624 _cm_oop_closure(NULL),
4625 _marked_bytes_array(marked_bytes),
4626 _card_bm(card_bm) {
4627 guarantee(task_queue != NULL, "invariant");
4628 guarantee(task_queues != NULL, "invariant");
4630 statsOnly( _clock_due_to_scanning = 0;
4631 _clock_due_to_marking = 0 );
4633 _marking_step_diffs_ms.add(0.5);
4634 }
4636 // These are formatting macros that are used below to ensure
4637 // consistent formatting. The *_H_* versions are used to format the
4638 // header for a particular value and they should be kept consistent
4639 // with the corresponding macro. Also note that most of the macros add
4640 // the necessary white space (as a prefix) which makes them a bit
4641 // easier to compose.
4643 // All the output lines are prefixed with this string to be able to
4644 // identify them easily in a large log file.
4645 #define G1PPRL_LINE_PREFIX "###"
4647 #define G1PPRL_ADDR_BASE_FORMAT " "PTR_FORMAT"-"PTR_FORMAT
4648 #ifdef _LP64
4649 #define G1PPRL_ADDR_BASE_H_FORMAT " %37s"
4650 #else // _LP64
4651 #define G1PPRL_ADDR_BASE_H_FORMAT " %21s"
4652 #endif // _LP64
4654 // For per-region info
4655 #define G1PPRL_TYPE_FORMAT " %-4s"
4656 #define G1PPRL_TYPE_H_FORMAT " %4s"
4657 #define G1PPRL_BYTE_FORMAT " "SIZE_FORMAT_W(9)
4658 #define G1PPRL_BYTE_H_FORMAT " %9s"
4659 #define G1PPRL_DOUBLE_FORMAT " %14.1f"
4660 #define G1PPRL_DOUBLE_H_FORMAT " %14s"
4662 // For summary info
4663 #define G1PPRL_SUM_ADDR_FORMAT(tag) " "tag":"G1PPRL_ADDR_BASE_FORMAT
4664 #define G1PPRL_SUM_BYTE_FORMAT(tag) " "tag": "SIZE_FORMAT
4665 #define G1PPRL_SUM_MB_FORMAT(tag) " "tag": %1.2f MB"
4666 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag)" / %1.2f %%"
4668 G1PrintRegionLivenessInfoClosure::
4669 G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name)
4670 : _out(out),
4671 _total_used_bytes(0), _total_capacity_bytes(0),
4672 _total_prev_live_bytes(0), _total_next_live_bytes(0),
4673 _hum_used_bytes(0), _hum_capacity_bytes(0),
4674 _hum_prev_live_bytes(0), _hum_next_live_bytes(0),
4675 _total_remset_bytes(0), _total_strong_code_roots_bytes(0) {
4676 G1CollectedHeap* g1h = G1CollectedHeap::heap();
4677 MemRegion g1_committed = g1h->g1_committed();
4678 MemRegion g1_reserved = g1h->g1_reserved();
4679 double now = os::elapsedTime();
4681 // Print the header of the output.
4682 _out->cr();
4683 _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
4684 _out->print_cr(G1PPRL_LINE_PREFIX" HEAP"
4685 G1PPRL_SUM_ADDR_FORMAT("committed")
4686 G1PPRL_SUM_ADDR_FORMAT("reserved")
4687 G1PPRL_SUM_BYTE_FORMAT("region-size"),
4688 p2i(g1_committed.start()), p2i(g1_committed.end()),
4689 p2i(g1_reserved.start()), p2i(g1_reserved.end()),
4690 HeapRegion::GrainBytes);
4691 _out->print_cr(G1PPRL_LINE_PREFIX);
4692 _out->print_cr(G1PPRL_LINE_PREFIX
4693 G1PPRL_TYPE_H_FORMAT
4694 G1PPRL_ADDR_BASE_H_FORMAT
4695 G1PPRL_BYTE_H_FORMAT
4696 G1PPRL_BYTE_H_FORMAT
4697 G1PPRL_BYTE_H_FORMAT
4698 G1PPRL_DOUBLE_H_FORMAT
4699 G1PPRL_BYTE_H_FORMAT
4700 G1PPRL_BYTE_H_FORMAT,
4701 "type", "address-range",
4702 "used", "prev-live", "next-live", "gc-eff",
4703 "remset", "code-roots");
4704 _out->print_cr(G1PPRL_LINE_PREFIX
4705 G1PPRL_TYPE_H_FORMAT
4706 G1PPRL_ADDR_BASE_H_FORMAT
4707 G1PPRL_BYTE_H_FORMAT
4708 G1PPRL_BYTE_H_FORMAT
4709 G1PPRL_BYTE_H_FORMAT
4710 G1PPRL_DOUBLE_H_FORMAT
4711 G1PPRL_BYTE_H_FORMAT
4712 G1PPRL_BYTE_H_FORMAT,
4713 "", "",
4714 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
4715 "(bytes)", "(bytes)");
4716 }
4718 // It takes as a parameter a reference to one of the _hum_* fields. It
4719 // deduces the corresponding value for a region in a humongous region
4720 // series (either the region size, or what's left if the _hum_* field
4721 // is < the region size), and updates the _hum_* field accordingly.
4722 size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) {
4723 size_t bytes = 0;
4724 // The > 0 check is to deal with the prev and next live bytes which
4725 // could be 0.
4726 if (*hum_bytes > 0) {
4727 bytes = MIN2(HeapRegion::GrainBytes, *hum_bytes);
4728 *hum_bytes -= bytes;
4729 }
4730 return bytes;
4731 }
4733 // It deduces the values for a region in a humongous region series
4734 // from the _hum_* fields and updates those accordingly. It assumes
4735 // that the _hum_* fields have already been set up from the "starts
4736 // humongous" region and we visit the regions in address order.
4737 void G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* used_bytes,
4738 size_t* capacity_bytes,
4739 size_t* prev_live_bytes,
4740 size_t* next_live_bytes) {
4741 assert(_hum_used_bytes > 0 && _hum_capacity_bytes > 0, "pre-condition");
4742 *used_bytes = get_hum_bytes(&_hum_used_bytes);
4743 *capacity_bytes = get_hum_bytes(&_hum_capacity_bytes);
4744 *prev_live_bytes = get_hum_bytes(&_hum_prev_live_bytes);
4745 *next_live_bytes = get_hum_bytes(&_hum_next_live_bytes);
4746 }
4748 bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
4749 const char* type = "";
4750 HeapWord* bottom = r->bottom();
4751 HeapWord* end = r->end();
4752 size_t capacity_bytes = r->capacity();
4753 size_t used_bytes = r->used();
4754 size_t prev_live_bytes = r->live_bytes();
4755 size_t next_live_bytes = r->next_live_bytes();
4756 double gc_eff = r->gc_efficiency();
4757 size_t remset_bytes = r->rem_set()->mem_size();
4758 size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();
4760 if (r->used() == 0) {
4761 type = "FREE";
4762 } else if (r->is_survivor()) {
4763 type = "SURV";
4764 } else if (r->is_young()) {
4765 type = "EDEN";
4766 } else if (r->startsHumongous()) {
4767 type = "HUMS";
4769 assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 &&
4770 _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0,
4771 "they should have been zeroed after the last time we used them");
4772 // Set up the _hum_* fields.
4773 _hum_capacity_bytes = capacity_bytes;
4774 _hum_used_bytes = used_bytes;
4775 _hum_prev_live_bytes = prev_live_bytes;
4776 _hum_next_live_bytes = next_live_bytes;
4777 get_hum_bytes(&used_bytes, &capacity_bytes,
4778 &prev_live_bytes, &next_live_bytes);
4779 end = bottom + HeapRegion::GrainWords;
4780 } else if (r->continuesHumongous()) {
4781 type = "HUMC";
4782 get_hum_bytes(&used_bytes, &capacity_bytes,
4783 &prev_live_bytes, &next_live_bytes);
4784 assert(end == bottom + HeapRegion::GrainWords, "invariant");
4785 } else {
4786 type = "OLD";
4787 }
4789 _total_used_bytes += used_bytes;
4790 _total_capacity_bytes += capacity_bytes;
4791 _total_prev_live_bytes += prev_live_bytes;
4792 _total_next_live_bytes += next_live_bytes;
4793 _total_remset_bytes += remset_bytes;
4794 _total_strong_code_roots_bytes += strong_code_roots_bytes;
4796 // Print a line for this particular region.
4797 _out->print_cr(G1PPRL_LINE_PREFIX
4798 G1PPRL_TYPE_FORMAT
4799 G1PPRL_ADDR_BASE_FORMAT
4800 G1PPRL_BYTE_FORMAT
4801 G1PPRL_BYTE_FORMAT
4802 G1PPRL_BYTE_FORMAT
4803 G1PPRL_DOUBLE_FORMAT
4804 G1PPRL_BYTE_FORMAT
4805 G1PPRL_BYTE_FORMAT,
4806 type, p2i(bottom), p2i(end),
4807 used_bytes, prev_live_bytes, next_live_bytes, gc_eff,
4808 remset_bytes, strong_code_roots_bytes);
4810 return false;
4811 }
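// For orientation, a per-region line printed above looks roughly like this
// (all values invented):
//
//   ### EDEN 0x00000000f0000000-0x00000000f0100000  1048576  0  1048576  0.0  4096  128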
4813 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
4814 // add static memory usages to remembered set sizes
4815 _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size();
4816 // Print the footer of the output.
4817 _out->print_cr(G1PPRL_LINE_PREFIX);
4818 _out->print_cr(G1PPRL_LINE_PREFIX
4819 " SUMMARY"
4820 G1PPRL_SUM_MB_FORMAT("capacity")
4821 G1PPRL_SUM_MB_PERC_FORMAT("used")
4822 G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
4823 G1PPRL_SUM_MB_PERC_FORMAT("next-live")
4824 G1PPRL_SUM_MB_FORMAT("remset")
4825 G1PPRL_SUM_MB_FORMAT("code-roots"),
4826 bytes_to_mb(_total_capacity_bytes),
4827 bytes_to_mb(_total_used_bytes),
4828 perc(_total_used_bytes, _total_capacity_bytes),
4829 bytes_to_mb(_total_prev_live_bytes),
4830 perc(_total_prev_live_bytes, _total_capacity_bytes),
4831 bytes_to_mb(_total_next_live_bytes),
4832 perc(_total_next_live_bytes, _total_capacity_bytes),
4833 bytes_to_mb(_total_remset_bytes),
4834 bytes_to_mb(_total_strong_code_roots_bytes));
4835 _out->cr();
4836 }