src/share/vm/gc_implementation/g1/concurrentMark.cpp

changeset:   7452:e2ed74d2e054
author:      poonam
date:        Tue, 19 Aug 2014 02:05:49 -0700
parent:      6693:8a140676873f
child:       6876:710a3c8b516e
child:       6904:0982ec23da03

8044406: JVM crash with JDK8 (build 1.8.0-b132) with G1 GC
Summary: Fill the last card that has been allocated into with a dummy object
Reviewed-by: tschatzl, mgerdin

ysr@777 1 /*
drchase@6680 2 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "classfile/symbolTable.hpp"
tonyp@2968 27 #include "gc_implementation/g1/concurrentMark.inline.hpp"
stefank@2314 28 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
stefank@2314 29 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
stefank@2314 30 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
tonyp@3114 31 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
brutisso@3710 32 #include "gc_implementation/g1/g1Log.hpp"
tonyp@2968 33 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
stefank@2314 34 #include "gc_implementation/g1/g1RemSet.hpp"
tonyp@3416 35 #include "gc_implementation/g1/heapRegion.inline.hpp"
stefank@2314 36 #include "gc_implementation/g1/heapRegionRemSet.hpp"
stefank@2314 37 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
kamg@2445 38 #include "gc_implementation/shared/vmGCOperations.hpp"
sla@5237 39 #include "gc_implementation/shared/gcTimer.hpp"
sla@5237 40 #include "gc_implementation/shared/gcTrace.hpp"
sla@5237 41 #include "gc_implementation/shared/gcTraceTime.hpp"
stefank@2314 42 #include "memory/genOopClosures.inline.hpp"
stefank@2314 43 #include "memory/referencePolicy.hpp"
stefank@2314 44 #include "memory/resourceArea.hpp"
stefank@2314 45 #include "oops/oop.inline.hpp"
stefank@2314 46 #include "runtime/handles.inline.hpp"
stefank@2314 47 #include "runtime/java.hpp"
zgu@3900 48 #include "services/memTracker.hpp"
ysr@777 49
brutisso@3455 50 // Concurrent marking bit map wrapper
ysr@777 51
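// A bit in the map covers 2^_shifter HeapWords; with the shifter set to
// log2(MinObjAlignment) (see the ConcurrentMark constructor below) one
// bit corresponds to each possible object start address.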
johnc@4333 52 CMBitMapRO::CMBitMapRO(int shifter) :
johnc@4333 53 _bm(),
ysr@777 54 _shifter(shifter) {
johnc@4333 55 _bmStartWord = 0;
johnc@4333 56 _bmWordSize = 0;
ysr@777 57 }
ysr@777 58
ysr@777 59 HeapWord* CMBitMapRO::getNextMarkedWordAddress(HeapWord* addr,
ysr@777 60 HeapWord* limit) const {
ysr@777 61 // First we must round addr *up* to a possible object boundary.
ysr@777 62 addr = (HeapWord*)align_size_up((intptr_t)addr,
ysr@777 63 HeapWordSize << _shifter);
ysr@777 64 size_t addrOffset = heapWordToOffset(addr);
tonyp@2973 65 if (limit == NULL) {
tonyp@2973 66 limit = _bmStartWord + _bmWordSize;
tonyp@2973 67 }
ysr@777 68 size_t limitOffset = heapWordToOffset(limit);
ysr@777 69 size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
ysr@777 70 HeapWord* nextAddr = offsetToHeapWord(nextOffset);
ysr@777 71 assert(nextAddr >= addr, "get_next_one postcondition");
ysr@777 72 assert(nextAddr == limit || isMarked(nextAddr),
ysr@777 73 "get_next_one postcondition");
ysr@777 74 return nextAddr;
ysr@777 75 }
ysr@777 76
ysr@777 77 HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(HeapWord* addr,
ysr@777 78 HeapWord* limit) const {
ysr@777 79 size_t addrOffset = heapWordToOffset(addr);
tonyp@2973 80 if (limit == NULL) {
tonyp@2973 81 limit = _bmStartWord + _bmWordSize;
tonyp@2973 82 }
ysr@777 83 size_t limitOffset = heapWordToOffset(limit);
ysr@777 84 size_t nextOffset = _bm.get_next_zero_offset(addrOffset, limitOffset);
ysr@777 85 HeapWord* nextAddr = offsetToHeapWord(nextOffset);
ysr@777 86 assert(nextAddr >= addr, "get_next_one postcondition");
ysr@777 87 assert(nextAddr == limit || !isMarked(nextAddr),
ysr@777 88 "get_next_one postcondition");
ysr@777 89 return nextAddr;
ysr@777 90 }
ysr@777 91
ysr@777 92 int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const {
ysr@777 93 assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
ysr@777 94 return (int) (diff >> _shifter);
ysr@777 95 }
ysr@777 96
ysr@777 97 #ifndef PRODUCT
johnc@4333 98 bool CMBitMapRO::covers(ReservedSpace heap_rs) const {
ysr@777 99 // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
brutisso@4061 100 assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
ysr@777 101 "size inconsistency");
johnc@4333 102 return _bmStartWord == (HeapWord*)(heap_rs.base()) &&
johnc@4333 103 _bmWordSize == heap_rs.size()>>LogHeapWordSize;
ysr@777 104 }
ysr@777 105 #endif
ysr@777 106
stefank@4904 107 void CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
stefank@4904 108 _bm.print_on_error(st, prefix);
stefank@4904 109 }
stefank@4904 110
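// Reserve and commit the backing store for a bitmap covering heap_rs:
// one bit per 2^_shifter heap words, i.e.
// (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1 bytes, all committed
// up front.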
johnc@4333 111 bool CMBitMap::allocate(ReservedSpace heap_rs) {
johnc@4333 112 _bmStartWord = (HeapWord*)(heap_rs.base());
johnc@4333 113 _bmWordSize = heap_rs.size()/HeapWordSize; // heap_rs.size() is in bytes
johnc@4333 114 ReservedSpace brs(ReservedSpace::allocation_align_size_up(
johnc@4333 115 (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
johnc@4333 116 if (!brs.is_reserved()) {
johnc@4333 117 warning("ConcurrentMark marking bit map allocation failure");
johnc@4333 118 return false;
johnc@4333 119 }
johnc@4333 120 MemTracker::record_virtual_memory_type((address)brs.base(), mtGC);
johnc@4333 121 // For now we'll just commit all of the bit map up front.
johnc@4333 122 // Later on we'll try to be more parsimonious with swap.
johnc@4333 123 if (!_virtual_space.initialize(brs, brs.size())) {
johnc@4333 124 warning("ConcurrentMark marking bit map backing store failure");
johnc@4333 125 return false;
johnc@4333 126 }
johnc@4333 127 assert(_virtual_space.committed_size() == brs.size(),
johnc@4333 128 "didn't reserve backing store for all of concurrent marking bit map?");
johnc@4333 129 _bm.set_map((uintptr_t*)_virtual_space.low());
johnc@4333 130 assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
johnc@4333 131 _bmWordSize, "inconsistency in bit map sizing");
johnc@4333 132 _bm.set_size(_bmWordSize >> _shifter);
johnc@4333 133 return true;
johnc@4333 134 }
johnc@4333 135
ysr@777 136 void CMBitMap::clearAll() {
ysr@777 137 _bm.clear();
ysr@777 138 return;
ysr@777 139 }
ysr@777 140
ysr@777 141 void CMBitMap::markRange(MemRegion mr) {
ysr@777 142   mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
ysr@777 143 assert(!mr.is_empty(), "unexpected empty region");
ysr@777 144 assert((offsetToHeapWord(heapWordToOffset(mr.end())) ==
ysr@777 145 ((HeapWord *) mr.end())),
ysr@777 146 "markRange memory region end is not card aligned");
ysr@777 147 // convert address range into offset range
ysr@777 148 _bm.at_put_range(heapWordToOffset(mr.start()),
ysr@777 149 heapWordToOffset(mr.end()), true);
ysr@777 150 }
ysr@777 151
ysr@777 152 void CMBitMap::clearRange(MemRegion mr) {
ysr@777 153   mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
ysr@777 154 assert(!mr.is_empty(), "unexpected empty region");
ysr@777 155 // convert address range into offset range
ysr@777 156 _bm.at_put_range(heapWordToOffset(mr.start()),
ysr@777 157 heapWordToOffset(mr.end()), false);
ysr@777 158 }
ysr@777 159
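// Find the first marked run in [addr, end_addr), clear its bits in the
// map, and return it as a MemRegion (empty if there is no marked word
// before end_addr).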
ysr@777 160 MemRegion CMBitMap::getAndClearMarkedRegion(HeapWord* addr,
ysr@777 161 HeapWord* end_addr) {
ysr@777 162 HeapWord* start = getNextMarkedWordAddress(addr);
ysr@777 163 start = MIN2(start, end_addr);
ysr@777 164 HeapWord* end = getNextUnmarkedWordAddress(start);
ysr@777 165 end = MIN2(end, end_addr);
ysr@777 166 assert(start <= end, "Consistency check");
ysr@777 167 MemRegion mr(start, end);
ysr@777 168 if (!mr.is_empty()) {
ysr@777 169 clearRange(mr);
ysr@777 170 }
ysr@777 171 return mr;
ysr@777 172 }
ysr@777 173
ysr@777 174 CMMarkStack::CMMarkStack(ConcurrentMark* cm) :
ysr@777 175 _base(NULL), _cm(cm)
ysr@777 176 #ifdef ASSERT
ysr@777 177 , _drain_in_progress(false)
ysr@777 178 , _drain_in_progress_yields(false)
ysr@777 179 #endif
ysr@777 180 {}
ysr@777 181
johnc@4333 182 bool CMMarkStack::allocate(size_t capacity) {
johnc@4333 183 // allocate a stack of the requisite depth
johnc@4333 184 ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop)));
johnc@4333 185 if (!rs.is_reserved()) {
johnc@4333 186 warning("ConcurrentMark MarkStack allocation failure");
johnc@4333 187 return false;
tonyp@2973 188 }
johnc@4333 189 MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
johnc@4333 190 if (!_virtual_space.initialize(rs, rs.size())) {
johnc@4333 191 warning("ConcurrentMark MarkStack backing store failure");
johnc@4333 192 // Release the virtual memory reserved for the marking stack
johnc@4333 193 rs.release();
johnc@4333 194 return false;
johnc@4333 195 }
johnc@4333 196 assert(_virtual_space.committed_size() == rs.size(),
johnc@4333 197 "Didn't reserve backing store for all of ConcurrentMark stack?");
johnc@4333 198 _base = (oop*) _virtual_space.low();
johnc@4333 199 setEmpty();
johnc@4333 200 _capacity = (jint) capacity;
tonyp@3416 201 _saved_index = -1;
johnc@4386 202 _should_expand = false;
ysr@777 203 NOT_PRODUCT(_max_depth = 0);
johnc@4333 204 return true;
johnc@4333 205 }
johnc@4333 206
johnc@4333 207 void CMMarkStack::expand() {
johnc@4333 208 // Called, during remark, if we've overflown the marking stack during marking.
johnc@4333 209   assert(isEmpty(), "stack should have been emptied while handling overflow");
johnc@4333 210 assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted");
johnc@4333 211 // Clear expansion flag
johnc@4333 212 _should_expand = false;
johnc@4333 213 if (_capacity == (jint) MarkStackSizeMax) {
johnc@4333 214 if (PrintGCDetails && Verbose) {
johnc@4333 215 gclog_or_tty->print_cr(" (benign) Can't expand marking stack capacity, at max size limit");
johnc@4333 216 }
johnc@4333 217 return;
johnc@4333 218 }
johnc@4333 219 // Double capacity if possible
johnc@4333 220 jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax);
johnc@4333 221   // Do not give up the existing stack until we have managed to
johnc@4333 222   // allocate the doubled capacity that we want.
johnc@4333 223 ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity *
johnc@4333 224 sizeof(oop)));
johnc@4333 225 if (rs.is_reserved()) {
johnc@4333 226 // Release the backing store associated with old stack
johnc@4333 227 _virtual_space.release();
johnc@4333 228 // Reinitialize virtual space for new stack
johnc@4333 229 if (!_virtual_space.initialize(rs, rs.size())) {
johnc@4333 230 fatal("Not enough swap for expanded marking stack capacity");
johnc@4333 231 }
johnc@4333 232 _base = (oop*)(_virtual_space.low());
johnc@4333 233 _index = 0;
johnc@4333 234 _capacity = new_capacity;
johnc@4333 235 } else {
johnc@4333 236 if (PrintGCDetails && Verbose) {
johnc@4333 237       // Failed to double the capacity; continue with the current stack.
johnc@4333 238 gclog_or_tty->print(" (benign) Failed to expand marking stack capacity from "
johnc@4333 239 SIZE_FORMAT"K to " SIZE_FORMAT"K",
johnc@4333 240 _capacity / K, new_capacity / K);
johnc@4333 241 }
johnc@4333 242 }
johnc@4333 243 }
johnc@4333 244
johnc@4333 245 void CMMarkStack::set_should_expand() {
johnc@4333 246   // If we're resetting the marking state because of a
johnc@4333 247 // marking stack overflow, record that we should, if
johnc@4333 248 // possible, expand the stack.
johnc@4333 249 _should_expand = _cm->has_overflown();
ysr@777 250 }
ysr@777 251
ysr@777 252 CMMarkStack::~CMMarkStack() {
tonyp@2973 253 if (_base != NULL) {
johnc@4333 254 _base = NULL;
johnc@4333 255 _virtual_space.release();
tonyp@2973 256 }
ysr@777 257 }
ysr@777 258
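// Lock-free push: claim a slot by advancing _index with a CAS and only
// then store the oop into the claimed slot. A failed CAS means another
// thread won the slot, so we retry; a full stack sets _overflow instead
// of blocking.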
ysr@777 259 void CMMarkStack::par_push(oop ptr) {
ysr@777 260 while (true) {
ysr@777 261 if (isFull()) {
ysr@777 262 _overflow = true;
ysr@777 263 return;
ysr@777 264 }
ysr@777 265 // Otherwise...
ysr@777 266 jint index = _index;
ysr@777 267 jint next_index = index+1;
ysr@777 268 jint res = Atomic::cmpxchg(next_index, &_index, index);
ysr@777 269 if (res == index) {
ysr@777 270 _base[index] = ptr;
ysr@777 271 // Note that we don't maintain this atomically. We could, but it
ysr@777 272 // doesn't seem necessary.
ysr@777 273 NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
ysr@777 274 return;
ysr@777 275 }
ysr@777 276 // Otherwise, we need to try again.
ysr@777 277 }
ysr@777 278 }
ysr@777 279
ysr@777 280 void CMMarkStack::par_adjoin_arr(oop* ptr_arr, int n) {
ysr@777 281 while (true) {
ysr@777 282 if (isFull()) {
ysr@777 283 _overflow = true;
ysr@777 284 return;
ysr@777 285 }
ysr@777 286 // Otherwise...
ysr@777 287 jint index = _index;
ysr@777 288 jint next_index = index + n;
ysr@777 289 if (next_index > _capacity) {
ysr@777 290 _overflow = true;
ysr@777 291 return;
ysr@777 292 }
ysr@777 293 jint res = Atomic::cmpxchg(next_index, &_index, index);
ysr@777 294 if (res == index) {
ysr@777 295 for (int i = 0; i < n; i++) {
johnc@4333 296 int ind = index + i;
ysr@777 297 assert(ind < _capacity, "By overflow test above.");
ysr@777 298 _base[ind] = ptr_arr[i];
ysr@777 299 }
ysr@777 300 NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
ysr@777 301 return;
ysr@777 302 }
ysr@777 303 // Otherwise, we need to try again.
ysr@777 304 }
ysr@777 305 }
ysr@777 306
ysr@777 307 void CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
ysr@777 308 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
ysr@777 309 jint start = _index;
ysr@777 310 jint next_index = start + n;
ysr@777 311 if (next_index > _capacity) {
ysr@777 312 _overflow = true;
ysr@777 313 return;
ysr@777 314 }
ysr@777 315 // Otherwise.
ysr@777 316 _index = next_index;
ysr@777 317 for (int i = 0; i < n; i++) {
ysr@777 318 int ind = start + i;
tonyp@1458 319 assert(ind < _capacity, "By overflow test above.");
ysr@777 320 _base[ind] = ptr_arr[i];
ysr@777 321 }
johnc@4333 322 NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
ysr@777 323 }
ysr@777 324
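// Pop up to max entries into ptr_arr under ParGCRareEvent_lock; sets *n
// to the number actually transferred and returns false only if the
// stack was empty.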
ysr@777 325 bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
ysr@777 326 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
ysr@777 327 jint index = _index;
ysr@777 328 if (index == 0) {
ysr@777 329 *n = 0;
ysr@777 330 return false;
ysr@777 331 } else {
ysr@777 332 int k = MIN2(max, index);
johnc@4333 333 jint new_ind = index - k;
ysr@777 334 for (int j = 0; j < k; j++) {
ysr@777 335 ptr_arr[j] = _base[new_ind + j];
ysr@777 336 }
ysr@777 337 _index = new_ind;
ysr@777 338 *n = k;
ysr@777 339 return true;
ysr@777 340 }
ysr@777 341 }
ysr@777 342
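// Repeatedly pop grey objects off the stack and apply cl to each of
// their fields. If yield_after is set, offer a yield check after every
// object and return false if the drain was interrupted by a yield.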
ysr@777 343 template<class OopClosureClass>
ysr@777 344 bool CMMarkStack::drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after) {
ysr@777 345 assert(!_drain_in_progress || !_drain_in_progress_yields || yield_after
ysr@777 346 || SafepointSynchronize::is_at_safepoint(),
ysr@777 347 "Drain recursion must be yield-safe.");
ysr@777 348 bool res = true;
ysr@777 349 debug_only(_drain_in_progress = true);
ysr@777 350 debug_only(_drain_in_progress_yields = yield_after);
ysr@777 351 while (!isEmpty()) {
ysr@777 352 oop newOop = pop();
ysr@777 353 assert(G1CollectedHeap::heap()->is_in_reserved(newOop), "Bad pop");
ysr@777 354 assert(newOop->is_oop(), "Expected an oop");
ysr@777 355 assert(bm == NULL || bm->isMarked((HeapWord*)newOop),
ysr@777 356 "only grey objects on this stack");
ysr@777 357 newOop->oop_iterate(cl);
ysr@777 358 if (yield_after && _cm->do_yield_check()) {
tonyp@2973 359 res = false;
tonyp@2973 360 break;
ysr@777 361 }
ysr@777 362 }
ysr@777 363 debug_only(_drain_in_progress = false);
ysr@777 364 return res;
ysr@777 365 }
ysr@777 366
tonyp@3416 367 void CMMarkStack::note_start_of_gc() {
tonyp@3416 368 assert(_saved_index == -1,
tonyp@3416 369 "note_start_of_gc()/end_of_gc() bracketed incorrectly");
tonyp@3416 370 _saved_index = _index;
tonyp@3416 371 }
tonyp@3416 372
tonyp@3416 373 void CMMarkStack::note_end_of_gc() {
tonyp@3416 374 // This is intentionally a guarantee, instead of an assert. If we
tonyp@3416 375 // accidentally add something to the mark stack during GC, it
tonyp@3416 376   // will be a correctness issue so it's better if we crash. We'll
tonyp@3416 377 // only check this once per GC anyway, so it won't be a performance
tonyp@3416 378 // issue in any way.
tonyp@3416 379 guarantee(_saved_index == _index,
tonyp@3416 380 err_msg("saved index: %d index: %d", _saved_index, _index));
tonyp@3416 381 _saved_index = -1;
tonyp@3416 382 }
tonyp@3416 383
ysr@777 384 void CMMarkStack::oops_do(OopClosure* f) {
tonyp@3416 385 assert(_saved_index == _index,
tonyp@3416 386 err_msg("saved index: %d index: %d", _saved_index, _index));
tonyp@3416 387 for (int i = 0; i < _index; i += 1) {
ysr@777 388 f->do_oop(&_base[i]);
ysr@777 389 }
ysr@777 390 }
ysr@777 391
ysr@777 392 bool ConcurrentMark::not_yet_marked(oop obj) const {
coleenp@4037 393 return _g1h->is_obj_ill(obj);
ysr@777 394 }
ysr@777 395
tonyp@3464 396 CMRootRegions::CMRootRegions() :
tonyp@3464 397 _young_list(NULL), _cm(NULL), _scan_in_progress(false),
tonyp@3464 398 _should_abort(false), _next_survivor(NULL) { }
tonyp@3464 399
tonyp@3464 400 void CMRootRegions::init(G1CollectedHeap* g1h, ConcurrentMark* cm) {
tonyp@3464 401 _young_list = g1h->young_list();
tonyp@3464 402 _cm = cm;
tonyp@3464 403 }
tonyp@3464 404
tonyp@3464 405 void CMRootRegions::prepare_for_scan() {
tonyp@3464 406 assert(!scan_in_progress(), "pre-condition");
tonyp@3464 407
tonyp@3464 408 // Currently, only survivors can be root regions.
tonyp@3464 409 assert(_next_survivor == NULL, "pre-condition");
tonyp@3464 410 _next_survivor = _young_list->first_survivor_region();
tonyp@3464 411 _scan_in_progress = (_next_survivor != NULL);
tonyp@3464 412 _should_abort = false;
tonyp@3464 413 }
tonyp@3464 414
tonyp@3464 415 HeapRegion* CMRootRegions::claim_next() {
tonyp@3464 416 if (_should_abort) {
tonyp@3464 417 // If someone has set the should_abort flag, we return NULL to
tonyp@3464 418 // force the caller to bail out of their loop.
tonyp@3464 419 return NULL;
tonyp@3464 420 }
tonyp@3464 421
tonyp@3464 422 // Currently, only survivors can be root regions.
tonyp@3464 423 HeapRegion* res = _next_survivor;
tonyp@3464 424 if (res != NULL) {
tonyp@3464 425 MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
tonyp@3464 426 // Read it again in case it changed while we were waiting for the lock.
tonyp@3464 427 res = _next_survivor;
tonyp@3464 428 if (res != NULL) {
tonyp@3464 429 if (res == _young_list->last_survivor_region()) {
tonyp@3464 430 // We just claimed the last survivor so store NULL to indicate
tonyp@3464 431 // that we're done.
tonyp@3464 432 _next_survivor = NULL;
tonyp@3464 433 } else {
tonyp@3464 434 _next_survivor = res->get_next_young_region();
tonyp@3464 435 }
tonyp@3464 436 } else {
tonyp@3464 437 // Someone else claimed the last survivor while we were trying
tonyp@3464 438 // to take the lock so nothing else to do.
tonyp@3464 439 }
tonyp@3464 440 }
tonyp@3464 441 assert(res == NULL || res->is_survivor(), "post-condition");
tonyp@3464 442
tonyp@3464 443 return res;
tonyp@3464 444 }
tonyp@3464 445
tonyp@3464 446 void CMRootRegions::scan_finished() {
tonyp@3464 447 assert(scan_in_progress(), "pre-condition");
tonyp@3464 448
tonyp@3464 449 // Currently, only survivors can be root regions.
tonyp@3464 450 if (!_should_abort) {
tonyp@3464 451 assert(_next_survivor == NULL, "we should have claimed all survivors");
tonyp@3464 452 }
tonyp@3464 453 _next_survivor = NULL;
tonyp@3464 454
tonyp@3464 455 {
tonyp@3464 456 MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
tonyp@3464 457 _scan_in_progress = false;
tonyp@3464 458 RootRegionScan_lock->notify_all();
tonyp@3464 459 }
tonyp@3464 460 }
tonyp@3464 461
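// Returns false immediately if no root region scan is in progress;
// otherwise blocks on RootRegionScan_lock until the scan has finished
// and returns true.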
tonyp@3464 462 bool CMRootRegions::wait_until_scan_finished() {
tonyp@3464 463 if (!scan_in_progress()) return false;
tonyp@3464 464
tonyp@3464 465 {
tonyp@3464 466 MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
tonyp@3464 467 while (scan_in_progress()) {
tonyp@3464 468 RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
tonyp@3464 469 }
tonyp@3464 470 }
tonyp@3464 471 return true;
tonyp@3464 472 }
tonyp@3464 473
ysr@777 474 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
ysr@777 475 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
ysr@777 476 #endif // _MSC_VER
ysr@777 477
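// Use roughly one concurrent marking thread per four parallel GC
// threads (rounded), with a minimum of one.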
jmasa@3357 478 uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
jmasa@3357 479 return MAX2((n_par_threads + 2) / 4, 1U);
jmasa@3294 480 }
jmasa@3294 481
johnc@4333 482 ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) :
johnc@4333 483 _g1h(g1h),
tschatzl@5697 484 _markBitMap1(log2_intptr(MinObjAlignment)),
tschatzl@5697 485 _markBitMap2(log2_intptr(MinObjAlignment)),
ysr@777 486 _parallel_marking_threads(0),
jmasa@3294 487 _max_parallel_marking_threads(0),
ysr@777 488 _sleep_factor(0.0),
ysr@777 489 _marking_task_overhead(1.0),
ysr@777 490 _cleanup_sleep_factor(0.0),
ysr@777 491 _cleanup_task_overhead(1.0),
tonyp@2472 492 _cleanup_list("Cleanup List"),
johnc@4333 493 _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/),
johnc@4333 494 _card_bm((heap_rs.size() + CardTableModRefBS::card_size - 1) >>
johnc@4333 495 CardTableModRefBS::card_shift,
johnc@4333 496 false /* in_resource_area*/),
johnc@3463 497
ysr@777 498 _prevMarkBitMap(&_markBitMap1),
ysr@777 499 _nextMarkBitMap(&_markBitMap2),
ysr@777 500
ysr@777 501 _markStack(this),
ysr@777 502 // _finger set in set_non_marking_state
ysr@777 503
johnc@4173 504 _max_worker_id(MAX2((uint)ParallelGCThreads, 1U)),
ysr@777 505 // _active_tasks set in set_non_marking_state
ysr@777 506 // _tasks set inside the constructor
johnc@4173 507 _task_queues(new CMTaskQueueSet((int) _max_worker_id)),
johnc@4173 508 _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),
ysr@777 509
ysr@777 510 _has_overflown(false),
ysr@777 511 _concurrent(false),
tonyp@1054 512 _has_aborted(false),
tonyp@1054 513 _restart_for_overflow(false),
tonyp@1054 514 _concurrent_marking_in_progress(false),
ysr@777 515
ysr@777 516 // _verbose_level set below
ysr@777 517
ysr@777 518 _init_times(),
ysr@777 519 _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
ysr@777 520 _cleanup_times(),
ysr@777 521 _total_counting_time(0.0),
ysr@777 522 _total_rs_scrub_time(0.0),
johnc@3463 523
johnc@3463 524 _parallel_workers(NULL),
johnc@3463 525
johnc@3463 526 _count_card_bitmaps(NULL),
johnc@4333 527 _count_marked_bytes(NULL),
johnc@4333 528 _completed_initialization(false) {
tonyp@2973 529 CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
tonyp@2973 530 if (verbose_level < no_verbose) {
ysr@777 531 verbose_level = no_verbose;
tonyp@2973 532 }
tonyp@2973 533 if (verbose_level > high_verbose) {
ysr@777 534 verbose_level = high_verbose;
tonyp@2973 535 }
ysr@777 536 _verbose_level = verbose_level;
ysr@777 537
tonyp@2973 538 if (verbose_low()) {
ysr@777 539 gclog_or_tty->print_cr("[global] init, heap start = "PTR_FORMAT", "
drchase@6680 540 "heap end = " INTPTR_FORMAT, p2i(_heap_start), p2i(_heap_end));
tonyp@2973 541 }
ysr@777 542
johnc@4333 543 if (!_markBitMap1.allocate(heap_rs)) {
johnc@4333 544 warning("Failed to allocate first CM bit map");
johnc@4333 545 return;
johnc@4333 546 }
johnc@4333 547 if (!_markBitMap2.allocate(heap_rs)) {
johnc@4333 548 warning("Failed to allocate second CM bit map");
johnc@4333 549 return;
johnc@4333 550 }
ysr@777 551
ysr@777 552 // Create & start a ConcurrentMark thread.
ysr@1280 553 _cmThread = new ConcurrentMarkThread(this);
ysr@1280 554 assert(cmThread() != NULL, "CM Thread should have been created");
ysr@1280 555 assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
ehelin@6168 556 if (_cmThread->osthread() == NULL) {
ehelin@6168 557 vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
ehelin@6168 558 }
ysr@1280 559
ysr@777 560 assert(CGC_lock != NULL, "Where's the CGC_lock?");
johnc@4333 561 assert(_markBitMap1.covers(heap_rs), "_markBitMap1 inconsistency");
johnc@4333 562 assert(_markBitMap2.covers(heap_rs), "_markBitMap2 inconsistency");
ysr@777 563
ysr@777 564 SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
tonyp@1717 565 satb_qs.set_buffer_size(G1SATBBufferSize);
ysr@777 566
tonyp@3464 567 _root_regions.init(_g1h, this);
tonyp@3464 568
jmasa@1719 569 if (ConcGCThreads > ParallelGCThreads) {
drchase@6680 570 warning("Can't have more ConcGCThreads (" UINTX_FORMAT ") "
drchase@6680 571 "than ParallelGCThreads (" UINTX_FORMAT ").",
johnc@4333 572 ConcGCThreads, ParallelGCThreads);
johnc@4333 573 return;
ysr@777 574 }
ysr@777 575 if (ParallelGCThreads == 0) {
ysr@777 576 // if we are not running with any parallel GC threads we will not
ysr@777 577 // spawn any marking threads either
jmasa@3294 578 _parallel_marking_threads = 0;
jmasa@3294 579 _max_parallel_marking_threads = 0;
jmasa@3294 580 _sleep_factor = 0.0;
jmasa@3294 581 _marking_task_overhead = 1.0;
ysr@777 582 } else {
johnc@4547 583 if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) {
johnc@4547 584 // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent
ysr@777 585 // if both are set
ysr@777 586 _sleep_factor = 0.0;
ysr@777 587 _marking_task_overhead = 1.0;
johnc@1186 588 } else if (G1MarkingOverheadPercent > 0) {
johnc@4547 589 // We will calculate the number of parallel marking threads based
johnc@4547 590 // on a target overhead with respect to the soft real-time goal
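johnc@4547 590       // For example, a 10% overhead target (G1MarkingOverheadPercent=10)
johnc@4547 590       // with MaxGCPauseMillis=200 and GCPauseIntervalMillis=1000 gives an
johnc@4547 590       // overall_cm_overhead of 200 * 0.1 / 1000 = 0.02 CPUs; on an 8-way
johnc@4547 590       // machine (cpu_ratio = 0.125) that means ceil(0.02 / 0.125) = 1
johnc@4547 590       // marking thread with a task overhead of 0.02 / 1 * 8 = 0.16, i.e.
johnc@4547 590       // a sleep factor of (1 - 0.16) / 0.16 = 5.25.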
johnc@1186 591 double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
ysr@777 592 double overall_cm_overhead =
johnc@1186 593 (double) MaxGCPauseMillis * marking_overhead /
johnc@1186 594 (double) GCPauseIntervalMillis;
ysr@777 595 double cpu_ratio = 1.0 / (double) os::processor_count();
ysr@777 596 double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
ysr@777 597 double marking_task_overhead =
ysr@777 598 overall_cm_overhead / marking_thread_num *
ysr@777 599 (double) os::processor_count();
ysr@777 600 double sleep_factor =
ysr@777 601 (1.0 - marking_task_overhead) / marking_task_overhead;
ysr@777 602
johnc@4547 603 FLAG_SET_ERGO(uintx, ConcGCThreads, (uint) marking_thread_num);
ysr@777 604 _sleep_factor = sleep_factor;
ysr@777 605 _marking_task_overhead = marking_task_overhead;
ysr@777 606 } else {
johnc@4547 607 // Calculate the number of parallel marking threads by scaling
johnc@4547 608 // the number of parallel GC threads.
johnc@4547 609 uint marking_thread_num = scale_parallel_threads((uint) ParallelGCThreads);
johnc@4547 610 FLAG_SET_ERGO(uintx, ConcGCThreads, marking_thread_num);
ysr@777 611 _sleep_factor = 0.0;
ysr@777 612 _marking_task_overhead = 1.0;
ysr@777 613 }
ysr@777 614
johnc@4547 615 assert(ConcGCThreads > 0, "Should have been set");
johnc@4547 616 _parallel_marking_threads = (uint) ConcGCThreads;
johnc@4547 617 _max_parallel_marking_threads = _parallel_marking_threads;
johnc@4547 618
tonyp@2973 619 if (parallel_marking_threads() > 1) {
ysr@777 620 _cleanup_task_overhead = 1.0;
tonyp@2973 621 } else {
ysr@777 622 _cleanup_task_overhead = marking_task_overhead();
tonyp@2973 623 }
ysr@777 624 _cleanup_sleep_factor =
ysr@777 625 (1.0 - cleanup_task_overhead()) / cleanup_task_overhead();
ysr@777 626
ysr@777 627 #if 0
ysr@777 628 gclog_or_tty->print_cr("Marking Threads %d", parallel_marking_threads());
ysr@777 629 gclog_or_tty->print_cr("CM Marking Task Overhead %1.4lf", marking_task_overhead());
ysr@777 630 gclog_or_tty->print_cr("CM Sleep Factor %1.4lf", sleep_factor());
ysr@777 631 gclog_or_tty->print_cr("CL Marking Task Overhead %1.4lf", cleanup_task_overhead());
ysr@777 632 gclog_or_tty->print_cr("CL Sleep Factor %1.4lf", cleanup_sleep_factor());
ysr@777 633 #endif
ysr@777 634
tonyp@1458 635 guarantee(parallel_marking_threads() > 0, "peace of mind");
jmasa@2188 636 _parallel_workers = new FlexibleWorkGang("G1 Parallel Marking Threads",
jmasa@3357 637 _max_parallel_marking_threads, false, true);
jmasa@2188 638 if (_parallel_workers == NULL) {
ysr@777 639 vm_exit_during_initialization("Failed necessary allocation.");
jmasa@2188 640 } else {
jmasa@2188 641 _parallel_workers->initialize_workers();
jmasa@2188 642 }
ysr@777 643 }
ysr@777 644
johnc@4333 645 if (FLAG_IS_DEFAULT(MarkStackSize)) {
johnc@4333 646 uintx mark_stack_size =
johnc@4333 647 MIN2(MarkStackSizeMax,
johnc@4333 648 MAX2(MarkStackSize, (uintx) (parallel_marking_threads() * TASKQUEUE_SIZE)));
johnc@4333 649 // Verify that the calculated value for MarkStackSize is in range.
johnc@4333 650 // It would be nice to use the private utility routine from Arguments.
johnc@4333 651 if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
johnc@4333 652 warning("Invalid value calculated for MarkStackSize (" UINTX_FORMAT "): "
johnc@4333 653 "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
drchase@6680 654 mark_stack_size, (uintx) 1, MarkStackSizeMax);
johnc@4333 655 return;
johnc@4333 656 }
johnc@4333 657 FLAG_SET_ERGO(uintx, MarkStackSize, mark_stack_size);
johnc@4333 658 } else {
johnc@4333 659 // Verify MarkStackSize is in range.
johnc@4333 660 if (FLAG_IS_CMDLINE(MarkStackSize)) {
johnc@4333 661 if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
johnc@4333 662 if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
johnc@4333 663 warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT "): "
johnc@4333 664 "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
drchase@6680 665 MarkStackSize, (uintx) 1, MarkStackSizeMax);
johnc@4333 666 return;
johnc@4333 667 }
johnc@4333 668 } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
johnc@4333 669 if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
johnc@4333 670 warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT ")"
johnc@4333 671 " or for MarkStackSizeMax (" UINTX_FORMAT ")",
johnc@4333 672 MarkStackSize, MarkStackSizeMax);
johnc@4333 673 return;
johnc@4333 674 }
johnc@4333 675 }
johnc@4333 676 }
johnc@4333 677 }
johnc@4333 678
johnc@4333 679 if (!_markStack.allocate(MarkStackSize)) {
johnc@4333 680 warning("Failed to allocate CM marking stack");
johnc@4333 681 return;
johnc@4333 682 }
johnc@4333 683
johnc@4333 684 _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_worker_id, mtGC);
johnc@4333 685 _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);
johnc@4333 686
johnc@4333 687 _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap, _max_worker_id, mtGC);
johnc@4333 688 _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_worker_id, mtGC);
johnc@4333 689
johnc@4333 690 BitMap::idx_t card_bm_size = _card_bm.size();
johnc@4333 691
johnc@4333 692 // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
johnc@4333 693 _active_tasks = _max_worker_id;
johnc@4333 694
johnc@4333 695 size_t max_regions = (size_t) _g1h->max_regions();
johnc@4333 696 for (uint i = 0; i < _max_worker_id; ++i) {
johnc@4333 697 CMTaskQueue* task_queue = new CMTaskQueue();
johnc@4333 698 task_queue->initialize();
johnc@4333 699 _task_queues->register_queue(i, task_queue);
johnc@4333 700
johnc@4333 701 _count_card_bitmaps[i] = BitMap(card_bm_size, false);
johnc@4333 702 _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC);
johnc@4333 703
johnc@4333 704 _tasks[i] = new CMTask(i, this,
johnc@4333 705 _count_marked_bytes[i],
johnc@4333 706 &_count_card_bitmaps[i],
johnc@4333 707 task_queue, _task_queues);
johnc@4333 708
johnc@4333 709 _accum_task_vtime[i] = 0.0;
johnc@4333 710 }
johnc@4333 711
johnc@4333 712 // Calculate the card number for the bottom of the heap. Used
johnc@4333 713 // in biasing indexes into the accounting card bitmaps.
johnc@4333 714 _heap_bottom_card_num =
johnc@4333 715 intptr_t(uintptr_t(_g1h->reserved_region().start()) >>
johnc@4333 716 CardTableModRefBS::card_shift);
johnc@4333 717
johnc@4333 718 // Clear all the liveness counting data
johnc@4333 719 clear_all_count_data();
johnc@4333 720
ysr@777 721 // so that the call below can read a sensible value
johnc@4333 722 _heap_start = (HeapWord*) heap_rs.base();
ysr@777 723 set_non_marking_state();
johnc@4333 724 _completed_initialization = true;
ysr@777 725 }
ysr@777 726
ysr@777 727 void ConcurrentMark::update_g1_committed(bool force) {
ysr@777 728 // If concurrent marking is not in progress, then we do not need to
tonyp@3691 729 // update _heap_end.
tonyp@2973 730 if (!concurrent_marking_in_progress() && !force) return;
ysr@777 731
ysr@777 732 MemRegion committed = _g1h->g1_committed();
tonyp@1458 733 assert(committed.start() == _heap_start, "start shouldn't change");
ysr@777 734 HeapWord* new_end = committed.end();
ysr@777 735 if (new_end > _heap_end) {
ysr@777 736 // The heap has been expanded.
ysr@777 737
ysr@777 738 _heap_end = new_end;
ysr@777 739 }
ysr@777 740 // Notice that the heap can also shrink. However, this only happens
ysr@777 741 // during a Full GC (at least currently) and the entire marking
ysr@777 742 // phase will bail out and the task will not be restarted. So, let's
ysr@777 743 // do nothing.
ysr@777 744 }
ysr@777 745
ysr@777 746 void ConcurrentMark::reset() {
ysr@777 747   // Starting values for these two. This should be called in a STW
ysr@777 748   // phase. CM will be notified of any future g1_committed expansions;
ysr@777 749   // these will happen at the end of evacuation pauses, when tasks are
ysr@777 750   // inactive.
ysr@777 751 MemRegion committed = _g1h->g1_committed();
ysr@777 752 _heap_start = committed.start();
ysr@777 753 _heap_end = committed.end();
ysr@777 754
tonyp@1458 755 // Separated the asserts so that we know which one fires.
tonyp@1458 756 assert(_heap_start != NULL, "heap bounds should look ok");
tonyp@1458 757 assert(_heap_end != NULL, "heap bounds should look ok");
tonyp@1458 758 assert(_heap_start < _heap_end, "heap bounds should look ok");
ysr@777 759
johnc@4386 760 // Reset all the marking data structures and any necessary flags
johnc@4386 761 reset_marking_state();
ysr@777 762
tonyp@2973 763 if (verbose_low()) {
ysr@777 764 gclog_or_tty->print_cr("[global] resetting");
tonyp@2973 765 }
ysr@777 766
ysr@777 767 // We do reset all of them, since different phases will use
ysr@777 768 // different number of active threads. So, it's easiest to have all
ysr@777 769 // of them ready.
johnc@4173 770 for (uint i = 0; i < _max_worker_id; ++i) {
ysr@777 771 _tasks[i]->reset(_nextMarkBitMap);
johnc@2190 772 }
ysr@777 773
ysr@777 774 // we need this to make sure that the flag is on during the evac
ysr@777 775 // pause with initial mark piggy-backed
ysr@777 776 set_concurrent_marking_in_progress();
ysr@777 777 }
ysr@777 778
johnc@4386 779
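// Reset the global marking state: remember whether the stack should be
// expanded (set_should_expand() checks the overflow flag), empty the
// global stack and all task queues, and move the global finger back to
// the bottom of the heap.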
johnc@4386 780 void ConcurrentMark::reset_marking_state(bool clear_overflow) {
johnc@4386 781 _markStack.set_should_expand();
johnc@4386 782 _markStack.setEmpty(); // Also clears the _markStack overflow flag
johnc@4386 783 if (clear_overflow) {
johnc@4386 784 clear_has_overflown();
johnc@4386 785 } else {
johnc@4386 786 assert(has_overflown(), "pre-condition");
johnc@4386 787 }
johnc@4386 788 _finger = _heap_start;
johnc@4386 789
johnc@4386 790 for (uint i = 0; i < _max_worker_id; ++i) {
johnc@4386 791 CMTaskQueue* queue = _task_queues->queue(i);
johnc@4386 792 queue->set_empty();
johnc@4386 793 }
johnc@4386 794 }
johnc@4386 795
johnc@4788 796 void ConcurrentMark::set_concurrency(uint active_tasks) {
johnc@4173 797 assert(active_tasks <= _max_worker_id, "we should not have more");
ysr@777 798
ysr@777 799 _active_tasks = active_tasks;
ysr@777 800 // Need to update the three data structures below according to the
ysr@777 801 // number of active threads for this phase.
ysr@777 802 _terminator = ParallelTaskTerminator((int) active_tasks, _task_queues);
ysr@777 803 _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
ysr@777 804 _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
johnc@4788 805 }
johnc@4788 806
johnc@4788 807 void ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
johnc@4788 808 set_concurrency(active_tasks);
ysr@777 809
ysr@777 810 _concurrent = concurrent;
ysr@777 811 // We propagate this to all tasks, not just the active ones.
johnc@4173 812 for (uint i = 0; i < _max_worker_id; ++i)
ysr@777 813 _tasks[i]->set_concurrent(concurrent);
ysr@777 814
ysr@777 815 if (concurrent) {
ysr@777 816 set_concurrent_marking_in_progress();
ysr@777 817 } else {
ysr@777 818 // We currently assume that the concurrent flag has been set to
ysr@777 819 // false before we start remark. At this point we should also be
ysr@777 820 // in a STW phase.
tonyp@1458 821 assert(!concurrent_marking_in_progress(), "invariant");
pliden@6693 822 assert(out_of_regions(),
johnc@4788 823 err_msg("only way to get here: _finger: "PTR_FORMAT", _heap_end: "PTR_FORMAT,
drchase@6680 824 p2i(_finger), p2i(_heap_end)));
ysr@777 825 update_g1_committed(true);
ysr@777 826 }
ysr@777 827 }
ysr@777 828
ysr@777 829 void ConcurrentMark::set_non_marking_state() {
ysr@777 830 // We set the global marking state to some default values when we're
ysr@777 831 // not doing marking.
johnc@4386 832 reset_marking_state();
ysr@777 833 _active_tasks = 0;
ysr@777 834 clear_concurrent_marking_in_progress();
ysr@777 835 }
ysr@777 836
ysr@777 837 ConcurrentMark::~ConcurrentMark() {
stefank@3364 838 // The ConcurrentMark instance is never freed.
stefank@3364 839 ShouldNotReachHere();
ysr@777 840 }
ysr@777 841
ysr@777 842 void ConcurrentMark::clearNextBitmap() {
tonyp@1794 843 G1CollectedHeap* g1h = G1CollectedHeap::heap();
tonyp@1794 844 G1CollectorPolicy* g1p = g1h->g1_policy();
tonyp@1794 845
tonyp@1794 846 // Make sure that the concurrent mark thread looks to still be in
tonyp@1794 847 // the current cycle.
tonyp@1794 848 guarantee(cmThread()->during_cycle(), "invariant");
tonyp@1794 849
tonyp@1794 850 // We are finishing up the current cycle by clearing the next
tonyp@1794 851 // marking bitmap and getting it ready for the next cycle. During
tonyp@1794 852 // this time no other cycle can start. So, let's make sure that this
tonyp@1794 853 // is the case.
tonyp@1794 854 guarantee(!g1h->mark_in_progress(), "invariant");
tonyp@1794 855
tonyp@1794 856 // clear the mark bitmap (no grey objects to start with).
tonyp@1794 857 // We need to do this in chunks and offer to yield in between
tonyp@1794 858 // each chunk.
tonyp@1794 859 HeapWord* start = _nextMarkBitMap->startWord();
tonyp@1794 860 HeapWord* end = _nextMarkBitMap->endWord();
tonyp@1794 861 HeapWord* cur = start;
tonyp@1794 862 size_t chunkSize = M;
tonyp@1794 863 while (cur < end) {
tonyp@1794 864 HeapWord* next = cur + chunkSize;
tonyp@2973 865 if (next > end) {
tonyp@1794 866 next = end;
tonyp@2973 867 }
tonyp@1794 868     MemRegion mr(cur, next);
tonyp@1794 869 _nextMarkBitMap->clearRange(mr);
tonyp@1794 870 cur = next;
tonyp@1794 871 do_yield_check();
tonyp@1794 872
tonyp@1794 873 // Repeat the asserts from above. We'll do them as asserts here to
tonyp@1794 874 // minimize their overhead on the product. However, we'll have
tonyp@1794 875 // them as guarantees at the beginning / end of the bitmap
tonyp@1794 876 // clearing to get some checking in the product.
tonyp@1794 877 assert(cmThread()->during_cycle(), "invariant");
tonyp@1794 878 assert(!g1h->mark_in_progress(), "invariant");
tonyp@1794 879 }
tonyp@1794 880
johnc@3463 881 // Clear the liveness counting data
johnc@3463 882 clear_all_count_data();
johnc@3463 883
tonyp@1794 884 // Repeat the asserts from above.
tonyp@1794 885 guarantee(cmThread()->during_cycle(), "invariant");
tonyp@1794 886 guarantee(!g1h->mark_in_progress(), "invariant");
ysr@777 887 }
ysr@777 888
ysr@777 889 class NoteStartOfMarkHRClosure: public HeapRegionClosure {
ysr@777 890 public:
ysr@777 891 bool doHeapRegion(HeapRegion* r) {
ysr@777 892 if (!r->continuesHumongous()) {
tonyp@3416 893 r->note_start_of_marking();
ysr@777 894 }
ysr@777 895 return false;
ysr@777 896 }
ysr@777 897 };
ysr@777 898
ysr@777 899 void ConcurrentMark::checkpointRootsInitialPre() {
ysr@777 900 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@777 901 G1CollectorPolicy* g1p = g1h->g1_policy();
ysr@777 902
ysr@777 903 _has_aborted = false;
ysr@777 904
jcoomes@1902 905 #ifndef PRODUCT
tonyp@1479 906 if (G1PrintReachableAtInitialMark) {
tonyp@1823 907 print_reachable("at-cycle-start",
johnc@2969 908 VerifyOption_G1UsePrevMarking, true /* all */);
tonyp@1479 909 }
jcoomes@1902 910 #endif
ysr@777 911
ysr@777 912 // Initialise marking structures. This has to be done in a STW phase.
ysr@777 913 reset();
tonyp@3416 914
tonyp@3416 915 // For each region note start of marking.
tonyp@3416 916 NoteStartOfMarkHRClosure startcl;
tonyp@3416 917 g1h->heap_region_iterate(&startcl);
ysr@777 918 }
ysr@777 919
ysr@777 920
ysr@777 921 void ConcurrentMark::checkpointRootsInitialPost() {
ysr@777 922 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@777 923
tonyp@2848 924   // If we force an overflow during remark, the remark operation will
tonyp@2848 925   // actually abort and we'll restart concurrent marking. If we always
tonyp@2848 926   // force an overflow during remark we'll never actually complete the
tonyp@2848 927   // marking phase. So, we initialize this here, at the start of the
tonyp@2848 928   // cycle, so that the remaining overflow number will decrease at
tonyp@2848 929   // every remark and we'll eventually not need to force one.
tonyp@2848 930 force_overflow_stw()->init();
tonyp@2848 931
johnc@3175 932 // Start Concurrent Marking weak-reference discovery.
johnc@3175 933 ReferenceProcessor* rp = g1h->ref_processor_cm();
johnc@3175 934 // enable ("weak") refs discovery
johnc@3175 935 rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
ysr@892 936 rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
ysr@777 937
ysr@777 938 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
tonyp@1752 939   // This is the start of the marking cycle; we expect all
tonyp@1752 940   // threads to have SATB queues with active set to false.
tonyp@1752 941 satb_mq_set.set_active_all_threads(true, /* new active value */
tonyp@1752 942 false /* expected_active */);
ysr@777 943
tonyp@3464 944 _root_regions.prepare_for_scan();
tonyp@3464 945
ysr@777 946 // update_g1_committed() will be called at the end of an evac pause
ysr@777 947 // when marking is on. So, it's also called at the end of the
ysr@777 948 // initial-mark pause to update the heap end, if the heap expands
ysr@777 949 // during it. No need to call it here.
ysr@777 950 }
ysr@777 951
ysr@777 952 /*
tonyp@2848 953 * Notice that in the next two methods, we actually leave the STS
tonyp@2848 954 * during the barrier sync and join it immediately afterwards. If we
tonyp@2848 955 * do not do this, the following deadlock can occur: one thread could
tonyp@2848 956 * be in the barrier sync code, waiting for the other thread to also
tonyp@2848 957 * sync up, whereas another one could be trying to yield, while also
tonyp@2848 958 * waiting for the other threads to sync up too.
tonyp@2848 959 *
tonyp@2848 960 * Note, however, that this code is also used during remark and in
tonyp@2848 961 * this case we should not attempt to leave / enter the STS, otherwise
tonyp@2848 962  * we'll either hit an assert (debug / fastdebug) or deadlock
tonyp@2848 963 * (product). So we should only leave / enter the STS if we are
tonyp@2848 964 * operating concurrently.
tonyp@2848 965 *
tonyp@2848 966  * Because the thread that does the sync barrier has left the STS, it
tonyp@2848 967  * is possible that a Full GC or an evacuation pause could occur while
tonyp@2848 968  * it is suspended. This is actually safe, since entering the sync
tonyp@2848 969  * barrier is one of the last things do_marking_step() does, and it
tonyp@2848 970  * doesn't manipulate any data structures afterwards.
tonyp@2848 971 */
ysr@777 972
johnc@4173 973 void ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
tonyp@2973 974 if (verbose_low()) {
johnc@4173 975 gclog_or_tty->print_cr("[%u] entering first barrier", worker_id);
tonyp@2973 976 }
ysr@777 977
tonyp@2848 978 if (concurrent()) {
tonyp@2848 979 ConcurrentGCThread::stsLeave();
tonyp@2848 980 }
pliden@6692 981
pliden@6692 982 bool barrier_aborted = !_first_overflow_barrier_sync.enter();
pliden@6692 983
tonyp@2848 984 if (concurrent()) {
tonyp@2848 985 ConcurrentGCThread::stsJoin();
tonyp@2848 986 }
ysr@777 987 // at this point everyone should have synced up and not be doing any
ysr@777 988 // more work
ysr@777 989
tonyp@2973 990 if (verbose_low()) {
pliden@6692 991 if (barrier_aborted) {
pliden@6692 992 gclog_or_tty->print_cr("[%u] aborted first barrier", worker_id);
pliden@6692 993 } else {
pliden@6692 994 gclog_or_tty->print_cr("[%u] leaving first barrier", worker_id);
pliden@6692 995 }
pliden@6692 996 }
pliden@6692 997
pliden@6692 998 if (barrier_aborted) {
pliden@6692 999 // If the barrier aborted we ignore the overflow condition and
pliden@6692 1000 // just abort the whole marking phase as quickly as possible.
pliden@6692 1001 return;
tonyp@2973 1002 }
ysr@777 1003
johnc@4788 1004 // If we're executing the concurrent phase of marking, reset the marking
johnc@4788 1005 // state; otherwise the marking state is reset after reference processing,
johnc@4788 1006 // during the remark pause.
johnc@4788 1007 // If we reset here as a result of an overflow during the remark we will
johnc@4788 1008 // see assertion failures from any subsequent set_concurrency_and_phase()
johnc@4788 1009 // calls.
johnc@4788 1010 if (concurrent()) {
johnc@4788 1011     // let the task associated with worker 0 do this
johnc@4788 1012 if (worker_id == 0) {
johnc@4788 1013 // task 0 is responsible for clearing the global data structures
johnc@4788 1014 // We should be here because of an overflow. During STW we should
johnc@4788 1015 // not clear the overflow flag since we rely on it being true when
johnc@4788 1016       // we exit this method to abort the pause and restart concurrent
johnc@4788 1017 // marking.
johnc@4788 1018 reset_marking_state(true /* clear_overflow */);
johnc@4788 1019 force_overflow()->update();
johnc@4788 1020
johnc@4788 1021 if (G1Log::fine()) {
johnc@4788 1022 gclog_or_tty->date_stamp(PrintGCDateStamps);
johnc@4788 1023 gclog_or_tty->stamp(PrintGCTimeStamps);
johnc@4788 1024 gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
johnc@4788 1025 }
ysr@777 1026 }
ysr@777 1027 }
ysr@777 1028
ysr@777 1029   // after this, each task should reset its own data structures and
ysr@777 1030   // then go into the second barrier
ysr@777 1031 }
ysr@777 1032
johnc@4173 1033 void ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
tonyp@2973 1034 if (verbose_low()) {
johnc@4173 1035 gclog_or_tty->print_cr("[%u] entering second barrier", worker_id);
tonyp@2973 1036 }
ysr@777 1037
tonyp@2848 1038 if (concurrent()) {
tonyp@2848 1039 ConcurrentGCThread::stsLeave();
tonyp@2848 1040 }
pliden@6692 1041
pliden@6692 1042 bool barrier_aborted = !_second_overflow_barrier_sync.enter();
pliden@6692 1043
tonyp@2848 1044 if (concurrent()) {
tonyp@2848 1045 ConcurrentGCThread::stsJoin();
tonyp@2848 1046 }
johnc@4788 1047 // at this point everything should be re-initialized and ready to go
ysr@777 1048
tonyp@2973 1049 if (verbose_low()) {
pliden@6692 1050 if (barrier_aborted) {
pliden@6692 1051 gclog_or_tty->print_cr("[%u] aborted second barrier", worker_id);
pliden@6692 1052 } else {
pliden@6692 1053 gclog_or_tty->print_cr("[%u] leaving second barrier", worker_id);
pliden@6692 1054 }
tonyp@2973 1055 }
ysr@777 1056 }
ysr@777 1057
tonyp@2848 1058 #ifndef PRODUCT
tonyp@2848 1059 void ForceOverflowSettings::init() {
tonyp@2848 1060 _num_remaining = G1ConcMarkForceOverflow;
tonyp@2848 1061 _force = false;
tonyp@2848 1062 update();
tonyp@2848 1063 }
tonyp@2848 1064
tonyp@2848 1065 void ForceOverflowSettings::update() {
tonyp@2848 1066 if (_num_remaining > 0) {
tonyp@2848 1067 _num_remaining -= 1;
tonyp@2848 1068 _force = true;
tonyp@2848 1069 } else {
tonyp@2848 1070 _force = false;
tonyp@2848 1071 }
tonyp@2848 1072 }
tonyp@2848 1073
tonyp@2848 1074 bool ForceOverflowSettings::should_force() {
tonyp@2848 1075 if (_force) {
tonyp@2848 1076 _force = false;
tonyp@2848 1077 return true;
tonyp@2848 1078 } else {
tonyp@2848 1079 return false;
tonyp@2848 1080 }
tonyp@2848 1081 }
tonyp@2848 1082 #endif // !PRODUCT
tonyp@2848 1083
ysr@777 1084 class CMConcurrentMarkingTask: public AbstractGangTask {
ysr@777 1085 private:
ysr@777 1086 ConcurrentMark* _cm;
ysr@777 1087 ConcurrentMarkThread* _cmt;
ysr@777 1088
ysr@777 1089 public:
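  // Each marking worker joins the suspendible thread set and repeatedly
  // invokes do_marking_step() for G1ConcMarkStepDurationMillis at a
  // time, sleeping between steps for elapsed_vtime * sleep_factor to
  // honor the marking overhead target, until the task completes its
  // work or marking is aborted.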
jmasa@3357 1090 void work(uint worker_id) {
tonyp@1458 1091 assert(Thread::current()->is_ConcurrentGC_thread(),
tonyp@1458 1092 "this should only be done by a conc GC thread");
johnc@2316 1093 ResourceMark rm;
ysr@777 1094
ysr@777 1095 double start_vtime = os::elapsedVTime();
ysr@777 1096
ysr@777 1097 ConcurrentGCThread::stsJoin();
ysr@777 1098
jmasa@3357 1099 assert(worker_id < _cm->active_tasks(), "invariant");
jmasa@3357 1100 CMTask* the_task = _cm->task(worker_id);
ysr@777 1101 the_task->record_start_time();
ysr@777 1102 if (!_cm->has_aborted()) {
ysr@777 1103 do {
ysr@777 1104 double start_vtime_sec = os::elapsedVTime();
ysr@777 1105 double start_time_sec = os::elapsedTime();
johnc@2494 1106 double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
johnc@2494 1107
johnc@2494 1108 the_task->do_marking_step(mark_step_duration_ms,
johnc@4787 1109 true /* do_termination */,
johnc@4787 1110 false /* is_serial*/);
johnc@2494 1111
ysr@777 1112 double end_time_sec = os::elapsedTime();
ysr@777 1113 double end_vtime_sec = os::elapsedVTime();
ysr@777 1114 double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
ysr@777 1115 double elapsed_time_sec = end_time_sec - start_time_sec;
ysr@777 1116 _cm->clear_has_overflown();
ysr@777 1117
jmasa@3357 1118 bool ret = _cm->do_yield_check(worker_id);
ysr@777 1119
ysr@777 1120 jlong sleep_time_ms;
ysr@777 1121 if (!_cm->has_aborted() && the_task->has_aborted()) {
ysr@777 1122 sleep_time_ms =
ysr@777 1123 (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
ysr@777 1124 ConcurrentGCThread::stsLeave();
ysr@777 1125 os::sleep(Thread::current(), sleep_time_ms, false);
ysr@777 1126 ConcurrentGCThread::stsJoin();
ysr@777 1127 }
ysr@777 1128 double end_time2_sec = os::elapsedTime();
ysr@777 1129 double elapsed_time2_sec = end_time2_sec - start_time_sec;
ysr@777 1130
ysr@777 1131 #if 0
ysr@777 1132 gclog_or_tty->print_cr("CM: elapsed %1.4lf ms, sleep %1.4lf ms, "
ysr@777 1133 "overhead %1.4lf",
ysr@777 1134 elapsed_vtime_sec * 1000.0, (double) sleep_time_ms,
ysr@777 1135 the_task->conc_overhead(os::elapsedTime()) * 8.0);
ysr@777 1136 gclog_or_tty->print_cr("elapsed time %1.4lf ms, time 2: %1.4lf ms",
ysr@777 1137 elapsed_time_sec * 1000.0, elapsed_time2_sec * 1000.0);
ysr@777 1138 #endif
ysr@777 1139 } while (!_cm->has_aborted() && the_task->has_aborted());
ysr@777 1140 }
ysr@777 1141 the_task->record_end_time();
tonyp@1458 1142 guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");
ysr@777 1143
ysr@777 1144 ConcurrentGCThread::stsLeave();
ysr@777 1145
ysr@777 1146 double end_vtime = os::elapsedVTime();
jmasa@3357 1147 _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
ysr@777 1148 }
ysr@777 1149
ysr@777 1150 CMConcurrentMarkingTask(ConcurrentMark* cm,
ysr@777 1151 ConcurrentMarkThread* cmt) :
ysr@777 1152 AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }
ysr@777 1153
ysr@777 1154 ~CMConcurrentMarkingTask() { }
ysr@777 1155 };
ysr@777 1156
jmasa@3294 1157 // Calculates the number of active workers for a concurrent
jmasa@3294 1158 // phase.
jmasa@3357 1159 uint ConcurrentMark::calc_parallel_marking_threads() {
johnc@3338 1160 if (G1CollectedHeap::use_parallel_gc_threads()) {
jmasa@3357 1161 uint n_conc_workers = 0;
jmasa@3294 1162 if (!UseDynamicNumberOfGCThreads ||
jmasa@3294 1163 (!FLAG_IS_DEFAULT(ConcGCThreads) &&
jmasa@3294 1164 !ForceDynamicNumberOfGCThreads)) {
jmasa@3294 1165 n_conc_workers = max_parallel_marking_threads();
jmasa@3294 1166 } else {
jmasa@3294 1167 n_conc_workers =
jmasa@3294 1168 AdaptiveSizePolicy::calc_default_active_workers(
jmasa@3294 1169 max_parallel_marking_threads(),
jmasa@3294 1170 1, /* Minimum workers */
jmasa@3294 1171 parallel_marking_threads(),
jmasa@3294 1172 Threads::number_of_non_daemon_threads());
jmasa@3294 1173 // Don't scale down "n_conc_workers" by scale_parallel_threads() because
jmasa@3294 1174 // that scaling has already gone into "_max_parallel_marking_threads".
jmasa@3294 1175 }
johnc@3338 1176 assert(n_conc_workers > 0, "Always need at least 1");
johnc@3338 1177 return n_conc_workers;
jmasa@3294 1178 }
johnc@3338 1179 // If we are not running with any parallel GC threads we will not
johnc@3338 1180 // have spawned any marking threads either. Hence the number of
johnc@3338 1181 // concurrent workers should be 0.
johnc@3338 1182 return 0;
jmasa@3294 1183 }
jmasa@3294 1184
tonyp@3464 1185 void ConcurrentMark::scanRootRegion(HeapRegion* hr, uint worker_id) {
tonyp@3464 1186 // Currently, only survivors can be root regions.
tonyp@3464 1187 assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
tonyp@3464 1188 G1RootRegionScanClosure cl(_g1h, this, worker_id);
tonyp@3464 1189
tonyp@3464 1190 const uintx interval = PrefetchScanIntervalInBytes;
tonyp@3464 1191 HeapWord* curr = hr->bottom();
tonyp@3464 1192 const HeapWord* end = hr->top();
tonyp@3464 1193 while (curr < end) {
tonyp@3464 1194 Prefetch::read(curr, interval);
tonyp@3464 1195 oop obj = oop(curr);
tonyp@3464 1196 int size = obj->oop_iterate(&cl);
tonyp@3464 1197 assert(size == obj->size(), "sanity");
tonyp@3464 1198 curr += size;
tonyp@3464 1199 }
tonyp@3464 1200 }
tonyp@3464 1201
tonyp@3464 1202 class CMRootRegionScanTask : public AbstractGangTask {
tonyp@3464 1203 private:
tonyp@3464 1204 ConcurrentMark* _cm;
tonyp@3464 1205
tonyp@3464 1206 public:
tonyp@3464 1207 CMRootRegionScanTask(ConcurrentMark* cm) :
tonyp@3464 1208 AbstractGangTask("Root Region Scan"), _cm(cm) { }
tonyp@3464 1209
tonyp@3464 1210 void work(uint worker_id) {
tonyp@3464 1211 assert(Thread::current()->is_ConcurrentGC_thread(),
tonyp@3464 1212 "this should only be done by a conc GC thread");
tonyp@3464 1213
tonyp@3464 1214 CMRootRegions* root_regions = _cm->root_regions();
tonyp@3464 1215 HeapRegion* hr = root_regions->claim_next();
tonyp@3464 1216 while (hr != NULL) {
tonyp@3464 1217 _cm->scanRootRegion(hr, worker_id);
tonyp@3464 1218 hr = root_regions->claim_next();
tonyp@3464 1219 }
tonyp@3464 1220 }
tonyp@3464 1221 };
tonyp@3464 1222
tonyp@3464 1223 void ConcurrentMark::scanRootRegions() {
tonyp@3464 1224 // scan_in_progress() will have been set to true only if there was
tonyp@3464 1225 // at least one root region to scan. So, if it's false, we
tonyp@3464 1226 // should not attempt to do any further work.
tonyp@3464 1227 if (root_regions()->scan_in_progress()) {
tonyp@3464 1228 _parallel_marking_threads = calc_parallel_marking_threads();
tonyp@3464 1229 assert(parallel_marking_threads() <= max_parallel_marking_threads(),
tonyp@3464 1230 "Maximum number of marking threads exceeded");
tonyp@3464 1231 uint active_workers = MAX2(1U, parallel_marking_threads());
tonyp@3464 1232
tonyp@3464 1233 CMRootRegionScanTask task(this);
johnc@4549 1234 if (use_parallel_marking_threads()) {
tonyp@3464 1235 _parallel_workers->set_active_workers((int) active_workers);
tonyp@3464 1236 _parallel_workers->run_task(&task);
tonyp@3464 1237 } else {
tonyp@3464 1238 task.work(0);
tonyp@3464 1239 }
tonyp@3464 1240
tonyp@3464 1241 // It's possible that has_aborted() is true here without actually
tonyp@3464 1242 // aborting the survivor scan earlier. This is OK as it's
tonyp@3464 1243 // mainly used for sanity checking.
tonyp@3464 1244 root_regions()->scan_finished();
tonyp@3464 1245 }
tonyp@3464 1246 }
tonyp@3464 1247
ysr@777 1248 void ConcurrentMark::markFromRoots() {
ysr@777 1249 // we might be tempted to assert that:
ysr@777 1250 // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
ysr@777 1251 // "inconsistent argument?");
ysr@777 1252 // However that wouldn't be right, because it's possible that
ysr@777 1253 // a safepoint is indeed in progress as a younger generation
ysr@777 1254 // stop-the-world GC happens even as we mark in this generation.
ysr@777 1255
ysr@777 1256 _restart_for_overflow = false;
tonyp@2848 1257 force_overflow_conc()->init();
jmasa@3294 1258
jmasa@3294 1259 // _g1h has _n_par_threads
jmasa@3294 1260 _parallel_marking_threads = calc_parallel_marking_threads();
jmasa@3294 1261 assert(parallel_marking_threads() <= max_parallel_marking_threads(),
jmasa@3294 1262 "Maximum number of marking threads exceeded");
johnc@3338 1263
jmasa@3357 1264 uint active_workers = MAX2(1U, parallel_marking_threads());
johnc@3338 1265
johnc@4788 1266 // Parallel task terminator is set in "set_concurrency_and_phase()"
johnc@4788 1267 set_concurrency_and_phase(active_workers, true /* concurrent */);
ysr@777 1268
ysr@777 1269 CMConcurrentMarkingTask markingTask(this, cmThread());
johnc@4549 1270 if (use_parallel_marking_threads()) {
johnc@3338 1271 _parallel_workers->set_active_workers((int)active_workers);
johnc@3338 1272     // Don't set _n_par_threads because it affects MT in process_strong_roots()
johnc@3338 1273     // and the decisions on that MT processing are made elsewhere.
johnc@3338 1274 assert(_parallel_workers->active_workers() > 0, "Should have been set");
ysr@777 1275 _parallel_workers->run_task(&markingTask);
tonyp@2973 1276 } else {
ysr@777 1277 markingTask.work(0);
tonyp@2973 1278 }
ysr@777 1279 print_stats();
ysr@777 1280 }
ysr@777 1281
ysr@777 1282 void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
ysr@777 1283 // world is stopped at this checkpoint
ysr@777 1284 assert(SafepointSynchronize::is_at_safepoint(),
ysr@777 1285 "world should be stopped");
johnc@3175 1286
ysr@777 1287 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@777 1288
ysr@777 1289 // If a full collection has happened, we shouldn't do this.
ysr@777 1290 if (has_aborted()) {
ysr@777 1291 g1h->set_marking_complete(); // So bitmap clearing isn't confused
ysr@777 1292 return;
ysr@777 1293 }
ysr@777 1294
kamg@2445 1295 SvcGCMarker sgcm(SvcGCMarker::OTHER);
kamg@2445 1296
ysr@1280 1297 if (VerifyDuringGC) {
ysr@1280 1298 HandleMark hm; // handle scope
ysr@1280 1299 Universe::heap()->prepare_for_verify();
stefank@5018 1300 Universe::verify(VerifyOption_G1UsePrevMarking,
stefank@5018 1301 " VerifyDuringGC:(before)");
ysr@1280 1302 }
ysr@1280 1303
ysr@777 1304 G1CollectorPolicy* g1p = g1h->g1_policy();
ysr@777 1305 g1p->record_concurrent_mark_remark_start();
ysr@777 1306
ysr@777 1307 double start = os::elapsedTime();
ysr@777 1308
ysr@777 1309 checkpointRootsFinalWork();
ysr@777 1310
ysr@777 1311 double mark_work_end = os::elapsedTime();
ysr@777 1312
ysr@777 1313 weakRefsWork(clear_all_soft_refs);
ysr@777 1314
ysr@777 1315 if (has_overflown()) {
ysr@777 1316 // Oops. We overflowed. Restart concurrent marking.
ysr@777 1317 _restart_for_overflow = true;
johnc@4789 1318 if (G1TraceMarkStackOverflow) {
johnc@4789 1319 gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
johnc@4789 1320 }
johnc@4789 1321
johnc@4789 1322 // Verify the heap w.r.t. the previous marking bitmap.
johnc@4789 1323 if (VerifyDuringGC) {
johnc@4789 1324 HandleMark hm; // handle scope
johnc@4789 1325 Universe::heap()->prepare_for_verify();
stefank@5018 1326 Universe::verify(VerifyOption_G1UsePrevMarking,
stefank@5018 1327 " VerifyDuringGC:(overflow)");
johnc@4789 1328 }
johnc@4789 1329
johnc@4386 1330 // Clear the marking state because we will be restarting
johnc@4386 1331 // marking due to overflowing the global mark stack.
johnc@4386 1332 reset_marking_state();
ysr@777 1333 } else {
johnc@3463 1334 // Aggregate the per-task counting data that we have accumulated
johnc@3463 1335 // while marking.
johnc@3463 1336 aggregate_count_data();
johnc@3463 1337
tonyp@2469 1338 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
ysr@777 1339 // We're done with marking.
tonyp@1752 1340     // This is the end of the marking cycle; we expect all
tonyp@1752 1341     // threads to have SATB queues with active set to true.
tonyp@2469 1342 satb_mq_set.set_active_all_threads(false, /* new active value */
tonyp@2469 1343 true /* expected_active */);
tonyp@1246 1344
tonyp@1246 1345 if (VerifyDuringGC) {
ysr@1280 1346 HandleMark hm; // handle scope
ysr@1280 1347 Universe::heap()->prepare_for_verify();
stefank@5018 1348 Universe::verify(VerifyOption_G1UseNextMarking,
stefank@5018 1349 " VerifyDuringGC:(after)");
tonyp@1246 1350 }
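    // Note: after a successful remark it is the *next* marking bitmap that
    // holds the complete marking information, hence VerifyOption_G1UseNextMarking
    // above; the bitmaps are only swapped (next becomes prev) later, in
    // ConcurrentMark::cleanup() via swapMarkBitMaps().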
johnc@2494 1351 assert(!restart_for_overflow(), "sanity");
johnc@4386 1352 // Completely reset the marking state since marking completed
johnc@4386 1353 set_non_marking_state();
johnc@2494 1354 }
johnc@2494 1355
johnc@4333 1356 // Expand the marking stack, if we have to and if we can.
johnc@4333 1357 if (_markStack.should_expand()) {
johnc@4333 1358 _markStack.expand();
johnc@4333 1359 }
johnc@4333 1360
ysr@777 1361 // Statistics
ysr@777 1362 double now = os::elapsedTime();
ysr@777 1363 _remark_mark_times.add((mark_work_end - start) * 1000.0);
ysr@777 1364 _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
ysr@777 1365 _remark_times.add((now - start) * 1000.0);
ysr@777 1366
ysr@777 1367 g1p->record_concurrent_mark_remark_end();
sla@5237 1368
sla@5237 1369 G1CMIsAliveClosure is_alive(g1h);
sla@5237 1370 g1h->gc_tracer_cm()->report_object_count_after_gc(&is_alive);
ysr@777 1371 }
ysr@777 1372
johnc@3731 1373 // Base class of the closures that finalize and verify the
johnc@3731 1374 // liveness counting data.
johnc@3731 1375 class CMCountDataClosureBase: public HeapRegionClosure {
johnc@3731 1376 protected:
johnc@4123 1377 G1CollectedHeap* _g1h;
ysr@777 1378 ConcurrentMark* _cm;
johnc@4123 1379 CardTableModRefBS* _ct_bs;
johnc@4123 1380
johnc@3463 1381 BitMap* _region_bm;
johnc@3463 1382 BitMap* _card_bm;
johnc@3463 1383
johnc@4123 1384   // Takes a region that's not empty (i.e., it has at least one
tonyp@1264 1385   // live object in it) and sets its corresponding bit on the region
tonyp@1264 1386 // bitmap to 1. If the region is "starts humongous" it will also set
tonyp@1264 1387 // to 1 the bits on the region bitmap that correspond to its
tonyp@1264 1388 // associated "continues humongous" regions.
tonyp@1264 1389 void set_bit_for_region(HeapRegion* hr) {
tonyp@1264 1390 assert(!hr->continuesHumongous(), "should have filtered those out");
tonyp@1264 1391
tonyp@3713 1392 BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();
tonyp@1264 1393 if (!hr->startsHumongous()) {
tonyp@1264 1394 // Normal (non-humongous) case: just set the bit.
tonyp@3713 1395 _region_bm->par_at_put(index, true);
tonyp@1264 1396 } else {
tonyp@1264 1397 // Starts humongous case: calculate how many regions are part of
johnc@3463 1398 // this humongous region and then set the bit range.
tonyp@3957 1399 BitMap::idx_t end_index = (BitMap::idx_t) hr->last_hc_index();
tonyp@3713 1400 _region_bm->par_at_put_range(index, end_index, true);
tonyp@1264 1401 }
tonyp@1264 1402 }
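  // Worked example (a sketch): for a humongous object spanning three
  // regions starting at region index 5, last_hc_index() is assumed to
  // return 5 + 3 = 8, so par_at_put_range(5, 8, true) sets the bits for
  // regions 5, 6 and 7 - the "starts humongous" region plus its two
  // "continues humongous" followers - in a single call.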
tonyp@1264 1403
johnc@3731 1404 public:
johnc@4123 1405 CMCountDataClosureBase(G1CollectedHeap* g1h,
johnc@3731 1406 BitMap* region_bm, BitMap* card_bm):
johnc@4123 1407 _g1h(g1h), _cm(g1h->concurrent_mark()),
johnc@4123 1408 _ct_bs((CardTableModRefBS*) (g1h->barrier_set())),
johnc@4123 1409 _region_bm(region_bm), _card_bm(card_bm) { }
johnc@3731 1410 };
johnc@3731 1411
johnc@3731 1412 // Closure that calculates the # live objects per region. Used
johnc@3731 1413 // for verification purposes during the cleanup pause.
johnc@3731 1414 class CalcLiveObjectsClosure: public CMCountDataClosureBase {
johnc@3731 1415 CMBitMapRO* _bm;
johnc@3731 1416 size_t _region_marked_bytes;
johnc@3731 1417
johnc@3731 1418 public:
johnc@4123 1419 CalcLiveObjectsClosure(CMBitMapRO *bm, G1CollectedHeap* g1h,
johnc@3731 1420 BitMap* region_bm, BitMap* card_bm) :
johnc@4123 1421 CMCountDataClosureBase(g1h, region_bm, card_bm),
johnc@3731 1422 _bm(bm), _region_marked_bytes(0) { }
johnc@3731 1423
ysr@777 1424 bool doHeapRegion(HeapRegion* hr) {
ysr@777 1425
iveresov@1074 1426 if (hr->continuesHumongous()) {
tonyp@1264 1427 // We will ignore these here and process them when their
tonyp@1264 1428 // associated "starts humongous" region is processed (see
tonyp@1264 1429       // set_bit_for_region()). Note that we cannot rely on their
tonyp@1264 1430       // associated "starts humongous" region to have its bit set to
tonyp@1264 1431 // 1 since, due to the region chunking in the parallel region
tonyp@1264 1432 // iteration, a "continues humongous" region might be visited
tonyp@1264 1433 // before its associated "starts humongous".
iveresov@1074 1434 return false;
iveresov@1074 1435 }
ysr@777 1436
johnc@4123 1437 HeapWord* ntams = hr->next_top_at_mark_start();
johnc@4123 1438 HeapWord* start = hr->bottom();
johnc@4123 1439
johnc@4123 1440 assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
johnc@3463 1441 err_msg("Preconditions not met - "
johnc@4123 1442 "start: "PTR_FORMAT", ntams: "PTR_FORMAT", end: "PTR_FORMAT,
drchase@6680 1443 p2i(start), p2i(ntams), p2i(hr->end())));
johnc@3463 1444
ysr@777 1445 // Find the first marked object at or after "start".
johnc@4123 1446 start = _bm->getNextMarkedWordAddress(start, ntams);
johnc@3463 1447
ysr@777 1448 size_t marked_bytes = 0;
ysr@777 1449
johnc@4123 1450 while (start < ntams) {
ysr@777 1451 oop obj = oop(start);
ysr@777 1452 int obj_sz = obj->size();
johnc@4123 1453 HeapWord* obj_end = start + obj_sz;
johnc@3731 1454
johnc@3731 1455 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
johnc@4123 1456 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end);
johnc@4123 1457
johnc@4123 1458       // Note: if we're looking at the last region in the heap - obj_end
johnc@4123 1459       // could actually be just beyond the end of the heap; end_idx
johnc@4123 1460 // will then correspond to a (non-existent) card that is also
johnc@4123 1461 // just beyond the heap.
johnc@4123 1462 if (_g1h->is_in_g1_reserved(obj_end) && !_ct_bs->is_card_aligned(obj_end)) {
johnc@4123 1463 // end of object is not card aligned - increment to cover
johnc@4123 1464 // all the cards spanned by the object
johnc@4123 1465 end_idx += 1;
johnc@4123 1466 }
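      // Worked example (a sketch, assuming the usual 512-byte cards, i.e.
      // 64 heap words on a 64-bit VM): an object starting 8 words into
      // card 10 and ending 8 words into card 12 gives start_idx == 10 and
      // card_bitmap_index_for(obj_end) == 12; obj_end is not card aligned,
      // so end_idx becomes 13 and the half-open range [10, 13) covers all
      // three cards the object touches. Had obj_end landed exactly on the
      // card-12 boundary, end_idx would stay 12 and [10, 12) would already
      // be correct.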
johnc@4123 1467
johnc@4123 1468 // Set the bits in the card BM for the cards spanned by this object.
johnc@4123 1469 _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
johnc@3731 1470
johnc@3731 1471 // Add the size of this object to the number of marked bytes.
apetrusenko@1465 1472 marked_bytes += (size_t)obj_sz * HeapWordSize;
johnc@3463 1473
ysr@777 1474 // Find the next marked object after this one.
johnc@4123 1475 start = _bm->getNextMarkedWordAddress(obj_end, ntams);
tonyp@2973 1476 }
johnc@3463 1477
johnc@3463 1478 // Mark the allocated-since-marking portion...
johnc@3463 1479 HeapWord* top = hr->top();
johnc@4123 1480 if (ntams < top) {
johnc@4123 1481 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
johnc@4123 1482 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
johnc@4123 1483
johnc@4123 1484       // Note: if we're looking at the last region in the heap - top
johnc@4123 1485       // could actually be just beyond the end of the heap; end_idx
johnc@4123 1486 // will then correspond to a (non-existent) card that is also
johnc@4123 1487 // just beyond the heap.
johnc@4123 1488 if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
johnc@4123 1489         // top is not card aligned - increment to cover
johnc@4123 1490         // all the cards spanned by the allocated-since-marking portion
johnc@4123 1491 end_idx += 1;
johnc@4123 1492 }
johnc@4123 1493 _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
johnc@3463 1494
johnc@3463 1495 // This definitely means the region has live objects.
johnc@3463 1496 set_bit_for_region(hr);
ysr@777 1497 }
ysr@777 1498
ysr@777 1499 // Update the live region bitmap.
ysr@777 1500 if (marked_bytes > 0) {
tonyp@1264 1501 set_bit_for_region(hr);
ysr@777 1502 }
johnc@3463 1503
johnc@3463 1504 // Set the marked bytes for the current region so that
johnc@3463 1505     // it can be queried by a calling verification routine
johnc@3463 1506 _region_marked_bytes = marked_bytes;
johnc@3463 1507
johnc@3463 1508 return false;
johnc@3463 1509 }
johnc@3463 1510
johnc@3463 1511 size_t region_marked_bytes() const { return _region_marked_bytes; }
johnc@3463 1512 };
johnc@3463 1513
johnc@3463 1514 // Heap region closure used for verifying the counting data
johnc@3463 1515 // that was accumulated concurrently and aggregated during
johnc@3463 1516 // the remark pause. This closure is applied to the heap
johnc@3463 1517 // regions during the STW cleanup pause.
johnc@3463 1518
johnc@3463 1519 class VerifyLiveObjectDataHRClosure: public HeapRegionClosure {
johnc@4123 1520 G1CollectedHeap* _g1h;
johnc@3463 1521 ConcurrentMark* _cm;
johnc@3463 1522 CalcLiveObjectsClosure _calc_cl;
johnc@3463 1523 BitMap* _region_bm; // Region BM to be verified
johnc@3463 1524 BitMap* _card_bm; // Card BM to be verified
johnc@3463 1525 bool _verbose; // verbose output?
johnc@3463 1526
johnc@3463 1527 BitMap* _exp_region_bm; // Expected Region BM values
johnc@3463 1528 BitMap* _exp_card_bm; // Expected card BM values
johnc@3463 1529
johnc@3463 1530 int _failures;
johnc@3463 1531
johnc@3463 1532 public:
johnc@4123 1533 VerifyLiveObjectDataHRClosure(G1CollectedHeap* g1h,
johnc@3463 1534 BitMap* region_bm,
johnc@3463 1535 BitMap* card_bm,
johnc@3463 1536 BitMap* exp_region_bm,
johnc@3463 1537 BitMap* exp_card_bm,
johnc@3463 1538 bool verbose) :
johnc@4123 1539 _g1h(g1h), _cm(g1h->concurrent_mark()),
johnc@4123 1540 _calc_cl(_cm->nextMarkBitMap(), g1h, exp_region_bm, exp_card_bm),
johnc@3463 1541 _region_bm(region_bm), _card_bm(card_bm), _verbose(verbose),
johnc@3463 1542 _exp_region_bm(exp_region_bm), _exp_card_bm(exp_card_bm),
johnc@3463 1543 _failures(0) { }
johnc@3463 1544
johnc@3463 1545 int failures() const { return _failures; }
johnc@3463 1546
johnc@3463 1547 bool doHeapRegion(HeapRegion* hr) {
johnc@3463 1548 if (hr->continuesHumongous()) {
johnc@3463 1549 // We will ignore these here and process them when their
johnc@3463 1550 // associated "starts humongous" region is processed (see
johnc@3463 1551       // set_bit_for_region()). Note that we cannot rely on their
johnc@3463 1552       // associated "starts humongous" region to have its bit set to
johnc@3463 1553 // 1 since, due to the region chunking in the parallel region
johnc@3463 1554 // iteration, a "continues humongous" region might be visited
johnc@3463 1555 // before its associated "starts humongous".
johnc@3463 1556 return false;
johnc@3463 1557 }
johnc@3463 1558
johnc@3463 1559 int failures = 0;
johnc@3463 1560
johnc@3463 1561 // Call the CalcLiveObjectsClosure to walk the marking bitmap for
johnc@3463 1562 // this region and set the corresponding bits in the expected region
johnc@3463 1563 // and card bitmaps.
johnc@3463 1564 bool res = _calc_cl.doHeapRegion(hr);
johnc@3463 1565 assert(res == false, "should be continuing");
johnc@3463 1566
johnc@3463 1567 MutexLockerEx x((_verbose ? ParGCRareEvent_lock : NULL),
johnc@3463 1568 Mutex::_no_safepoint_check_flag);
johnc@3463 1569
johnc@3463 1570 // Verify the marked bytes for this region.
johnc@3463 1571 size_t exp_marked_bytes = _calc_cl.region_marked_bytes();
johnc@3463 1572 size_t act_marked_bytes = hr->next_marked_bytes();
johnc@3463 1573
johnc@3463 1574 // We're not OK if expected marked bytes > actual marked bytes. It means
johnc@3463 1575     // we have missed accounting for some objects during the actual marking.
johnc@3463 1576 if (exp_marked_bytes > act_marked_bytes) {
johnc@3463 1577 if (_verbose) {
tonyp@3713 1578 gclog_or_tty->print_cr("Region %u: marked bytes mismatch: "
johnc@3463 1579 "expected: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
johnc@3463 1580 hr->hrs_index(), exp_marked_bytes, act_marked_bytes);
johnc@3463 1581 }
johnc@3463 1582 failures += 1;
johnc@3463 1583 }
johnc@3463 1584
johnc@3463 1585 // Verify the bit, for this region, in the actual and expected
johnc@3463 1586 // (which was just calculated) region bit maps.
johnc@3463 1587 // We're not OK if the bit in the calculated expected region
johnc@3463 1588 // bitmap is set and the bit in the actual region bitmap is not.
tonyp@3713 1589 BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();
johnc@3463 1590
johnc@3463 1591 bool expected = _exp_region_bm->at(index);
johnc@3463 1592 bool actual = _region_bm->at(index);
johnc@3463 1593 if (expected && !actual) {
johnc@3463 1594 if (_verbose) {
tonyp@3713 1595 gclog_or_tty->print_cr("Region %u: region bitmap mismatch: "
tonyp@3713 1596 "expected: %s, actual: %s",
tonyp@3713 1597 hr->hrs_index(),
tonyp@3713 1598 BOOL_TO_STR(expected), BOOL_TO_STR(actual));
johnc@3463 1599 }
johnc@3463 1600 failures += 1;
johnc@3463 1601 }
johnc@3463 1602
johnc@3463 1603 // Verify that the card bit maps for the cards spanned by the current
johnc@3463 1604 // region match. We have an error if we have a set bit in the expected
johnc@3463 1605 // bit map and the corresponding bit in the actual bitmap is not set.
johnc@3463 1606
johnc@3463 1607 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(hr->bottom());
johnc@3463 1608 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(hr->top());
johnc@3463 1609
johnc@3463 1610 for (BitMap::idx_t i = start_idx; i < end_idx; i+=1) {
johnc@3463 1611 expected = _exp_card_bm->at(i);
johnc@3463 1612 actual = _card_bm->at(i);
johnc@3463 1613
johnc@3463 1614 if (expected && !actual) {
johnc@3463 1615 if (_verbose) {
tonyp@3713 1616 gclog_or_tty->print_cr("Region %u: card bitmap mismatch at " SIZE_FORMAT ": "
tonyp@3713 1617 "expected: %s, actual: %s",
tonyp@3713 1618 hr->hrs_index(), i,
tonyp@3713 1619 BOOL_TO_STR(expected), BOOL_TO_STR(actual));
ysr@777 1620 }
johnc@3463 1621 failures += 1;
ysr@777 1622 }
ysr@777 1623 }
ysr@777 1624
johnc@3463 1625 if (failures > 0 && _verbose) {
johnc@3463 1626 gclog_or_tty->print_cr("Region " HR_FORMAT ", ntams: " PTR_FORMAT ", "
johnc@3463 1627 "marked_bytes: calc/actual " SIZE_FORMAT "/" SIZE_FORMAT,
drchase@6680 1628 HR_FORMAT_PARAMS(hr), p2i(hr->next_top_at_mark_start()),
johnc@3463 1629 _calc_cl.region_marked_bytes(), hr->next_marked_bytes());
johnc@3463 1630 }
johnc@3463 1631
johnc@3463 1632 _failures += failures;
johnc@3463 1633
johnc@3463 1634 // We could stop iteration over the heap when we
johnc@3731 1635 // find the first violating region by returning true.
ysr@777 1636 return false;
ysr@777 1637 }
ysr@777 1638 };
ysr@777 1639
johnc@3463 1640 class G1ParVerifyFinalCountTask: public AbstractGangTask {
johnc@3463 1641 protected:
johnc@3463 1642 G1CollectedHeap* _g1h;
johnc@3463 1643 ConcurrentMark* _cm;
johnc@3463 1644 BitMap* _actual_region_bm;
johnc@3463 1645 BitMap* _actual_card_bm;
johnc@3463 1646
johnc@3463 1647 uint _n_workers;
johnc@3463 1648
johnc@3463 1649 BitMap* _expected_region_bm;
johnc@3463 1650 BitMap* _expected_card_bm;
johnc@3463 1651
johnc@3463 1652 int _failures;
johnc@3463 1653 bool _verbose;
johnc@3463 1654
johnc@3463 1655 public:
johnc@3463 1656 G1ParVerifyFinalCountTask(G1CollectedHeap* g1h,
johnc@3463 1657 BitMap* region_bm, BitMap* card_bm,
johnc@3463 1658 BitMap* expected_region_bm, BitMap* expected_card_bm)
johnc@3463 1659 : AbstractGangTask("G1 verify final counting"),
johnc@3463 1660 _g1h(g1h), _cm(_g1h->concurrent_mark()),
johnc@3463 1661 _actual_region_bm(region_bm), _actual_card_bm(card_bm),
johnc@3463 1662 _expected_region_bm(expected_region_bm), _expected_card_bm(expected_card_bm),
johnc@3463 1663 _failures(0), _verbose(false),
johnc@3463 1664 _n_workers(0) {
johnc@3463 1665 assert(VerifyDuringGC, "don't call this otherwise");
johnc@3463 1666
johnc@3463 1667 // Use the value already set as the number of active threads
johnc@3463 1668 // in the call to run_task().
johnc@3463 1669 if (G1CollectedHeap::use_parallel_gc_threads()) {
johnc@3463 1670 assert( _g1h->workers()->active_workers() > 0,
johnc@3463 1671 "Should have been previously set");
johnc@3463 1672 _n_workers = _g1h->workers()->active_workers();
johnc@3463 1673 } else {
johnc@3463 1674 _n_workers = 1;
johnc@3463 1675 }
johnc@3463 1676
johnc@3463 1677 assert(_expected_card_bm->size() == _actual_card_bm->size(), "sanity");
johnc@3463 1678 assert(_expected_region_bm->size() == _actual_region_bm->size(), "sanity");
johnc@3463 1679
johnc@3463 1680 _verbose = _cm->verbose_medium();
johnc@3463 1681 }
johnc@3463 1682
johnc@3463 1683 void work(uint worker_id) {
johnc@3463 1684 assert(worker_id < _n_workers, "invariant");
johnc@3463 1685
johnc@4123 1686 VerifyLiveObjectDataHRClosure verify_cl(_g1h,
johnc@3463 1687 _actual_region_bm, _actual_card_bm,
johnc@3463 1688 _expected_region_bm,
johnc@3463 1689 _expected_card_bm,
johnc@3463 1690 _verbose);
johnc@3463 1691
johnc@3463 1692 if (G1CollectedHeap::use_parallel_gc_threads()) {
johnc@3463 1693 _g1h->heap_region_par_iterate_chunked(&verify_cl,
johnc@3463 1694 worker_id,
johnc@3463 1695 _n_workers,
johnc@3463 1696 HeapRegion::VerifyCountClaimValue);
johnc@3463 1697 } else {
johnc@3463 1698 _g1h->heap_region_iterate(&verify_cl);
johnc@3463 1699 }
johnc@3463 1700
johnc@3463 1701 Atomic::add(verify_cl.failures(), &_failures);
johnc@3463 1702 }
johnc@3463 1703
johnc@3463 1704 int failures() const { return _failures; }
johnc@3463 1705 };
johnc@3463 1706
johnc@3731 1707 // Closure that finalizes the liveness counting data.
johnc@3731 1708 // Used during the cleanup pause.
johnc@3731 1709 // Sets the bits corresponding to the interval [NTAMS, top)
johnc@3731 1710 // (which contains the implicitly live objects) in the
johnc@3731 1711 // card liveness bitmap. Also sets, in the region liveness
johnc@3731 1712 // bitmap, the bit for each region containing live data.
johnc@3731 1713
johnc@3731 1714 class FinalCountDataUpdateClosure: public CMCountDataClosureBase {
johnc@3463 1715 public:
johnc@4123 1716 FinalCountDataUpdateClosure(G1CollectedHeap* g1h,
johnc@3463 1717 BitMap* region_bm,
johnc@3463 1718 BitMap* card_bm) :
johnc@4123 1719 CMCountDataClosureBase(g1h, region_bm, card_bm) { }
johnc@3463 1720
johnc@3463 1721 bool doHeapRegion(HeapRegion* hr) {
johnc@3463 1722
johnc@3463 1723 if (hr->continuesHumongous()) {
johnc@3463 1724 // We will ignore these here and process them when their
johnc@3463 1725 // associated "starts humongous" region is processed (see
johnc@3463 1726       // set_bit_for_region()). Note that we cannot rely on their
johnc@3463 1727       // associated "starts humongous" region to have its bit set to
johnc@3463 1728 // 1 since, due to the region chunking in the parallel region
johnc@3463 1729 // iteration, a "continues humongous" region might be visited
johnc@3463 1730 // before its associated "starts humongous".
johnc@3463 1731 return false;
johnc@3463 1732 }
johnc@3463 1733
johnc@3463 1734 HeapWord* ntams = hr->next_top_at_mark_start();
johnc@3463 1735 HeapWord* top = hr->top();
johnc@3463 1736
johnc@3731 1737 assert(hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions.");
johnc@3463 1738
johnc@3463 1739 // Mark the allocated-since-marking portion...
johnc@3463 1740 if (ntams < top) {
johnc@3463 1741 // This definitely means the region has live objects.
johnc@3463 1742 set_bit_for_region(hr);
johnc@4123 1743
johnc@4123 1744 // Now set the bits in the card bitmap for [ntams, top)
johnc@4123 1745 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
johnc@4123 1746 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
johnc@4123 1747
johnc@4123 1748       // Note: if we're looking at the last region in the heap - top
johnc@4123 1749       // could actually be just beyond the end of the heap; end_idx
johnc@4123 1750 // will then correspond to a (non-existent) card that is also
johnc@4123 1751 // just beyond the heap.
johnc@4123 1752 if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
johnc@4123 1753         // top is not card aligned - increment to cover
johnc@4123 1754         // all the cards spanned by the allocated-since-marking portion
johnc@4123 1755 end_idx += 1;
johnc@4123 1756 }
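      // (Same card-rounding rule as in CalcLiveObjectsClosure::doHeapRegion()
      // above; see the worked example there.)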
johnc@4123 1757
johnc@4123 1758 assert(end_idx <= _card_bm->size(),
johnc@4123 1759 err_msg("oob: end_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT,
johnc@4123 1760 end_idx, _card_bm->size()));
johnc@4123 1761 assert(start_idx < _card_bm->size(),
johnc@4123 1762 err_msg("oob: start_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT,
johnc@4123 1763 start_idx, _card_bm->size()));
johnc@4123 1764
johnc@4123 1765 _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
coleenp@4037 1766 }
johnc@3463 1767
johnc@3463 1768 // Set the bit for the region if it contains live data
johnc@3463 1769 if (hr->next_marked_bytes() > 0) {
johnc@3463 1770 set_bit_for_region(hr);
johnc@3463 1771 }
johnc@3463 1772
johnc@3463 1773 return false;
johnc@3463 1774 }
johnc@3463 1775 };
ysr@777 1776
ysr@777 1777 class G1ParFinalCountTask: public AbstractGangTask {
ysr@777 1778 protected:
ysr@777 1779 G1CollectedHeap* _g1h;
johnc@3463 1780 ConcurrentMark* _cm;
johnc@3463 1781 BitMap* _actual_region_bm;
johnc@3463 1782 BitMap* _actual_card_bm;
johnc@3463 1783
jmasa@3357 1784 uint _n_workers;
johnc@3463 1785
ysr@777 1786 public:
johnc@3463 1787 G1ParFinalCountTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm)
johnc@3463 1788 : AbstractGangTask("G1 final counting"),
johnc@3463 1789 _g1h(g1h), _cm(_g1h->concurrent_mark()),
johnc@3463 1790 _actual_region_bm(region_bm), _actual_card_bm(card_bm),
johnc@3463 1791 _n_workers(0) {
jmasa@3294 1792 // Use the value already set as the number of active threads
tonyp@3714 1793 // in the call to run_task().
jmasa@3294 1794 if (G1CollectedHeap::use_parallel_gc_threads()) {
jmasa@3294 1795 assert( _g1h->workers()->active_workers() > 0,
jmasa@3294 1796 "Should have been previously set");
jmasa@3294 1797 _n_workers = _g1h->workers()->active_workers();
tonyp@2973 1798 } else {
ysr@777 1799 _n_workers = 1;
tonyp@2973 1800 }
ysr@777 1801 }
ysr@777 1802
jmasa@3357 1803 void work(uint worker_id) {
johnc@3463 1804 assert(worker_id < _n_workers, "invariant");
johnc@3463 1805
johnc@4123 1806 FinalCountDataUpdateClosure final_update_cl(_g1h,
johnc@3463 1807 _actual_region_bm,
johnc@3463 1808 _actual_card_bm);
johnc@3463 1809
jmasa@2188 1810 if (G1CollectedHeap::use_parallel_gc_threads()) {
johnc@3463 1811 _g1h->heap_region_par_iterate_chunked(&final_update_cl,
johnc@3463 1812 worker_id,
johnc@3463 1813 _n_workers,
tonyp@790 1814 HeapRegion::FinalCountClaimValue);
ysr@777 1815 } else {
johnc@3463 1816 _g1h->heap_region_iterate(&final_update_cl);
ysr@777 1817 }
ysr@777 1818 }
ysr@777 1819 };
ysr@777 1820
ysr@777 1821 class G1ParNoteEndTask;
ysr@777 1822
ysr@777 1823 class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
ysr@777 1824 G1CollectedHeap* _g1;
ysr@777 1825 size_t _max_live_bytes;
tonyp@3713 1826 uint _regions_claimed;
ysr@777 1827 size_t _freed_bytes;
tonyp@2493 1828 FreeRegionList* _local_cleanup_list;
brutisso@6385 1829 HeapRegionSetCount _old_regions_removed;
brutisso@6385 1830 HeapRegionSetCount _humongous_regions_removed;
tonyp@2493 1831 HRRSCleanupTask* _hrrs_cleanup_task;
ysr@777 1832 double _claimed_region_time;
ysr@777 1833 double _max_region_time;
ysr@777 1834
ysr@777 1835 public:
ysr@777 1836 G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
tonyp@2493 1837 FreeRegionList* local_cleanup_list,
johnc@3292 1838 HRRSCleanupTask* hrrs_cleanup_task) :
vkempik@6552 1839 _g1(g1),
johnc@3292 1840 _max_live_bytes(0), _regions_claimed(0),
johnc@3292 1841 _freed_bytes(0),
johnc@3292 1842 _claimed_region_time(0.0), _max_region_time(0.0),
johnc@3292 1843 _local_cleanup_list(local_cleanup_list),
brutisso@6385 1844 _old_regions_removed(),
brutisso@6385 1845 _humongous_regions_removed(),
johnc@3292 1846 _hrrs_cleanup_task(hrrs_cleanup_task) { }
johnc@3292 1847
ysr@777 1848 size_t freed_bytes() { return _freed_bytes; }
brutisso@6385 1849 const HeapRegionSetCount& old_regions_removed() { return _old_regions_removed; }
brutisso@6385 1850 const HeapRegionSetCount& humongous_regions_removed() { return _humongous_regions_removed; }
ysr@777 1851
johnc@3292 1852 bool doHeapRegion(HeapRegion *hr) {
tonyp@3957 1853 if (hr->continuesHumongous()) {
tonyp@3957 1854 return false;
tonyp@3957 1855 }
johnc@3292 1856 // We use a claim value of zero here because all regions
johnc@3292 1857 // were claimed with value 1 in the FinalCount task.
tonyp@3957 1858 _g1->reset_gc_time_stamps(hr);
tonyp@3957 1859 double start = os::elapsedTime();
tonyp@3957 1860 _regions_claimed++;
tonyp@3957 1861 hr->note_end_of_marking();
tonyp@3957 1862 _max_live_bytes += hr->max_live_bytes();
brutisso@6385 1863
brutisso@6385 1864 if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
brutisso@6385 1865 _freed_bytes += hr->used();
brutisso@6385 1866 hr->set_containing_set(NULL);
brutisso@6385 1867 if (hr->isHumongous()) {
brutisso@6385 1868 assert(hr->startsHumongous(), "we should only see starts humongous");
brutisso@6385 1869 _humongous_regions_removed.increment(1u, hr->capacity());
brutisso@6385 1870 _g1->free_humongous_region(hr, _local_cleanup_list, true);
brutisso@6385 1871 } else {
brutisso@6385 1872 _old_regions_removed.increment(1u, hr->capacity());
brutisso@6385 1873 _g1->free_region(hr, _local_cleanup_list, true);
brutisso@6385 1874 }
brutisso@6385 1875 } else {
brutisso@6385 1876 hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
brutisso@6385 1877 }
brutisso@6385 1878
tonyp@3957 1879 double region_time = (os::elapsedTime() - start);
tonyp@3957 1880 _claimed_region_time += region_time;
tonyp@3957 1881 if (region_time > _max_region_time) {
tonyp@3957 1882 _max_region_time = region_time;
johnc@3292 1883 }
johnc@3292 1884 return false;
johnc@3292 1885 }
ysr@777 1886
ysr@777 1887 size_t max_live_bytes() { return _max_live_bytes; }
tonyp@3713 1888 uint regions_claimed() { return _regions_claimed; }
ysr@777 1889 double claimed_region_time_sec() { return _claimed_region_time; }
ysr@777 1890 double max_region_time_sec() { return _max_region_time; }
ysr@777 1891 };
ysr@777 1892
ysr@777 1893 class G1ParNoteEndTask: public AbstractGangTask {
ysr@777 1894 friend class G1NoteEndOfConcMarkClosure;
tonyp@2472 1895
ysr@777 1896 protected:
ysr@777 1897 G1CollectedHeap* _g1h;
ysr@777 1898 size_t _max_live_bytes;
ysr@777 1899 size_t _freed_bytes;
tonyp@2472 1900 FreeRegionList* _cleanup_list;
tonyp@2472 1901
ysr@777 1902 public:
ysr@777 1903 G1ParNoteEndTask(G1CollectedHeap* g1h,
tonyp@2472 1904 FreeRegionList* cleanup_list) :
ysr@777 1905 AbstractGangTask("G1 note end"), _g1h(g1h),
tonyp@2472 1906 _max_live_bytes(0), _freed_bytes(0), _cleanup_list(cleanup_list) { }
ysr@777 1907
jmasa@3357 1908 void work(uint worker_id) {
ysr@777 1909 double start = os::elapsedTime();
tonyp@2493 1910 FreeRegionList local_cleanup_list("Local Cleanup List");
tonyp@2493 1911 HRRSCleanupTask hrrs_cleanup_task;
vkempik@6552 1912 G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list,
tonyp@2493 1913 &hrrs_cleanup_task);
jmasa@2188 1914 if (G1CollectedHeap::use_parallel_gc_threads()) {
jmasa@3357 1915 _g1h->heap_region_par_iterate_chunked(&g1_note_end, worker_id,
jmasa@3294 1916 _g1h->workers()->active_workers(),
tonyp@790 1917 HeapRegion::NoteEndClaimValue);
ysr@777 1918 } else {
ysr@777 1919 _g1h->heap_region_iterate(&g1_note_end);
ysr@777 1920 }
ysr@777 1921 assert(g1_note_end.complete(), "Shouldn't have yielded!");
ysr@777 1922
tonyp@2472 1923 // Now update the lists
brutisso@6385 1924 _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed());
ysr@777 1925 {
ysr@777 1926 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
brutisso@6385 1927 _g1h->decrement_summary_bytes(g1_note_end.freed_bytes());
ysr@777 1928 _max_live_bytes += g1_note_end.max_live_bytes();
ysr@777 1929 _freed_bytes += g1_note_end.freed_bytes();
tonyp@2472 1930
tonyp@2975 1931 // If we iterate over the global cleanup list at the end of
tonyp@2975 1932 // cleanup to do this printing we will not guarantee to only
tonyp@2975 1933 // generate output for the newly-reclaimed regions (the list
tonyp@2975 1934 // might not be empty at the beginning of cleanup; we might
tonyp@2975 1935 // still be working on its previous contents). So we do the
tonyp@2975 1936 // printing here, before we append the new regions to the global
tonyp@2975 1937 // cleanup list.
tonyp@2975 1938
tonyp@2975 1939 G1HRPrinter* hr_printer = _g1h->hr_printer();
tonyp@2975 1940 if (hr_printer->is_active()) {
brutisso@6385 1941 FreeRegionListIterator iter(&local_cleanup_list);
tonyp@2975 1942 while (iter.more_available()) {
tonyp@2975 1943 HeapRegion* hr = iter.get_next();
tonyp@2975 1944 hr_printer->cleanup(hr);
tonyp@2975 1945 }
tonyp@2975 1946 }
tonyp@2975 1947
jwilhelm@6422 1948 _cleanup_list->add_ordered(&local_cleanup_list);
tonyp@2493 1949 assert(local_cleanup_list.is_empty(), "post-condition");
tonyp@2493 1950
tonyp@2493 1951 HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
ysr@777 1952 }
ysr@777 1953 }
ysr@777 1954 size_t max_live_bytes() { return _max_live_bytes; }
ysr@777 1955 size_t freed_bytes() { return _freed_bytes; }
ysr@777 1956 };
ysr@777 1957
ysr@777 1958 class G1ParScrubRemSetTask: public AbstractGangTask {
ysr@777 1959 protected:
ysr@777 1960 G1RemSet* _g1rs;
ysr@777 1961 BitMap* _region_bm;
ysr@777 1962 BitMap* _card_bm;
ysr@777 1963 public:
ysr@777 1964 G1ParScrubRemSetTask(G1CollectedHeap* g1h,
ysr@777 1965 BitMap* region_bm, BitMap* card_bm) :
ysr@777 1966 AbstractGangTask("G1 ScrubRS"), _g1rs(g1h->g1_rem_set()),
johnc@3463 1967 _region_bm(region_bm), _card_bm(card_bm) { }
ysr@777 1968
jmasa@3357 1969 void work(uint worker_id) {
jmasa@2188 1970 if (G1CollectedHeap::use_parallel_gc_threads()) {
jmasa@3357 1971 _g1rs->scrub_par(_region_bm, _card_bm, worker_id,
tonyp@790 1972 HeapRegion::ScrubRemSetClaimValue);
ysr@777 1973 } else {
ysr@777 1974 _g1rs->scrub(_region_bm, _card_bm);
ysr@777 1975 }
ysr@777 1976 }
ysr@777 1977
ysr@777 1978 };
ysr@777 1979
ysr@777 1980 void ConcurrentMark::cleanup() {
ysr@777 1981 // world is stopped at this checkpoint
ysr@777 1982 assert(SafepointSynchronize::is_at_safepoint(),
ysr@777 1983 "world should be stopped");
ysr@777 1984 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@777 1985
ysr@777 1986 // If a full collection has happened, we shouldn't do this.
ysr@777 1987 if (has_aborted()) {
ysr@777 1988 g1h->set_marking_complete(); // So bitmap clearing isn't confused
ysr@777 1989 return;
ysr@777 1990 }
ysr@777 1991
tonyp@2472 1992 g1h->verify_region_sets_optional();
tonyp@2472 1993
ysr@1280 1994 if (VerifyDuringGC) {
ysr@1280 1995 HandleMark hm; // handle scope
ysr@1280 1996 Universe::heap()->prepare_for_verify();
stefank@5018 1997 Universe::verify(VerifyOption_G1UsePrevMarking,
stefank@5018 1998 " VerifyDuringGC:(before)");
ysr@1280 1999 }
ysr@1280 2000
ysr@777 2001 G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
ysr@777 2002 g1p->record_concurrent_mark_cleanup_start();
ysr@777 2003
ysr@777 2004 double start = os::elapsedTime();
ysr@777 2005
tonyp@2493 2006 HeapRegionRemSet::reset_for_cleanup_tasks();
tonyp@2493 2007
jmasa@3357 2008 uint n_workers;
jmasa@3294 2009
ysr@777 2010 // Do counting once more with the world stopped for good measure.
johnc@3463 2011 G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm);
johnc@3463 2012
jmasa@2188 2013 if (G1CollectedHeap::use_parallel_gc_threads()) {
johnc@3463 2014 assert(g1h->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
tonyp@790 2015 "sanity check");
tonyp@790 2016
johnc@3338 2017 g1h->set_par_threads();
johnc@3338 2018 n_workers = g1h->n_par_threads();
jmasa@3357 2019 assert(g1h->n_par_threads() == n_workers,
johnc@3338 2020 "Should not have been reset");
ysr@777 2021 g1h->workers()->run_task(&g1_par_count_task);
jmasa@3294 2022 // Done with the parallel phase so reset to 0.
ysr@777 2023 g1h->set_par_threads(0);
tonyp@790 2024
johnc@3463 2025 assert(g1h->check_heap_region_claim_values(HeapRegion::FinalCountClaimValue),
tonyp@790 2026 "sanity check");
ysr@777 2027 } else {
johnc@3338 2028 n_workers = 1;
ysr@777 2029 g1_par_count_task.work(0);
ysr@777 2030 }
ysr@777 2031
johnc@3463 2032 if (VerifyDuringGC) {
johnc@3463 2033 // Verify that the counting data accumulated during marking matches
johnc@3463 2034 // that calculated by walking the marking bitmap.
johnc@3463 2035
johnc@3463 2036 // Bitmaps to hold expected values
johnc@3463 2037 BitMap expected_region_bm(_region_bm.size(), false);
johnc@3463 2038 BitMap expected_card_bm(_card_bm.size(), false);
johnc@3463 2039
johnc@3463 2040 G1ParVerifyFinalCountTask g1_par_verify_task(g1h,
johnc@3463 2041 &_region_bm,
johnc@3463 2042 &_card_bm,
johnc@3463 2043 &expected_region_bm,
johnc@3463 2044 &expected_card_bm);
johnc@3463 2045
johnc@3463 2046 if (G1CollectedHeap::use_parallel_gc_threads()) {
johnc@3463 2047 g1h->set_par_threads((int)n_workers);
johnc@3463 2048 g1h->workers()->run_task(&g1_par_verify_task);
johnc@3463 2049 // Done with the parallel phase so reset to 0.
johnc@3463 2050 g1h->set_par_threads(0);
johnc@3463 2051
johnc@3463 2052 assert(g1h->check_heap_region_claim_values(HeapRegion::VerifyCountClaimValue),
johnc@3463 2053 "sanity check");
johnc@3463 2054 } else {
johnc@3463 2055 g1_par_verify_task.work(0);
johnc@3463 2056 }
johnc@3463 2057
johnc@3463 2058 guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures");
johnc@3463 2059 }
johnc@3463 2060
ysr@777 2061 size_t start_used_bytes = g1h->used();
ysr@777 2062 g1h->set_marking_complete();
ysr@777 2063
ysr@777 2064 double count_end = os::elapsedTime();
ysr@777 2065 double this_final_counting_time = (count_end - start);
ysr@777 2066 _total_counting_time += this_final_counting_time;
ysr@777 2067
tonyp@2717 2068 if (G1PrintRegionLivenessInfo) {
tonyp@2717 2069 G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking");
tonyp@2717 2070 _g1h->heap_region_iterate(&cl);
tonyp@2717 2071 }
tonyp@2717 2072
ysr@777 2073 // Install newly created mark bitMap as "prev".
ysr@777 2074 swapMarkBitMaps();
ysr@777 2075
ysr@777 2076 g1h->reset_gc_time_stamp();
ysr@777 2077
ysr@777 2078 // Note end of marking in all heap regions.
tonyp@2472 2079 G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list);
jmasa@2188 2080 if (G1CollectedHeap::use_parallel_gc_threads()) {
jmasa@3294 2081 g1h->set_par_threads((int)n_workers);
ysr@777 2082 g1h->workers()->run_task(&g1_par_note_end_task);
ysr@777 2083 g1h->set_par_threads(0);
tonyp@790 2084
tonyp@790 2085 assert(g1h->check_heap_region_claim_values(HeapRegion::NoteEndClaimValue),
tonyp@790 2086 "sanity check");
ysr@777 2087 } else {
ysr@777 2088 g1_par_note_end_task.work(0);
ysr@777 2089 }
tonyp@3957 2090 g1h->check_gc_time_stamps();
tonyp@2472 2091
tonyp@2472 2092 if (!cleanup_list_is_empty()) {
tonyp@2472 2093 // The cleanup list is not empty, so we'll have to process it
tonyp@2472 2094 // concurrently. Notify anyone else that might be wanting free
tonyp@2472 2095 // regions that there will be more free regions coming soon.
tonyp@2472 2096 g1h->set_free_regions_coming();
tonyp@2472 2097 }
ysr@777 2098
ysr@777 2099   // Scrub the remembered sets before the record_concurrent_mark_cleanup_end()
ysr@777 2100   // call below, since it affects the metric by which we sort the heap regions.
ysr@777 2101 if (G1ScrubRemSets) {
ysr@777 2102 double rs_scrub_start = os::elapsedTime();
ysr@777 2103 G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm);
jmasa@2188 2104 if (G1CollectedHeap::use_parallel_gc_threads()) {
jmasa@3294 2105 g1h->set_par_threads((int)n_workers);
ysr@777 2106 g1h->workers()->run_task(&g1_par_scrub_rs_task);
ysr@777 2107 g1h->set_par_threads(0);
tonyp@790 2108
tonyp@790 2109 assert(g1h->check_heap_region_claim_values(
tonyp@790 2110 HeapRegion::ScrubRemSetClaimValue),
tonyp@790 2111 "sanity check");
ysr@777 2112 } else {
ysr@777 2113 g1_par_scrub_rs_task.work(0);
ysr@777 2114 }
ysr@777 2115
ysr@777 2116 double rs_scrub_end = os::elapsedTime();
ysr@777 2117 double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start);
ysr@777 2118 _total_rs_scrub_time += this_rs_scrub_time;
ysr@777 2119 }
ysr@777 2120
ysr@777 2121 // this will also free any regions totally full of garbage objects,
ysr@777 2122 // and sort the regions.
jmasa@3294 2123 g1h->g1_policy()->record_concurrent_mark_cleanup_end((int)n_workers);
ysr@777 2124
ysr@777 2125 // Statistics.
ysr@777 2126 double end = os::elapsedTime();
ysr@777 2127 _cleanup_times.add((end - start) * 1000.0);
ysr@777 2128
brutisso@3710 2129 if (G1Log::fine()) {
ysr@777 2130 g1h->print_size_transition(gclog_or_tty,
ysr@777 2131 start_used_bytes,
ysr@777 2132 g1h->used(),
ysr@777 2133 g1h->capacity());
ysr@777 2134 }
ysr@777 2135
johnc@3175 2136 // Clean up will have freed any regions completely full of garbage.
johnc@3175 2137 // Update the soft reference policy with the new heap occupancy.
johnc@3175 2138 Universe::update_heap_info_at_gc();
johnc@3175 2139
ysr@777 2140 // We need to make this be a "collection" so any collection pause that
ysr@777 2141 // races with it goes around and waits for completeCleanup to finish.
ysr@777 2142 g1h->increment_total_collections();
ysr@777 2143
tonyp@3457 2144 // We reclaimed old regions so we should calculate the sizes to make
tonyp@3457 2145 // sure we update the old gen/space data.
tonyp@3457 2146 g1h->g1mm()->update_sizes();
tonyp@3457 2147
johnc@1186 2148 if (VerifyDuringGC) {
ysr@1280 2149 HandleMark hm; // handle scope
ysr@1280 2150 Universe::heap()->prepare_for_verify();
stefank@5018 2151 Universe::verify(VerifyOption_G1UsePrevMarking,
stefank@5018 2152 " VerifyDuringGC:(after)");
ysr@777 2153 }
tonyp@2472 2154
tonyp@2472 2155 g1h->verify_region_sets_optional();
sla@5237 2156 g1h->trace_heap_after_concurrent_cycle();
ysr@777 2157 }
ysr@777 2158
ysr@777 2159 void ConcurrentMark::completeCleanup() {
ysr@777 2160 if (has_aborted()) return;
ysr@777 2161
tonyp@2472 2162 G1CollectedHeap* g1h = G1CollectedHeap::heap();
tonyp@2472 2163
jwilhelm@6549 2164 _cleanup_list.verify_optional();
tonyp@2643 2165 FreeRegionList tmp_free_list("Tmp Free List");
tonyp@2472 2166
tonyp@2472 2167 if (G1ConcRegionFreeingVerbose) {
tonyp@2472 2168 gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
tonyp@3713 2169 "cleanup list has %u entries",
tonyp@2472 2170 _cleanup_list.length());
tonyp@2472 2171 }
tonyp@2472 2172
tonyp@2472 2173   // No one else should be accessing the _cleanup_list at this point,
tonyp@2472 2174   // so it's not necessary to take any locks.
tonyp@2472 2175 while (!_cleanup_list.is_empty()) {
tonyp@2472 2176 HeapRegion* hr = _cleanup_list.remove_head();
jwilhelm@6422 2177 assert(hr != NULL, "Got NULL from a non-empty list");
tonyp@2849 2178 hr->par_clear();
jwilhelm@6422 2179 tmp_free_list.add_ordered(hr);
tonyp@2472 2180
tonyp@2472 2181 // Instead of adding one region at a time to the secondary_free_list,
tonyp@2472 2182 // we accumulate them in the local list and move them a few at a
tonyp@2472 2183 // time. This also cuts down on the number of notify_all() calls
tonyp@2472 2184 // we do during this process. We'll also append the local list when
tonyp@2472 2185 // _cleanup_list is empty (which means we just removed the last
tonyp@2472 2186 // region from the _cleanup_list).
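    // (G1SecondaryFreeListAppendLength is assumed to default to 5, so with
    // default settings we take SecondaryFreeList_lock and notify waiters
    // roughly once per five reclaimed regions rather than once per region.)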
tonyp@2643 2187 if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
tonyp@2472 2188 _cleanup_list.is_empty()) {
tonyp@2472 2189 if (G1ConcRegionFreeingVerbose) {
tonyp@2472 2190 gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
tonyp@3713 2191 "appending %u entries to the secondary_free_list, "
tonyp@3713 2192 "cleanup list still has %u entries",
tonyp@2643 2193 tmp_free_list.length(),
tonyp@2472 2194 _cleanup_list.length());
ysr@777 2195 }
tonyp@2472 2196
tonyp@2472 2197 {
tonyp@2472 2198 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
jwilhelm@6422 2199 g1h->secondary_free_list_add(&tmp_free_list);
tonyp@2472 2200 SecondaryFreeList_lock->notify_all();
tonyp@2472 2201 }
tonyp@2472 2202
tonyp@2472 2203 if (G1StressConcRegionFreeing) {
tonyp@2472 2204 for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
tonyp@2472 2205 os::sleep(Thread::current(), (jlong) 1, false);
tonyp@2472 2206 }
tonyp@2472 2207 }
ysr@777 2208 }
ysr@777 2209 }
tonyp@2643 2210 assert(tmp_free_list.is_empty(), "post-condition");
ysr@777 2211 }
ysr@777 2212
johnc@4555 2213 // Supporting Object and Oop closures for reference discovery
johnc@4555 2214 // and processing during marking
johnc@2494 2215
johnc@2379 2216 bool G1CMIsAliveClosure::do_object_b(oop obj) {
johnc@2379 2217 HeapWord* addr = (HeapWord*)obj;
johnc@2379 2218 return addr != NULL &&
johnc@2379 2219 (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
johnc@2379 2220 }
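// Put differently (a paraphrase, not normative): an oop is treated as
// alive if it lies outside the G1-reserved heap, or - via !is_obj_ill() -
// if it was allocated since the current marking started (above its
// region's NTAMS) or is marked on the next marking bitmap.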
ysr@777 2221
johnc@4555 2222 // 'Keep Alive' oop closure used by both serial and parallel reference processing.
johnc@4555 2223 // Uses the CMTask associated with a worker thread (for serial reference
johnc@4555 2224 // processing the CMTask for worker 0 is used) to preserve (mark) and
johnc@4555 2225 // trace referent objects.
johnc@4555 2226 //
johnc@4555 2227 // Using the CMTask and embedded local queues avoids having the worker
johnc@4555 2228 // threads operating on the global mark stack. This reduces the risk
johnc@4555 2229 // of overflowing the stack - which we would rather avoid at this late
johnc@4555 2230 // stage. Also, using the tasks' local queues removes the potential
johnc@4555 2231 // for the workers to interfere with each other, as could occur if
johnc@4555 2232 // they were operating on the global stack.
johnc@4555 2233
johnc@4555 2234 class G1CMKeepAliveAndDrainClosure: public OopClosure {
johnc@4787 2235 ConcurrentMark* _cm;
johnc@4787 2236 CMTask* _task;
johnc@4787 2237 int _ref_counter_limit;
johnc@4787 2238 int _ref_counter;
johnc@4787 2239 bool _is_serial;
johnc@2494 2240 public:
johnc@4787 2241 G1CMKeepAliveAndDrainClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
johnc@4787 2242 _cm(cm), _task(task), _is_serial(is_serial),
johnc@4787 2243 _ref_counter_limit(G1RefProcDrainInterval) {
johnc@2494 2244 assert(_ref_counter_limit > 0, "sanity");
johnc@4787 2245 assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
johnc@2494 2246 _ref_counter = _ref_counter_limit;
johnc@2494 2247 }
johnc@2494 2248
johnc@2494 2249 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
johnc@2494 2250 virtual void do_oop( oop* p) { do_oop_work(p); }
johnc@2494 2251
johnc@2494 2252 template <class T> void do_oop_work(T* p) {
johnc@2494 2253 if (!_cm->has_overflown()) {
johnc@2494 2254 oop obj = oopDesc::load_decode_heap_oop(p);
tonyp@2973 2255 if (_cm->verbose_high()) {
johnc@4173 2256 gclog_or_tty->print_cr("\t[%u] we're looking at location "
johnc@2494 2257 "*"PTR_FORMAT" = "PTR_FORMAT,
drchase@6680 2258 _task->worker_id(), p2i(p), p2i((void*) obj));
tonyp@2973 2259 }
johnc@2494 2260
johnc@2494 2261 _task->deal_with_reference(obj);
johnc@2494 2262 _ref_counter--;
johnc@2494 2263
johnc@2494 2264 if (_ref_counter == 0) {
johnc@4555 2265 // We have dealt with _ref_counter_limit references, pushing them
johnc@4555 2266 // and objects reachable from them on to the local stack (and
johnc@4555 2267 // possibly the global stack). Call CMTask::do_marking_step() to
johnc@4555 2268 // process these entries.
johnc@4555 2269 //
johnc@4555 2270 // We call CMTask::do_marking_step() in a loop, which we'll exit if
johnc@4555 2271 // there's nothing more to do (i.e. we're done with the entries that
johnc@4555 2272 // were pushed as a result of the CMTask::deal_with_reference() calls
johnc@4555 2273 // above) or we overflow.
johnc@4555 2274 //
johnc@4555 2275 // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
johnc@4555 2276 // flag while there may still be some work to do. (See the comment at
johnc@4555 2277 // the beginning of CMTask::do_marking_step() for those conditions -
johnc@4555 2278 // one of which is reaching the specified time target.) It is only
johnc@4555 2279 // when CMTask::do_marking_step() returns without setting the
johnc@4555 2280 // has_aborted() flag that the marking step has completed.
johnc@2494 2281 do {
johnc@2494 2282 double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
johnc@2494 2283 _task->do_marking_step(mark_step_duration_ms,
johnc@4787 2284 false /* do_termination */,
johnc@4787 2285 _is_serial);
johnc@2494 2286 } while (_task->has_aborted() && !_cm->has_overflown());
johnc@2494 2287 _ref_counter = _ref_counter_limit;
johnc@2494 2288 }
johnc@2494 2289 } else {
tonyp@2973 2290 if (_cm->verbose_high()) {
johnc@4173 2291 gclog_or_tty->print_cr("\t[%u] CM Overflow", _task->worker_id());
tonyp@2973 2292 }
johnc@2494 2293 }
johnc@2494 2294 }
johnc@2494 2295 };
johnc@2494 2296
johnc@4555 2297 // 'Drain' oop closure used by both serial and parallel reference processing.
johnc@4555 2298 // Uses the CMTask associated with a given worker thread (for serial
johnc@4555 2299 // reference processing the CMTask for worker 0 is used). Calls the
johnc@4555 2300 // do_marking_step routine, with an unbelievably large timeout value,
johnc@4555 2301 // to drain the marking data structures of the remaining entries
johnc@4555 2302 // added by the 'keep alive' oop closure above.
johnc@4555 2303
johnc@4555 2304 class G1CMDrainMarkingStackClosure: public VoidClosure {
johnc@2494 2305 ConcurrentMark* _cm;
johnc@4555 2306 CMTask* _task;
johnc@4787 2307 bool _is_serial;
johnc@2494 2308 public:
johnc@4787 2309 G1CMDrainMarkingStackClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
johnc@4787 2310 _cm(cm), _task(task), _is_serial(is_serial) {
johnc@4787 2311 assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
johnc@4555 2312 }
johnc@2494 2313
johnc@2494 2314 void do_void() {
johnc@2494 2315 do {
tonyp@2973 2316 if (_cm->verbose_high()) {
johnc@4787 2317 gclog_or_tty->print_cr("\t[%u] Drain: Calling do_marking_step - serial: %s",
johnc@4787 2318 _task->worker_id(), BOOL_TO_STR(_is_serial));
tonyp@2973 2319 }
johnc@2494 2320
johnc@4555 2321 // We call CMTask::do_marking_step() to completely drain the local
johnc@4555 2322 // and global marking stacks of entries pushed by the 'keep alive'
johnc@4555 2323 // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
johnc@4555 2324 //
johnc@4555 2325 // CMTask::do_marking_step() is called in a loop, which we'll exit
johnc@4555 2326       // if there's nothing more to do (i.e. we've completely drained the
johnc@4555 2327       // entries that were pushed as a result of applying the 'keep alive'
johnc@4555 2328 // closure to the entries on the discovered ref lists) or we overflow
johnc@4555 2329 // the global marking stack.
johnc@4555 2330 //
johnc@4555 2331 // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
johnc@4555 2332 // flag while there may still be some work to do. (See the comment at
johnc@4555 2333 // the beginning of CMTask::do_marking_step() for those conditions -
johnc@4555 2334 // one of which is reaching the specified time target.) It is only
johnc@4555 2335 // when CMTask::do_marking_step() returns without setting the
johnc@4555 2336 // has_aborted() flag that the marking step has completed.
johnc@2494 2337
johnc@2494 2338 _task->do_marking_step(1000000000.0 /* something very large */,
johnc@4787 2339 true /* do_termination */,
johnc@4787 2340 _is_serial);
johnc@2494 2341 } while (_task->has_aborted() && !_cm->has_overflown());
johnc@2494 2342 }
johnc@2494 2343 };
johnc@2494 2344
johnc@3175 2345 // Implementation of AbstractRefProcTaskExecutor for parallel
johnc@3175 2346 // reference processing at the end of G1 concurrent marking
johnc@3175 2347
johnc@3175 2348 class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
johnc@2494 2349 private:
johnc@2494 2350 G1CollectedHeap* _g1h;
johnc@2494 2351 ConcurrentMark* _cm;
johnc@2494 2352 WorkGang* _workers;
johnc@2494 2353 int _active_workers;
johnc@2494 2354
johnc@2494 2355 public:
johnc@3175 2356 G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
johnc@2494 2357 ConcurrentMark* cm,
johnc@2494 2358 WorkGang* workers,
johnc@2494 2359 int n_workers) :
johnc@3292 2360 _g1h(g1h), _cm(cm),
johnc@3292 2361 _workers(workers), _active_workers(n_workers) { }
johnc@2494 2362
johnc@2494 2363 // Executes the given task using concurrent marking worker threads.
johnc@2494 2364 virtual void execute(ProcessTask& task);
johnc@2494 2365 virtual void execute(EnqueueTask& task);
johnc@2494 2366 };
johnc@2494 2367
johnc@3175 2368 class G1CMRefProcTaskProxy: public AbstractGangTask {
johnc@2494 2369 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
johnc@2494 2370 ProcessTask& _proc_task;
johnc@2494 2371 G1CollectedHeap* _g1h;
johnc@2494 2372 ConcurrentMark* _cm;
johnc@2494 2373
johnc@2494 2374 public:
johnc@3175 2375 G1CMRefProcTaskProxy(ProcessTask& proc_task,
johnc@2494 2376 G1CollectedHeap* g1h,
johnc@3292 2377 ConcurrentMark* cm) :
johnc@2494 2378 AbstractGangTask("Process reference objects in parallel"),
johnc@4555 2379 _proc_task(proc_task), _g1h(g1h), _cm(cm) {
johnc@4787 2380 ReferenceProcessor* rp = _g1h->ref_processor_cm();
johnc@4787 2381 assert(rp->processing_is_mt(), "shouldn't be here otherwise");
johnc@4787 2382 }
johnc@2494 2383
jmasa@3357 2384 virtual void work(uint worker_id) {
johnc@4787 2385 CMTask* task = _cm->task(worker_id);
johnc@2494 2386 G1CMIsAliveClosure g1_is_alive(_g1h);
johnc@4787 2387 G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
johnc@4787 2388 G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */);
johnc@2494 2389
jmasa@3357 2390 _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
johnc@2494 2391 }
johnc@2494 2392 };
johnc@2494 2393
johnc@3175 2394 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
johnc@2494 2395 assert(_workers != NULL, "Need parallel worker threads.");
johnc@4555 2396 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
johnc@2494 2397
johnc@3292 2398 G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);
johnc@2494 2399
johnc@4788 2400 // We need to reset the concurrency level before each
johnc@4788 2401 // proxy task execution, so that the termination protocol
johnc@4788 2402 // and overflow handling in CMTask::do_marking_step() knows
johnc@4788 2403 // how many workers to wait for.
johnc@4788 2404 _cm->set_concurrency(_active_workers);
johnc@2494 2405 _g1h->set_par_threads(_active_workers);
johnc@2494 2406 _workers->run_task(&proc_task_proxy);
johnc@2494 2407 _g1h->set_par_threads(0);
johnc@2494 2408 }
johnc@2494 2409
johnc@3175 2410 class G1CMRefEnqueueTaskProxy: public AbstractGangTask {
johnc@2494 2411 typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
johnc@2494 2412 EnqueueTask& _enq_task;
johnc@2494 2413
johnc@2494 2414 public:
johnc@3175 2415 G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) :
johnc@2494 2416 AbstractGangTask("Enqueue reference objects in parallel"),
johnc@3292 2417 _enq_task(enq_task) { }
johnc@2494 2418
jmasa@3357 2419 virtual void work(uint worker_id) {
jmasa@3357 2420 _enq_task.work(worker_id);
johnc@2494 2421 }
johnc@2494 2422 };
johnc@2494 2423
johnc@3175 2424 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
johnc@2494 2425 assert(_workers != NULL, "Need parallel worker threads.");
johnc@4555 2426 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
johnc@2494 2427
johnc@3175 2428 G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);
johnc@2494 2429
johnc@4788 2430 // Not strictly necessary but...
johnc@4788 2431 //
johnc@4788 2432 // We need to reset the concurrency level before each
johnc@4788 2433 // proxy task execution, so that the termination protocol
johnc@4788 2434 // and overflow handling in CMTask::do_marking_step() knows
johnc@4788 2435 // how many workers to wait for.
johnc@4788 2436 _cm->set_concurrency(_active_workers);
johnc@2494 2437 _g1h->set_par_threads(_active_workers);
johnc@2494 2438 _workers->run_task(&enq_task_proxy);
johnc@2494 2439 _g1h->set_par_threads(0);
johnc@2494 2440 }
johnc@2494 2441
ysr@777 2442 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
johnc@4788 2443 if (has_overflown()) {
johnc@4788 2444 // Skip processing the discovered references if we have
johnc@4788 2445 // overflown the global marking stack. Reference objects
johnc@4788 2446 // only get discovered once so it is OK to not
johnc@4788 2447 // de-populate the discovered reference lists. We could have,
johnc@4788 2448 // but the only benefit would be that, when marking restarts,
johnc@4788 2449     // fewer reference objects are discovered.
johnc@4788 2450 return;
johnc@4788 2451 }
johnc@4788 2452
ysr@777 2453 ResourceMark rm;
ysr@777 2454 HandleMark hm;
johnc@3171 2455
johnc@3171 2456 G1CollectedHeap* g1h = G1CollectedHeap::heap();
johnc@3171 2457
johnc@3171 2458 // Is alive closure.
johnc@3171 2459 G1CMIsAliveClosure g1_is_alive(g1h);
johnc@3171 2460
johnc@3171 2461 // Inner scope to exclude the cleaning of the string and symbol
johnc@3171 2462 // tables from the displayed time.
johnc@3171 2463 {
brutisso@3710 2464 if (G1Log::finer()) {
johnc@3171 2465 gclog_or_tty->put(' ');
johnc@3171 2466 }
sla@5237 2467 GCTraceTime t("GC ref-proc", G1Log::finer(), false, g1h->gc_timer_cm());
johnc@3171 2468
johnc@3175 2469 ReferenceProcessor* rp = g1h->ref_processor_cm();
johnc@3171 2470
johnc@3171 2471 // See the comment in G1CollectedHeap::ref_processing_init()
johnc@3171 2472 // about how reference processing currently works in G1.
johnc@3171 2473
johnc@4555 2474 // Set the soft reference policy
johnc@3171 2475 rp->setup_policy(clear_all_soft_refs);
johnc@3171 2476 assert(_markStack.isEmpty(), "mark stack should be empty");
johnc@3171 2477
johnc@4787 2478 // Instances of the 'Keep Alive' and 'Complete GC' closures used
johnc@4787 2479 // in serial reference processing. Note these closures are also
johnc@4787 2480 // used for serially processing (by the current thread) the
johnc@4787 2481 // JNI references during parallel reference processing.
johnc@4787 2482 //
johnc@4787 2483 // These closures do not need to synchronize with the worker
johnc@4787 2484 // threads involved in parallel reference processing as these
johnc@4787 2485 // instances are executed serially by the current thread (e.g.
johnc@4787 2486 // reference processing is not multi-threaded and is thus
johnc@4787 2487 // performed by the current thread instead of a gang worker).
johnc@4787 2488 //
johnc@4787 2489 // The gang tasks involved in parallel reference processing create
johnc@4787 2490 // their own instances of these closures, which do their own
johnc@4787 2491 // synchronization among themselves.
johnc@4787 2492 G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
johnc@4787 2493 G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);
johnc@4787 2494
johnc@4787 2495 // We need at least one active thread. If reference processing
johnc@4787 2496 // is not multi-threaded we use the current (VMThread) thread,
johnc@4787 2497 // otherwise we use the work gang from the G1CollectedHeap and
johnc@4787 2498 // we utilize all the worker threads we can.
johnc@4787 2499 bool processing_is_mt = rp->processing_is_mt() && g1h->workers() != NULL;
johnc@4787 2500 uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U);
johnc@4173 2501 active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);
johnc@3171 2502
johnc@4787 2503 // Parallel processing task executor.
johnc@3292 2504 G1CMRefProcTaskExecutor par_task_executor(g1h, this,
johnc@3175 2505 g1h->workers(), active_workers);
johnc@4787 2506 AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
johnc@4555 2507
johnc@4788 2508 // Set the concurrency level. The phase was already set prior to
johnc@4788 2509 // executing the remark task.
johnc@4788 2510 set_concurrency(active_workers);
johnc@4788 2511
johnc@4555 2512 // Set the degree of MT processing here. If the discovery was done MT,
johnc@4555 2513 // the number of threads involved during discovery could differ from
johnc@4555 2514 // the number of active workers. This is OK as long as the discovered
johnc@4555 2515 // Reference lists are balanced (see balance_all_queues() and balance_queues()).
johnc@4555 2516 rp->set_active_mt_degree(active_workers);
johnc@4555 2517
johnc@4555 2518 // Process the weak references.
sla@5237 2519 const ReferenceProcessorStats& stats =
sla@5237 2520 rp->process_discovered_references(&g1_is_alive,
sla@5237 2521 &g1_keep_alive,
sla@5237 2522 &g1_drain_mark_stack,
sla@5237 2523 executor,
sla@5237 2524 g1h->gc_timer_cm());
sla@5237 2525 g1h->gc_tracer_cm()->report_gc_reference_stats(stats);
johnc@4555 2526
johnc@4555 2527 // The do_oop work routines of the keep_alive and drain_marking_stack
johnc@4555 2528 // oop closures will set the has_overflown flag if we overflow the
johnc@4555 2529 // global marking stack.
johnc@3171 2530
johnc@3171 2531 assert(_markStack.overflow() || _markStack.isEmpty(),
johnc@3171 2532 "mark stack should be empty (unless it overflowed)");
johnc@4787 2533
johnc@3171 2534 if (_markStack.overflow()) {
johnc@4555 2535 // This should have been done already when we tried to push an
johnc@3171 2536 // entry on to the global mark stack. But let's do it again.
johnc@3171 2537 set_has_overflown();
johnc@3171 2538 }
johnc@3171 2539
johnc@4555 2540 assert(rp->num_q() == active_workers, "Reference processor queue count should match the number of active workers");
johnc@4555 2541
johnc@4555 2542 rp->enqueue_discovered_references(executor);
johnc@3171 2543
johnc@3171 2544 rp->verify_no_references_recorded();
johnc@3175 2545 assert(!rp->discovery_enabled(), "Post condition");
johnc@2494 2546 }
johnc@2494 2547
pliden@6399 2548 if (has_overflown()) {
pliden@6399 2549 // We can not trust g1_is_alive if the marking stack overflowed
pliden@6399 2550 return;
pliden@6399 2551 }
pliden@6399 2552
tschatzl@6230 2553 g1h->unlink_string_and_symbol_table(&g1_is_alive,
tschatzl@6230 2554 /* process_strings */ false, // currently strings are always roots
tschatzl@6230 2555 /* process_symbols */ true);
ysr@777 2556 }
ysr@777 2557
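// Swap the prev and next marking bitmap pointers, so that the results
// of the marking cycle that just completed (recorded in the next
// bitmap) become the prev view of liveness for the following cycle.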
ysr@777 2558 void ConcurrentMark::swapMarkBitMaps() {
ysr@777 2559 CMBitMapRO* temp = _prevMarkBitMap;
ysr@777 2560 _prevMarkBitMap = (CMBitMapRO*)_nextMarkBitMap;
ysr@777 2561 _nextMarkBitMap = (CMBitMap*) temp;
ysr@777 2562 }
ysr@777 2563
ysr@777 2564 class CMRemarkTask: public AbstractGangTask {
ysr@777 2565 private:
johnc@4787 2566 ConcurrentMark* _cm;
johnc@4787 2567 bool _is_serial;
ysr@777 2568 public:
jmasa@3357 2569 void work(uint worker_id) {
ysr@777 2570 // Since all available tasks are actually started, we should
ysr@777 2571 // only proceed if we're supposed to be active.
jmasa@3357 2572 if (worker_id < _cm->active_tasks()) {
jmasa@3357 2573 CMTask* task = _cm->task(worker_id);
ysr@777 2574 task->record_start_time();
ysr@777 2575 do {
johnc@2494 2576 task->do_marking_step(1000000000.0 /* something very large */,
johnc@4787 2577 true /* do_termination */,
johnc@4787 2578 _is_serial);
ysr@777 2579 } while (task->has_aborted() && !_cm->has_overflown());
ysr@777 2580 // If we overflow, then we do not want to restart. We instead
ysr@777 2581 // want to abort remark and do concurrent marking again.
ysr@777 2582 task->record_end_time();
ysr@777 2583 }
ysr@777 2584 }
ysr@777 2585
johnc@4787 2586 CMRemarkTask(ConcurrentMark* cm, int active_workers, bool is_serial) :
johnc@4787 2587 AbstractGangTask("Par Remark"), _cm(cm), _is_serial(is_serial) {
johnc@3338 2588 _cm->terminator()->reset_for_reuse(active_workers);
jmasa@3294 2589 }
ysr@777 2590 };
ysr@777 2591
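// Final remark work: runs the remark task through the work gang when
// parallel GC threads are in use, otherwise serially on the VMThread,
// and then verifies that all completed SATB buffers have been
// processed (unless marking has overflowed).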
ysr@777 2592 void ConcurrentMark::checkpointRootsFinalWork() {
ysr@777 2593 ResourceMark rm;
ysr@777 2594 HandleMark hm;
ysr@777 2595 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@777 2596
ysr@777 2597 g1h->ensure_parsability(false);
ysr@777 2598
jmasa@2188 2599 if (G1CollectedHeap::use_parallel_gc_threads()) {
jrose@1424 2600 G1CollectedHeap::StrongRootsScope srs(g1h);
jmasa@3294 2601 // this is remark, so we'll use up all active threads
jmasa@3357 2602 uint active_workers = g1h->workers()->active_workers();
jmasa@3294 2603 if (active_workers == 0) {
jmasa@3294 2604 assert(active_workers > 0, "Should have been set earlier");
jmasa@3357 2605 active_workers = (uint) ParallelGCThreads;
jmasa@3294 2606 g1h->workers()->set_active_workers(active_workers);
jmasa@3294 2607 }
johnc@4788 2608 set_concurrency_and_phase(active_workers, false /* concurrent */);
jmasa@3294 2609 // Leave _parallel_marking_threads at its
jmasa@3294 2610 // value originally calculated in the ConcurrentMark
jmasa@3294 2611 // constructor and pass values of the active workers
jmasa@3294 2612 // through the gang in the task.
ysr@777 2613
johnc@4787 2614 CMRemarkTask remarkTask(this, active_workers, false /* is_serial */);
johnc@4787 2615 // We will start all available threads, even if we decide that the
johnc@4787 2616 // active_workers will be fewer. The extra ones will just bail out
johnc@4787 2617 // immediately.
jmasa@3294 2618 g1h->set_par_threads(active_workers);
ysr@777 2619 g1h->workers()->run_task(&remarkTask);
ysr@777 2620 g1h->set_par_threads(0);
ysr@777 2621 } else {
jrose@1424 2622 G1CollectedHeap::StrongRootsScope srs(g1h);
jmasa@3357 2623 uint active_workers = 1;
johnc@4788 2624 set_concurrency_and_phase(active_workers, false /* concurrent */);
ysr@777 2625
johnc@4787 2626 // Note - if there's no work gang then the VMThread will be
johnc@4787 2627 // the thread to execute the remark - serially. We have
johnc@4787 2628 // to pass true for the is_serial parameter so that
johnc@4787 2629 // CMTask::do_marking_step() doesn't enter the sync
johnc@4787 2630 // barriers in the event of an overflow. Doing so will
johnc@4787 2631 // cause an assert that the current thread is not a
johnc@4787 2632 // concurrent GC thread.
johnc@4787 2633 CMRemarkTask remarkTask(this, active_workers, true /* is_serial */);
ysr@777 2634 remarkTask.work(0);
ysr@777 2635 }
tonyp@1458 2636 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
johnc@4789 2637 guarantee(has_overflown() ||
johnc@4789 2638 satb_mq_set.completed_buffers_num() == 0,
johnc@4789 2639 err_msg("Invariant: has_overflown = %s, num buffers = %d",
johnc@4789 2640 BOOL_TO_STR(has_overflown()),
johnc@4789 2641 satb_mq_set.completed_buffers_num()));
ysr@777 2642
ysr@777 2643 print_stats();
ysr@777 2644 }
ysr@777 2645
tonyp@1479 2646 #ifndef PRODUCT
tonyp@1479 2647
tonyp@1823 2648 class PrintReachableOopClosure: public OopClosure {
ysr@777 2649 private:
ysr@777 2650 G1CollectedHeap* _g1h;
ysr@777 2651 outputStream* _out;
johnc@2969 2652 VerifyOption _vo;
tonyp@1823 2653 bool _all;
ysr@777 2654
ysr@777 2655 public:
johnc@2969 2656 PrintReachableOopClosure(outputStream* out,
johnc@2969 2657 VerifyOption vo,
tonyp@1823 2658 bool all) :
tonyp@1479 2659 _g1h(G1CollectedHeap::heap()),
johnc@2969 2660 _out(out), _vo(vo), _all(all) { }
ysr@777 2661
ysr@1280 2662 void do_oop(narrowOop* p) { do_oop_work(p); }
ysr@1280 2663 void do_oop( oop* p) { do_oop_work(p); }
ysr@1280 2664
ysr@1280 2665 template <class T> void do_oop_work(T* p) {
ysr@1280 2666 oop obj = oopDesc::load_decode_heap_oop(p);
ysr@777 2667 const char* str = NULL;
ysr@777 2668 const char* str2 = "";
ysr@777 2669
tonyp@1823 2670 if (obj == NULL) {
tonyp@1823 2671 str = "";
tonyp@1823 2672 } else if (!_g1h->is_in_g1_reserved(obj)) {
tonyp@1823 2673 str = " O";
tonyp@1823 2674 } else {
ysr@777 2675 HeapRegion* hr = _g1h->heap_region_containing(obj);
tonyp@1458 2676 guarantee(hr != NULL, "invariant");
tonyp@3957 2677 bool over_tams = _g1h->allocated_since_marking(obj, hr, _vo);
tonyp@3957 2678 bool marked = _g1h->is_marked(obj, _vo);
tonyp@1479 2679
tonyp@1479 2680 if (over_tams) {
tonyp@1823 2681 str = " >";
tonyp@1823 2682 if (marked) {
ysr@777 2683 str2 = " AND MARKED";
tonyp@1479 2684 }
tonyp@1823 2685 } else if (marked) {
tonyp@1823 2686 str = " M";
tonyp@1479 2687 } else {
tonyp@1823 2688 str = " NOT";
tonyp@1479 2689 }
ysr@777 2690 }
ysr@777 2691
tonyp@1823 2692 _out->print_cr(" "PTR_FORMAT": "PTR_FORMAT"%s%s",
drchase@6680 2693 p2i(p), p2i((void*) obj), str, str2);
ysr@777 2694 }
ysr@777 2695 };
ysr@777 2696
tonyp@1823 2697 class PrintReachableObjectClosure : public ObjectClosure {
ysr@777 2698 private:
johnc@2969 2699 G1CollectedHeap* _g1h;
johnc@2969 2700 outputStream* _out;
johnc@2969 2701 VerifyOption _vo;
johnc@2969 2702 bool _all;
johnc@2969 2703 HeapRegion* _hr;
ysr@777 2704
ysr@777 2705 public:
johnc@2969 2706 PrintReachableObjectClosure(outputStream* out,
johnc@2969 2707 VerifyOption vo,
tonyp@1823 2708 bool all,
tonyp@1823 2709 HeapRegion* hr) :
johnc@2969 2710 _g1h(G1CollectedHeap::heap()),
johnc@2969 2711 _out(out), _vo(vo), _all(all), _hr(hr) { }
tonyp@1823 2712
tonyp@1823 2713 void do_object(oop o) {
tonyp@3957 2714 bool over_tams = _g1h->allocated_since_marking(o, _hr, _vo);
tonyp@3957 2715 bool marked = _g1h->is_marked(o, _vo);
tonyp@1823 2716 bool print_it = _all || over_tams || marked;
tonyp@1823 2717
tonyp@1823 2718 if (print_it) {
tonyp@1823 2719 _out->print_cr(" "PTR_FORMAT"%s",
drchase@6680 2720 p2i((void *)o), (over_tams) ? " >" : (marked) ? " M" : "");
johnc@2969 2721 PrintReachableOopClosure oopCl(_out, _vo, _all);
coleenp@4037 2722 o->oop_iterate_no_header(&oopCl);
tonyp@1823 2723 }
ysr@777 2724 }
ysr@777 2725 };
ysr@777 2726
tonyp@1823 2727 class PrintReachableRegionClosure : public HeapRegionClosure {
ysr@777 2728 private:
tonyp@3957 2729 G1CollectedHeap* _g1h;
tonyp@3957 2730 outputStream* _out;
tonyp@3957 2731 VerifyOption _vo;
tonyp@3957 2732 bool _all;
ysr@777 2733
ysr@777 2734 public:
ysr@777 2735 bool doHeapRegion(HeapRegion* hr) {
ysr@777 2736 HeapWord* b = hr->bottom();
ysr@777 2737 HeapWord* e = hr->end();
ysr@777 2738 HeapWord* t = hr->top();
tonyp@3957 2739 HeapWord* p = _g1h->top_at_mark_start(hr, _vo);
ysr@777 2740 _out->print_cr("** ["PTR_FORMAT", "PTR_FORMAT"] top: "PTR_FORMAT" "
drchase@6680 2741 "TAMS: " PTR_FORMAT, p2i(b), p2i(e), p2i(t), p2i(p));
tonyp@1823 2742 _out->cr();
tonyp@1823 2743
tonyp@1823 2744 HeapWord* from = b;
tonyp@1823 2745 HeapWord* to = t;
tonyp@1823 2746
tonyp@1823 2747 if (to > from) {
drchase@6680 2748 _out->print_cr("Objects in [" PTR_FORMAT ", " PTR_FORMAT "]", p2i(from), p2i(to));
tonyp@1823 2749 _out->cr();
johnc@2969 2750 PrintReachableObjectClosure ocl(_out, _vo, _all, hr);
tonyp@1823 2751 hr->object_iterate_mem_careful(MemRegion(from, to), &ocl);
tonyp@1823 2752 _out->cr();
tonyp@1823 2753 }
ysr@777 2754
ysr@777 2755 return false;
ysr@777 2756 }
ysr@777 2757
johnc@2969 2758 PrintReachableRegionClosure(outputStream* out,
johnc@2969 2759 VerifyOption vo,
tonyp@1823 2760 bool all) :
tonyp@3957 2761 _g1h(G1CollectedHeap::heap()), _out(out), _vo(vo), _all(all) { }
ysr@777 2762 };
ysr@777 2763
tonyp@1823 2764 void ConcurrentMark::print_reachable(const char* str,
johnc@2969 2765 VerifyOption vo,
tonyp@1823 2766 bool all) {
tonyp@1823 2767 gclog_or_tty->cr();
tonyp@1823 2768 gclog_or_tty->print_cr("== Doing heap dump... ");
tonyp@1479 2769
tonyp@1479 2770 if (G1PrintReachableBaseFile == NULL) {
tonyp@1479 2771 gclog_or_tty->print_cr(" #### error: no base file defined");
tonyp@1479 2772 return;
tonyp@1479 2773 }
tonyp@1479 2774
tonyp@1479 2775 if (strlen(G1PrintReachableBaseFile) + 1 + strlen(str) >
tonyp@1479 2776 (JVM_MAXPATHLEN - 1)) {
tonyp@1479 2777 gclog_or_tty->print_cr(" #### error: file name too long");
tonyp@1479 2778 return;
tonyp@1479 2779 }
tonyp@1479 2780
tonyp@1479 2781 char file_name[JVM_MAXPATHLEN];
tonyp@1479 2782 sprintf(file_name, "%s.%s", G1PrintReachableBaseFile, str);
tonyp@1479 2783 gclog_or_tty->print_cr(" dumping to file %s", file_name);
tonyp@1479 2784
tonyp@1479 2785 fileStream fout(file_name);
tonyp@1479 2786 if (!fout.is_open()) {
tonyp@1479 2787 gclog_or_tty->print_cr(" #### error: could not open file");
tonyp@1479 2788 return;
tonyp@1479 2789 }
tonyp@1479 2790
tonyp@1479 2791 outputStream* out = &fout;
tonyp@3957 2792 out->print_cr("-- USING %s", _g1h->top_at_mark_start_str(vo));
tonyp@1479 2793 out->cr();
tonyp@1479 2794
tonyp@1823 2795 out->print_cr("--- ITERATING OVER REGIONS");
tonyp@1479 2796 out->cr();
johnc@2969 2797 PrintReachableRegionClosure rcl(out, vo, all);
ysr@777 2798 _g1h->heap_region_iterate(&rcl);
tonyp@1479 2799 out->cr();
tonyp@1479 2800
tonyp@1479 2801 gclog_or_tty->print_cr(" done");
tonyp@1823 2802 gclog_or_tty->flush();
ysr@777 2803 }
ysr@777 2804
tonyp@1479 2805 #endif // PRODUCT
tonyp@1479 2806
tonyp@3416 2807 void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
ysr@777 2808 // Note we are overriding the read-only view of the prev map here, via
ysr@777 2809 // the cast.
ysr@777 2810 ((CMBitMap*)_prevMarkBitMap)->clearRange(mr);
tonyp@3416 2811 }
tonyp@3416 2812
tonyp@3416 2813 void ConcurrentMark::clearRangeNextBitmap(MemRegion mr) {
ysr@777 2814 _nextMarkBitMap->clearRange(mr);
ysr@777 2815 }
ysr@777 2816
tonyp@3416 2817 void ConcurrentMark::clearRangeBothBitmaps(MemRegion mr) {
tonyp@3416 2818 clearRangePrevBitmap(mr);
tonyp@3416 2819 clearRangeNextBitmap(mr);
tonyp@3416 2820 }
tonyp@3416 2821
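// Claim the next region to be scanned by worker 'worker_id': read the
// global finger and try to CAS it forward to the end of the containing
// region. On success the claimed region is returned (or NULL if it
// turned out to be empty, in which case the caller should retry); on
// CAS failure the finger is re-read and the claim is attempted again.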
ysr@777 2822 HeapRegion*
johnc@4173 2823 ConcurrentMark::claim_region(uint worker_id) {
ysr@777 2824 // "checkpoint" the finger
ysr@777 2825 HeapWord* finger = _finger;
ysr@777 2826
ysr@777 2827 // _heap_end will not change underneath our feet; it only changes at
ysr@777 2828 // yield points.
ysr@777 2829 while (finger < _heap_end) {
tonyp@1458 2830 assert(_g1h->is_in_g1_reserved(finger), "invariant");
ysr@777 2831
tonyp@2968 2832 // Note on how this code handles humongous regions. In the
tonyp@2968 2833 // normal case the finger will reach the start of a "starts
tonyp@2968 2834 // humongous" (SH) region. Its end will either be the end of the
tonyp@2968 2835 // last "continues humongous" (CH) region in the sequence, or the
tonyp@2968 2836 // standard end of the SH region (if the SH is the only region in
tonyp@2968 2837 // the sequence). That way claim_region() will skip over the CH
tonyp@2968 2838 // regions. However, there is a subtle race between a CM thread
tonyp@2968 2839 // executing this method and a mutator thread doing a humongous
tonyp@2968 2840 // object allocation. The two are not mutually exclusive as the CM
tonyp@2968 2841 // thread does not need to hold the Heap_lock when it gets
tonyp@2968 2842 // here. So there is a chance that claim_region() will come across
tonyp@2968 2843 // a free region that's in the process of becoming an SH or a CH
tonyp@2968 2844 // region. In the former case, it will either
tonyp@2968 2845 // a) Miss the update to the region's end, in which case it will
tonyp@2968 2846 // visit every subsequent CH region, will find their bitmaps
tonyp@2968 2847 // empty, and do nothing, or
tonyp@2968 2848 // b) Will observe the update of the region's end (in which case
tonyp@2968 2849 // it will skip the subsequent CH regions).
tonyp@2968 2850 // If it comes across a region that suddenly becomes CH, the
tonyp@2968 2851 // scenario will be similar to b). So, the race between
tonyp@2968 2852 // claim_region() and a humongous object allocation might force us
tonyp@2968 2853 // to do a bit of unnecessary work (due to some unnecessary bitmap
tonyp@2968 2854 // iterations) but it should not introduce any correctness issues.
tonyp@2968 2855 HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger);
ysr@777 2856 HeapWord* bottom = curr_region->bottom();
ysr@777 2857 HeapWord* end = curr_region->end();
ysr@777 2858 HeapWord* limit = curr_region->next_top_at_mark_start();
ysr@777 2859
tonyp@2968 2860 if (verbose_low()) {
johnc@4173 2861 gclog_or_tty->print_cr("[%u] curr_region = "PTR_FORMAT" "
ysr@777 2862 "["PTR_FORMAT", "PTR_FORMAT"), "
ysr@777 2863 "limit = "PTR_FORMAT,
drchase@6680 2864 worker_id, p2i(curr_region), p2i(bottom), p2i(end), p2i(limit));
tonyp@2968 2865 }
tonyp@2968 2866
tonyp@2968 2867 // Is the gap between reading the finger and doing the CAS too long?
tonyp@2968 2868 HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
ysr@777 2869 if (res == finger) {
ysr@777 2870 // we succeeded
ysr@777 2871
ysr@777 2872 // notice that _finger == end cannot be guaranteed here since
ysr@777 2873 // someone else might have moved the finger even further
tonyp@1458 2874 assert(_finger >= end, "the finger should have moved forward");
ysr@777 2875
tonyp@2973 2876 if (verbose_low()) {
johnc@4173 2877 gclog_or_tty->print_cr("[%u] we were successful with region = "
drchase@6680 2878 PTR_FORMAT, worker_id, p2i(curr_region));
tonyp@2973 2879 }
ysr@777 2880
ysr@777 2881 if (limit > bottom) {
tonyp@2973 2882 if (verbose_low()) {
johnc@4173 2883 gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is not empty, "
drchase@6680 2884 "returning it ", worker_id, p2i(curr_region));
tonyp@2973 2885 }
ysr@777 2886 return curr_region;
ysr@777 2887 } else {
tonyp@1458 2888 assert(limit == bottom,
tonyp@1458 2889 "the region limit should be at bottom");
tonyp@2973 2890 if (verbose_low()) {
johnc@4173 2891 gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is empty, "
drchase@6680 2892 "returning NULL", worker_id, p2i(curr_region));
tonyp@2973 2893 }
ysr@777 2894 // we return NULL and the caller should try calling
ysr@777 2895 // claim_region() again.
ysr@777 2896 return NULL;
ysr@777 2897 }
ysr@777 2898 } else {
tonyp@1458 2899 assert(_finger > finger, "the finger should have moved forward");
tonyp@2973 2900 if (verbose_low()) {
johnc@4173 2901 gclog_or_tty->print_cr("[%u] somebody else moved the finger, "
ysr@777 2902 "global finger = "PTR_FORMAT", "
ysr@777 2903 "our finger = "PTR_FORMAT,
drchase@6680 2904 worker_id, p2i(_finger), p2i(finger));
tonyp@2973 2905 }
ysr@777 2906
ysr@777 2907 // read it again
ysr@777 2908 finger = _finger;
ysr@777 2909 }
ysr@777 2910 }
ysr@777 2911
ysr@777 2912 return NULL;
ysr@777 2913 }
ysr@777 2914
tonyp@3416 2915 #ifndef PRODUCT
tonyp@3416 2916 enum VerifyNoCSetOopsPhase {
tonyp@3416 2917 VerifyNoCSetOopsStack,
tonyp@3416 2918 VerifyNoCSetOopsQueues,
tonyp@3416 2919 VerifyNoCSetOopsSATBCompleted,
tonyp@3416 2920 VerifyNoCSetOopsSATBThread
tonyp@3416 2921 };
tonyp@3416 2922
tonyp@3416 2923 class VerifyNoCSetOopsClosure : public OopClosure, public ObjectClosure {
tonyp@3416 2924 private:
tonyp@3416 2925 G1CollectedHeap* _g1h;
tonyp@3416 2926 VerifyNoCSetOopsPhase _phase;
tonyp@3416 2927 int _info;
tonyp@3416 2928
tonyp@3416 2929 const char* phase_str() {
tonyp@3416 2930 switch (_phase) {
tonyp@3416 2931 case VerifyNoCSetOopsStack: return "Stack";
tonyp@3416 2932 case VerifyNoCSetOopsQueues: return "Queue";
tonyp@3416 2933 case VerifyNoCSetOopsSATBCompleted: return "Completed SATB Buffers";
tonyp@3416 2934 case VerifyNoCSetOopsSATBThread: return "Thread SATB Buffers";
tonyp@3416 2935 default: ShouldNotReachHere();
tonyp@3416 2936 }
tonyp@3416 2937 return NULL;
ysr@777 2938 }
johnc@2190 2939
tonyp@3416 2940 void do_object_work(oop obj) {
tonyp@3416 2941 guarantee(!_g1h->obj_in_cs(obj),
tonyp@3416 2942 err_msg("obj: "PTR_FORMAT" in CSet, phase: %s, info: %d",
drchase@6680 2943 p2i((void*) obj), phase_str(), _info));
johnc@2190 2944 }
johnc@2190 2945
tonyp@3416 2946 public:
tonyp@3416 2947 VerifyNoCSetOopsClosure() : _g1h(G1CollectedHeap::heap()) { }
tonyp@3416 2948
tonyp@3416 2949 void set_phase(VerifyNoCSetOopsPhase phase, int info = -1) {
tonyp@3416 2950 _phase = phase;
tonyp@3416 2951 _info = info;
tonyp@3416 2952 }
tonyp@3416 2953
tonyp@3416 2954 virtual void do_oop(oop* p) {
tonyp@3416 2955 oop obj = oopDesc::load_decode_heap_oop(p);
tonyp@3416 2956 do_object_work(obj);
tonyp@3416 2957 }
tonyp@3416 2958
tonyp@3416 2959 virtual void do_oop(narrowOop* p) {
tonyp@3416 2960 // We should not come across narrow oops while scanning marking
tonyp@3416 2961 // stacks and SATB buffers.
tonyp@3416 2962 ShouldNotReachHere();
tonyp@3416 2963 }
tonyp@3416 2964
tonyp@3416 2965 virtual void do_object(oop obj) {
tonyp@3416 2966 do_object_work(obj);
tonyp@3416 2967 }
tonyp@3416 2968 };
tonyp@3416 2969
tonyp@3416 2970 void ConcurrentMark::verify_no_cset_oops(bool verify_stacks,
tonyp@3416 2971 bool verify_enqueued_buffers,
tonyp@3416 2972 bool verify_thread_buffers,
tonyp@3416 2973 bool verify_fingers) {
tonyp@3416 2974 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
tonyp@3416 2975 if (!G1CollectedHeap::heap()->mark_in_progress()) {
tonyp@3416 2976 return;
tonyp@3416 2977 }
tonyp@3416 2978
tonyp@3416 2979 VerifyNoCSetOopsClosure cl;
tonyp@3416 2980
tonyp@3416 2981 if (verify_stacks) {
tonyp@3416 2982 // Verify entries on the global mark stack
tonyp@3416 2983 cl.set_phase(VerifyNoCSetOopsStack);
tonyp@3416 2984 _markStack.oops_do(&cl);
tonyp@3416 2985
tonyp@3416 2986 // Verify entries on the task queues
johnc@4173 2987 for (uint i = 0; i < _max_worker_id; i += 1) {
tonyp@3416 2988 cl.set_phase(VerifyNoCSetOopsQueues, i);
johnc@4333 2989 CMTaskQueue* queue = _task_queues->queue(i);
tonyp@3416 2990 queue->oops_do(&cl);
tonyp@3416 2991 }
tonyp@3416 2992 }
tonyp@3416 2993
tonyp@3416 2994 SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
tonyp@3416 2995
tonyp@3416 2996 // Verify entries on the enqueued SATB buffers
tonyp@3416 2997 if (verify_enqueued_buffers) {
tonyp@3416 2998 cl.set_phase(VerifyNoCSetOopsSATBCompleted);
tonyp@3416 2999 satb_qs.iterate_completed_buffers_read_only(&cl);
tonyp@3416 3000 }
tonyp@3416 3001
tonyp@3416 3002 // Verify entries on the per-thread SATB buffers
tonyp@3416 3003 if (verify_thread_buffers) {
tonyp@3416 3004 cl.set_phase(VerifyNoCSetOopsSATBThread);
tonyp@3416 3005 satb_qs.iterate_thread_buffers_read_only(&cl);
tonyp@3416 3006 }
tonyp@3416 3007
tonyp@3416 3008 if (verify_fingers) {
tonyp@3416 3009 // Verify the global finger
tonyp@3416 3010 HeapWord* global_finger = finger();
tonyp@3416 3011 if (global_finger != NULL && global_finger < _heap_end) {
tonyp@3416 3012 // The global finger always points to a heap region boundary. We
tonyp@3416 3013 // use heap_region_containing_raw() to get the containing region
tonyp@3416 3014 // given that the global finger could be pointing to a free region
tonyp@3416 3015 // which subsequently becomes continues humongous. If that
tonyp@3416 3016 // happens, heap_region_containing() will return the bottom of the
tonyp@3416 3017 // corresponding starts humongous region and the check below will
tonyp@3416 3018 // not hold any more.
tonyp@3416 3019 HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger);
tonyp@3416 3020 guarantee(global_finger == global_hr->bottom(),
tonyp@3416 3021 err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT,
drchase@6680 3022 p2i(global_finger), HR_FORMAT_PARAMS(global_hr)));
tonyp@3416 3023 }
tonyp@3416 3024
tonyp@3416 3025 // Verify the task fingers
johnc@4173 3026 assert(parallel_marking_threads() <= _max_worker_id, "sanity");
tonyp@3416 3027 for (int i = 0; i < (int) parallel_marking_threads(); i += 1) {
tonyp@3416 3028 CMTask* task = _tasks[i];
tonyp@3416 3029 HeapWord* task_finger = task->finger();
tonyp@3416 3030 if (task_finger != NULL && task_finger < _heap_end) {
tonyp@3416 3031 // See above note on the global finger verification.
tonyp@3416 3032 HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger);
tonyp@3416 3033 guarantee(task_finger == task_hr->bottom() ||
tonyp@3416 3034 !task_hr->in_collection_set(),
tonyp@3416 3035 err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT,
drchase@6680 3036 p2i(task_finger), HR_FORMAT_PARAMS(task_hr)));
tonyp@3416 3037 }
tonyp@3416 3038 }
tonyp@3416 3039 }
ysr@777 3040 }
tonyp@3416 3041 #endif // PRODUCT
ysr@777 3042
johnc@3463 3043 // Aggregate the counting data that was constructed concurrently
johnc@3463 3044 // with marking.
johnc@3463 3045 class AggregateCountDataHRClosure: public HeapRegionClosure {
johnc@4123 3046 G1CollectedHeap* _g1h;
johnc@3463 3047 ConcurrentMark* _cm;
johnc@4123 3048 CardTableModRefBS* _ct_bs;
johnc@3463 3049 BitMap* _cm_card_bm;
johnc@4173 3050 uint _max_worker_id;
johnc@3463 3051
johnc@3463 3052 public:
johnc@4123 3053 AggregateCountDataHRClosure(G1CollectedHeap* g1h,
johnc@3463 3054 BitMap* cm_card_bm,
johnc@4173 3055 uint max_worker_id) :
johnc@4123 3056 _g1h(g1h), _cm(g1h->concurrent_mark()),
johnc@4123 3057 _ct_bs((CardTableModRefBS*) (g1h->barrier_set())),
johnc@4173 3058 _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { }
johnc@3463 3059
johnc@3463 3060 bool doHeapRegion(HeapRegion* hr) {
johnc@3463 3061 if (hr->continuesHumongous()) {
johnc@3463 3062 // We will ignore these here and process them when their
johnc@3463 3063 // associated "starts humongous" region is processed.
johnc@3463 3064 // Note that we cannot rely on their associated
johnc@3463 3065 // "starts humongous" region to have their bit set to 1
johnc@3463 3066 // since, due to the region chunking in the parallel region
johnc@3463 3067 // iteration, a "continues humongous" region might be visited
johnc@3463 3068 // before its associated "starts humongous".
johnc@3463 3069 return false;
johnc@3463 3070 }
johnc@3463 3071
johnc@3463 3072 HeapWord* start = hr->bottom();
johnc@3463 3073 HeapWord* limit = hr->next_top_at_mark_start();
johnc@3463 3074 HeapWord* end = hr->end();
johnc@3463 3075
johnc@3463 3076 assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(),
johnc@3463 3077 err_msg("Preconditions not met - "
johnc@3463 3078 "start: "PTR_FORMAT", limit: "PTR_FORMAT", "
johnc@3463 3079 "top: "PTR_FORMAT", end: "PTR_FORMAT,
drchase@6680 3080 p2i(start), p2i(limit), p2i(hr->top()), p2i(hr->end())));
johnc@3463 3081
johnc@3463 3082 assert(hr->next_marked_bytes() == 0, "Precondition");
johnc@3463 3083
johnc@3463 3084 if (start == limit) {
johnc@3463 3085 // NTAMS of this region has not been set so nothing to do.
johnc@3463 3086 return false;
johnc@3463 3087 }
johnc@3463 3088
johnc@4123 3089 // 'start' should be in the heap.
johnc@4123 3090 assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity");
johnc@4123 3091 // 'end' *may* be just beyond the end of the heap (if hr is the last region)
johnc@4123 3092 assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity");
johnc@3463 3093
johnc@3463 3094 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
johnc@3463 3095 BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit);
johnc@3463 3096 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(end);
johnc@3463 3097
johnc@4123 3098 // If ntams is not card aligned then we bump card bitmap index
johnc@4123 3099 // for limit so that we get all the cards spanned by
johnc@4123 3100 // the object ending at ntams.
johnc@4123 3101 // Note: if this is the last region in the heap then ntams
johnc@4123 3102 // could actually be just beyond the end of the heap;
johnc@4123 3103 // limit_idx will then correspond to a (non-existent) card
johnc@4123 3104 // that is also outside the heap.
johnc@4123 3105 if (_g1h->is_in_g1_reserved(limit) && !_ct_bs->is_card_aligned(limit)) {
johnc@3463 3106 limit_idx += 1;
johnc@3463 3107 }
johnc@3463 3108
johnc@3463 3109 assert(limit_idx <= end_idx, "or else use atomics");
johnc@3463 3110
johnc@3463 3111 // Aggregate the "stripe" in the count data associated with hr.
tonyp@3713 3112 uint hrs_index = hr->hrs_index();
johnc@3463 3113 size_t marked_bytes = 0;
johnc@3463 3114
johnc@4173 3115 for (uint i = 0; i < _max_worker_id; i += 1) {
johnc@3463 3116 size_t* marked_bytes_array = _cm->count_marked_bytes_array_for(i);
johnc@3463 3117 BitMap* task_card_bm = _cm->count_card_bitmap_for(i);
johnc@3463 3118
johnc@3463 3119 // Fetch the marked_bytes in this region for task i and
johnc@3463 3120 // add it to the running total for this region.
johnc@3463 3121 marked_bytes += marked_bytes_array[hrs_index];
johnc@3463 3122
johnc@4173 3123 // Now union the bitmaps[0,max_worker_id)[start_idx..limit_idx)
johnc@3463 3124 // into the global card bitmap.
johnc@3463 3125 BitMap::idx_t scan_idx = task_card_bm->get_next_one_offset(start_idx, limit_idx);
johnc@3463 3126
johnc@3463 3127 while (scan_idx < limit_idx) {
johnc@3463 3128 assert(task_card_bm->at(scan_idx) == true, "should be");
johnc@3463 3129 _cm_card_bm->set_bit(scan_idx);
johnc@3463 3130 assert(_cm_card_bm->at(scan_idx) == true, "should be");
johnc@3463 3131
johnc@3463 3132 // BitMap::get_next_one_offset() can handle the case when
johnc@3463 3133 // its left_offset parameter is greater than its right_offset
johnc@4123 3134 // parameter. It does, however, have an early exit if
johnc@3463 3135 // left_offset == right_offset. So let's limit the value
johnc@3463 3136 // passed in for left offset here.
johnc@3463 3137 BitMap::idx_t next_idx = MIN2(scan_idx + 1, limit_idx);
johnc@3463 3138 scan_idx = task_card_bm->get_next_one_offset(next_idx, limit_idx);
johnc@3463 3139 }
johnc@3463 3140 }
johnc@3463 3141
johnc@3463 3142 // Update the marked bytes for this region.
johnc@3463 3143 hr->add_to_marked_bytes(marked_bytes);
johnc@3463 3144
johnc@3463 3145 // Next heap region
johnc@3463 3146 return false;
johnc@3463 3147 }
johnc@3463 3148 };
johnc@3463 3149
johnc@3463 3150 class G1AggregateCountDataTask: public AbstractGangTask {
johnc@3463 3151 protected:
johnc@3463 3152 G1CollectedHeap* _g1h;
johnc@3463 3153 ConcurrentMark* _cm;
johnc@3463 3154 BitMap* _cm_card_bm;
johnc@4173 3155 uint _max_worker_id;
johnc@3463 3156 int _active_workers;
johnc@3463 3157
johnc@3463 3158 public:
johnc@3463 3159 G1AggregateCountDataTask(G1CollectedHeap* g1h,
johnc@3463 3160 ConcurrentMark* cm,
johnc@3463 3161 BitMap* cm_card_bm,
johnc@4173 3162 uint max_worker_id,
johnc@3463 3163 int n_workers) :
johnc@3463 3164 AbstractGangTask("Count Aggregation"),
johnc@3463 3165 _g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm),
johnc@4173 3166 _max_worker_id(max_worker_id),
johnc@3463 3167 _active_workers(n_workers) { }
johnc@3463 3168
johnc@3463 3169 void work(uint worker_id) {
johnc@4173 3170 AggregateCountDataHRClosure cl(_g1h, _cm_card_bm, _max_worker_id);
johnc@3463 3171
johnc@3463 3172 if (G1CollectedHeap::use_parallel_gc_threads()) {
johnc@3463 3173 _g1h->heap_region_par_iterate_chunked(&cl, worker_id,
johnc@3463 3174 _active_workers,
johnc@3463 3175 HeapRegion::AggregateCountClaimValue);
johnc@3463 3176 } else {
johnc@3463 3177 _g1h->heap_region_iterate(&cl);
johnc@3463 3178 }
johnc@3463 3179 }
johnc@3463 3180 };
johnc@3463 3181
johnc@3463 3182
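// Run the count aggregation task over all heap regions, using the
// work gang when parallel GC threads are enabled and the calling
// thread otherwise.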
johnc@3463 3183 void ConcurrentMark::aggregate_count_data() {
johnc@3463 3184 int n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
johnc@3463 3185 _g1h->workers()->active_workers() :
johnc@3463 3186 1);
johnc@3463 3187
johnc@3463 3188 G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm,
johnc@4173 3189 _max_worker_id, n_workers);
johnc@3463 3190
johnc@3463 3191 if (G1CollectedHeap::use_parallel_gc_threads()) {
johnc@3463 3192 assert(_g1h->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
johnc@3463 3193 "sanity check");
johnc@3463 3194 _g1h->set_par_threads(n_workers);
johnc@3463 3195 _g1h->workers()->run_task(&g1_par_agg_task);
johnc@3463 3196 _g1h->set_par_threads(0);
johnc@3463 3197
johnc@3463 3198 assert(_g1h->check_heap_region_claim_values(HeapRegion::AggregateCountClaimValue),
johnc@3463 3199 "sanity check");
johnc@3463 3200 _g1h->reset_heap_region_claim_values();
johnc@3463 3201 } else {
johnc@3463 3202 g1_par_agg_task.work(0);
johnc@3463 3203 }
johnc@3463 3204 }
johnc@3463 3205
johnc@3463 3206 // Clear the per-worker arrays used to store the per-region counting data
johnc@3463 3207 void ConcurrentMark::clear_all_count_data() {
johnc@3463 3208 // Clear the global card bitmap - it will be filled during
johnc@3463 3209 // liveness count aggregation (during remark) and the
johnc@3463 3210 // final counting task.
johnc@3463 3211 _card_bm.clear();
johnc@3463 3212
johnc@3463 3213 // Clear the global region bitmap - it will be filled as part
johnc@3463 3214 // of the final counting task.
johnc@3463 3215 _region_bm.clear();
johnc@3463 3216
tonyp@3713 3217 uint max_regions = _g1h->max_regions();
johnc@4173 3218 assert(_max_worker_id > 0, "uninitialized");
johnc@4173 3219
johnc@4173 3220 for (uint i = 0; i < _max_worker_id; i += 1) {
johnc@3463 3221 BitMap* task_card_bm = count_card_bitmap_for(i);
johnc@3463 3222 size_t* marked_bytes_array = count_marked_bytes_array_for(i);
johnc@3463 3223
johnc@3463 3224 assert(task_card_bm->size() == _card_bm.size(), "size mismatch");
johnc@3463 3225 assert(marked_bytes_array != NULL, "uninitialized");
johnc@3463 3226
tonyp@3713 3227 memset(marked_bytes_array, 0, (size_t) max_regions * sizeof(size_t));
johnc@3463 3228 task_card_bm->clear();
johnc@3463 3229 }
johnc@3463 3230 }
johnc@3463 3231
ysr@777 3232 void ConcurrentMark::print_stats() {
ysr@777 3233 if (verbose_stats()) {
ysr@777 3234 gclog_or_tty->print_cr("---------------------------------------------------------------------");
ysr@777 3235 for (size_t i = 0; i < _active_tasks; ++i) {
ysr@777 3236 _tasks[i]->print_stats();
ysr@777 3237 gclog_or_tty->print_cr("---------------------------------------------------------------------");
ysr@777 3238 }
ysr@777 3239 }
ysr@777 3240 }
ysr@777 3241
ysr@777 3242 // abandon current marking iteration due to a Full GC
ysr@777 3243 void ConcurrentMark::abort() {
ysr@777 3244 // Clear all marks to force marking thread to do nothing
ysr@777 3245 _nextMarkBitMap->clearAll();
johnc@3463 3246 // Clear the liveness counting data
johnc@3463 3247 clear_all_count_data();
ysr@777 3248 // Empty mark stack
johnc@4386 3249 reset_marking_state();
johnc@4173 3250 for (uint i = 0; i < _max_worker_id; ++i) {
ysr@777 3251 _tasks[i]->clear_region_fields();
johnc@2190 3252 }
pliden@6692 3253 _first_overflow_barrier_sync.abort();
pliden@6692 3254 _second_overflow_barrier_sync.abort();
ysr@777 3255 _has_aborted = true;
ysr@777 3256
ysr@777 3257 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
ysr@777 3258 satb_mq_set.abandon_partial_marking();
tonyp@1752 3259 // This can be called either during or outside marking, we'll read
tonyp@1752 3260 // the expected_active value from the SATB queue set.
tonyp@1752 3261 satb_mq_set.set_active_all_threads(
tonyp@1752 3262 false, /* new active value */
tonyp@1752 3263 satb_mq_set.is_active() /* expected_active */);
sla@5237 3264
sla@5237 3265 _g1h->trace_heap_after_concurrent_cycle();
sla@5237 3266 _g1h->register_concurrent_cycle_end();
ysr@777 3267 }
ysr@777 3268
ysr@777 3269 static void print_ms_time_info(const char* prefix, const char* name,
ysr@777 3270 NumberSeq& ns) {
ysr@777 3271 gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
ysr@777 3272 prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
ysr@777 3273 if (ns.num() > 0) {
ysr@777 3274 gclog_or_tty->print_cr("%s [std. dev = %8.2f ms, max = %8.2f ms]",
ysr@777 3275 prefix, ns.sd(), ns.maximum());
ysr@777 3276 }
ysr@777 3277 }
ysr@777 3278
ysr@777 3279 void ConcurrentMark::print_summary_info() {
ysr@777 3280 gclog_or_tty->print_cr(" Concurrent marking:");
ysr@777 3281 print_ms_time_info(" ", "init marks", _init_times);
ysr@777 3282 print_ms_time_info(" ", "remarks", _remark_times);
ysr@777 3283 {
ysr@777 3284 print_ms_time_info(" ", "final marks", _remark_mark_times);
ysr@777 3285 print_ms_time_info(" ", "weak refs", _remark_weak_ref_times);
ysr@777 3286
ysr@777 3287 }
ysr@777 3288 print_ms_time_info(" ", "cleanups", _cleanup_times);
ysr@777 3289 gclog_or_tty->print_cr(" Final counting total time = %8.2f s (avg = %8.2f ms).",
ysr@777 3290 _total_counting_time,
ysr@777 3291 (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 /
ysr@777 3292 (double)_cleanup_times.num()
ysr@777 3293 : 0.0));
ysr@777 3294 if (G1ScrubRemSets) {
ysr@777 3295 gclog_or_tty->print_cr(" RS scrub total time = %8.2f s (avg = %8.2f ms).",
ysr@777 3296 _total_rs_scrub_time,
ysr@777 3297 (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 /
ysr@777 3298 (double)_cleanup_times.num()
ysr@777 3299 : 0.0));
ysr@777 3300 }
ysr@777 3301 gclog_or_tty->print_cr(" Total stop_world time = %8.2f s.",
ysr@777 3302 (_init_times.sum() + _remark_times.sum() +
ysr@777 3303 _cleanup_times.sum())/1000.0);
ysr@777 3304 gclog_or_tty->print_cr(" Total concurrent time = %8.2f s "
johnc@3463 3305 "(%8.2f s marking).",
ysr@777 3306 cmThread()->vtime_accum(),
johnc@3463 3307 cmThread()->vtime_mark_accum());
ysr@777 3308 }
ysr@777 3309
tonyp@1454 3310 void ConcurrentMark::print_worker_threads_on(outputStream* st) const {
johnc@4549 3311 if (use_parallel_marking_threads()) {
johnc@4549 3312 _parallel_workers->print_worker_threads_on(st);
johnc@4549 3313 }
tonyp@1454 3314 }
tonyp@1454 3315
stefank@4904 3316 void ConcurrentMark::print_on_error(outputStream* st) const {
stefank@4904 3317 st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT,
drchase@6680 3318 p2i(_prevMarkBitMap), p2i(_nextMarkBitMap));
stefank@4904 3319 _prevMarkBitMap->print_on_error(st, " Prev Bits: ");
stefank@4904 3320 _nextMarkBitMap->print_on_error(st, " Next Bits: ");
stefank@4904 3321 }
stefank@4904 3322
ysr@777 3323 // We take a break if someone is trying to stop the world.
jmasa@3357 3324 bool ConcurrentMark::do_yield_check(uint worker_id) {
ysr@777 3325 if (should_yield()) {
jmasa@3357 3326 if (worker_id == 0) {
ysr@777 3327 _g1h->g1_policy()->record_concurrent_pause();
tonyp@2973 3328 }
ysr@777 3329 cmThread()->yield();
ysr@777 3330 return true;
ysr@777 3331 } else {
ysr@777 3332 return false;
ysr@777 3333 }
ysr@777 3334 }
ysr@777 3335
ysr@777 3336 bool ConcurrentMark::should_yield() {
ysr@777 3337 return cmThread()->should_yield();
ysr@777 3338 }
ysr@777 3339
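// Returns true if the card table card spanning address 'p' is set in
// the global card bitmap.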
ysr@777 3340 bool ConcurrentMark::containing_card_is_marked(void* p) {
ysr@777 3341 size_t offset = pointer_delta(p, _g1h->reserved_region().start(), 1);
ysr@777 3342 return _card_bm.at(offset >> CardTableModRefBS::card_shift);
ysr@777 3343 }
ysr@777 3344
ysr@777 3345 bool ConcurrentMark::containing_cards_are_marked(void* start,
ysr@777 3346 void* last) {
tonyp@2973 3347 return containing_card_is_marked(start) &&
tonyp@2973 3348 containing_card_is_marked(last);
ysr@777 3349 }
ysr@777 3350
ysr@777 3351 #ifndef PRODUCT
ysr@777 3352 // for debugging purposes
ysr@777 3353 void ConcurrentMark::print_finger() {
ysr@777 3354 gclog_or_tty->print_cr("heap ["PTR_FORMAT", "PTR_FORMAT"), global finger = "PTR_FORMAT,
drchase@6680 3355 p2i(_heap_start), p2i(_heap_end), p2i(_finger));
johnc@4173 3356 for (uint i = 0; i < _max_worker_id; ++i) {
drchase@6680 3357 gclog_or_tty->print(" %u: " PTR_FORMAT, i, p2i(_tasks[i]->finger()));
ysr@777 3358 }
drchase@6680 3359 gclog_or_tty->cr();
ysr@777 3360 }
ysr@777 3361 #endif
ysr@777 3362
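// Scan the fields of the (already marked) object with the CM oop
// closure, account for the words scanned, and check whether the
// scanning limits have been reached.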
tonyp@2968 3363 void CMTask::scan_object(oop obj) {
tonyp@2968 3364 assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant");
tonyp@2968 3365
tonyp@2968 3366 if (_cm->verbose_high()) {
johnc@4173 3367 gclog_or_tty->print_cr("[%u] we're scanning object "PTR_FORMAT,
drchase@6680 3368 _worker_id, p2i((void*) obj));
tonyp@2968 3369 }
tonyp@2968 3370
tonyp@2968 3371 size_t obj_size = obj->size();
tonyp@2968 3372 _words_scanned += obj_size;
tonyp@2968 3373
tonyp@2968 3374 obj->oop_iterate(_cm_oop_closure);
tonyp@2968 3375 statsOnly( ++_objs_scanned );
tonyp@2968 3376 check_limits();
tonyp@2968 3377 }
tonyp@2968 3378
ysr@777 3379 // Closure for iteration over bitmaps
ysr@777 3380 class CMBitMapClosure : public BitMapClosure {
ysr@777 3381 private:
ysr@777 3382 // the bitmap that is being iterated over
ysr@777 3383 CMBitMap* _nextMarkBitMap;
ysr@777 3384 ConcurrentMark* _cm;
ysr@777 3385 CMTask* _task;
ysr@777 3386
ysr@777 3387 public:
tonyp@3691 3388 CMBitMapClosure(CMTask *task, ConcurrentMark* cm, CMBitMap* nextMarkBitMap) :
tonyp@3691 3389 _nextMarkBitMap(nextMarkBitMap), _cm(cm), _task(task) { }
ysr@777 3390
ysr@777 3391 bool do_bit(size_t offset) {
ysr@777 3392 HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset);
tonyp@1458 3393 assert(_nextMarkBitMap->isMarked(addr), "invariant");
tonyp@1458 3394 assert( addr < _cm->finger(), "invariant");
ysr@777 3395
tonyp@3691 3396 statsOnly( _task->increase_objs_found_on_bitmap() );
tonyp@3691 3397 assert(addr >= _task->finger(), "invariant");
tonyp@3691 3398
tonyp@3691 3399 // We move that task's local finger along.
tonyp@3691 3400 _task->move_finger_to(addr);
ysr@777 3401
ysr@777 3402 _task->scan_object(oop(addr));
ysr@777 3403 // we only partially drain the local queue and global stack
ysr@777 3404 _task->drain_local_queue(true);
ysr@777 3405 _task->drain_global_stack(true);
ysr@777 3406
ysr@777 3407 // if the has_aborted flag has been raised, we need to bail out of
ysr@777 3408 // the iteration
ysr@777 3409 return !_task->has_aborted();
ysr@777 3410 }
ysr@777 3411 };
ysr@777 3412
ysr@777 3413 // Closure for iterating over objects, currently only used for
ysr@777 3414 // processing SATB buffers.
ysr@777 3415 class CMObjectClosure : public ObjectClosure {
ysr@777 3416 private:
ysr@777 3417 CMTask* _task;
ysr@777 3418
ysr@777 3419 public:
ysr@777 3420 void do_object(oop obj) {
ysr@777 3421 _task->deal_with_reference(obj);
ysr@777 3422 }
ysr@777 3423
ysr@777 3424 CMObjectClosure(CMTask* task) : _task(task) { }
ysr@777 3425 };
ysr@777 3426
tonyp@2968 3427 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
tonyp@2968 3428 ConcurrentMark* cm,
tonyp@2968 3429 CMTask* task)
tonyp@2968 3430 : _g1h(g1h), _cm(cm), _task(task) {
tonyp@2968 3431 assert(_ref_processor == NULL, "should be initialized to NULL");
tonyp@2968 3432
tonyp@2968 3433 if (G1UseConcMarkReferenceProcessing) {
johnc@3175 3434 _ref_processor = g1h->ref_processor_cm();
tonyp@2968 3435 assert(_ref_processor != NULL, "should not be NULL");
ysr@777 3436 }
tonyp@2968 3437 }
ysr@777 3438
ysr@777 3439 void CMTask::setup_for_region(HeapRegion* hr) {
tonyp@1458 3440 // Separated the asserts so that we know which one fires.
tonyp@1458 3441 assert(hr != NULL,
tonyp@1458 3442 "claim_region() should have filtered out continues humongous regions");
tonyp@1458 3443 assert(!hr->continuesHumongous(),
tonyp@1458 3444 "claim_region() should have filtered out continues humongous regions");
ysr@777 3445
tonyp@2973 3446 if (_cm->verbose_low()) {
johnc@4173 3447 gclog_or_tty->print_cr("[%u] setting up for region "PTR_FORMAT,
drchase@6680 3448 _worker_id, p2i(hr));
tonyp@2973 3449 }
ysr@777 3450
ysr@777 3451 _curr_region = hr;
ysr@777 3452 _finger = hr->bottom();
ysr@777 3453 update_region_limit();
ysr@777 3454 }
ysr@777 3455
ysr@777 3456 void CMTask::update_region_limit() {
ysr@777 3457 HeapRegion* hr = _curr_region;
ysr@777 3458 HeapWord* bottom = hr->bottom();
ysr@777 3459 HeapWord* limit = hr->next_top_at_mark_start();
ysr@777 3460
ysr@777 3461 if (limit == bottom) {
tonyp@2973 3462 if (_cm->verbose_low()) {
johnc@4173 3463 gclog_or_tty->print_cr("[%u] found an empty region "
ysr@777 3464 "["PTR_FORMAT", "PTR_FORMAT")",
drchase@6680 3465 _worker_id, p2i(bottom), p2i(limit));
tonyp@2973 3466 }
ysr@777 3467 // The region was collected underneath our feet.
ysr@777 3468 // We set the finger to bottom to ensure that the bitmap
ysr@777 3469 // iteration that will follow this will not do anything.
ysr@777 3470 // (this is not a condition that holds when we set the region up,
ysr@777 3471 // as the region is not supposed to be empty in the first place)
ysr@777 3472 _finger = bottom;
ysr@777 3473 } else if (limit >= _region_limit) {
tonyp@1458 3474 assert(limit >= _finger, "peace of mind");
ysr@777 3475 } else {
tonyp@1458 3476 assert(limit < _region_limit, "only way to get here");
ysr@777 3477 // This can happen under some pretty unusual circumstances. An
ysr@777 3478 // evacuation pause empties the region underneath our feet (NTAMS
ysr@777 3479 // at bottom). We then do some allocation in the region (NTAMS
ysr@777 3480 // stays at bottom), followed by the region being used as a GC
ysr@777 3481 // alloc region (NTAMS will move to top() and the objects
ysr@777 3482 // originally below it will be grayed). All objects now marked in
ysr@777 3483 // the region are explicitly grayed, if below the global finger,
ysr@777 3484 // and we do not need in fact to scan anything else. So, we simply
ysr@777 3485 // set _finger to be limit to ensure that the bitmap iteration
ysr@777 3486 // doesn't do anything.
ysr@777 3487 _finger = limit;
ysr@777 3488 }
ysr@777 3489
ysr@777 3490 _region_limit = limit;
ysr@777 3491 }
ysr@777 3492
ysr@777 3493 void CMTask::giveup_current_region() {
tonyp@1458 3494 assert(_curr_region != NULL, "invariant");
tonyp@2973 3495 if (_cm->verbose_low()) {
johnc@4173 3496 gclog_or_tty->print_cr("[%u] giving up region "PTR_FORMAT,
drchase@6680 3497 _worker_id, p2i(_curr_region));
tonyp@2973 3498 }
ysr@777 3499 clear_region_fields();
ysr@777 3500 }
ysr@777 3501
ysr@777 3502 void CMTask::clear_region_fields() {
ysr@777 3503 // Values for these three fields that indicate that we're not
ysr@777 3504 // holding on to a region.
ysr@777 3505 _curr_region = NULL;
ysr@777 3506 _finger = NULL;
ysr@777 3507 _region_limit = NULL;
ysr@777 3508 }
ysr@777 3509
tonyp@2968 3510 void CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
tonyp@2968 3511 if (cm_oop_closure == NULL) {
tonyp@2968 3512 assert(_cm_oop_closure != NULL, "invariant");
tonyp@2968 3513 } else {
tonyp@2968 3514 assert(_cm_oop_closure == NULL, "invariant");
tonyp@2968 3515 }
tonyp@2968 3516 _cm_oop_closure = cm_oop_closure;
tonyp@2968 3517 }
tonyp@2968 3518
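// Prepare this task for a new marking cycle: install the given next
// mark bitmap, clear the region fields, and reset the call count,
// timing and (when enabled) marking statistics counters.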
ysr@777 3519 void CMTask::reset(CMBitMap* nextMarkBitMap) {
tonyp@1458 3520 guarantee(nextMarkBitMap != NULL, "invariant");
ysr@777 3521
tonyp@2973 3522 if (_cm->verbose_low()) {
johnc@4173 3523 gclog_or_tty->print_cr("[%u] resetting", _worker_id);
tonyp@2973 3524 }
ysr@777 3525
ysr@777 3526 _nextMarkBitMap = nextMarkBitMap;
ysr@777 3527 clear_region_fields();
ysr@777 3528
ysr@777 3529 _calls = 0;
ysr@777 3530 _elapsed_time_ms = 0.0;
ysr@777 3531 _termination_time_ms = 0.0;
ysr@777 3532 _termination_start_time_ms = 0.0;
ysr@777 3533
ysr@777 3534 #if _MARKING_STATS_
ysr@777 3535 _local_pushes = 0;
ysr@777 3536 _local_pops = 0;
ysr@777 3537 _local_max_size = 0;
ysr@777 3538 _objs_scanned = 0;
ysr@777 3539 _global_pushes = 0;
ysr@777 3540 _global_pops = 0;
ysr@777 3541 _global_max_size = 0;
ysr@777 3542 _global_transfers_to = 0;
ysr@777 3543 _global_transfers_from = 0;
ysr@777 3544 _regions_claimed = 0;
ysr@777 3545 _objs_found_on_bitmap = 0;
ysr@777 3546 _satb_buffers_processed = 0;
ysr@777 3547 _steal_attempts = 0;
ysr@777 3548 _steals = 0;
ysr@777 3549 _aborted = 0;
ysr@777 3550 _aborted_overflow = 0;
ysr@777 3551 _aborted_cm_aborted = 0;
ysr@777 3552 _aborted_yield = 0;
ysr@777 3553 _aborted_timed_out = 0;
ysr@777 3554 _aborted_satb = 0;
ysr@777 3555 _aborted_termination = 0;
ysr@777 3556 #endif // _MARKING_STATS_
ysr@777 3557 }
ysr@777 3558
ysr@777 3559 bool CMTask::should_exit_termination() {
ysr@777 3560 regular_clock_call();
ysr@777 3561 // This is called when we are in the termination protocol. We should
ysr@777 3562 // quit if, for some reason, this task wants to abort or the global
ysr@777 3563 // stack is not empty (this means that we can get work from it).
ysr@777 3564 return !_cm->mark_stack_empty() || has_aborted();
ysr@777 3565 }
ysr@777 3566
ysr@777 3567 void CMTask::reached_limit() {
tonyp@1458 3568 assert(_words_scanned >= _words_scanned_limit ||
tonyp@1458 3569 _refs_reached >= _refs_reached_limit ,
tonyp@1458 3570 "shouldn't have been called otherwise");
ysr@777 3571 regular_clock_call();
ysr@777 3572 }
ysr@777 3573
ysr@777 3574 void CMTask::regular_clock_call() {
tonyp@2973 3575 if (has_aborted()) return;
ysr@777 3576
ysr@777 3577 // First, we need to recalculate the words scanned and refs reached
ysr@777 3578 // limits for the next clock call.
ysr@777 3579 recalculate_limits();
ysr@777 3580
ysr@777 3581 // During the regular clock call we do the following
ysr@777 3582
ysr@777 3583 // (1) If an overflow has been flagged, then we abort.
ysr@777 3584 if (_cm->has_overflown()) {
ysr@777 3585 set_has_aborted();
ysr@777 3586 return;
ysr@777 3587 }
ysr@777 3588
ysr@777 3589 // If we are not concurrent (i.e. we're doing remark) we don't need
ysr@777 3590 // to check anything else. The other steps are only needed during
ysr@777 3591 // the concurrent marking phase.
tonyp@2973 3592 if (!concurrent()) return;
ysr@777 3593
ysr@777 3594 // (2) If marking has been aborted for Full GC, then we also abort.
ysr@777 3595 if (_cm->has_aborted()) {
ysr@777 3596 set_has_aborted();
ysr@777 3597 statsOnly( ++_aborted_cm_aborted );
ysr@777 3598 return;
ysr@777 3599 }
ysr@777 3600
ysr@777 3601 double curr_time_ms = os::elapsedVTime() * 1000.0;
ysr@777 3602
ysr@777 3603 // (3) If marking stats are enabled, then we update the step history.
ysr@777 3604 #if _MARKING_STATS_
tonyp@2973 3605 if (_words_scanned >= _words_scanned_limit) {
ysr@777 3606 ++_clock_due_to_scanning;
tonyp@2973 3607 }
tonyp@2973 3608 if (_refs_reached >= _refs_reached_limit) {
ysr@777 3609 ++_clock_due_to_marking;
tonyp@2973 3610 }
ysr@777 3611
ysr@777 3612 double last_interval_ms = curr_time_ms - _interval_start_time_ms;
ysr@777 3613 _interval_start_time_ms = curr_time_ms;
ysr@777 3614 _all_clock_intervals_ms.add(last_interval_ms);
ysr@777 3615
ysr@777 3616 if (_cm->verbose_medium()) {
johnc@4173 3617 gclog_or_tty->print_cr("[%u] regular clock, interval = %1.2lfms, "
tonyp@2973 3618 "scanned = %d%s, refs reached = %d%s",
johnc@4173 3619 _worker_id, last_interval_ms,
tonyp@2973 3620 _words_scanned,
tonyp@2973 3621 (_words_scanned >= _words_scanned_limit) ? " (*)" : "",
tonyp@2973 3622 _refs_reached,
tonyp@2973 3623 (_refs_reached >= _refs_reached_limit) ? " (*)" : "");
ysr@777 3624 }
ysr@777 3625 #endif // _MARKING_STATS_
ysr@777 3626
ysr@777 3627 // (4) We check whether we should yield. If we have to, then we abort.
ysr@777 3628 if (_cm->should_yield()) {
ysr@777 3629 // We should yield. To do this we abort the task. The caller is
ysr@777 3630 // responsible for yielding.
ysr@777 3631 set_has_aborted();
ysr@777 3632 statsOnly( ++_aborted_yield );
ysr@777 3633 return;
ysr@777 3634 }
ysr@777 3635
ysr@777 3636 // (5) We check whether we've reached our time quota. If we have,
ysr@777 3637 // then we abort.
ysr@777 3638 double elapsed_time_ms = curr_time_ms - _start_time_ms;
ysr@777 3639 if (elapsed_time_ms > _time_target_ms) {
ysr@777 3640 set_has_aborted();
johnc@2494 3641 _has_timed_out = true;
ysr@777 3642 statsOnly( ++_aborted_timed_out );
ysr@777 3643 return;
ysr@777 3644 }
ysr@777 3645
ysr@777 3646 // (6) Finally, we check whether there are enough completed SATB
ysr@777 3647 // buffers available for processing. If there are, we abort.
ysr@777 3648 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
ysr@777 3649 if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
tonyp@2973 3650 if (_cm->verbose_low()) {
johnc@4173 3651 gclog_or_tty->print_cr("[%u] aborting to deal with pending SATB buffers",
johnc@4173 3652 _worker_id);
tonyp@2973 3653 }
ysr@777 3654 // we do need to process SATB buffers, we'll abort and restart
ysr@777 3655 // the marking task to do so
ysr@777 3656 set_has_aborted();
ysr@777 3657 statsOnly( ++_aborted_satb );
ysr@777 3658 return;
ysr@777 3659 }
ysr@777 3660 }
ysr@777 3661
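// Reset the words-scanned and refs-reached limits that trigger the
// next regular clock call.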
ysr@777 3662 void CMTask::recalculate_limits() {
ysr@777 3663 _real_words_scanned_limit = _words_scanned + words_scanned_period;
ysr@777 3664 _words_scanned_limit = _real_words_scanned_limit;
ysr@777 3665
ysr@777 3666 _real_refs_reached_limit = _refs_reached + refs_reached_period;
ysr@777 3667 _refs_reached_limit = _real_refs_reached_limit;
ysr@777 3668 }
ysr@777 3669
ysr@777 3670 void CMTask::decrease_limits() {
ysr@777 3671 // This is called when we believe that we're going to do an infrequent
ysr@777 3672 // operation which will increase the cost per byte scanned (i.e. move
ysr@777 3673 // entries to/from the global stack). It basically tries to decrease the
ysr@777 3674 // scanning limit so that the clock is called earlier.
ysr@777 3675
tonyp@2973 3676 if (_cm->verbose_medium()) {
johnc@4173 3677 gclog_or_tty->print_cr("[%u] decreasing limits", _worker_id);
tonyp@2973 3678 }
ysr@777 3679
ysr@777 3680 _words_scanned_limit = _real_words_scanned_limit -
ysr@777 3681 3 * words_scanned_period / 4;
ysr@777 3682 _refs_reached_limit = _real_refs_reached_limit -
ysr@777 3683 3 * refs_reached_period / 4;
ysr@777 3684 }
ysr@777 3685
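// Transfer up to global_stack_transfer_size entries from this task's
// local queue to the global mark stack. If the push fails because the
// global stack has overflown, the task is aborted.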
ysr@777 3686 void CMTask::move_entries_to_global_stack() {
ysr@777 3687 // local array where we'll store the entries that will be popped
ysr@777 3688 // from the local queue
ysr@777 3689 oop buffer[global_stack_transfer_size];
ysr@777 3690
ysr@777 3691 int n = 0;
ysr@777 3692 oop obj;
ysr@777 3693 while (n < global_stack_transfer_size && _task_queue->pop_local(obj)) {
ysr@777 3694 buffer[n] = obj;
ysr@777 3695 ++n;
ysr@777 3696 }
ysr@777 3697
ysr@777 3698 if (n > 0) {
ysr@777 3699 // we popped at least one entry from the local queue
ysr@777 3700
ysr@777 3701 statsOnly( ++_global_transfers_to; _local_pops += n );
ysr@777 3702
ysr@777 3703 if (!_cm->mark_stack_push(buffer, n)) {
tonyp@2973 3704 if (_cm->verbose_low()) {
johnc@4173 3705 gclog_or_tty->print_cr("[%u] aborting due to global stack overflow",
johnc@4173 3706 _worker_id);
tonyp@2973 3707 }
ysr@777 3708 set_has_aborted();
ysr@777 3709 } else {
ysr@777 3710 // the transfer was successful
ysr@777 3711
tonyp@2973 3712 if (_cm->verbose_medium()) {
johnc@4173 3713 gclog_or_tty->print_cr("[%u] pushed %d entries to the global stack",
johnc@4173 3714 _worker_id, n);
tonyp@2973 3715 }
ysr@777 3716 statsOnly( int tmp_size = _cm->mark_stack_size();
tonyp@2973 3717 if (tmp_size > _global_max_size) {
ysr@777 3718 _global_max_size = tmp_size;
tonyp@2973 3719 }
ysr@777 3720 _global_pushes += n );
ysr@777 3721 }
ysr@777 3722 }
ysr@777 3723
ysr@777 3724 // this operation was quite expensive, so decrease the limits
ysr@777 3725 decrease_limits();
ysr@777 3726 }
ysr@777 3727
ysr@777 3728 void CMTask::get_entries_from_global_stack() {
ysr@777 3729 // local array where we'll store the entries that will be popped
ysr@777 3730 // from the global stack.
ysr@777 3731 oop buffer[global_stack_transfer_size];
ysr@777 3732 int n;
ysr@777 3733 _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n);
tonyp@1458 3734 assert(n <= global_stack_transfer_size,
tonyp@1458 3735 "we should not pop more than the given limit");
ysr@777 3736 if (n > 0) {
ysr@777 3737 // yes, we did actually pop at least one entry
ysr@777 3738
ysr@777 3739 statsOnly( ++_global_transfers_from; _global_pops += n );
tonyp@2973 3740 if (_cm->verbose_medium()) {
johnc@4173 3741 gclog_or_tty->print_cr("[%u] popped %d entries from the global stack",
johnc@4173 3742 _worker_id, n);
tonyp@2973 3743 }
ysr@777 3744 for (int i = 0; i < n; ++i) {
ysr@777 3745 bool success = _task_queue->push(buffer[i]);
ysr@777 3746 // We only call this when the local queue is empty or under a
ysr@777 3747 // given target limit. So, we do not expect this push to fail.
tonyp@1458 3748 assert(success, "invariant");
ysr@777 3749 }
ysr@777 3750
ysr@777 3751 statsOnly( int tmp_size = _task_queue->size();
tonyp@2973 3752 if (tmp_size > _local_max_size) {
ysr@777 3753 _local_max_size = tmp_size;
tonyp@2973 3754 }
ysr@777 3755 _local_pushes += n );
ysr@777 3756 }
ysr@777 3757
ysr@777 3758 // this operation was quite expensive, so decrease the limits
ysr@777 3759 decrease_limits();
ysr@777 3760 }
ysr@777 3761
ysr@777 3762 void CMTask::drain_local_queue(bool partially) {
tonyp@2973 3763 if (has_aborted()) return;
ysr@777 3764
ysr@777 3765 // Decide what the target size is, depending on whether we're going to
ysr@777 3766 // drain it partially (so that other tasks can steal if they run out
ysr@777 3767 // of things to do) or totally (at the very end).
ysr@777 3768 size_t target_size;
tonyp@2973 3769 if (partially) {
ysr@777 3770 target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
tonyp@2973 3771 } else {
ysr@777 3772 target_size = 0;
tonyp@2973 3773 }
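// Illustrative example (hypothetical values): if max_elems() is 16384
// and GCDrainStackTargetSize is 64, a partial drain pops entries until
// at most MIN2(16384/3, 64) == 64 remain, leaving work available for
// stealing; a total drain empties the queue completely.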
ysr@777 3774
ysr@777 3775 if (_task_queue->size() > target_size) {
tonyp@2973 3776 if (_cm->verbose_high()) {
drchase@6680 3777 gclog_or_tty->print_cr("[%u] draining local queue, target size = " SIZE_FORMAT,
johnc@4173 3778 _worker_id, target_size);
tonyp@2973 3779 }
ysr@777 3780
ysr@777 3781 oop obj;
ysr@777 3782 bool ret = _task_queue->pop_local(obj);
ysr@777 3783 while (ret) {
ysr@777 3784 statsOnly( ++_local_pops );
ysr@777 3785
tonyp@2973 3786 if (_cm->verbose_high()) {
johnc@4173 3787 gclog_or_tty->print_cr("[%u] popped "PTR_FORMAT, _worker_id,
drchase@6680 3788 p2i((void*) obj));
tonyp@2973 3789 }
ysr@777 3790
tonyp@1458 3791 assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" );
tonyp@2643 3792 assert(!_g1h->is_on_master_free_list(
tonyp@2472 3793 _g1h->heap_region_containing((HeapWord*) obj)), "invariant");
ysr@777 3794
ysr@777 3795 scan_object(obj);
ysr@777 3796
tonyp@2973 3797 if (_task_queue->size() <= target_size || has_aborted()) {
ysr@777 3798 ret = false;
tonyp@2973 3799 } else {
ysr@777 3800 ret = _task_queue->pop_local(obj);
tonyp@2973 3801 }
ysr@777 3802 }
ysr@777 3803
tonyp@2973 3804 if (_cm->verbose_high()) {
johnc@4173 3805 gclog_or_tty->print_cr("[%u] drained local queue, size = %d",
johnc@4173 3806 _worker_id, _task_queue->size());
tonyp@2973 3807 }
ysr@777 3808 }
ysr@777 3809 }
ysr@777 3810
ysr@777 3811 void CMTask::drain_global_stack(bool partially) {
tonyp@2973 3812 if (has_aborted()) return;
ysr@777 3813
ysr@777 3814 // We have a policy to drain the local queue before we attempt to
ysr@777 3815 // drain the global stack.
tonyp@1458 3816 assert(partially || _task_queue->size() == 0, "invariant");
ysr@777 3817
ysr@777 3818 // Decide what the target size is, depending on whether we're going to
ysr@777 3819 // drain it partially (so that other tasks can steal if they run out
ysr@777 3820 // of things to do) or totally (at the very end). Notice that,
ysr@777 3821 // because we move entries from the global stack in chunks, or
ysr@777 3822 // because another task might be doing the same, we might in fact
ysr@777 3823 // drop below the target. But this is not a problem.
ysr@777 3824 size_t target_size;
tonyp@2973 3825 if (partially) {
ysr@777 3826 target_size = _cm->partial_mark_stack_size_target();
tonyp@2973 3827 } else {
ysr@777 3828 target_size = 0;
tonyp@2973 3829 }
ysr@777 3830
ysr@777 3831 if (_cm->mark_stack_size() > target_size) {
tonyp@2973 3832 if (_cm->verbose_low()) {
drchase@6680 3833 gclog_or_tty->print_cr("[%u] draining global_stack, target size " SIZE_FORMAT,
johnc@4173 3834 _worker_id, target_size);
tonyp@2973 3835 }
ysr@777 3836
ysr@777 3837 while (!has_aborted() && _cm->mark_stack_size() > target_size) {
ysr@777 3838 get_entries_from_global_stack();
ysr@777 3839 drain_local_queue(partially);
ysr@777 3840 }
ysr@777 3841
tonyp@2973 3842 if (_cm->verbose_low()) {
drchase@6680 3843 gclog_or_tty->print_cr("[%u] drained global stack, size = " SIZE_FORMAT,
johnc@4173 3844 _worker_id, _cm->mark_stack_size());
tonyp@2973 3845 }
ysr@777 3846 }
ysr@777 3847 }
ysr@777 3848
ysr@777 3849 // The SATB queue set makes several assumptions about whether the par
ysr@777 3850 // or non-par versions of its methods are called. This is why some of
ysr@777 3851 // the code is replicated. We should really get rid of the
ysr@777 3852 // single-threaded version of the code to simplify things.
ysr@777 3853 void CMTask::drain_satb_buffers() {
tonyp@2973 3854 if (has_aborted()) return;
ysr@777 3855
ysr@777 3856 // We set this so that the regular clock knows that we're in the
ysr@777 3857 // middle of draining buffers and doesn't set the abort flag when it
ysr@777 3858 // notices that SATB buffers are available for draining. It'd be
ysr@777 3859 // very counterproductive if it did that. :-)
ysr@777 3860 _draining_satb_buffers = true;
ysr@777 3861
ysr@777 3862 CMObjectClosure oc(this);
ysr@777 3863 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
tonyp@2973 3864 if (G1CollectedHeap::use_parallel_gc_threads()) {
johnc@4173 3865 satb_mq_set.set_par_closure(_worker_id, &oc);
tonyp@2973 3866 } else {
ysr@777 3867 satb_mq_set.set_closure(&oc);
tonyp@2973 3868 }
ysr@777 3869
ysr@777 3870 // This keeps claiming and applying the closure to completed buffers
ysr@777 3871 // until we run out of buffers or we need to abort.
jmasa@2188 3872 if (G1CollectedHeap::use_parallel_gc_threads()) {
ysr@777 3873 while (!has_aborted() &&
johnc@4173 3874 satb_mq_set.par_apply_closure_to_completed_buffer(_worker_id)) {
tonyp@2973 3875 if (_cm->verbose_medium()) {
johnc@4173 3876 gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id);
tonyp@2973 3877 }
ysr@777 3878 statsOnly( ++_satb_buffers_processed );
ysr@777 3879 regular_clock_call();
ysr@777 3880 }
ysr@777 3881 } else {
ysr@777 3882 while (!has_aborted() &&
ysr@777 3883 satb_mq_set.apply_closure_to_completed_buffer()) {
tonyp@2973 3884 if (_cm->verbose_medium()) {
johnc@4173 3885 gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id);
tonyp@2973 3886 }
ysr@777 3887 statsOnly( ++_satb_buffers_processed );
ysr@777 3888 regular_clock_call();
ysr@777 3889 }
ysr@777 3890 }
ysr@777 3891
ysr@777 3892 if (!concurrent() && !has_aborted()) {
ysr@777 3893 // We should only do this during remark.
tonyp@2973 3894 if (G1CollectedHeap::use_parallel_gc_threads()) {
johnc@4173 3895 satb_mq_set.par_iterate_closure_all_threads(_worker_id);
tonyp@2973 3896 } else {
ysr@777 3897 satb_mq_set.iterate_closure_all_threads();
tonyp@2973 3898 }
ysr@777 3899 }
ysr@777 3900
ysr@777 3901 _draining_satb_buffers = false;
ysr@777 3902
tonyp@1458 3903 assert(has_aborted() ||
tonyp@1458 3904 concurrent() ||
tonyp@1458 3905 satb_mq_set.completed_buffers_num() == 0, "invariant");
ysr@777 3906
tonyp@2973 3907 if (G1CollectedHeap::use_parallel_gc_threads()) {
johnc@4173 3908 satb_mq_set.set_par_closure(_worker_id, NULL);
tonyp@2973 3909 } else {
ysr@777 3910 satb_mq_set.set_closure(NULL);
tonyp@2973 3911 }
ysr@777 3912
ysr@777 3913 // again, this was a potentially expensive operation, decrease the
ysr@777 3914 // limits to get the regular clock call early
ysr@777 3915 decrease_limits();
ysr@777 3916 }
ysr@777 3917
ysr@777 3918 void CMTask::print_stats() {
johnc@4173 3919 gclog_or_tty->print_cr("Marking Stats, task = %u, calls = %d",
johnc@4173 3920 _worker_id, _calls);
ysr@777 3921 gclog_or_tty->print_cr(" Elapsed time = %1.2lfms, Termination time = %1.2lfms",
ysr@777 3922 _elapsed_time_ms, _termination_time_ms);
ysr@777 3923 gclog_or_tty->print_cr(" Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
ysr@777 3924 _step_times_ms.num(), _step_times_ms.avg(),
ysr@777 3925 _step_times_ms.sd());
ysr@777 3926 gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms",
ysr@777 3927 _step_times_ms.maximum(), _step_times_ms.sum());
ysr@777 3928
ysr@777 3929 #if _MARKING_STATS_
ysr@777 3930 gclog_or_tty->print_cr(" Clock Intervals (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
ysr@777 3931 _all_clock_intervals_ms.num(), _all_clock_intervals_ms.avg(),
ysr@777 3932 _all_clock_intervals_ms.sd());
ysr@777 3933 gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms",
ysr@777 3934 _all_clock_intervals_ms.maximum(),
ysr@777 3935 _all_clock_intervals_ms.sum());
ysr@777 3936 gclog_or_tty->print_cr(" Clock Causes (cum): scanning = %d, marking = %d",
ysr@777 3937 _clock_due_to_scanning, _clock_due_to_marking);
ysr@777 3938 gclog_or_tty->print_cr(" Objects: scanned = %d, found on the bitmap = %d",
ysr@777 3939 _objs_scanned, _objs_found_on_bitmap);
ysr@777 3940 gclog_or_tty->print_cr(" Local Queue: pushes = %d, pops = %d, max size = %d",
ysr@777 3941 _local_pushes, _local_pops, _local_max_size);
ysr@777 3942 gclog_or_tty->print_cr(" Global Stack: pushes = %d, pops = %d, max size = %d",
ysr@777 3943 _global_pushes, _global_pops, _global_max_size);
ysr@777 3944 gclog_or_tty->print_cr(" transfers to = %d, transfers from = %d",
ysr@777 3945 _global_transfers_to, _global_transfers_from);
tonyp@3691 3946 gclog_or_tty->print_cr(" Regions: claimed = %d", _regions_claimed);
ysr@777 3947 gclog_or_tty->print_cr(" SATB buffers: processed = %d", _satb_buffers_processed);
ysr@777 3948 gclog_or_tty->print_cr(" Steals: attempts = %d, successes = %d",
ysr@777 3949 _steal_attempts, _steals);
ysr@777 3950 gclog_or_tty->print_cr(" Aborted: %d, due to", _aborted);
ysr@777 3951 gclog_or_tty->print_cr(" overflow: %d, global abort: %d, yield: %d",
ysr@777 3952 _aborted_overflow, _aborted_cm_aborted, _aborted_yield);
ysr@777 3953 gclog_or_tty->print_cr(" time out: %d, SATB: %d, termination: %d",
ysr@777 3954 _aborted_timed_out, _aborted_satb, _aborted_termination);
ysr@777 3955 #endif // _MARKING_STATS_
ysr@777 3956 }
ysr@777 3957
ysr@777 3958 /*****************************************************************************
ysr@777 3959
johnc@4787 3960 The do_marking_step(time_target_ms, ...) method is the building
johnc@4787 3961 block of the parallel marking framework. It can be called in parallel
ysr@777 3962 with other invocations of do_marking_step() on different tasks
ysr@777 3963 (but only one per task, obviously) and concurrently with the
ysr@777 3964 mutator threads, or during remark, hence it eliminates the need
ysr@777 3965 for two versions of the code. When called during remark, it will
ysr@777 3966 pick up from where the task left off during the concurrent marking
ysr@777 3967 phase. Interestingly, tasks are also claimable during evacuation
ysr@777 3968 pauses, since do_marking_step() ensures that it aborts before
ysr@777 3969 it needs to yield.
ysr@777 3970
johnc@4787 3971 The data structures that it uses to do marking work are the
ysr@777 3972 following:
ysr@777 3973
ysr@777 3974 (1) Marking Bitmap. If there are gray objects that appear only
ysr@777 3975 on the bitmap (this happens either when dealing with an overflow
ysr@777 3976 or when the initial marking phase has simply marked the roots
ysr@777 3977 and didn't push them on the stack), then tasks claim heap
ysr@777 3978 regions whose bitmap they then scan to find gray objects. A
ysr@777 3979 global finger indicates where the end of the last claimed region
ysr@777 3980 is. A local finger indicates how far into the region a task has
ysr@777 3981 scanned. The two fingers are used to determine how to gray an
ysr@777 3982 object (i.e. whether simply marking it is OK, as it will be
ysr@777 3983 visited by a task in the future, or whether it also needs to be
ysr@777 3984 pushed on a stack).
ysr@777 3985
ysr@777 3986 (2) Local Queue. The local queue of the task which is accessed
ysr@777 3987 reasonably efficiently by the task. Other tasks can steal from
ysr@777 3988 it when they run out of work. Throughout the marking phase, a
ysr@777 3989 task attempts to keep its local queue short but not totally
ysr@777 3990 empty, so that entries are available for stealing by other
ysr@777 3991 tasks. Only when there is no more work will a task totally
ysr@777 3992 drain its local queue.
ysr@777 3993
ysr@777 3994 (3) Global Mark Stack. This handles local queue overflow. During
ysr@777 3995 marking only sets of entries are moved between it and the local
ysr@777 3996 queues, as access to it requires a mutex, and more fine-grained
ysr@777 3997 interaction with it might cause contention. If it
ysr@777 3998 overflows, then the marking phase should restart and iterate
ysr@777 3999 over the bitmap to identify gray objects. Throughout the marking
ysr@777 4000 phase, tasks attempt to keep the global mark stack at a small
ysr@777 4001 length but not totally empty, so that entries are available for
ysr@777 4002 popping by other tasks. Only when there is no more work do tasks
ysr@777 4003 totally drain the global mark stack.
ysr@777 4004
tonyp@3691 4005 (4) SATB Buffer Queue. This is where completed SATB buffers are
ysr@777 4006 made available. Buffers are regularly removed from this queue
ysr@777 4007 and scanned for roots, so that the queue doesn't get too
ysr@777 4008 long. During remark, all completed buffers are processed, as
ysr@777 4009 well as the filled-in parts of any uncompleted buffers.
ysr@777 4010
ysr@777 4011 The do_marking_step() method tries to abort when the time target
ysr@777 4012 has been reached. There are a few other cases when the
ysr@777 4013 do_marking_step() method also aborts:
ysr@777 4014
ysr@777 4015 (1) When the marking phase has been aborted (after a Full GC).
ysr@777 4016
tonyp@3691 4017 (2) When a global overflow (on the global stack) has been
tonyp@3691 4018 triggered. Before the task aborts, it will actually sync up with
tonyp@3691 4019 the other tasks to ensure that all the marking data structures
johnc@4788 4020 (local queues, stacks, fingers etc.) are re-initialized so that
tonyp@3691 4021 when do_marking_step() completes, the marking phase can
tonyp@3691 4022 immediately restart.
ysr@777 4023
ysr@777 4024 (3) When enough completed SATB buffers are available. The
ysr@777 4025 do_marking_step() method only tries to drain SATB buffers right
ysr@777 4026 at the beginning. So, if enough buffers are available, the
ysr@777 4027 marking step aborts and the SATB buffers are processed at
ysr@777 4028 the beginning of the next invocation.
ysr@777 4029
ysr@777 4030 (4) To yield. When we have to yield, we abort and yield
ysr@777 4031 right at the end of do_marking_step(). This saves us from a lot
ysr@777 4032 of hassle as, by yielding, we might allow a Full GC. If this
ysr@777 4033 happens then objects will be compacted underneath our feet, the
ysr@777 4034 heap might shrink, etc. We save checking for this by just
ysr@777 4035 aborting and doing the yield right at the end.
ysr@777 4036
ysr@777 4037 From the above it follows that the do_marking_step() method should be
ysr@777 4038 called in a loop (or regularly) until it completes; see the sketch below.
ysr@777 4039
ysr@777 4040 If a marking step completes without its has_aborted() flag being
ysr@777 4041 true, it means it has completed the current marking phase (and
ysr@777 4042 also all other marking tasks have done so and have all synced up).
ysr@777 4043
ysr@777 4044 A method called regular_clock_call() is invoked "regularly" (in
ysr@777 4045 sub-ms intervals) throughout marking. It is this clock method that
ysr@777 4046 checks all the abort conditions which were mentioned above and
ysr@777 4047 decides when the task should abort. A work-based scheme is used to
ysr@777 4048 trigger this clock method: when the number of object words the
ysr@777 4049 marking phase has scanned or the number of references the marking
ysr@777 4050 phase has visited reach a given limit. Additional invocations of
ysr@777 4051 the clock method have been planted in a few other strategic places
ysr@777 4052 too. The initial reason for the clock method was to avoid calling
ysr@777 4053 vtime too regularly, as it is quite expensive. So, once it was in
ysr@777 4054 place, it was natural to piggy-back all the other conditions on it
ysr@777 4055 too and not constantly check them throughout the code.
ysr@777 4056
johnc@4787 4057 If do_termination is true then do_marking_step will enter its
johnc@4787 4058 termination protocol.
johnc@4787 4059
johnc@4787 4060 The value of is_serial must be true when do_marking_step is being
johnc@4787 4061 called serially (i.e. by the VMThread) and do_marking_step should
johnc@4787 4062 skip any synchronization in the termination and overflow code.
johnc@4787 4063 Examples include the serial remark code and the serial reference
johnc@4787 4064 processing closures.
johnc@4787 4065
johnc@4787 4066 The value of is_serial must be false when do_marking_step is
johnc@4787 4067 being called by any of the worker threads in a work gang.
johnc@4787 4068 Examples include the concurrent marking code (CMMarkingTask),
johnc@4787 4069 the MT remark code, and the MT reference processing closures.
johnc@4787 4070
ysr@777 4071 *****************************************************************************/
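// Illustrative driver sketch (not product code): roughly how a parallel
// marking worker drives do_marking_step() in a loop, per the comment
// above. The 10.0ms time target is an assumption chosen for the example.
//
//   ConcurrentMark* cm = ...;  // the marking context
//   CMTask* task = ...;        // the task this worker has claimed
//   do {
//     task->do_marking_step(10.0 /* example time target, in ms */,
//                           true /* do_termination */,
//                           false /* is_serial */);
//     // ... yield / abort checks between marking steps go here ...
//   } while (!cm->has_aborted() && task->has_aborted());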
ysr@777 4072
johnc@2494 4073 void CMTask::do_marking_step(double time_target_ms,
johnc@4787 4074 bool do_termination,
johnc@4787 4075 bool is_serial) {
tonyp@1458 4076 assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
tonyp@1458 4077 assert(concurrent() == _cm->concurrent(), "they should be the same");
tonyp@1458 4078
ysr@777 4079 G1CollectorPolicy* g1_policy = _g1h->g1_policy();
tonyp@1458 4080 assert(_task_queues != NULL, "invariant");
tonyp@1458 4081 assert(_task_queue != NULL, "invariant");
johnc@4173 4082 assert(_task_queues->queue(_worker_id) == _task_queue, "invariant");
tonyp@1458 4083
tonyp@1458 4084 assert(!_claimed,
tonyp@1458 4085 "only one thread should claim this task at any one time");
ysr@777 4086
ysr@777 4087 // OK, this doesn't safeguard against all possible scenarios, as it is
ysr@777 4088 // possible for two threads to set the _claimed flag at the same
ysr@777 4089 // time. But it is only for debugging purposes anyway and it will
ysr@777 4090 // catch most problems.
ysr@777 4091 _claimed = true;
ysr@777 4092
ysr@777 4093 _start_time_ms = os::elapsedVTime() * 1000.0;
ysr@777 4094 statsOnly( _interval_start_time_ms = _start_time_ms );
ysr@777 4095
johnc@4787 4096 // If do_stealing is true then do_marking_step will attempt to
johnc@4787 4097 // steal work from the other CMTasks. It only makes sense to
johnc@4787 4098 // enable stealing when the termination protocol is enabled
johnc@4787 4099 // and do_marking_step() is not being called serially.
johnc@4787 4100 bool do_stealing = do_termination && !is_serial;
johnc@4787 4101
ysr@777 4102 double diff_prediction_ms =
ysr@777 4103 g1_policy->get_new_prediction(&_marking_step_diffs_ms);
ysr@777 4104 _time_target_ms = time_target_ms - diff_prediction_ms;
ysr@777 4105
ysr@777 4106 // set up the variables that are used in the work-based scheme to
ysr@777 4107 // call the regular clock method
ysr@777 4108 _words_scanned = 0;
ysr@777 4109 _refs_reached = 0;
ysr@777 4110 recalculate_limits();
ysr@777 4111
ysr@777 4112 // clear all flags
ysr@777 4113 clear_has_aborted();
johnc@2494 4114 _has_timed_out = false;
ysr@777 4115 _draining_satb_buffers = false;
ysr@777 4116
ysr@777 4117 ++_calls;
ysr@777 4118
tonyp@2973 4119 if (_cm->verbose_low()) {
johnc@4173 4120 gclog_or_tty->print_cr("[%u] >>>>>>>>>> START, call = %d, "
ysr@777 4121 "target = %1.2lfms >>>>>>>>>>",
johnc@4173 4122 _worker_id, _calls, _time_target_ms);
tonyp@2973 4123 }
ysr@777 4124
ysr@777 4125 // Set up the bitmap and oop closures. Anything that uses them is
ysr@777 4126 // eventually called from this method, so it is OK to allocate these
ysr@777 4127 // statically.
ysr@777 4128 CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap);
tonyp@2968 4129 G1CMOopClosure cm_oop_closure(_g1h, _cm, this);
tonyp@2968 4130 set_cm_oop_closure(&cm_oop_closure);
ysr@777 4131
ysr@777 4132 if (_cm->has_overflown()) {
tonyp@3691 4133 // This can happen if the mark stack overflows during a GC pause
tonyp@3691 4134 // and this task, after a yield point, restarts. We have to abort
tonyp@3691 4135 // as we need to get into the overflow protocol which happens
tonyp@3691 4136 // right at the end of this task.
ysr@777 4137 set_has_aborted();
ysr@777 4138 }
ysr@777 4139
ysr@777 4140 // First drain any available SATB buffers. After this, we will not
ysr@777 4141 // look at SATB buffers before the next invocation of this method.
ysr@777 4142 // If enough completed SATB buffers are queued up, the regular clock
ysr@777 4143 // will abort this task so that it restarts.
ysr@777 4144 drain_satb_buffers();
ysr@777 4145 // ...then partially drain the local queue and the global stack
ysr@777 4146 drain_local_queue(true);
ysr@777 4147 drain_global_stack(true);
ysr@777 4148
ysr@777 4149 do {
ysr@777 4150 if (!has_aborted() && _curr_region != NULL) {
ysr@777 4151 // This means that we're already holding on to a region.
tonyp@1458 4152 assert(_finger != NULL, "if region is not NULL, then the finger "
tonyp@1458 4153 "should not be NULL either");
ysr@777 4154
ysr@777 4155 // We might have restarted this task after an evacuation pause
ysr@777 4156 // which might have evacuated the region we're holding on to
ysr@777 4157 // underneath our feet. Let's read its limit again to make sure
ysr@777 4158 // that we do not iterate over a region of the heap that
ysr@777 4159 // contains garbage (update_region_limit() will also move
ysr@777 4160 // _finger to the start of the region if it is found empty).
ysr@777 4161 update_region_limit();
ysr@777 4162 // We will start from _finger not from the start of the region,
ysr@777 4163 // as we might be restarting this task after aborting half-way
ysr@777 4164 // through scanning this region. In this case, _finger points to
ysr@777 4165 // the address where we last found a marked object. If this is a
ysr@777 4166 // fresh region, _finger points to start().
ysr@777 4167 MemRegion mr = MemRegion(_finger, _region_limit);
ysr@777 4168
tonyp@2973 4169 if (_cm->verbose_low()) {
johnc@4173 4170 gclog_or_tty->print_cr("[%u] we're scanning part "
ysr@777 4171 "["PTR_FORMAT", "PTR_FORMAT") "
johnc@4580 4172 "of region "HR_FORMAT,
drchase@6680 4173 _worker_id, p2i(_finger), p2i(_region_limit),
johnc@4580 4174 HR_FORMAT_PARAMS(_curr_region));
tonyp@2973 4175 }
ysr@777 4176
johnc@4580 4177 assert(!_curr_region->isHumongous() || mr.start() == _curr_region->bottom(),
johnc@4580 4178 "humongous regions should go around loop once only");
johnc@4580 4179
johnc@4580 4180 // Some special cases:
johnc@4580 4181 // If the memory region is empty, we can just give up the region.
johnc@4580 4182 // If the current region is humongous then we only need to check
johnc@4580 4183 // the bitmap for the bit associated with the start of the object,
johnc@4580 4184 // scan the object if it's live, and give up the region.
johnc@4580 4185 // Otherwise, let's iterate over the bitmap of the part of the region
johnc@4580 4186 // that is left.
johnc@4575 4187 // If the iteration is successful, give up the region.
johnc@4580 4188 if (mr.is_empty()) {
johnc@4580 4189 giveup_current_region();
johnc@4580 4190 regular_clock_call();
johnc@4580 4191 } else if (_curr_region->isHumongous() && mr.start() == _curr_region->bottom()) {
johnc@4580 4192 if (_nextMarkBitMap->isMarked(mr.start())) {
johnc@4580 4193 // The object is marked - apply the closure
johnc@4580 4194 BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start());
johnc@4580 4195 bitmap_closure.do_bit(offset);
johnc@4580 4196 }
johnc@4580 4197 // Even if this task aborted while scanning the humongous object
johnc@4580 4198 // we can (and should) give up the current region.
johnc@4580 4199 giveup_current_region();
johnc@4580 4200 regular_clock_call();
johnc@4580 4201 } else if (_nextMarkBitMap->iterate(&bitmap_closure, mr)) {
ysr@777 4202 giveup_current_region();
ysr@777 4203 regular_clock_call();
ysr@777 4204 } else {
tonyp@1458 4205 assert(has_aborted(), "currently the only way to do so");
ysr@777 4206 // The only way to abort the bitmap iteration is to return
ysr@777 4207 // false from the do_bit() method. However, inside the
ysr@777 4208 // do_bit() method we move the _finger to point to the
ysr@777 4209 // object currently being looked at. So, if we bail out, we
ysr@777 4210 // have definitely set _finger to something non-null.
tonyp@1458 4211 assert(_finger != NULL, "invariant");
ysr@777 4212
ysr@777 4213 // Region iteration was actually aborted. So now _finger
ysr@777 4214 // points to the address of the object we last scanned. If we
ysr@777 4215 // leave it there, when we restart this task, we will rescan
ysr@777 4216 // the object. It is easy to avoid this. We move the finger by
ysr@777 4217 // enough to point to the next possible object header (the
ysr@777 4218 // bitmap knows by how much we need to move it as it knows its
ysr@777 4219 // granularity).
apetrusenko@1749 4220 assert(_finger < _region_limit, "invariant");
tamao@4733 4221 HeapWord* new_finger = _nextMarkBitMap->nextObject(_finger);
apetrusenko@1749 4222 // Check if bitmap iteration was aborted while scanning the last object
apetrusenko@1749 4223 if (new_finger >= _region_limit) {
tonyp@3691 4224 giveup_current_region();
apetrusenko@1749 4225 } else {
tonyp@3691 4226 move_finger_to(new_finger);
apetrusenko@1749 4227 }
ysr@777 4228 }
ysr@777 4229 }
ysr@777 4230 // At this point we have either completed iterating over the
ysr@777 4231 // region we were holding on to, or we have aborted.
ysr@777 4232
ysr@777 4233 // We then partially drain the local queue and the global stack.
ysr@777 4234 // (Do we really need this?)
ysr@777 4235 drain_local_queue(true);
ysr@777 4236 drain_global_stack(true);
ysr@777 4237
ysr@777 4238 // Read the note on the claim_region() method on why it might
ysr@777 4239 // return NULL with potentially more regions available for
ysr@777 4240 // claiming and why we have to check out_of_regions() to determine
ysr@777 4241 // whether we're done or not.
ysr@777 4242 while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
ysr@777 4243 // We are going to try to claim a new region. We should have
ysr@777 4244 // given up on the previous one.
tonyp@1458 4245 // Separated the asserts so that we know which one fires.
tonyp@1458 4246 assert(_curr_region == NULL, "invariant");
tonyp@1458 4247 assert(_finger == NULL, "invariant");
tonyp@1458 4248 assert(_region_limit == NULL, "invariant");
tonyp@2973 4249 if (_cm->verbose_low()) {
johnc@4173 4250 gclog_or_tty->print_cr("[%u] trying to claim a new region", _worker_id);
tonyp@2973 4251 }
johnc@4173 4252 HeapRegion* claimed_region = _cm->claim_region(_worker_id);
ysr@777 4253 if (claimed_region != NULL) {
ysr@777 4254 // Yes, we managed to claim one
ysr@777 4255 statsOnly( ++_regions_claimed );
ysr@777 4256
tonyp@2973 4257 if (_cm->verbose_low()) {
johnc@4173 4258 gclog_or_tty->print_cr("[%u] we successfully claimed "
ysr@777 4259 "region "PTR_FORMAT,
drchase@6680 4260 _worker_id, p2i(claimed_region));
tonyp@2973 4261 }
ysr@777 4262
ysr@777 4263 setup_for_region(claimed_region);
tonyp@1458 4264 assert(_curr_region == claimed_region, "invariant");
ysr@777 4265 }
ysr@777 4266 // It is important to call the regular clock here. It might take
ysr@777 4267 // a while to claim a region if, for example, we hit a large
ysr@777 4268 // block of empty regions. So we need to call the regular clock
ysr@777 4269 // method once round the loop to make sure it's called
ysr@777 4270 // frequently enough.
ysr@777 4271 regular_clock_call();
ysr@777 4272 }
ysr@777 4273
ysr@777 4274 if (!has_aborted() && _curr_region == NULL) {
tonyp@1458 4275 assert(_cm->out_of_regions(),
tonyp@1458 4276 "at this point we should be out of regions");
ysr@777 4277 }
ysr@777 4278 } while ( _curr_region != NULL && !has_aborted());
ysr@777 4279
ysr@777 4280 if (!has_aborted()) {
ysr@777 4281 // We cannot check whether the global stack is empty, since other
tonyp@3691 4282 // tasks might be pushing objects to it concurrently.
tonyp@1458 4283 assert(_cm->out_of_regions(),
tonyp@1458 4284 "at this point we should be out of regions");
ysr@777 4285
tonyp@2973 4286 if (_cm->verbose_low()) {
johnc@4173 4287 gclog_or_tty->print_cr("[%u] all regions claimed", _worker_id);
tonyp@2973 4288 }
ysr@777 4289
ysr@777 4290 // Try to reduce the number of available SATB buffers so that
ysr@777 4291 // remark has less work to do.
ysr@777 4292 drain_satb_buffers();
ysr@777 4293 }
ysr@777 4294
ysr@777 4295 // Since we've done everything else, we can now totally drain the
ysr@777 4296 // local queue and global stack.
ysr@777 4297 drain_local_queue(false);
ysr@777 4298 drain_global_stack(false);
ysr@777 4299
ysr@777 4300 // Attempt at work stealing from other task's queues.
johnc@2494 4301 if (do_stealing && !has_aborted()) {
ysr@777 4302 // We have not aborted. This means that we have finished all that
ysr@777 4303 // we could. Let's try to do some stealing...
ysr@777 4304
ysr@777 4305 // We cannot check whether the global stack is empty, since other
tonyp@3691 4306 // tasks might be pushing objects to it concurrently.
tonyp@1458 4307 assert(_cm->out_of_regions() && _task_queue->size() == 0,
tonyp@1458 4308 "only way to reach here");
ysr@777 4309
tonyp@2973 4310 if (_cm->verbose_low()) {
johnc@4173 4311 gclog_or_tty->print_cr("[%u] starting to steal", _worker_id);
tonyp@2973 4312 }
ysr@777 4313
ysr@777 4314 while (!has_aborted()) {
ysr@777 4315 oop obj;
ysr@777 4316 statsOnly( ++_steal_attempts );
ysr@777 4317
johnc@4173 4318 if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) {
tonyp@2973 4319 if (_cm->verbose_medium()) {
johnc@4173 4320 gclog_or_tty->print_cr("[%u] stolen "PTR_FORMAT" successfully",
drchase@6680 4321 _worker_id, p2i((void*) obj));
tonyp@2973 4322 }
ysr@777 4323
ysr@777 4324 statsOnly( ++_steals );
ysr@777 4325
tonyp@1458 4326 assert(_nextMarkBitMap->isMarked((HeapWord*) obj),
tonyp@1458 4327 "any stolen object should be marked");
ysr@777 4328 scan_object(obj);
ysr@777 4329
ysr@777 4330 // And since we're towards the end, let's totally drain the
ysr@777 4331 // local queue and global stack.
ysr@777 4332 drain_local_queue(false);
ysr@777 4333 drain_global_stack(false);
ysr@777 4334 } else {
ysr@777 4335 break;
ysr@777 4336 }
ysr@777 4337 }
ysr@777 4338 }
ysr@777 4339
tonyp@2848 4340 // If we are about to wrap up and go into termination, check if we
tonyp@2848 4341 // should raise the overflow flag.
tonyp@2848 4342 if (do_termination && !has_aborted()) {
tonyp@2848 4343 if (_cm->force_overflow()->should_force()) {
tonyp@2848 4344 _cm->set_has_overflown();
tonyp@2848 4345 regular_clock_call();
tonyp@2848 4346 }
tonyp@2848 4347 }
tonyp@2848 4348
ysr@777 4349 // We still haven't aborted. Now, let's try to get into the
ysr@777 4350 // termination protocol.
johnc@2494 4351 if (do_termination && !has_aborted()) {
ysr@777 4352 // We cannot check whether the global stack is empty, since other
tonyp@3691 4353 // tasks might be concurrently pushing objects on it.
tonyp@1458 4354 // Separated the asserts so that we know which one fires.
tonyp@1458 4355 assert(_cm->out_of_regions(), "only way to reach here");
tonyp@1458 4356 assert(_task_queue->size() == 0, "only way to reach here");
ysr@777 4357
tonyp@2973 4358 if (_cm->verbose_low()) {
johnc@4173 4359 gclog_or_tty->print_cr("[%u] starting termination protocol", _worker_id);
tonyp@2973 4360 }
ysr@777 4361
ysr@777 4362 _termination_start_time_ms = os::elapsedVTime() * 1000.0;
johnc@4787 4363
ysr@777 4364 // The CMTask class also extends the TerminatorTerminator class,
ysr@777 4365 // hence its should_exit_termination() method will also decide
ysr@777 4366 // whether to exit the termination protocol or not.
johnc@4787 4367 bool finished = (is_serial ||
johnc@4787 4368 _cm->terminator()->offer_termination(this));
ysr@777 4369 double termination_end_time_ms = os::elapsedVTime() * 1000.0;
ysr@777 4370 _termination_time_ms +=
ysr@777 4371 termination_end_time_ms - _termination_start_time_ms;
ysr@777 4372
ysr@777 4373 if (finished) {
ysr@777 4374 // We're all done.
ysr@777 4375
johnc@4173 4376 if (_worker_id == 0) {
ysr@777 4377 // let's allow task 0 to do this
ysr@777 4378 if (concurrent()) {
tonyp@1458 4379 assert(_cm->concurrent_marking_in_progress(), "invariant");
ysr@777 4380 // we need to set this to false before the next
ysr@777 4381 // safepoint. This way we ensure that the marking phase
ysr@777 4382 // doesn't observe any more heap expansions.
ysr@777 4383 _cm->clear_concurrent_marking_in_progress();
ysr@777 4384 }
ysr@777 4385 }
ysr@777 4386
ysr@777 4387 // We can now guarantee that the global stack is empty, since
tonyp@1458 4388 // all other tasks have finished. We separated the guarantees so
tonyp@1458 4389 // that, if a condition is false, we can immediately find out
tonyp@1458 4390 // which one.
tonyp@1458 4391 guarantee(_cm->out_of_regions(), "only way to reach here");
tonyp@1458 4392 guarantee(_cm->mark_stack_empty(), "only way to reach here");
tonyp@1458 4393 guarantee(_task_queue->size() == 0, "only way to reach here");
tonyp@1458 4394 guarantee(!_cm->has_overflown(), "only way to reach here");
tonyp@1458 4395 guarantee(!_cm->mark_stack_overflow(), "only way to reach here");
ysr@777 4396
tonyp@2973 4397 if (_cm->verbose_low()) {
johnc@4173 4398 gclog_or_tty->print_cr("[%u] all tasks terminated", _worker_id);
tonyp@2973 4399 }
ysr@777 4400 } else {
ysr@777 4401 // Apparently there's more work to do. Let's abort this task. It
ysr@777 4402 // will restart it and we can hopefully find more things to do.
ysr@777 4403
tonyp@2973 4404 if (_cm->verbose_low()) {
johnc@4173 4405 gclog_or_tty->print_cr("[%u] apparently there is more work to do",
johnc@4173 4406 _worker_id);
tonyp@2973 4407 }
ysr@777 4408
ysr@777 4409 set_has_aborted();
ysr@777 4410 statsOnly( ++_aborted_termination );
ysr@777 4411 }
ysr@777 4412 }
ysr@777 4413
ysr@777 4414 // Mainly for debugging purposes to make sure that a pointer to the
ysr@777 4415 // closure which was statically allocated in this frame doesn't
ysr@777 4416 // escape it by accident.
tonyp@2968 4417 set_cm_oop_closure(NULL);
ysr@777 4418 double end_time_ms = os::elapsedVTime() * 1000.0;
ysr@777 4419 double elapsed_time_ms = end_time_ms - _start_time_ms;
ysr@777 4420 // Update the step history.
ysr@777 4421 _step_times_ms.add(elapsed_time_ms);
ysr@777 4422
ysr@777 4423 if (has_aborted()) {
ysr@777 4424 // The task was aborted for some reason.
ysr@777 4425
ysr@777 4426 statsOnly( ++_aborted );
ysr@777 4427
johnc@2494 4428 if (_has_timed_out) {
ysr@777 4429 double diff_ms = elapsed_time_ms - _time_target_ms;
ysr@777 4430 // Keep statistics of how well we did with respect to hitting
ysr@777 4431 // our target only if we actually timed out (if we aborted for
ysr@777 4432 // other reasons, then the results might get skewed).
ysr@777 4433 _marking_step_diffs_ms.add(diff_ms);
ysr@777 4434 }
ysr@777 4435
ysr@777 4436 if (_cm->has_overflown()) {
ysr@777 4437 // This is the interesting one. We aborted because a global
ysr@777 4438 // overflow was raised. This means we have to restart the
ysr@777 4439 // marking phase and start iterating over regions. However, in
ysr@777 4440 // order to do this we have to make sure that all tasks stop
ysr@777 4441 // what they are doing and re-initialise in a safe manner. We
ysr@777 4442 // will achieve this with the use of two barrier sync points.
ysr@777 4443
tonyp@2973 4444 if (_cm->verbose_low()) {
johnc@4173 4445 gclog_or_tty->print_cr("[%u] detected overflow", _worker_id);
tonyp@2973 4446 }
ysr@777 4447
johnc@4787 4448 if (!is_serial) {
johnc@4787 4449 // We only need to enter the sync barrier if being called
johnc@4787 4450 // from a parallel context
johnc@4787 4451 _cm->enter_first_sync_barrier(_worker_id);
johnc@4787 4452
johnc@4787 4453 // When we exit this sync barrier we know that all tasks have
johnc@4787 4454 // stopped doing marking work. So, it's now safe to
johnc@4787 4455 // re-initialise our data structures. At the end of this method,
johnc@4787 4456 // task 0 will clear the global data structures.
johnc@4787 4457 }
ysr@777 4458
ysr@777 4459 statsOnly( ++_aborted_overflow );
ysr@777 4460
ysr@777 4461 // We clear the local state of this task...
ysr@777 4462 clear_region_fields();
ysr@777 4463
johnc@4787 4464 if (!is_serial) {
johnc@4787 4465 // ...and enter the second barrier.
johnc@4787 4466 _cm->enter_second_sync_barrier(_worker_id);
johnc@4787 4467 }
johnc@4788 4468 // At this point, if we're in the concurrent phase of
johnc@4788 4469 // marking, everything has been re-initialized and we're
ysr@777 4470 // ready to restart.
ysr@777 4471 }
ysr@777 4472
ysr@777 4473 if (_cm->verbose_low()) {
johnc@4173 4474 gclog_or_tty->print_cr("[%u] <<<<<<<<<< ABORTING, target = %1.2lfms, "
ysr@777 4475 "elapsed = %1.2lfms <<<<<<<<<<",
johnc@4173 4476 _worker_id, _time_target_ms, elapsed_time_ms);
tonyp@2973 4477 if (_cm->has_aborted()) {
johnc@4173 4478 gclog_or_tty->print_cr("[%u] ========== MARKING ABORTED ==========",
johnc@4173 4479 _worker_id);
tonyp@2973 4480 }
ysr@777 4481 }
ysr@777 4482 } else {
tonyp@2973 4483 if (_cm->verbose_low()) {
johnc@4173 4484 gclog_or_tty->print_cr("[%u] <<<<<<<<<< FINISHED, target = %1.2lfms, "
ysr@777 4485 "elapsed = %1.2lfms <<<<<<<<<<",
johnc@4173 4486 _worker_id, _time_target_ms, elapsed_time_ms);
tonyp@2973 4487 }
ysr@777 4488 }
ysr@777 4489
ysr@777 4490 _claimed = false;
ysr@777 4491 }
ysr@777 4492
johnc@4173 4493 CMTask::CMTask(uint worker_id,
ysr@777 4494 ConcurrentMark* cm,
johnc@3463 4495 size_t* marked_bytes,
johnc@3463 4496 BitMap* card_bm,
ysr@777 4497 CMTaskQueue* task_queue,
ysr@777 4498 CMTaskQueueSet* task_queues)
ysr@777 4499 : _g1h(G1CollectedHeap::heap()),
johnc@4173 4500 _worker_id(worker_id), _cm(cm),
ysr@777 4501 _claimed(false),
ysr@777 4502 _nextMarkBitMap(NULL), _hash_seed(17),
ysr@777 4503 _task_queue(task_queue),
ysr@777 4504 _task_queues(task_queues),
tonyp@2968 4505 _cm_oop_closure(NULL),
johnc@3463 4506 _marked_bytes_array(marked_bytes),
johnc@3463 4507 _card_bm(card_bm) {
tonyp@1458 4508 guarantee(task_queue != NULL, "invariant");
tonyp@1458 4509 guarantee(task_queues != NULL, "invariant");
ysr@777 4510
ysr@777 4511 statsOnly( _clock_due_to_scanning = 0;
ysr@777 4512 _clock_due_to_marking = 0 );
ysr@777 4513
ysr@777 4514 _marking_step_diffs_ms.add(0.5);
ysr@777 4515 }
tonyp@2717 4516
tonyp@2717 4517 // These are formatting macros that are used below to ensure
tonyp@2717 4518 // consistent formatting. The *_H_* versions are used to format the
tonyp@2717 4519 // header for a particular value and they should be kept consistent
tonyp@2717 4520 // with the corresponding macro. Also note that most of the macros add
tonyp@2717 4521 // the necessary white space (as a prefix) which makes them a bit
tonyp@2717 4522 // easier to compose.
tonyp@2717 4523
tonyp@2717 4524 // All the output lines are prefixed with this string to be able to
tonyp@2717 4525 // identify them easily in a large log file.
tonyp@2717 4526 #define G1PPRL_LINE_PREFIX "###"
tonyp@2717 4527
tonyp@2717 4528 #define G1PPRL_ADDR_BASE_FORMAT " "PTR_FORMAT"-"PTR_FORMAT
tonyp@2717 4529 #ifdef _LP64
tonyp@2717 4530 #define G1PPRL_ADDR_BASE_H_FORMAT " %37s"
tonyp@2717 4531 #else // _LP64
tonyp@2717 4532 #define G1PPRL_ADDR_BASE_H_FORMAT " %21s"
tonyp@2717 4533 #endif // _LP64
tonyp@2717 4534
tonyp@2717 4535 // For per-region info
tonyp@2717 4536 #define G1PPRL_TYPE_FORMAT " %-4s"
tonyp@2717 4537 #define G1PPRL_TYPE_H_FORMAT " %4s"
tonyp@2717 4538 #define G1PPRL_BYTE_FORMAT " "SIZE_FORMAT_W(9)
tonyp@2717 4539 #define G1PPRL_BYTE_H_FORMAT " %9s"
tonyp@2717 4540 #define G1PPRL_DOUBLE_FORMAT " %14.1f"
tonyp@2717 4541 #define G1PPRL_DOUBLE_H_FORMAT " %14s"
tonyp@2717 4542
tonyp@2717 4543 // For summary info
tonyp@2717 4544 #define G1PPRL_SUM_ADDR_FORMAT(tag) " "tag":"G1PPRL_ADDR_BASE_FORMAT
tonyp@2717 4545 #define G1PPRL_SUM_BYTE_FORMAT(tag) " "tag": "SIZE_FORMAT
tonyp@2717 4546 #define G1PPRL_SUM_MB_FORMAT(tag) " "tag": %1.2f MB"
tonyp@2717 4547 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag)" / %1.2f %%"
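// Example of composing these macros (illustrative): adjacent string
// literals concatenate, so
//   _out->print_cr(G1PPRL_LINE_PREFIX
//                  G1PPRL_SUM_MB_FORMAT("capacity"),
//                  bytes_to_mb(_total_capacity_bytes));
// expands to the single format string
//   "### capacity: %1.2f MB"
// with the matching argument following it.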
tonyp@2717 4548
tonyp@2717 4549 G1PrintRegionLivenessInfoClosure::
tonyp@2717 4550 G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name)
tonyp@2717 4551 : _out(out),
tonyp@2717 4552 _total_used_bytes(0), _total_capacity_bytes(0),
tonyp@2717 4553 _total_prev_live_bytes(0), _total_next_live_bytes(0),
tonyp@2717 4554 _hum_used_bytes(0), _hum_capacity_bytes(0),
tschatzl@5122 4555 _hum_prev_live_bytes(0), _hum_next_live_bytes(0),
johnc@5548 4556 _total_remset_bytes(0), _total_strong_code_roots_bytes(0) {
tonyp@2717 4557 G1CollectedHeap* g1h = G1CollectedHeap::heap();
tonyp@2717 4558 MemRegion g1_committed = g1h->g1_committed();
tonyp@2717 4559 MemRegion g1_reserved = g1h->g1_reserved();
tonyp@2717 4560 double now = os::elapsedTime();
tonyp@2717 4561
tonyp@2717 4562 // Print the header of the output.
tonyp@2717 4563 _out->cr();
tonyp@2717 4564 _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
tonyp@2717 4565 _out->print_cr(G1PPRL_LINE_PREFIX" HEAP"
tonyp@2717 4566 G1PPRL_SUM_ADDR_FORMAT("committed")
tonyp@2717 4567 G1PPRL_SUM_ADDR_FORMAT("reserved")
tonyp@2717 4568 G1PPRL_SUM_BYTE_FORMAT("region-size"),
drchase@6680 4569 p2i(g1_committed.start()), p2i(g1_committed.end()),
drchase@6680 4570 p2i(g1_reserved.start()), p2i(g1_reserved.end()),
johnc@3182 4571 HeapRegion::GrainBytes);
tonyp@2717 4572 _out->print_cr(G1PPRL_LINE_PREFIX);
tonyp@2717 4573 _out->print_cr(G1PPRL_LINE_PREFIX
tschatzl@5122 4574 G1PPRL_TYPE_H_FORMAT
tschatzl@5122 4575 G1PPRL_ADDR_BASE_H_FORMAT
tschatzl@5122 4576 G1PPRL_BYTE_H_FORMAT
tschatzl@5122 4577 G1PPRL_BYTE_H_FORMAT
tschatzl@5122 4578 G1PPRL_BYTE_H_FORMAT
tschatzl@5122 4579 G1PPRL_DOUBLE_H_FORMAT
johnc@5548 4580 G1PPRL_BYTE_H_FORMAT
tschatzl@5122 4581 G1PPRL_BYTE_H_FORMAT,
tschatzl@5122 4582 "type", "address-range",
johnc@5548 4583 "used", "prev-live", "next-live", "gc-eff",
johnc@5548 4584 "remset", "code-roots");
johnc@3173 4585 _out->print_cr(G1PPRL_LINE_PREFIX
tschatzl@5122 4586 G1PPRL_TYPE_H_FORMAT
tschatzl@5122 4587 G1PPRL_ADDR_BASE_H_FORMAT
tschatzl@5122 4588 G1PPRL_BYTE_H_FORMAT
tschatzl@5122 4589 G1PPRL_BYTE_H_FORMAT
tschatzl@5122 4590 G1PPRL_BYTE_H_FORMAT
tschatzl@5122 4591 G1PPRL_DOUBLE_H_FORMAT
johnc@5548 4592 G1PPRL_BYTE_H_FORMAT
tschatzl@5122 4593 G1PPRL_BYTE_H_FORMAT,
tschatzl@5122 4594 "", "",
johnc@5548 4595 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
johnc@5548 4596 "(bytes)", "(bytes)");
tonyp@2717 4597 }
tonyp@2717 4598
tonyp@2717 4599 // It takes as a parameter a pointer to one of the _hum_* fields; it
tonyp@2717 4600 // deduces the corresponding value for a region in a humongous region
tonyp@2717 4601 // series (either the region size, or what's left if the _hum_* field
tonyp@2717 4602 // is < the region size), and updates the _hum_* field accordingly.
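// Worked example (illustrative, assuming a 1024K region size): for a
// humongous series with _hum_used_bytes == 2560K, successive calls
// return 1024K, 1024K and 512K, leaving 0 in the field, so each of the
// three regions in the series reports its individual share.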
tonyp@2717 4603 size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) {
tonyp@2717 4604 size_t bytes = 0;
tonyp@2717 4605 // The > 0 check is to deal with the prev and next live bytes which
tonyp@2717 4606 // could be 0.
tonyp@2717 4607 if (*hum_bytes > 0) {
johnc@3182 4608 bytes = MIN2(HeapRegion::GrainBytes, *hum_bytes);
tonyp@2717 4609 *hum_bytes -= bytes;
tonyp@2717 4610 }
tonyp@2717 4611 return bytes;
tonyp@2717 4612 }
tonyp@2717 4613
tonyp@2717 4614 // It deduces the values for a region in a humongous region series
tonyp@2717 4615 // from the _hum_* fields and updates those accordingly. It assumes
tonyp@2717 4616 // that the _hum_* fields have already been set up from the "starts
tonyp@2717 4617 // humongous" region and that we visit the regions in address order.
tonyp@2717 4618 void G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* used_bytes,
tonyp@2717 4619 size_t* capacity_bytes,
tonyp@2717 4620 size_t* prev_live_bytes,
tonyp@2717 4621 size_t* next_live_bytes) {
tonyp@2717 4622 assert(_hum_used_bytes > 0 && _hum_capacity_bytes > 0, "pre-condition");
tonyp@2717 4623 *used_bytes = get_hum_bytes(&_hum_used_bytes);
tonyp@2717 4624 *capacity_bytes = get_hum_bytes(&_hum_capacity_bytes);
tonyp@2717 4625 *prev_live_bytes = get_hum_bytes(&_hum_prev_live_bytes);
tonyp@2717 4626 *next_live_bytes = get_hum_bytes(&_hum_next_live_bytes);
tonyp@2717 4627 }
tonyp@2717 4628
tonyp@2717 4629 bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
tonyp@2717 4630 const char* type = "";
tonyp@2717 4631 HeapWord* bottom = r->bottom();
tonyp@2717 4632 HeapWord* end = r->end();
tonyp@2717 4633 size_t capacity_bytes = r->capacity();
tonyp@2717 4634 size_t used_bytes = r->used();
tonyp@2717 4635 size_t prev_live_bytes = r->live_bytes();
tonyp@2717 4636 size_t next_live_bytes = r->next_live_bytes();
tonyp@2717 4637 double gc_eff = r->gc_efficiency();
tschatzl@5122 4638 size_t remset_bytes = r->rem_set()->mem_size();
johnc@5548 4639 size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();
johnc@5548 4640
tonyp@2717 4641 if (r->used() == 0) {
tonyp@2717 4642 type = "FREE";
tonyp@2717 4643 } else if (r->is_survivor()) {
tonyp@2717 4644 type = "SURV";
tonyp@2717 4645 } else if (r->is_young()) {
tonyp@2717 4646 type = "EDEN";
tonyp@2717 4647 } else if (r->startsHumongous()) {
tonyp@2717 4648 type = "HUMS";
tonyp@2717 4649
tonyp@2717 4650 assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 &&
tonyp@2717 4651 _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0,
tonyp@2717 4652 "they should have been zeroed after the last time we used them");
tonyp@2717 4653 // Set up the _hum_* fields.
tonyp@2717 4654 _hum_capacity_bytes = capacity_bytes;
tonyp@2717 4655 _hum_used_bytes = used_bytes;
tonyp@2717 4656 _hum_prev_live_bytes = prev_live_bytes;
tonyp@2717 4657 _hum_next_live_bytes = next_live_bytes;
tonyp@2717 4658 get_hum_bytes(&used_bytes, &capacity_bytes,
tonyp@2717 4659 &prev_live_bytes, &next_live_bytes);
tonyp@2717 4660 end = bottom + HeapRegion::GrainWords;
tonyp@2717 4661 } else if (r->continuesHumongous()) {
tonyp@2717 4662 type = "HUMC";
tonyp@2717 4663 get_hum_bytes(&used_bytes, &capacity_bytes,
tonyp@2717 4664 &prev_live_bytes, &next_live_bytes);
tonyp@2717 4665 assert(end == bottom + HeapRegion::GrainWords, "invariant");
tonyp@2717 4666 } else {
tonyp@2717 4667 type = "OLD";
tonyp@2717 4668 }
tonyp@2717 4669
tonyp@2717 4670 _total_used_bytes += used_bytes;
tonyp@2717 4671 _total_capacity_bytes += capacity_bytes;
tonyp@2717 4672 _total_prev_live_bytes += prev_live_bytes;
tonyp@2717 4673 _total_next_live_bytes += next_live_bytes;
tschatzl@5122 4674 _total_remset_bytes += remset_bytes;
johnc@5548 4675 _total_strong_code_roots_bytes += strong_code_roots_bytes;
tonyp@2717 4676
tonyp@2717 4677 // Print a line for this particular region.
tonyp@2717 4678 _out->print_cr(G1PPRL_LINE_PREFIX
tonyp@2717 4679 G1PPRL_TYPE_FORMAT
tonyp@2717 4680 G1PPRL_ADDR_BASE_FORMAT
tonyp@2717 4681 G1PPRL_BYTE_FORMAT
tonyp@2717 4682 G1PPRL_BYTE_FORMAT
tonyp@2717 4683 G1PPRL_BYTE_FORMAT
tschatzl@5122 4684 G1PPRL_DOUBLE_FORMAT
johnc@5548 4685 G1PPRL_BYTE_FORMAT
tschatzl@5122 4686 G1PPRL_BYTE_FORMAT,
drchase@6680 4687 type, p2i(bottom), p2i(end),
johnc@5548 4688 used_bytes, prev_live_bytes, next_live_bytes, gc_eff,
johnc@5548 4689 remset_bytes, strong_code_roots_bytes);
tonyp@2717 4690
tonyp@2717 4691 return false;
tonyp@2717 4692 }
tonyp@2717 4693
tonyp@2717 4694 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
tschatzl@5122 4695 // add static memory usages to remembered set sizes
tschatzl@5122 4696 _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size();
tonyp@2717 4697 // Print the footer of the output.
tonyp@2717 4698 _out->print_cr(G1PPRL_LINE_PREFIX);
tonyp@2717 4699 _out->print_cr(G1PPRL_LINE_PREFIX
tonyp@2717 4700 " SUMMARY"
tonyp@2717 4701 G1PPRL_SUM_MB_FORMAT("capacity")
tonyp@2717 4702 G1PPRL_SUM_MB_PERC_FORMAT("used")
tonyp@2717 4703 G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
tschatzl@5122 4704 G1PPRL_SUM_MB_PERC_FORMAT("next-live")
johnc@5548 4705 G1PPRL_SUM_MB_FORMAT("remset")
johnc@5548 4706 G1PPRL_SUM_MB_FORMAT("code-roots"),
tonyp@2717 4707 bytes_to_mb(_total_capacity_bytes),
tonyp@2717 4708 bytes_to_mb(_total_used_bytes),
tonyp@2717 4709 perc(_total_used_bytes, _total_capacity_bytes),
tonyp@2717 4710 bytes_to_mb(_total_prev_live_bytes),
tonyp@2717 4711 perc(_total_prev_live_bytes, _total_capacity_bytes),
tonyp@2717 4712 bytes_to_mb(_total_next_live_bytes),
tschatzl@5122 4713 perc(_total_next_live_bytes, _total_capacity_bytes),
johnc@5548 4714 bytes_to_mb(_total_remset_bytes),
johnc@5548 4715 bytes_to_mb(_total_strong_code_roots_bytes));
tonyp@2717 4716 _out->cr();
tonyp@2717 4717 }
