src/share/vm/gc_implementation/g1/concurrentMark.cpp

author:      tschatzl
date:        Fri, 10 Oct 2014 15:51:58 +0200
changeset:   7257:e7d0505c8a30
parent:      7195:c02ec279b062
child:       7333:b12a2a9b05ca
permissions: -rw-r--r--

8059758: Footprint regressions with JDK-8038423
Summary: Changes in JDK-8038423 always initialize (zero out) the virtual memory used for auxiliary data structures. This causes a footprint regression for G1 in startup benchmarks, because they never touch that memory, so the operating system never actually commits the pages. The fix: if the initialization value of a data structure matches the default value of freshly committed memory (zero), do not initialize it explicitly.
Reviewed-by: jwilhelm, brutisso
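
A minimal sketch of the shape of that fix, assuming a single commit-listener hook (the real hook in this file is CMBitMapMappingChangedListener::on_commit; clear_range below is an illustrative stand-in for the actual bitmap clearing):

    void on_commit(uint start_region, size_t num_regions, bool zero_filled) {
      if (zero_filled) {
        return; // freshly committed pages are already zero; leave them untouched
      }
      // Touching the pages via an explicit clear is what makes the OS commit them.
      clear_range(start_region, num_regions);
    }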

ysr@777 1 /*
drchase@6680 2 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "classfile/symbolTable.hpp"
stefank@6992 27 #include "code/codeCache.hpp"
tonyp@2968 28 #include "gc_implementation/g1/concurrentMark.inline.hpp"
stefank@2314 29 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
stefank@2314 30 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
stefank@2314 31 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
tonyp@3114 32 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
brutisso@3710 33 #include "gc_implementation/g1/g1Log.hpp"
tonyp@2968 34 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
stefank@2314 35 #include "gc_implementation/g1/g1RemSet.hpp"
tonyp@3416 36 #include "gc_implementation/g1/heapRegion.inline.hpp"
tschatzl@7091 37 #include "gc_implementation/g1/heapRegionManager.inline.hpp"
stefank@2314 38 #include "gc_implementation/g1/heapRegionRemSet.hpp"
tschatzl@7051 39 #include "gc_implementation/g1/heapRegionSet.inline.hpp"
kamg@2445 40 #include "gc_implementation/shared/vmGCOperations.hpp"
sla@5237 41 #include "gc_implementation/shared/gcTimer.hpp"
sla@5237 42 #include "gc_implementation/shared/gcTrace.hpp"
sla@5237 43 #include "gc_implementation/shared/gcTraceTime.hpp"
stefank@6992 44 #include "memory/allocation.hpp"
stefank@2314 45 #include "memory/genOopClosures.inline.hpp"
stefank@2314 46 #include "memory/referencePolicy.hpp"
stefank@2314 47 #include "memory/resourceArea.hpp"
stefank@2314 48 #include "oops/oop.inline.hpp"
stefank@2314 49 #include "runtime/handles.inline.hpp"
stefank@2314 50 #include "runtime/java.hpp"
goetz@6912 51 #include "runtime/prefetch.inline.hpp"
zgu@3900 52 #include "services/memTracker.hpp"
ysr@777 53
brutisso@3455 54 // Concurrent marking bit map wrapper
ysr@777 55
johnc@4333 56 CMBitMapRO::CMBitMapRO(int shifter) :
johnc@4333 57 _bm(),
ysr@777 58 _shifter(shifter) {
johnc@4333 59 _bmStartWord = 0;
johnc@4333 60 _bmWordSize = 0;
ysr@777 61 }
ysr@777 62
stefank@6992 63 HeapWord* CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
stefank@6992 64 const HeapWord* limit) const {
ysr@777 65 // First we must round addr *up* to a possible object boundary.
ysr@777 66 addr = (HeapWord*)align_size_up((intptr_t)addr,
ysr@777 67 HeapWordSize << _shifter);
ysr@777 68 size_t addrOffset = heapWordToOffset(addr);
tonyp@2973 69 if (limit == NULL) {
tonyp@2973 70 limit = _bmStartWord + _bmWordSize;
tonyp@2973 71 }
ysr@777 72 size_t limitOffset = heapWordToOffset(limit);
ysr@777 73 size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
ysr@777 74 HeapWord* nextAddr = offsetToHeapWord(nextOffset);
ysr@777 75 assert(nextAddr >= addr, "get_next_one postcondition");
ysr@777 76 assert(nextAddr == limit || isMarked(nextAddr),
ysr@777 77 "get_next_one postcondition");
ysr@777 78 return nextAddr;
ysr@777 79 }
ysr@777 80
stefank@6992 81 HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(const HeapWord* addr,
stefank@6992 82 const HeapWord* limit) const {
ysr@777 83 size_t addrOffset = heapWordToOffset(addr);
tonyp@2973 84 if (limit == NULL) {
tonyp@2973 85 limit = _bmStartWord + _bmWordSize;
tonyp@2973 86 }
ysr@777 87 size_t limitOffset = heapWordToOffset(limit);
ysr@777 88 size_t nextOffset = _bm.get_next_zero_offset(addrOffset, limitOffset);
ysr@777 89 HeapWord* nextAddr = offsetToHeapWord(nextOffset);
ysr@777 90 assert(nextAddr >= addr, "get_next_zero postcondition");
ysr@777 91 assert(nextAddr == limit || !isMarked(nextAddr),
ysr@777 92 "get_next_zero postcondition");
ysr@777 93 return nextAddr;
ysr@777 94 }
ysr@777 95
ysr@777 96 int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const {
ysr@777 97 assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
ysr@777 98 return (int) (diff >> _shifter);
ysr@777 99 }
ysr@777 100
ysr@777 101 #ifndef PRODUCT
tschatzl@7051 102 bool CMBitMapRO::covers(MemRegion heap_rs) const {
ysr@777 103 // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
brutisso@4061 104 assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
ysr@777 105 "size inconsistency");
tschatzl@7051 106 return _bmStartWord == (HeapWord*)(heap_rs.start()) &&
tschatzl@7051 107 _bmWordSize == heap_rs.word_size();
ysr@777 108 }
ysr@777 109 #endif
ysr@777 110
stefank@4904 111 void CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
stefank@4904 112 _bm.print_on_error(st, prefix);
stefank@4904 113 }
stefank@4904 114
tschatzl@7051 115 size_t CMBitMap::compute_size(size_t heap_size) {
tschatzl@7051 116 return heap_size / mark_distance();
tschatzl@7051 117 }
tschatzl@7051 118
tschatzl@7051 119 size_t CMBitMap::mark_distance() {
tschatzl@7051 120 return MinObjAlignmentInBytes * BitsPerByte;
tschatzl@7051 121 }
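// Worked example of the sizing above, assuming a 64-bit VM with the default
// MinObjAlignmentInBytes of 8: mark_distance() = 8 * 8 = 64, so compute_size()
// returns heap_size / 64 bytes of bitmap, i.e. one bitmap bit per 8-byte heap
// word. A 1 GB heap thus needs a 16 MB bitmap, or 32 MB in total for the prev
// and next bitmaps together.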
tschatzl@7051 122
tschatzl@7051 123 void CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) {
tschatzl@7051 124 _bmStartWord = heap.start();
tschatzl@7051 125 _bmWordSize = heap.word_size();
tschatzl@7051 126
tschatzl@7051 127 _bm.set_map((BitMap::bm_word_t*) storage->reserved().start());
tschatzl@7051 128 _bm.set_size(_bmWordSize >> _shifter);
tschatzl@7051 129
tschatzl@7051 130 storage->set_mapping_changed_listener(&_listener);
tschatzl@7051 131 }
tschatzl@7051 132
tschatzl@7257 133 void CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions, bool zero_filled) {
tschatzl@7257 134 if (zero_filled) {
tschatzl@7257 135 return;
tschatzl@7257 136 }
tschatzl@7051 137 // We need to clear the bitmap on commit, removing any existing information.
tschatzl@7051 138 MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords);
tschatzl@7051 139 _bm->clearRange(mr);
tschatzl@7051 140 }
tschatzl@7051 141
tschatzl@7051 142 // Closure used for clearing the given mark bitmap.
tschatzl@7051 143 class ClearBitmapHRClosure : public HeapRegionClosure {
tschatzl@7051 144 private:
tschatzl@7051 145 ConcurrentMark* _cm;
tschatzl@7051 146 CMBitMap* _bitmap;
tschatzl@7051 147 bool _may_yield; // Whether the closure may yield during iteration. If it yields and marking has been aborted, the iteration is aborted too.
tschatzl@7051 148 public:
tschatzl@7051 149 ClearBitmapHRClosure(ConcurrentMark* cm, CMBitMap* bitmap, bool may_yield) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap), _may_yield(may_yield) {
tschatzl@7051 150 assert(!may_yield || cm != NULL, "CM must be non-NULL if this closure is expected to yield.");
tschatzl@7051 151 }
tschatzl@7051 152
tschatzl@7051 153 virtual bool doHeapRegion(HeapRegion* r) {
tschatzl@7051 154 size_t const chunk_size_in_words = M / HeapWordSize;
tschatzl@7051 155
tschatzl@7051 156 HeapWord* cur = r->bottom();
tschatzl@7051 157 HeapWord* const end = r->end();
tschatzl@7051 158
tschatzl@7051 159 while (cur < end) {
tschatzl@7051 160 MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
tschatzl@7051 161 _bitmap->clearRange(mr);
tschatzl@7051 162
tschatzl@7051 163 cur += chunk_size_in_words;
tschatzl@7051 164
tschatzl@7051 165 // Abort iteration if after yielding the marking has been aborted.
tschatzl@7051 166 if (_may_yield && _cm->do_yield_check() && _cm->has_aborted()) {
tschatzl@7051 167 return true;
tschatzl@7051 168 }
tschatzl@7051 169 // Repeat the asserts from before the start of the closure. We do them
tschatzl@7051 170 // as asserts here to minimize their overhead on the product build. The
tschatzl@7051 171 // same conditions are checked as guarantees at the beginning / end of
tschatzl@7051 172 // the bitmap clearing, so the product build still gets some checking.
tschatzl@7051 173 assert(!_may_yield || _cm->cmThread()->during_cycle(), "invariant");
tschatzl@7051 174 assert(!_may_yield || !G1CollectedHeap::heap()->mark_in_progress(), "invariant");
tschatzl@7051 175 }
tschatzl@7051 176
johnc@4333 177 return false;
johnc@4333 178 }
tschatzl@7051 179 };
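// Worked example of the chunking in doHeapRegion() above, assuming a 64-bit
// VM: HeapWordSize is 8, so chunk_size_in_words = M / 8 = 131072 words and
// each clearRange() call covers 1 MB of heap (16 KB of the one-bit-per-word
// bitmap). The yield check therefore runs at 1 MB granularity, bounding how
// long the closure can keep a safepoint waiting.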
johnc@4333 180
ysr@777 181 void CMBitMap::clearAll() {
tschatzl@7051 182 ClearBitmapHRClosure cl(NULL, this, false /* may_yield */);
tschatzl@7051 183 G1CollectedHeap::heap()->heap_region_iterate(&cl);
tschatzl@7051 184 guarantee(cl.complete(), "Must have completed iteration.");
ysr@777 185 return;
ysr@777 186 }
ysr@777 187
ysr@777 188 void CMBitMap::markRange(MemRegion mr) {
ysr@777 189 mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
ysr@777 190 assert(!mr.is_empty(), "unexpected empty region");
ysr@777 191 assert((offsetToHeapWord(heapWordToOffset(mr.end())) ==
ysr@777 192 ((HeapWord *) mr.end())),
ysr@777 193 "markRange memory region end is not card aligned");
ysr@777 194 // convert address range into offset range
ysr@777 195 _bm.at_put_range(heapWordToOffset(mr.start()),
ysr@777 196 heapWordToOffset(mr.end()), true);
ysr@777 197 }
ysr@777 198
ysr@777 199 void CMBitMap::clearRange(MemRegion mr) {
ysr@777 200 mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
ysr@777 201 assert(!mr.is_empty(), "unexpected empty region");
ysr@777 202 // convert address range into offset range
ysr@777 203 _bm.at_put_range(heapWordToOffset(mr.start()),
ysr@777 204 heapWordToOffset(mr.end()), false);
ysr@777 205 }
ysr@777 206
ysr@777 207 MemRegion CMBitMap::getAndClearMarkedRegion(HeapWord* addr,
ysr@777 208 HeapWord* end_addr) {
ysr@777 209 HeapWord* start = getNextMarkedWordAddress(addr);
ysr@777 210 start = MIN2(start, end_addr);
ysr@777 211 HeapWord* end = getNextUnmarkedWordAddress(start);
ysr@777 212 end = MIN2(end, end_addr);
ysr@777 213 assert(start <= end, "Consistency check");
ysr@777 214 MemRegion mr(start, end);
ysr@777 215 if (!mr.is_empty()) {
ysr@777 216 clearRange(mr);
ysr@777 217 }
ysr@777 218 return mr;
ysr@777 219 }
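// A hypothetical usage sketch for getAndClearMarkedRegion(): walking and
// clearing every marked run in [bottom, top). "bm" and "process" are
// illustrative names, not part of this file:
//
//   HeapWord* cur = bottom;
//   while (cur < top) {
//     MemRegion mr = bm->getAndClearMarkedRegion(cur, top);
//     if (mr.is_empty()) break;   // no marked words left below top
//     process(mr);                // act on one maximal marked run
//     cur = mr.end();
//   }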
ysr@777 220
ysr@777 221 CMMarkStack::CMMarkStack(ConcurrentMark* cm) :
ysr@777 222 _base(NULL), _cm(cm)
ysr@777 223 #ifdef ASSERT
ysr@777 224 , _drain_in_progress(false)
ysr@777 225 , _drain_in_progress_yields(false)
ysr@777 226 #endif
ysr@777 227 {}
ysr@777 228
johnc@4333 229 bool CMMarkStack::allocate(size_t capacity) {
johnc@4333 230 // allocate a stack of the requisite depth
johnc@4333 231 ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop)));
johnc@4333 232 if (!rs.is_reserved()) {
johnc@4333 233 warning("ConcurrentMark MarkStack allocation failure");
johnc@4333 234 return false;
tonyp@2973 235 }
johnc@4333 236 MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
johnc@4333 237 if (!_virtual_space.initialize(rs, rs.size())) {
johnc@4333 238 warning("ConcurrentMark MarkStack backing store failure");
johnc@4333 239 // Release the virtual memory reserved for the marking stack
johnc@4333 240 rs.release();
johnc@4333 241 return false;
johnc@4333 242 }
johnc@4333 243 assert(_virtual_space.committed_size() == rs.size(),
johnc@4333 244 "Didn't reserve backing store for all of ConcurrentMark stack?");
johnc@4333 245 _base = (oop*) _virtual_space.low();
johnc@4333 246 setEmpty();
johnc@4333 247 _capacity = (jint) capacity;
tonyp@3416 248 _saved_index = -1;
johnc@4386 249 _should_expand = false;
ysr@777 250 NOT_PRODUCT(_max_depth = 0);
johnc@4333 251 return true;
johnc@4333 252 }
johnc@4333 253
johnc@4333 254 void CMMarkStack::expand() {
johnc@4333 255 // Called during remark if we've overflowed the marking stack during marking.
johnc@4333 256 assert(isEmpty(), "stack should have been emptied while handling overflow");
johnc@4333 257 assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted");
johnc@4333 258 // Clear expansion flag
johnc@4333 259 _should_expand = false;
johnc@4333 260 if (_capacity == (jint) MarkStackSizeMax) {
johnc@4333 261 if (PrintGCDetails && Verbose) {
johnc@4333 262 gclog_or_tty->print_cr(" (benign) Can't expand marking stack capacity, at max size limit");
johnc@4333 263 }
johnc@4333 264 return;
johnc@4333 265 }
johnc@4333 266 // Double capacity if possible
johnc@4333 267 jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax);
johnc@4333 268 // Do not give up existing stack until we have managed to
johnc@4333 269 // get the double capacity that we desired.
johnc@4333 270 ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity *
johnc@4333 271 sizeof(oop)));
johnc@4333 272 if (rs.is_reserved()) {
johnc@4333 273 // Release the backing store associated with old stack
johnc@4333 274 _virtual_space.release();
johnc@4333 275 // Reinitialize virtual space for new stack
johnc@4333 276 if (!_virtual_space.initialize(rs, rs.size())) {
johnc@4333 277 fatal("Not enough swap for expanded marking stack capacity");
johnc@4333 278 }
johnc@4333 279 _base = (oop*)(_virtual_space.low());
johnc@4333 280 _index = 0;
johnc@4333 281 _capacity = new_capacity;
johnc@4333 282 } else {
johnc@4333 283 if (PrintGCDetails && Verbose) {
johnc@4333 284 // Failed to double the capacity; continue with the existing stack.
johnc@4333 285 gclog_or_tty->print(" (benign) Failed to expand marking stack capacity from "
johnc@4333 286 SIZE_FORMAT"K to " SIZE_FORMAT"K",
johnc@4333 287 _capacity / K, new_capacity / K);
johnc@4333 288 }
johnc@4333 289 }
johnc@4333 290 }
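// Worked example of the doubling policy above, assuming a 64-bit VM with the
// usual 64-bit defaults of MarkStackSize = 4M entries and MarkStackSizeMax =
// 512M entries: each entry is an 8-byte oop, so the initial stack reserves
// 32 MB of virtual space and every overflow-driven expand() doubles the entry
// count (8M, 16M, ...) until it reaches MarkStackSizeMax.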
johnc@4333 291
johnc@4333 292 void CMMarkStack::set_should_expand() {
johnc@4333 293 // If we're resetting the marking state because of a
johnc@4333 294 // marking stack overflow, record that we should, if
johnc@4333 295 // possible, expand the stack.
johnc@4333 296 _should_expand = _cm->has_overflown();
ysr@777 297 }
ysr@777 298
ysr@777 299 CMMarkStack::~CMMarkStack() {
tonyp@2973 300 if (_base != NULL) {
johnc@4333 301 _base = NULL;
johnc@4333 302 _virtual_space.release();
tonyp@2973 303 }
ysr@777 304 }
ysr@777 305
ysr@777 306 void CMMarkStack::par_push(oop ptr) {
ysr@777 307 while (true) {
ysr@777 308 if (isFull()) {
ysr@777 309 _overflow = true;
ysr@777 310 return;
ysr@777 311 }
ysr@777 312 // Otherwise...
ysr@777 313 jint index = _index;
ysr@777 314 jint next_index = index+1;
ysr@777 315 jint res = Atomic::cmpxchg(next_index, &_index, index);
ysr@777 316 if (res == index) {
ysr@777 317 _base[index] = ptr;
ysr@777 318 // Note that we don't maintain this atomically. We could, but it
ysr@777 319 // doesn't seem necessary.
ysr@777 320 NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
ysr@777 321 return;
ysr@777 322 }
ysr@777 323 // Otherwise, we need to try again.
ysr@777 324 }
ysr@777 325 }
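// The loop above is a reserve-then-publish CAS pattern: a slot index is
// claimed with cmpxchg first and the oop is stored afterwards. A minimal,
// self-contained analogue using C++11 atomics, purely for illustration
// (HotSpot itself uses the Atomic::cmpxchg wrapper, and all names below are
// hypothetical):
//
//   #include <atomic>
//
//   template <typename T, int CAPACITY>
//   struct ParStack {
//     std::atomic<int> _index{0};
//     T                _base[CAPACITY];
//     bool push(T e) {
//       for (;;) {
//         int i = _index.load(std::memory_order_relaxed);
//         if (i >= CAPACITY) return false;            // overflow
//         if (_index.compare_exchange_weak(i, i + 1)) {
//           _base[i] = e;                             // slot i is now ours
//           return true;
//         }
//         // CAS failed: another thread claimed slot i; retry.
//       }
//     }
//   };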
ysr@777 326
ysr@777 327 void CMMarkStack::par_adjoin_arr(oop* ptr_arr, int n) {
ysr@777 328 while (true) {
ysr@777 329 if (isFull()) {
ysr@777 330 _overflow = true;
ysr@777 331 return;
ysr@777 332 }
ysr@777 333 // Otherwise...
ysr@777 334 jint index = _index;
ysr@777 335 jint next_index = index + n;
ysr@777 336 if (next_index > _capacity) {
ysr@777 337 _overflow = true;
ysr@777 338 return;
ysr@777 339 }
ysr@777 340 jint res = Atomic::cmpxchg(next_index, &_index, index);
ysr@777 341 if (res == index) {
ysr@777 342 for (int i = 0; i < n; i++) {
johnc@4333 343 int ind = index + i;
ysr@777 344 assert(ind < _capacity, "By overflow test above.");
ysr@777 345 _base[ind] = ptr_arr[i];
ysr@777 346 }
ysr@777 347 NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
ysr@777 348 return;
ysr@777 349 }
ysr@777 350 // Otherwise, we need to try again.
ysr@777 351 }
ysr@777 352 }
ysr@777 353
ysr@777 354 void CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
ysr@777 355 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
ysr@777 356 jint start = _index;
ysr@777 357 jint next_index = start + n;
ysr@777 358 if (next_index > _capacity) {
ysr@777 359 _overflow = true;
ysr@777 360 return;
ysr@777 361 }
ysr@777 362 // Otherwise.
ysr@777 363 _index = next_index;
ysr@777 364 for (int i = 0; i < n; i++) {
ysr@777 365 int ind = start + i;
tonyp@1458 366 assert(ind < _capacity, "By overflow test above.");
ysr@777 367 _base[ind] = ptr_arr[i];
ysr@777 368 }
johnc@4333 369 NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
ysr@777 370 }
ysr@777 371
ysr@777 372 bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
ysr@777 373 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
ysr@777 374 jint index = _index;
ysr@777 375 if (index == 0) {
ysr@777 376 *n = 0;
ysr@777 377 return false;
ysr@777 378 } else {
ysr@777 379 int k = MIN2(max, index);
johnc@4333 380 jint new_ind = index - k;
ysr@777 381 for (int j = 0; j < k; j++) {
ysr@777 382 ptr_arr[j] = _base[new_ind + j];
ysr@777 383 }
ysr@777 384 _index = new_ind;
ysr@777 385 *n = k;
ysr@777 386 return true;
ysr@777 387 }
ysr@777 388 }
ysr@777 389
ysr@777 390 template<class OopClosureClass>
ysr@777 391 bool CMMarkStack::drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after) {
ysr@777 392 assert(!_drain_in_progress || !_drain_in_progress_yields || yield_after
ysr@777 393 || SafepointSynchronize::is_at_safepoint(),
ysr@777 394 "Drain recursion must be yield-safe.");
ysr@777 395 bool res = true;
ysr@777 396 debug_only(_drain_in_progress = true);
ysr@777 397 debug_only(_drain_in_progress_yields = yield_after);
ysr@777 398 while (!isEmpty()) {
ysr@777 399 oop newOop = pop();
ysr@777 400 assert(G1CollectedHeap::heap()->is_in_reserved(newOop), "Bad pop");
ysr@777 401 assert(newOop->is_oop(), "Expected an oop");
ysr@777 402 assert(bm == NULL || bm->isMarked((HeapWord*)newOop),
ysr@777 403 "only grey objects on this stack");
ysr@777 404 newOop->oop_iterate(cl);
ysr@777 405 if (yield_after && _cm->do_yield_check()) {
tonyp@2973 406 res = false;
tonyp@2973 407 break;
ysr@777 408 }
ysr@777 409 }
ysr@777 410 debug_only(_drain_in_progress = false);
ysr@777 411 return res;
ysr@777 412 }
ysr@777 413
tonyp@3416 414 void CMMarkStack::note_start_of_gc() {
tonyp@3416 415 assert(_saved_index == -1,
tonyp@3416 416 "note_start_of_gc()/end_of_gc() bracketed incorrectly");
tonyp@3416 417 _saved_index = _index;
tonyp@3416 418 }
tonyp@3416 419
tonyp@3416 420 void CMMarkStack::note_end_of_gc() {
tonyp@3416 421 // This is intentionally a guarantee, instead of an assert. If we
tonyp@3416 422 // accidentally add something to the mark stack during GC, it
tonyp@3416 423 // will be a correctness issue, so it's better if we crash. We'll
tonyp@3416 424 // only check this once per GC anyway, so it won't be a performance
tonyp@3416 425 // issue in any way.
tonyp@3416 426 guarantee(_saved_index == _index,
tonyp@3416 427 err_msg("saved index: %d index: %d", _saved_index, _index));
tonyp@3416 428 _saved_index = -1;
tonyp@3416 429 }
tonyp@3416 430
ysr@777 431 void CMMarkStack::oops_do(OopClosure* f) {
tonyp@3416 432 assert(_saved_index == _index,
tonyp@3416 433 err_msg("saved index: %d index: %d", _saved_index, _index));
tonyp@3416 434 for (int i = 0; i < _index; i += 1) {
ysr@777 435 f->do_oop(&_base[i]);
ysr@777 436 }
ysr@777 437 }
ysr@777 438
tonyp@3464 439 CMRootRegions::CMRootRegions() :
tonyp@3464 440 _young_list(NULL), _cm(NULL), _scan_in_progress(false),
tonyp@3464 441 _should_abort(false), _next_survivor(NULL) { }
tonyp@3464 442
tonyp@3464 443 void CMRootRegions::init(G1CollectedHeap* g1h, ConcurrentMark* cm) {
tonyp@3464 444 _young_list = g1h->young_list();
tonyp@3464 445 _cm = cm;
tonyp@3464 446 }
tonyp@3464 447
tonyp@3464 448 void CMRootRegions::prepare_for_scan() {
tonyp@3464 449 assert(!scan_in_progress(), "pre-condition");
tonyp@3464 450
tonyp@3464 451 // Currently, only survivors can be root regions.
tonyp@3464 452 assert(_next_survivor == NULL, "pre-condition");
tonyp@3464 453 _next_survivor = _young_list->first_survivor_region();
tonyp@3464 454 _scan_in_progress = (_next_survivor != NULL);
tonyp@3464 455 _should_abort = false;
tonyp@3464 456 }
tonyp@3464 457
tonyp@3464 458 HeapRegion* CMRootRegions::claim_next() {
tonyp@3464 459 if (_should_abort) {
tonyp@3464 460 // If someone has set the should_abort flag, we return NULL to
tonyp@3464 461 // force the caller to bail out of their loop.
tonyp@3464 462 return NULL;
tonyp@3464 463 }
tonyp@3464 464
tonyp@3464 465 // Currently, only survivors can be root regions.
tonyp@3464 466 HeapRegion* res = _next_survivor;
tonyp@3464 467 if (res != NULL) {
tonyp@3464 468 MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
tonyp@3464 469 // Read it again in case it changed while we were waiting for the lock.
tonyp@3464 470 res = _next_survivor;
tonyp@3464 471 if (res != NULL) {
tonyp@3464 472 if (res == _young_list->last_survivor_region()) {
tonyp@3464 473 // We just claimed the last survivor so store NULL to indicate
tonyp@3464 474 // that we're done.
tonyp@3464 475 _next_survivor = NULL;
tonyp@3464 476 } else {
tonyp@3464 477 _next_survivor = res->get_next_young_region();
tonyp@3464 478 }
tonyp@3464 479 } else {
tonyp@3464 480 // Someone else claimed the last survivor while we were trying
tonyp@3464 481 // to take the lock so nothing else to do.
tonyp@3464 482 }
tonyp@3464 483 }
tonyp@3464 484 assert(res == NULL || res->is_survivor(), "post-condition");
tonyp@3464 485
tonyp@3464 486 return res;
tonyp@3464 487 }
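// Note on claim_next() above: it is a double-checked claim. _next_survivor is
// first read without the lock as a cheap "is there anything left" filter,
// then re-read under RootRegionScan_lock before being advanced, so two
// scanning threads can never claim the same survivor region; only the NULL
// fast path is lock-free.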
tonyp@3464 488
tonyp@3464 489 void CMRootRegions::scan_finished() {
tonyp@3464 490 assert(scan_in_progress(), "pre-condition");
tonyp@3464 491
tonyp@3464 492 // Currently, only survivors can be root regions.
tonyp@3464 493 if (!_should_abort) {
tonyp@3464 494 assert(_next_survivor == NULL, "we should have claimed all survivors");
tonyp@3464 495 }
tonyp@3464 496 _next_survivor = NULL;
tonyp@3464 497
tonyp@3464 498 {
tonyp@3464 499 MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
tonyp@3464 500 _scan_in_progress = false;
tonyp@3464 501 RootRegionScan_lock->notify_all();
tonyp@3464 502 }
tonyp@3464 503 }
tonyp@3464 504
tonyp@3464 505 bool CMRootRegions::wait_until_scan_finished() {
tonyp@3464 506 if (!scan_in_progress()) return false;
tonyp@3464 507
tonyp@3464 508 {
tonyp@3464 509 MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
tonyp@3464 510 while (scan_in_progress()) {
tonyp@3464 511 RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
tonyp@3464 512 }
tonyp@3464 513 }
tonyp@3464 514 return true;
tonyp@3464 515 }
tonyp@3464 516
ysr@777 517 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
ysr@777 518 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
ysr@777 519 #endif // _MSC_VER
ysr@777 520
jmasa@3357 521 uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
jmasa@3357 522 return MAX2((n_par_threads + 2) / 4, 1U);
jmasa@3294 523 }
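// Worked example: with 8 parallel GC threads this yields (8 + 2) / 4 = 2
// concurrent marking threads, with 4 it yields 1, and the MAX2 keeps the
// result at least 1 even when n_par_threads is 0.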
jmasa@3294 524
tschatzl@7051 525 ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage) :
johnc@4333 526 _g1h(g1h),
tschatzl@7051 527 _markBitMap1(),
tschatzl@7051 528 _markBitMap2(),
ysr@777 529 _parallel_marking_threads(0),
jmasa@3294 530 _max_parallel_marking_threads(0),
ysr@777 531 _sleep_factor(0.0),
ysr@777 532 _marking_task_overhead(1.0),
ysr@777 533 _cleanup_sleep_factor(0.0),
ysr@777 534 _cleanup_task_overhead(1.0),
tonyp@2472 535 _cleanup_list("Cleanup List"),
johnc@4333 536 _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/),
tschatzl@7051 537 _card_bm((g1h->reserved_region().byte_size() + CardTableModRefBS::card_size - 1) >>
johnc@4333 538 CardTableModRefBS::card_shift,
johnc@4333 539 false /* in_resource_area*/),
johnc@3463 540
ysr@777 541 _prevMarkBitMap(&_markBitMap1),
ysr@777 542 _nextMarkBitMap(&_markBitMap2),
ysr@777 543
ysr@777 544 _markStack(this),
ysr@777 545 // _finger set in set_non_marking_state
ysr@777 546
johnc@4173 547 _max_worker_id(MAX2((uint)ParallelGCThreads, 1U)),
ysr@777 548 // _active_tasks set in set_non_marking_state
ysr@777 549 // _tasks set inside the constructor
johnc@4173 550 _task_queues(new CMTaskQueueSet((int) _max_worker_id)),
johnc@4173 551 _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),
ysr@777 552
ysr@777 553 _has_overflown(false),
ysr@777 554 _concurrent(false),
tonyp@1054 555 _has_aborted(false),
brutisso@6904 556 _aborted_gc_id(GCId::undefined()),
tonyp@1054 557 _restart_for_overflow(false),
tonyp@1054 558 _concurrent_marking_in_progress(false),
ysr@777 559
ysr@777 560 // _verbose_level set below
ysr@777 561
ysr@777 562 _init_times(),
ysr@777 563 _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
ysr@777 564 _cleanup_times(),
ysr@777 565 _total_counting_time(0.0),
ysr@777 566 _total_rs_scrub_time(0.0),
johnc@3463 567
johnc@3463 568 _parallel_workers(NULL),
johnc@3463 569
johnc@3463 570 _count_card_bitmaps(NULL),
johnc@4333 571 _count_marked_bytes(NULL),
johnc@4333 572 _completed_initialization(false) {
tonyp@2973 573 CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
tonyp@2973 574 if (verbose_level < no_verbose) {
ysr@777 575 verbose_level = no_verbose;
tonyp@2973 576 }
tonyp@2973 577 if (verbose_level > high_verbose) {
ysr@777 578 verbose_level = high_verbose;
tonyp@2973 579 }
ysr@777 580 _verbose_level = verbose_level;
ysr@777 581
tonyp@2973 582 if (verbose_low()) {
ysr@777 583 gclog_or_tty->print_cr("[global] init, heap start = "PTR_FORMAT", "
drchase@6680 584 "heap end = " INTPTR_FORMAT, p2i(_heap_start), p2i(_heap_end));
tonyp@2973 585 }
ysr@777 586
tschatzl@7051 587 _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage);
tschatzl@7051 588 _markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage);
ysr@777 589
ysr@777 590 // Create & start a ConcurrentMark thread.
ysr@1280 591 _cmThread = new ConcurrentMarkThread(this);
ysr@1280 592 assert(cmThread() != NULL, "CM Thread should have been created");
ysr@1280 593 assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
ehelin@6168 594 if (_cmThread->osthread() == NULL) {
ehelin@6168 595 vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
ehelin@6168 596 }
ysr@1280 597
ysr@777 598 assert(CGC_lock != NULL, "Where's the CGC_lock?");
tschatzl@7051 599 assert(_markBitMap1.covers(g1h->reserved_region()), "_markBitMap1 inconsistency");
tschatzl@7051 600 assert(_markBitMap2.covers(g1h->reserved_region()), "_markBitMap2 inconsistency");
ysr@777 601
ysr@777 602 SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
tonyp@1717 603 satb_qs.set_buffer_size(G1SATBBufferSize);
ysr@777 604
tonyp@3464 605 _root_regions.init(_g1h, this);
tonyp@3464 606
jmasa@1719 607 if (ConcGCThreads > ParallelGCThreads) {
drchase@6680 608 warning("Can't have more ConcGCThreads (" UINTX_FORMAT ") "
drchase@6680 609 "than ParallelGCThreads (" UINTX_FORMAT ").",
johnc@4333 610 ConcGCThreads, ParallelGCThreads);
johnc@4333 611 return;
ysr@777 612 }
ysr@777 613 if (ParallelGCThreads == 0) {
ysr@777 614 // if we are not running with any parallel GC threads we will not
ysr@777 615 // spawn any marking threads either
jmasa@3294 616 _parallel_marking_threads = 0;
jmasa@3294 617 _max_parallel_marking_threads = 0;
jmasa@3294 618 _sleep_factor = 0.0;
jmasa@3294 619 _marking_task_overhead = 1.0;
ysr@777 620 } else {
johnc@4547 621 if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) {
johnc@4547 622 // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent
ysr@777 623 // if both are set
ysr@777 624 _sleep_factor = 0.0;
ysr@777 625 _marking_task_overhead = 1.0;
johnc@1186 626 } else if (G1MarkingOverheadPercent > 0) {
johnc@4547 627 // We will calculate the number of parallel marking threads based
johnc@4547 628 // on a target overhead with respect to the soft real-time goal
johnc@1186 629 double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
ysr@777 630 double overall_cm_overhead =
johnc@1186 631 (double) MaxGCPauseMillis * marking_overhead /
johnc@1186 632 (double) GCPauseIntervalMillis;
ysr@777 633 double cpu_ratio = 1.0 / (double) os::processor_count();
ysr@777 634 double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
ysr@777 635 double marking_task_overhead =
ysr@777 636 overall_cm_overhead / marking_thread_num *
ysr@777 637 (double) os::processor_count();
ysr@777 638 double sleep_factor =
ysr@777 639 (1.0 - marking_task_overhead) / marking_task_overhead;
ysr@777 640
johnc@4547 641 FLAG_SET_ERGO(uintx, ConcGCThreads, (uint) marking_thread_num);
ysr@777 642 _sleep_factor = sleep_factor;
ysr@777 643 _marking_task_overhead = marking_task_overhead;
ysr@777 644 } else {
johnc@4547 645 // Calculate the number of parallel marking threads by scaling
johnc@4547 646 // the number of parallel GC threads.
johnc@4547 647 uint marking_thread_num = scale_parallel_threads((uint) ParallelGCThreads);
johnc@4547 648 FLAG_SET_ERGO(uintx, ConcGCThreads, marking_thread_num);
ysr@777 649 _sleep_factor = 0.0;
ysr@777 650 _marking_task_overhead = 1.0;
ysr@777 651 }
ysr@777 652
johnc@4547 653 assert(ConcGCThreads > 0, "Should have been set");
johnc@4547 654 _parallel_marking_threads = (uint) ConcGCThreads;
johnc@4547 655 _max_parallel_marking_threads = _parallel_marking_threads;
johnc@4547 656
tonyp@2973 657 if (parallel_marking_threads() > 1) {
ysr@777 658 _cleanup_task_overhead = 1.0;
tonyp@2973 659 } else {
ysr@777 660 _cleanup_task_overhead = marking_task_overhead();
tonyp@2973 661 }
ysr@777 662 _cleanup_sleep_factor =
ysr@777 663 (1.0 - cleanup_task_overhead()) / cleanup_task_overhead();
ysr@777 664
ysr@777 665 #if 0
ysr@777 666 gclog_or_tty->print_cr("Marking Threads %d", parallel_marking_threads());
ysr@777 667 gclog_or_tty->print_cr("CM Marking Task Overhead %1.4lf", marking_task_overhead());
ysr@777 668 gclog_or_tty->print_cr("CM Sleep Factor %1.4lf", sleep_factor());
ysr@777 669 gclog_or_tty->print_cr("CL Marking Task Overhead %1.4lf", cleanup_task_overhead());
ysr@777 670 gclog_or_tty->print_cr("CL Sleep Factor %1.4lf", cleanup_sleep_factor());
ysr@777 671 #endif
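// Worked example of the G1MarkingOverheadPercent sizing above, with
// hypothetical flag values: MaxGCPauseMillis = 200, GCPauseIntervalMillis =
// 1000, G1MarkingOverheadPercent = 10 and 8 processors give
// marking_overhead = 0.1, overall_cm_overhead = 200 * 0.1 / 1000 = 0.02 and
// cpu_ratio = 1 / 8 = 0.125, so marking_thread_num = ceil(0.02 / 0.125) = 1.
// That single thread gets marking_task_overhead = 0.02 / 1 * 8 = 0.16 and
// sleep_factor = (1 - 0.16) / 0.16 = 5.25, i.e. it sleeps 5.25 ms for every
// millisecond of marking work.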
ysr@777 672
tonyp@1458 673 guarantee(parallel_marking_threads() > 0, "peace of mind");
jmasa@2188 674 _parallel_workers = new FlexibleWorkGang("G1 Parallel Marking Threads",
jmasa@3357 675 _max_parallel_marking_threads, false, true);
jmasa@2188 676 if (_parallel_workers == NULL) {
ysr@777 677 vm_exit_during_initialization("Failed necessary allocation.");
jmasa@2188 678 } else {
jmasa@2188 679 _parallel_workers->initialize_workers();
jmasa@2188 680 }
ysr@777 681 }
ysr@777 682
johnc@4333 683 if (FLAG_IS_DEFAULT(MarkStackSize)) {
johnc@4333 684 uintx mark_stack_size =
johnc@4333 685 MIN2(MarkStackSizeMax,
johnc@4333 686 MAX2(MarkStackSize, (uintx) (parallel_marking_threads() * TASKQUEUE_SIZE)));
johnc@4333 687 // Verify that the calculated value for MarkStackSize is in range.
johnc@4333 688 // It would be nice to use the private utility routine from Arguments.
johnc@4333 689 if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
johnc@4333 690 warning("Invalid value calculated for MarkStackSize (" UINTX_FORMAT "): "
johnc@4333 691 "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
drchase@6680 692 mark_stack_size, (uintx) 1, MarkStackSizeMax);
johnc@4333 693 return;
johnc@4333 694 }
johnc@4333 695 FLAG_SET_ERGO(uintx, MarkStackSize, mark_stack_size);
johnc@4333 696 } else {
johnc@4333 697 // Verify MarkStackSize is in range.
johnc@4333 698 if (FLAG_IS_CMDLINE(MarkStackSize)) {
johnc@4333 699 if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
johnc@4333 700 if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
johnc@4333 701 warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT "): "
johnc@4333 702 "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
drchase@6680 703 MarkStackSize, (uintx) 1, MarkStackSizeMax);
johnc@4333 704 return;
johnc@4333 705 }
johnc@4333 706 } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
johnc@4333 707 if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
johnc@4333 708 warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT ")"
johnc@4333 709 " or for MarkStackSizeMax (" UINTX_FORMAT ")",
johnc@4333 710 MarkStackSize, MarkStackSizeMax);
johnc@4333 711 return;
johnc@4333 712 }
johnc@4333 713 }
johnc@4333 714 }
johnc@4333 715 }
johnc@4333 716
johnc@4333 717 if (!_markStack.allocate(MarkStackSize)) {
johnc@4333 718 warning("Failed to allocate CM marking stack");
johnc@4333 719 return;
johnc@4333 720 }
johnc@4333 721
johnc@4333 722 _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_worker_id, mtGC);
johnc@4333 723 _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);
johnc@4333 724
johnc@4333 725 _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap, _max_worker_id, mtGC);
johnc@4333 726 _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_worker_id, mtGC);
johnc@4333 727
johnc@4333 728 BitMap::idx_t card_bm_size = _card_bm.size();
johnc@4333 729
johnc@4333 730 // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
johnc@4333 731 _active_tasks = _max_worker_id;
johnc@4333 732
johnc@4333 733 size_t max_regions = (size_t) _g1h->max_regions();
johnc@4333 734 for (uint i = 0; i < _max_worker_id; ++i) {
johnc@4333 735 CMTaskQueue* task_queue = new CMTaskQueue();
johnc@4333 736 task_queue->initialize();
johnc@4333 737 _task_queues->register_queue(i, task_queue);
johnc@4333 738
johnc@4333 739 _count_card_bitmaps[i] = BitMap(card_bm_size, false);
johnc@4333 740 _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC);
johnc@4333 741
johnc@4333 742 _tasks[i] = new CMTask(i, this,
johnc@4333 743 _count_marked_bytes[i],
johnc@4333 744 &_count_card_bitmaps[i],
johnc@4333 745 task_queue, _task_queues);
johnc@4333 746
johnc@4333 747 _accum_task_vtime[i] = 0.0;
johnc@4333 748 }
johnc@4333 749
johnc@4333 750 // Calculate the card number for the bottom of the heap. Used
johnc@4333 751 // in biasing indexes into the accounting card bitmaps.
johnc@4333 752 _heap_bottom_card_num =
johnc@4333 753 intptr_t(uintptr_t(_g1h->reserved_region().start()) >>
johnc@4333 754 CardTableModRefBS::card_shift);
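// Worked example of the biasing above, with a hypothetical heap placement:
// cards are 512 bytes (card_shift = 9), so a heap reserved at 0x80000000
// gives _heap_bottom_card_num = 0x80000000 >> 9 = 0x400000. Indexes into the
// per-worker counting card bitmaps are biased by subtracting this value, so
// bit 0 of each bitmap corresponds to the first card of the heap rather than
// to address zero.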
johnc@4333 755
johnc@4333 756 // Clear all the liveness counting data
johnc@4333 757 clear_all_count_data();
johnc@4333 758
ysr@777 759 // so that the call below can read a sensible value
tschatzl@7051 760 _heap_start = g1h->reserved_region().start();
ysr@777 761 set_non_marking_state();
johnc@4333 762 _completed_initialization = true;
ysr@777 763 }
ysr@777 764
ysr@777 765 void ConcurrentMark::reset() {
ysr@777 766 // Starting values for these two. This should be called in a STW
tschatzl@7051 767 // phase.
tschatzl@7051 768 MemRegion reserved = _g1h->g1_reserved();
tschatzl@7051 769 _heap_start = reserved.start();
tschatzl@7051 770 _heap_end = reserved.end();
ysr@777 771
tonyp@1458 772 // Separated the asserts so that we know which one fires.
tonyp@1458 773 assert(_heap_start != NULL, "heap bounds should look ok");
tonyp@1458 774 assert(_heap_end != NULL, "heap bounds should look ok");
tonyp@1458 775 assert(_heap_start < _heap_end, "heap bounds should look ok");
ysr@777 776
johnc@4386 777 // Reset all the marking data structures and any necessary flags
johnc@4386 778 reset_marking_state();
ysr@777 779
tonyp@2973 780 if (verbose_low()) {
ysr@777 781 gclog_or_tty->print_cr("[global] resetting");
tonyp@2973 782 }
ysr@777 783
ysr@777 784 // We do reset all of them, since different phases will use
ysr@777 785 // different number of active threads. So, it's easiest to have all
ysr@777 786 // of them ready.
johnc@4173 787 for (uint i = 0; i < _max_worker_id; ++i) {
ysr@777 788 _tasks[i]->reset(_nextMarkBitMap);
johnc@2190 789 }
ysr@777 790
ysr@777 791 // we need this to make sure that the flag is on during the evac
ysr@777 792 // pause with the initial mark piggy-backed on it
ysr@777 793 set_concurrent_marking_in_progress();
ysr@777 794 }
ysr@777 795
johnc@4386 796
johnc@4386 797 void ConcurrentMark::reset_marking_state(bool clear_overflow) {
johnc@4386 798 _markStack.set_should_expand();
johnc@4386 799 _markStack.setEmpty(); // Also clears the _markStack overflow flag
johnc@4386 800 if (clear_overflow) {
johnc@4386 801 clear_has_overflown();
johnc@4386 802 } else {
johnc@4386 803 assert(has_overflown(), "pre-condition");
johnc@4386 804 }
johnc@4386 805 _finger = _heap_start;
johnc@4386 806
johnc@4386 807 for (uint i = 0; i < _max_worker_id; ++i) {
johnc@4386 808 CMTaskQueue* queue = _task_queues->queue(i);
johnc@4386 809 queue->set_empty();
johnc@4386 810 }
johnc@4386 811 }
johnc@4386 812
johnc@4788 813 void ConcurrentMark::set_concurrency(uint active_tasks) {
johnc@4173 814 assert(active_tasks <= _max_worker_id, "we should not have more");
ysr@777 815
ysr@777 816 _active_tasks = active_tasks;
ysr@777 817 // Need to update the three data structures below according to the
ysr@777 818 // number of active threads for this phase.
ysr@777 819 _terminator = ParallelTaskTerminator((int) active_tasks, _task_queues);
ysr@777 820 _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
ysr@777 821 _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
johnc@4788 822 }
johnc@4788 823
johnc@4788 824 void ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
johnc@4788 825 set_concurrency(active_tasks);
ysr@777 826
ysr@777 827 _concurrent = concurrent;
ysr@777 828 // We propagate this to all tasks, not just the active ones.
johnc@4173 829 for (uint i = 0; i < _max_worker_id; ++i)
ysr@777 830 _tasks[i]->set_concurrent(concurrent);
ysr@777 831
ysr@777 832 if (concurrent) {
ysr@777 833 set_concurrent_marking_in_progress();
ysr@777 834 } else {
ysr@777 835 // We currently assume that the concurrent flag has been set to
ysr@777 836 // false before we start remark. At this point we should also be
ysr@777 837 // in a STW phase.
tonyp@1458 838 assert(!concurrent_marking_in_progress(), "invariant");
pliden@6693 839 assert(out_of_regions(),
johnc@4788 840 err_msg("only way to get here: _finger: "PTR_FORMAT", _heap_end: "PTR_FORMAT,
drchase@6680 841 p2i(_finger), p2i(_heap_end)));
ysr@777 842 }
ysr@777 843 }
ysr@777 844
ysr@777 845 void ConcurrentMark::set_non_marking_state() {
ysr@777 846 // We set the global marking state to some default values when we're
ysr@777 847 // not doing marking.
johnc@4386 848 reset_marking_state();
ysr@777 849 _active_tasks = 0;
ysr@777 850 clear_concurrent_marking_in_progress();
ysr@777 851 }
ysr@777 852
ysr@777 853 ConcurrentMark::~ConcurrentMark() {
stefank@3364 854 // The ConcurrentMark instance is never freed.
stefank@3364 855 ShouldNotReachHere();
ysr@777 856 }
ysr@777 857
ysr@777 858 void ConcurrentMark::clearNextBitmap() {
tonyp@1794 859 G1CollectedHeap* g1h = G1CollectedHeap::heap();
tonyp@1794 860
tonyp@1794 861 // Make sure that the concurrent mark thread looks to still be in
tonyp@1794 862 // the current cycle.
tonyp@1794 863 guarantee(cmThread()->during_cycle(), "invariant");
tonyp@1794 864
tonyp@1794 865 // We are finishing up the current cycle by clearing the next
tonyp@1794 866 // marking bitmap and getting it ready for the next cycle. During
tonyp@1794 867 // this time no other cycle can start. So, let's make sure that this
tonyp@1794 868 // is the case.
tonyp@1794 869 guarantee(!g1h->mark_in_progress(), "invariant");
tonyp@1794 870
tschatzl@7051 871 ClearBitmapHRClosure cl(this, _nextMarkBitMap, true /* may_yield */);
tschatzl@7051 872 g1h->heap_region_iterate(&cl);
tschatzl@7051 873
tschatzl@7051 874 // Clear the liveness counting data. If the marking has been aborted, the abort()
tschatzl@7051 875 // call already did that.
tschatzl@7051 876 if (cl.complete()) {
tschatzl@7051 877 clear_all_count_data();
tonyp@1794 878 }
tonyp@1794 879
tonyp@1794 880 // Repeat the asserts from above.
tonyp@1794 881 guarantee(cmThread()->during_cycle(), "invariant");
tonyp@1794 882 guarantee(!g1h->mark_in_progress(), "invariant");
ysr@777 883 }
ysr@777 884
tschatzl@7051 885 class CheckBitmapClearHRClosure : public HeapRegionClosure {
tschatzl@7051 886 CMBitMap* _bitmap;
tschatzl@7051 887 bool _error;
tschatzl@7051 888 public:
tschatzl@7051 889 CheckBitmapClearHRClosure(CMBitMap* bitmap) : _bitmap(bitmap) {
tschatzl@7051 890 }
tschatzl@7051 891
tschatzl@7051 892 virtual bool doHeapRegion(HeapRegion* r) {
tschatzl@7100 893 // This closure can be called concurrently with the mutator, so we must make sure
tschatzl@7100 894 // that the result of the getNextMarkedWordAddress() call is compared to the
tschatzl@7100 895 // value passed to it as limit to detect any found bits.
tschatzl@7100 896 // We can use the region's orig_end() for the limit and the comparison value
tschatzl@7100 897 // as it always contains the "real" end of the region that never changes and
tschatzl@7100 898 // has no side effects.
tschatzl@7100 899 // Due to the latter, there can also be no problem with the compiler generating
tschatzl@7100 900 // reloads of the orig_end() call.
tschatzl@7100 901 HeapWord* end = r->orig_end();
tschatzl@7100 902 return _bitmap->getNextMarkedWordAddress(r->bottom(), end) != end;
tschatzl@7051 903 }
tschatzl@7051 904 };
tschatzl@7051 905
tschatzl@7016 906 bool ConcurrentMark::nextMarkBitmapIsClear() {
tschatzl@7051 907 CheckBitmapClearHRClosure cl(_nextMarkBitMap);
tschatzl@7051 908 _g1h->heap_region_iterate(&cl);
tschatzl@7051 909 return cl.complete();
tschatzl@7016 910 }
tschatzl@7016 911
ysr@777 912 class NoteStartOfMarkHRClosure: public HeapRegionClosure {
ysr@777 913 public:
ysr@777 914 bool doHeapRegion(HeapRegion* r) {
ysr@777 915 if (!r->continuesHumongous()) {
tonyp@3416 916 r->note_start_of_marking();
ysr@777 917 }
ysr@777 918 return false;
ysr@777 919 }
ysr@777 920 };
ysr@777 921
ysr@777 922 void ConcurrentMark::checkpointRootsInitialPre() {
ysr@777 923 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@777 924 G1CollectorPolicy* g1p = g1h->g1_policy();
ysr@777 925
ysr@777 926 _has_aborted = false;
ysr@777 927
jcoomes@1902 928 #ifndef PRODUCT
tonyp@1479 929 if (G1PrintReachableAtInitialMark) {
tonyp@1823 930 print_reachable("at-cycle-start",
johnc@2969 931 VerifyOption_G1UsePrevMarking, true /* all */);
tonyp@1479 932 }
jcoomes@1902 933 #endif
ysr@777 934
ysr@777 935 // Initialize marking structures. This has to be done in an STW phase.
ysr@777 936 reset();
tonyp@3416 937
tonyp@3416 938 // For each region note start of marking.
tonyp@3416 939 NoteStartOfMarkHRClosure startcl;
tonyp@3416 940 g1h->heap_region_iterate(&startcl);
ysr@777 941 }
ysr@777 942
ysr@777 943
ysr@777 944 void ConcurrentMark::checkpointRootsInitialPost() {
ysr@777 945 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@777 946
tonyp@2848 947 // If we force an overflow during remark, the remark operation will
tonyp@2848 948 // actually abort and we'll restart concurrent marking. If we always
tonyp@2848 949 // force an overflow during remark we'll never actually complete the
tonyp@2848 950 // marking phase. So, we initialize this here, at the start of the
tonyp@2848 951 // cycle, so that the remaining overflow number will decrease at
tonyp@2848 952 // every remark and we'll eventually not need to cause one.
tonyp@2848 953 force_overflow_stw()->init();
tonyp@2848 954
johnc@3175 955 // Start Concurrent Marking weak-reference discovery.
johnc@3175 956 ReferenceProcessor* rp = g1h->ref_processor_cm();
johnc@3175 957 // enable ("weak") refs discovery
johnc@3175 958 rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
ysr@892 959 rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
ysr@777 960
ysr@777 961 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
tonyp@1752 962 // This is the start of the marking cycle. We expect all
tonyp@1752 963 // threads to have SATB queues with active set to false.
tonyp@1752 964 satb_mq_set.set_active_all_threads(true, /* new active value */
tonyp@1752 965 false /* expected_active */);
ysr@777 966
tonyp@3464 967 _root_regions.prepare_for_scan();
tonyp@3464 968
ysr@777 969 // update_g1_committed() will be called at the end of an evac pause
ysr@777 970 // when marking is on. So, it's also called at the end of the
ysr@777 971 // initial-mark pause to update the heap end, if the heap expands
ysr@777 972 // during it. No need to call it here.
ysr@777 973 }
ysr@777 974
ysr@777 975 /*
tonyp@2848 976 * Notice that in the next two methods, we actually leave the STS
tonyp@2848 977 * during the barrier sync and join it immediately afterwards. If we
tonyp@2848 978 * do not do this, the following deadlock can occur: one thread could
tonyp@2848 979 * be in the barrier sync code, waiting for the other thread to also
tonyp@2848 980 * sync up, whereas another one could be trying to yield, while also
tonyp@2848 981 * waiting for the other threads to sync up too.
tonyp@2848 982 *
tonyp@2848 983 * Note, however, that this code is also used during remark and in
tonyp@2848 984 * this case we should not attempt to leave / enter the STS, otherwise
tonyp@2848 985 * we'll either hit an assert (debug / fastdebug) or deadlock
tonyp@2848 986 * (product). So we should only leave / enter the STS if we are
tonyp@2848 987 * operating concurrently.
tonyp@2848 988 *
tonyp@2848 989 * Because the thread that does the sync barrier has left the STS, it
tonyp@2848 990 * is possible for it to be suspended for a Full GC or for an
tonyp@2848 991 * evacuation pause to occur. This is actually safe, since entering the
tonyp@2848 992 * sync barrier is one of the last things do_marking_step() does, and it
tonyp@2848 993 * doesn't manipulate any data structures afterwards.
tonyp@2848 994 */
ysr@777 995
johnc@4173 996 void ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
tonyp@2973 997 if (verbose_low()) {
johnc@4173 998 gclog_or_tty->print_cr("[%u] entering first barrier", worker_id);
tonyp@2973 999 }
ysr@777 1000
tonyp@2848 1001 if (concurrent()) {
pliden@6906 1002 SuspendibleThreadSet::leave();
tonyp@2848 1003 }
pliden@6692 1004
pliden@6692 1005 bool barrier_aborted = !_first_overflow_barrier_sync.enter();
pliden@6692 1006
tonyp@2848 1007 if (concurrent()) {
pliden@6906 1008 SuspendibleThreadSet::join();
tonyp@2848 1009 }
ysr@777 1010 // at this point everyone should have synced up and not be doing any
ysr@777 1011 // more work
ysr@777 1012
tonyp@2973 1013 if (verbose_low()) {
pliden@6692 1014 if (barrier_aborted) {
pliden@6692 1015 gclog_or_tty->print_cr("[%u] aborted first barrier", worker_id);
pliden@6692 1016 } else {
pliden@6692 1017 gclog_or_tty->print_cr("[%u] leaving first barrier", worker_id);
pliden@6692 1018 }
pliden@6692 1019 }
pliden@6692 1020
pliden@6692 1021 if (barrier_aborted) {
pliden@6692 1022 // If the barrier aborted we ignore the overflow condition and
pliden@6692 1023 // just abort the whole marking phase as quickly as possible.
pliden@6692 1024 return;
tonyp@2973 1025 }
ysr@777 1026
johnc@4788 1027 // If we're executing the concurrent phase of marking, reset the marking
johnc@4788 1028 // state; otherwise the marking state is reset after reference processing,
johnc@4788 1029 // during the remark pause.
johnc@4788 1030 // If we reset here as a result of an overflow during the remark we will
johnc@4788 1031 // see assertion failures from any subsequent set_concurrency_and_phase()
johnc@4788 1032 // calls.
johnc@4788 1033 if (concurrent()) {
johnc@4788 1034 // let the task associated with worker 0 do this
johnc@4788 1035 if (worker_id == 0) {
johnc@4788 1036 // task 0 is responsible for clearing the global data structures
johnc@4788 1037 // We should be here because of an overflow. During STW we should
johnc@4788 1038 // not clear the overflow flag since we rely on it being true when
johnc@4788 1039 // we exit this method to abort the pause and restart concurrent
johnc@4788 1040 // marking.
johnc@4788 1041 reset_marking_state(true /* clear_overflow */);
johnc@4788 1042 force_overflow()->update();
johnc@4788 1043
johnc@4788 1044 if (G1Log::fine()) {
brutisso@6904 1045 gclog_or_tty->gclog_stamp(concurrent_gc_id());
johnc@4788 1046 gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
johnc@4788 1047 }
ysr@777 1048 }
ysr@777 1049 }
ysr@777 1050
ysr@777 1051 // after this, each task should reset its own data structures and
ysr@777 1052 // then go into the second barrier
ysr@777 1053 }
ysr@777 1054
johnc@4173 1055 void ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
tonyp@2973 1056 if (verbose_low()) {
johnc@4173 1057 gclog_or_tty->print_cr("[%u] entering second barrier", worker_id);
tonyp@2973 1058 }
ysr@777 1059
tonyp@2848 1060 if (concurrent()) {
pliden@6906 1061 SuspendibleThreadSet::leave();
tonyp@2848 1062 }
pliden@6692 1063
pliden@6692 1064 bool barrier_aborted = !_second_overflow_barrier_sync.enter();
pliden@6692 1065
tonyp@2848 1066 if (concurrent()) {
pliden@6906 1067 SuspendibleThreadSet::join();
tonyp@2848 1068 }
johnc@4788 1069 // at this point everything should be re-initialized and ready to go
ysr@777 1070
tonyp@2973 1071 if (verbose_low()) {
pliden@6692 1072 if (barrier_aborted) {
pliden@6692 1073 gclog_or_tty->print_cr("[%u] aborted second barrier", worker_id);
pliden@6692 1074 } else {
pliden@6692 1075 gclog_or_tty->print_cr("[%u] leaving second barrier", worker_id);
pliden@6692 1076 }
tonyp@2973 1077 }
ysr@777 1078 }
ysr@777 1079
tonyp@2848 1080 #ifndef PRODUCT
tonyp@2848 1081 void ForceOverflowSettings::init() {
tonyp@2848 1082 _num_remaining = G1ConcMarkForceOverflow;
tonyp@2848 1083 _force = false;
tonyp@2848 1084 update();
tonyp@2848 1085 }
tonyp@2848 1086
tonyp@2848 1087 void ForceOverflowSettings::update() {
tonyp@2848 1088 if (_num_remaining > 0) {
tonyp@2848 1089 _num_remaining -= 1;
tonyp@2848 1090 _force = true;
tonyp@2848 1091 } else {
tonyp@2848 1092 _force = false;
tonyp@2848 1093 }
tonyp@2848 1094 }
tonyp@2848 1095
tonyp@2848 1096 bool ForceOverflowSettings::should_force() {
tonyp@2848 1097 if (_force) {
tonyp@2848 1098 _force = false;
tonyp@2848 1099 return true;
tonyp@2848 1100 } else {
tonyp@2848 1101 return false;
tonyp@2848 1102 }
tonyp@2848 1103 }
tonyp@2848 1104 #endif // !PRODUCT
tonyp@2848 1105
ysr@777 1106 class CMConcurrentMarkingTask: public AbstractGangTask {
ysr@777 1107 private:
ysr@777 1108 ConcurrentMark* _cm;
ysr@777 1109 ConcurrentMarkThread* _cmt;
ysr@777 1110
ysr@777 1111 public:
jmasa@3357 1112 void work(uint worker_id) {
tonyp@1458 1113 assert(Thread::current()->is_ConcurrentGC_thread(),
tonyp@1458 1114 "this should only be done by a conc GC thread");
johnc@2316 1115 ResourceMark rm;
ysr@777 1116
ysr@777 1117 double start_vtime = os::elapsedVTime();
ysr@777 1118
pliden@6906 1119 SuspendibleThreadSet::join();
ysr@777 1120
jmasa@3357 1121 assert(worker_id < _cm->active_tasks(), "invariant");
jmasa@3357 1122 CMTask* the_task = _cm->task(worker_id);
ysr@777 1123 the_task->record_start_time();
ysr@777 1124 if (!_cm->has_aborted()) {
ysr@777 1125 do {
ysr@777 1126 double start_vtime_sec = os::elapsedVTime();
johnc@2494 1127 double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
johnc@2494 1128
johnc@2494 1129 the_task->do_marking_step(mark_step_duration_ms,
johnc@4787 1130 true /* do_termination */,
johnc@4787 1131 false /* is_serial*/);
johnc@2494 1132
ysr@777 1133 double end_vtime_sec = os::elapsedVTime();
ysr@777 1134 double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
ysr@777 1135 _cm->clear_has_overflown();
ysr@777 1136
tschatzl@7094 1137 _cm->do_yield_check(worker_id);
ysr@777 1138
ysr@777 1139 jlong sleep_time_ms;
ysr@777 1140 if (!_cm->has_aborted() && the_task->has_aborted()) {
ysr@777 1141 sleep_time_ms =
ysr@777 1142 (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
pliden@6906 1143 SuspendibleThreadSet::leave();
ysr@777 1144 os::sleep(Thread::current(), sleep_time_ms, false);
pliden@6906 1145 SuspendibleThreadSet::join();
ysr@777 1146 }
ysr@777 1147 } while (!_cm->has_aborted() && the_task->has_aborted());
ysr@777 1148 }
ysr@777 1149 the_task->record_end_time();
tonyp@1458 1150 guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");
ysr@777 1151
pliden@6906 1152 SuspendibleThreadSet::leave();
ysr@777 1153
ysr@777 1154 double end_vtime = os::elapsedVTime();
jmasa@3357 1155 _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
ysr@777 1156 }
ysr@777 1157
ysr@777 1158 CMConcurrentMarkingTask(ConcurrentMark* cm,
ysr@777 1159 ConcurrentMarkThread* cmt) :
ysr@777 1160 AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }
ysr@777 1161
ysr@777 1162 ~CMConcurrentMarkingTask() { }
ysr@777 1163 };
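// Worked example of the throttling in work() above, continuing the
// hypothetical sleep_factor of 5.25 from the constructor sizing: if one
// do_marking_step() call consumes 10 ms of vtime, the task then sleeps
// 10 * 5.25 = 52.5 ms, giving a duty cycle of 10 / 62.5 = 16%, which matches
// the 0.16 marking_task_overhead target.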
ysr@777 1164
jmasa@3294 1165 // Calculates the number of active workers for a concurrent
jmasa@3294 1166 // phase.
jmasa@3357 1167 uint ConcurrentMark::calc_parallel_marking_threads() {
johnc@3338 1168 if (G1CollectedHeap::use_parallel_gc_threads()) {
jmasa@3357 1169 uint n_conc_workers = 0;
jmasa@3294 1170 if (!UseDynamicNumberOfGCThreads ||
jmasa@3294 1171 (!FLAG_IS_DEFAULT(ConcGCThreads) &&
jmasa@3294 1172 !ForceDynamicNumberOfGCThreads)) {
jmasa@3294 1173 n_conc_workers = max_parallel_marking_threads();
jmasa@3294 1174 } else {
jmasa@3294 1175 n_conc_workers =
jmasa@3294 1176 AdaptiveSizePolicy::calc_default_active_workers(
jmasa@3294 1177 max_parallel_marking_threads(),
jmasa@3294 1178 1, /* Minimum workers */
jmasa@3294 1179 parallel_marking_threads(),
jmasa@3294 1180 Threads::number_of_non_daemon_threads());
jmasa@3294 1181 // Don't scale down "n_conc_workers" by scale_parallel_threads() because
jmasa@3294 1182 // that scaling has already gone into "_max_parallel_marking_threads".
jmasa@3294 1183 }
johnc@3338 1184 assert(n_conc_workers > 0, "Always need at least 1");
johnc@3338 1185 return n_conc_workers;
jmasa@3294 1186 }
johnc@3338 1187 // If we are not running with any parallel GC threads we will not
johnc@3338 1188 // have spawned any marking threads either. Hence the number of
johnc@3338 1189 // concurrent workers should be 0.
johnc@3338 1190 return 0;
jmasa@3294 1191 }
jmasa@3294 1192
tonyp@3464 1193 void ConcurrentMark::scanRootRegion(HeapRegion* hr, uint worker_id) {
tonyp@3464 1194 // Currently, only survivors can be root regions.
tonyp@3464 1195 assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
tonyp@3464 1196 G1RootRegionScanClosure cl(_g1h, this, worker_id);
tonyp@3464 1197
tonyp@3464 1198 const uintx interval = PrefetchScanIntervalInBytes;
tonyp@3464 1199 HeapWord* curr = hr->bottom();
tonyp@3464 1200 const HeapWord* end = hr->top();
tonyp@3464 1201 while (curr < end) {
tonyp@3464 1202 Prefetch::read(curr, interval);
tonyp@3464 1203 oop obj = oop(curr);
tonyp@3464 1204 int size = obj->oop_iterate(&cl);
tonyp@3464 1205 assert(size == obj->size(), "sanity");
tonyp@3464 1206 curr += size;
tonyp@3464 1207 }
tonyp@3464 1208 }
tonyp@3464 1209
tonyp@3464 1210 class CMRootRegionScanTask : public AbstractGangTask {
tonyp@3464 1211 private:
tonyp@3464 1212 ConcurrentMark* _cm;
tonyp@3464 1213
tonyp@3464 1214 public:
tonyp@3464 1215 CMRootRegionScanTask(ConcurrentMark* cm) :
tonyp@3464 1216 AbstractGangTask("Root Region Scan"), _cm(cm) { }
tonyp@3464 1217
tonyp@3464 1218 void work(uint worker_id) {
tonyp@3464 1219 assert(Thread::current()->is_ConcurrentGC_thread(),
tonyp@3464 1220 "this should only be done by a conc GC thread");
tonyp@3464 1221
tonyp@3464 1222 CMRootRegions* root_regions = _cm->root_regions();
tonyp@3464 1223 HeapRegion* hr = root_regions->claim_next();
tonyp@3464 1224 while (hr != NULL) {
tonyp@3464 1225 _cm->scanRootRegion(hr, worker_id);
tonyp@3464 1226 hr = root_regions->claim_next();
tonyp@3464 1227 }
tonyp@3464 1228 }
tonyp@3464 1229 };
tonyp@3464 1230
tonyp@3464 1231 void ConcurrentMark::scanRootRegions() {
stefank@6992 1232 // Start of concurrent marking: clear the CLD claimed marks so class loader data can be claimed afresh during this cycle.
stefank@6992 1233 ClassLoaderDataGraph::clear_claimed_marks();
stefank@6992 1234
tonyp@3464 1235 // scan_in_progress() will have been set to true only if there was
tonyp@3464 1236 // at least one root region to scan. So, if it's false, we
tonyp@3464 1237 // should not attempt to do any further work.
tonyp@3464 1238 if (root_regions()->scan_in_progress()) {
tonyp@3464 1239 _parallel_marking_threads = calc_parallel_marking_threads();
tonyp@3464 1240 assert(parallel_marking_threads() <= max_parallel_marking_threads(),
tonyp@3464 1241 "Maximum number of marking threads exceeded");
tonyp@3464 1242 uint active_workers = MAX2(1U, parallel_marking_threads());
tonyp@3464 1243
tonyp@3464 1244 CMRootRegionScanTask task(this);
johnc@4549 1245 if (use_parallel_marking_threads()) {
tonyp@3464 1246 _parallel_workers->set_active_workers((int) active_workers);
tonyp@3464 1247 _parallel_workers->run_task(&task);
tonyp@3464 1248 } else {
tonyp@3464 1249 task.work(0);
tonyp@3464 1250 }
tonyp@3464 1251
tonyp@3464 1252 // It's possible that has_aborted() is true here without actually
tonyp@3464 1253 // aborting the survivor scan earlier. This is OK as it's
tonyp@3464 1254 // mainly used for sanity checking.
tonyp@3464 1255 root_regions()->scan_finished();
tonyp@3464 1256 }
tonyp@3464 1257 }
tonyp@3464 1258
ysr@777 1259 void ConcurrentMark::markFromRoots() {
ysr@777 1260 // we might be tempted to assert that:
ysr@777 1261 // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
ysr@777 1262 // "inconsistent argument?");
ysr@777 1263 // However that wouldn't be right, because it's possible that
ysr@777 1264 // a safepoint is indeed in progress as a younger generation
ysr@777 1265 // stop-the-world GC happens even as we mark in this generation.
ysr@777 1266
ysr@777 1267 _restart_for_overflow = false;
tonyp@2848 1268 force_overflow_conc()->init();
jmasa@3294 1269
jmasa@3294 1270 // _g1h has _n_par_threads
jmasa@3294 1271 _parallel_marking_threads = calc_parallel_marking_threads();
jmasa@3294 1272 assert(parallel_marking_threads() <= max_parallel_marking_threads(),
jmasa@3294 1273 "Maximum number of marking threads exceeded");
johnc@3338 1274
jmasa@3357 1275 uint active_workers = MAX2(1U, parallel_marking_threads());
johnc@3338 1276
johnc@4788 1277 // Parallel task terminator is set in "set_concurrency_and_phase()"
johnc@4788 1278 set_concurrency_and_phase(active_workers, true /* concurrent */);
ysr@777 1279
ysr@777 1280 CMConcurrentMarkingTask markingTask(this, cmThread());
johnc@4549 1281 if (use_parallel_marking_threads()) {
johnc@3338 1282 _parallel_workers->set_active_workers((int)active_workers);
stefank@6992 1283 // Don't set _n_par_threads because it affects MT in process_roots()
johnc@3338 1284 // and the decisions on that MT processing is made elsewhere.
johnc@3338 1285 assert(_parallel_workers->active_workers() > 0, "Should have been set");
ysr@777 1286 _parallel_workers->run_task(&markingTask);
tonyp@2973 1287 } else {
ysr@777 1288 markingTask.work(0);
tonyp@2973 1289 }
ysr@777 1290 print_stats();
ysr@777 1291 }
ysr@777 1292
ysr@777 1293 void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
ysr@777 1294 // world is stopped at this checkpoint
ysr@777 1295 assert(SafepointSynchronize::is_at_safepoint(),
ysr@777 1296 "world should be stopped");
johnc@3175 1297
ysr@777 1298 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@777 1299
ysr@777 1300 // If a full collection has happened, we shouldn't do this.
ysr@777 1301 if (has_aborted()) {
ysr@777 1302 g1h->set_marking_complete(); // So bitmap clearing isn't confused
ysr@777 1303 return;
ysr@777 1304 }
ysr@777 1305
kamg@2445 1306 SvcGCMarker sgcm(SvcGCMarker::OTHER);
kamg@2445 1307
ysr@1280 1308 if (VerifyDuringGC) {
ysr@1280 1309 HandleMark hm; // handle scope
ysr@1280 1310 Universe::heap()->prepare_for_verify();
stefank@5018 1311 Universe::verify(VerifyOption_G1UsePrevMarking,
stefank@5018 1312 " VerifyDuringGC:(before)");
ysr@1280 1313 }
brutisso@7005 1314 g1h->check_bitmaps("Remark Start");
ysr@1280 1315
ysr@777 1316 G1CollectorPolicy* g1p = g1h->g1_policy();
ysr@777 1317 g1p->record_concurrent_mark_remark_start();
ysr@777 1318
ysr@777 1319 double start = os::elapsedTime();
ysr@777 1320
ysr@777 1321 checkpointRootsFinalWork();
ysr@777 1322
ysr@777 1323 double mark_work_end = os::elapsedTime();
ysr@777 1324
ysr@777 1325 weakRefsWork(clear_all_soft_refs);
ysr@777 1326
ysr@777 1327 if (has_overflown()) {
ysr@777 1328 // Oops. We overflowed. Restart concurrent marking.
ysr@777 1329 _restart_for_overflow = true;
johnc@4789 1330 if (G1TraceMarkStackOverflow) {
johnc@4789 1331 gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
johnc@4789 1332 }
johnc@4789 1333
johnc@4789 1334 // Verify the heap w.r.t. the previous marking bitmap.
johnc@4789 1335 if (VerifyDuringGC) {
johnc@4789 1336 HandleMark hm; // handle scope
johnc@4789 1337 Universe::heap()->prepare_for_verify();
stefank@5018 1338 Universe::verify(VerifyOption_G1UsePrevMarking,
stefank@5018 1339 " VerifyDuringGC:(overflow)");
johnc@4789 1340 }
johnc@4789 1341
johnc@4386 1342 // Clear the marking state because we will be restarting
johnc@4386 1343 // marking due to overflowing the global mark stack.
johnc@4386 1344 reset_marking_state();
ysr@777 1345 } else {
johnc@3463 1346 // Aggregate the per-task counting data that we have accumulated
johnc@3463 1347 // while marking.
johnc@3463 1348 aggregate_count_data();
johnc@3463 1349
tonyp@2469 1350 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
ysr@777 1351 // We're done with marking.
tonyp@1752 1352 // This is the end of the marking cycle, we expect all
tonyp@1752 1353 // threads to have SATB queues with active set to true.
tonyp@2469 1354 satb_mq_set.set_active_all_threads(false, /* new active value */
tonyp@2469 1355 true /* expected_active */);
tonyp@1246 1356
tonyp@1246 1357 if (VerifyDuringGC) {
ysr@1280 1358 HandleMark hm; // handle scope
ysr@1280 1359 Universe::heap()->prepare_for_verify();
stefank@5018 1360 Universe::verify(VerifyOption_G1UseNextMarking,
stefank@5018 1361 " VerifyDuringGC:(after)");
tonyp@1246 1362 }
brutisso@7005 1363 g1h->check_bitmaps("Remark End");
johnc@2494 1364 assert(!restart_for_overflow(), "sanity");
johnc@4386 1365 // Completely reset the marking state since marking completed
johnc@4386 1366 set_non_marking_state();
johnc@2494 1367 }
johnc@2494 1368
johnc@4333 1369 // Expand the marking stack, if we have to and if we can.
johnc@4333 1370 if (_markStack.should_expand()) {
johnc@4333 1371 _markStack.expand();
johnc@4333 1372 }
johnc@4333 1373
ysr@777 1374 // Statistics
ysr@777 1375 double now = os::elapsedTime();
ysr@777 1376 _remark_mark_times.add((mark_work_end - start) * 1000.0);
ysr@777 1377 _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
ysr@777 1378 _remark_times.add((now - start) * 1000.0);
ysr@777 1379
ysr@777 1380 g1p->record_concurrent_mark_remark_end();
sla@5237 1381
sla@5237 1382 G1CMIsAliveClosure is_alive(g1h);
sla@5237 1383 g1h->gc_tracer_cm()->report_object_count_after_gc(&is_alive);
ysr@777 1384 }
ysr@777 1385
johnc@3731 1386 // Base class of the closures that finalize and verify the
johnc@3731 1387 // liveness counting data.
johnc@3731 1388 class CMCountDataClosureBase: public HeapRegionClosure {
johnc@3731 1389 protected:
johnc@4123 1390 G1CollectedHeap* _g1h;
ysr@777 1391 ConcurrentMark* _cm;
johnc@4123 1392 CardTableModRefBS* _ct_bs;
johnc@4123 1393
johnc@3463 1394 BitMap* _region_bm;
johnc@3463 1395 BitMap* _card_bm;
johnc@3463 1396
johnc@4123 1397 // Takes a region that's not empty (i.e., it has at least one
tonyp@1264 1398 // live object in it) and sets its corresponding bit on the region
tonyp@1264 1399 // bitmap to 1. If the region is "starts humongous" it will also set
tonyp@1264 1400 // to 1 the bits on the region bitmap that correspond to its
tonyp@1264 1401 // associated "continues humongous" regions.
tonyp@1264 1402 void set_bit_for_region(HeapRegion* hr) {
tonyp@1264 1403 assert(!hr->continuesHumongous(), "should have filtered those out");
tonyp@1264 1404
tschatzl@7091 1405 BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();
tonyp@1264 1406 if (!hr->startsHumongous()) {
tonyp@1264 1407 // Normal (non-humongous) case: just set the bit.
tonyp@3713 1408 _region_bm->par_at_put(index, true);
tonyp@1264 1409 } else {
tonyp@1264 1410 // Starts humongous case: calculate how many regions are part of
johnc@3463 1411 // this humongous region and then set the bit range.
tonyp@3957 1412 BitMap::idx_t end_index = (BitMap::idx_t) hr->last_hc_index();
tonyp@3713 1413 _region_bm->par_at_put_range(index, end_index, true);
tonyp@1264 1414 }
tonyp@1264 1415 }
tonyp@1264 1416
johnc@3731 1417 public:
johnc@4123 1418 CMCountDataClosureBase(G1CollectedHeap* g1h,
johnc@3731 1419 BitMap* region_bm, BitMap* card_bm):
johnc@4123 1420 _g1h(g1h), _cm(g1h->concurrent_mark()),
johnc@4123 1421 _ct_bs((CardTableModRefBS*) (g1h->barrier_set())),
johnc@4123 1422 _region_bm(region_bm), _card_bm(card_bm) { }
johnc@3731 1423 };
johnc@3731 1424
johnc@3731 1425 // Closure that calculates the # live objects per region. Used
johnc@3731 1426 // for verification purposes during the cleanup pause.
johnc@3731 1427 class CalcLiveObjectsClosure: public CMCountDataClosureBase {
johnc@3731 1428 CMBitMapRO* _bm;
johnc@3731 1429 size_t _region_marked_bytes;
johnc@3731 1430
johnc@3731 1431 public:
johnc@4123 1432 CalcLiveObjectsClosure(CMBitMapRO *bm, G1CollectedHeap* g1h,
johnc@3731 1433 BitMap* region_bm, BitMap* card_bm) :
johnc@4123 1434 CMCountDataClosureBase(g1h, region_bm, card_bm),
johnc@3731 1435 _bm(bm), _region_marked_bytes(0) { }
johnc@3731 1436
ysr@777 1437 bool doHeapRegion(HeapRegion* hr) {
ysr@777 1438
iveresov@1074 1439 if (hr->continuesHumongous()) {
tonyp@1264 1440 // We will ignore these here and process them when their
tonyp@1264 1441 // associated "starts humongous" region is processed (see
tonyp@1264 1442 // set_bit_for_region()). Note that we cannot rely on their
tonyp@1264 1443 // associated "starts humongous" region to have its bit set to
tonyp@1264 1444 // 1 since, due to the region chunking in the parallel region
tonyp@1264 1445 // iteration, a "continues humongous" region might be visited
tonyp@1264 1446 // before its associated "starts humongous".
iveresov@1074 1447 return false;
iveresov@1074 1448 }
ysr@777 1449
johnc@4123 1450 HeapWord* ntams = hr->next_top_at_mark_start();
johnc@4123 1451 HeapWord* start = hr->bottom();
johnc@4123 1452
johnc@4123 1453 assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
johnc@3463 1454 err_msg("Preconditions not met - "
johnc@4123 1455 "start: "PTR_FORMAT", ntams: "PTR_FORMAT", end: "PTR_FORMAT,
drchase@6680 1456 p2i(start), p2i(ntams), p2i(hr->end())));
johnc@3463 1457
ysr@777 1458 // Find the first marked object at or after "start".
johnc@4123 1459 start = _bm->getNextMarkedWordAddress(start, ntams);
johnc@3463 1460
ysr@777 1461 size_t marked_bytes = 0;
ysr@777 1462
johnc@4123 1463 while (start < ntams) {
ysr@777 1464 oop obj = oop(start);
ysr@777 1465 int obj_sz = obj->size();
johnc@4123 1466 HeapWord* obj_end = start + obj_sz;
johnc@3731 1467
johnc@3731 1468 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
johnc@4123 1469 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end);
johnc@4123 1470
johnc@4123 1471 // Note: if we're looking at the last region in heap - obj_end
johnc@4123 1472 // could actually be just beyond the end of the heap; end_idx
johnc@4123 1473 // will then correspond to a (non-existent) card that is also
johnc@4123 1474 // just beyond the heap.
johnc@4123 1475 if (_g1h->is_in_g1_reserved(obj_end) && !_ct_bs->is_card_aligned(obj_end)) {
johnc@4123 1476 // end of object is not card aligned - increment to cover
johnc@4123 1477 // all the cards spanned by the object
johnc@4123 1478 end_idx += 1;
johnc@4123 1479 }
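      // Worked example (assuming the default 512-byte cards): an object
      // occupying [0x1000, 0x1300) spans the cards covering
      // [0x1000, 0x1200) and [0x1200, 0x1400). start_idx is the index of
      // the first card; end_idx initially indexes the card containing
      // 0x1300, which is only partially covered by the object, so it is
      // incremented to make the exclusive range [start_idx, end_idx)
      // include that card.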
johnc@4123 1480
johnc@4123 1481 // Set the bits in the card BM for the cards spanned by this object.
johnc@4123 1482 _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
johnc@3731 1483
johnc@3731 1484 // Add the size of this object to the number of marked bytes.
apetrusenko@1465 1485 marked_bytes += (size_t)obj_sz * HeapWordSize;
johnc@3463 1486
ysr@777 1487 // Find the next marked object after this one.
johnc@4123 1488 start = _bm->getNextMarkedWordAddress(obj_end, ntams);
tonyp@2973 1489 }
johnc@3463 1490
johnc@3463 1491 // Mark the allocated-since-marking portion...
johnc@3463 1492 HeapWord* top = hr->top();
johnc@4123 1493 if (ntams < top) {
johnc@4123 1494 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
johnc@4123 1495 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
johnc@4123 1496
johnc@4123 1497 // Note: if we're looking at the last region in heap - top
johnc@4123 1498 // could actually be just beyond the end of the heap; end_idx
johnc@4123 1499 // will then correspond to a (non-existent) card that is also
johnc@4123 1500 // just beyond the heap.
johnc@4123 1501 if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
johnc@4123 1502 // top is not card aligned - increment to cover
johnc@4123 1503 // all the cards spanned by the [ntams, top) range
johnc@4123 1504 end_idx += 1;
johnc@4123 1505 }
johnc@4123 1506 _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
johnc@3463 1507
johnc@3463 1508 // This definitely means the region has live objects.
johnc@3463 1509 set_bit_for_region(hr);
ysr@777 1510 }
ysr@777 1511
ysr@777 1512 // Update the live region bitmap.
ysr@777 1513 if (marked_bytes > 0) {
tonyp@1264 1514 set_bit_for_region(hr);
ysr@777 1515 }
johnc@3463 1516
johnc@3463 1517 // Set the marked bytes for the current region so that
johnc@3463 1518 // it can be queried by a calling verification routine
johnc@3463 1519 _region_marked_bytes = marked_bytes;
johnc@3463 1520
johnc@3463 1521 return false;
johnc@3463 1522 }
johnc@3463 1523
johnc@3463 1524 size_t region_marked_bytes() const { return _region_marked_bytes; }
johnc@3463 1525 };
johnc@3463 1526
johnc@3463 1527 // Heap region closure used for verifying the counting data
johnc@3463 1528 // that was accumulated concurrently and aggregated during
johnc@3463 1529 // the remark pause. This closure is applied to the heap
johnc@3463 1530 // regions during the STW cleanup pause.
johnc@3463 1531
johnc@3463 1532 class VerifyLiveObjectDataHRClosure: public HeapRegionClosure {
johnc@4123 1533 G1CollectedHeap* _g1h;
johnc@3463 1534 ConcurrentMark* _cm;
johnc@3463 1535 CalcLiveObjectsClosure _calc_cl;
johnc@3463 1536 BitMap* _region_bm; // Region BM to be verified
johnc@3463 1537 BitMap* _card_bm; // Card BM to be verified
johnc@3463 1538 bool _verbose; // verbose output?
johnc@3463 1539
johnc@3463 1540 BitMap* _exp_region_bm; // Expected Region BM values
johnc@3463 1541 BitMap* _exp_card_bm; // Expected card BM values
johnc@3463 1542
johnc@3463 1543 int _failures;
johnc@3463 1544
johnc@3463 1545 public:
johnc@4123 1546 VerifyLiveObjectDataHRClosure(G1CollectedHeap* g1h,
johnc@3463 1547 BitMap* region_bm,
johnc@3463 1548 BitMap* card_bm,
johnc@3463 1549 BitMap* exp_region_bm,
johnc@3463 1550 BitMap* exp_card_bm,
johnc@3463 1551 bool verbose) :
johnc@4123 1552 _g1h(g1h), _cm(g1h->concurrent_mark()),
johnc@4123 1553 _calc_cl(_cm->nextMarkBitMap(), g1h, exp_region_bm, exp_card_bm),
johnc@3463 1554 _region_bm(region_bm), _card_bm(card_bm), _verbose(verbose),
johnc@3463 1555 _exp_region_bm(exp_region_bm), _exp_card_bm(exp_card_bm),
johnc@3463 1556 _failures(0) { }
johnc@3463 1557
johnc@3463 1558 int failures() const { return _failures; }
johnc@3463 1559
johnc@3463 1560 bool doHeapRegion(HeapRegion* hr) {
johnc@3463 1561 if (hr->continuesHumongous()) {
johnc@3463 1562 // We will ignore these here and process them when their
johnc@3463 1563 // associated "starts humongous" region is processed (see
johnc@3463 1564 // set_bit_for_region()). Note that we cannot rely on their
johnc@3463 1565 // associated "starts humongous" region to have its bit set to
johnc@3463 1566 // 1 since, due to the region chunking in the parallel region
johnc@3463 1567 // iteration, a "continues humongous" region might be visited
johnc@3463 1568 // before its associated "starts humongous".
johnc@3463 1569 return false;
johnc@3463 1570 }
johnc@3463 1571
johnc@3463 1572 int failures = 0;
johnc@3463 1573
johnc@3463 1574 // Call the CalcLiveObjectsClosure to walk the marking bitmap for
johnc@3463 1575 // this region and set the corresponding bits in the expected region
johnc@3463 1576 // and card bitmaps.
johnc@3463 1577 bool res = _calc_cl.doHeapRegion(hr);
johnc@3463 1578 assert(res == false, "should be continuing");
johnc@3463 1579
johnc@3463 1580 MutexLockerEx x((_verbose ? ParGCRareEvent_lock : NULL),
johnc@3463 1581 Mutex::_no_safepoint_check_flag);
johnc@3463 1582
johnc@3463 1583 // Verify the marked bytes for this region.
johnc@3463 1584 size_t exp_marked_bytes = _calc_cl.region_marked_bytes();
johnc@3463 1585 size_t act_marked_bytes = hr->next_marked_bytes();
johnc@3463 1586
johnc@3463 1587 // We're not OK if expected marked bytes > actual marked bytes. It means
johnc@3463 1588 // we have failed to account for some objects during the actual marking.
johnc@3463 1589 if (exp_marked_bytes > act_marked_bytes) {
johnc@3463 1590 if (_verbose) {
tonyp@3713 1591 gclog_or_tty->print_cr("Region %u: marked bytes mismatch: "
johnc@3463 1592 "expected: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
tschatzl@7091 1593 hr->hrm_index(), exp_marked_bytes, act_marked_bytes);
johnc@3463 1594 }
johnc@3463 1595 failures += 1;
johnc@3463 1596 }
johnc@3463 1597
johnc@3463 1598 // Verify the bit, for this region, in the actual and expected
johnc@3463 1599 // (which was just calculated) region bit maps.
johnc@3463 1600 // We're not OK if the bit in the calculated expected region
johnc@3463 1601 // bitmap is set and the bit in the actual region bitmap is not.
tschatzl@7091 1602 BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();
johnc@3463 1603
johnc@3463 1604 bool expected = _exp_region_bm->at(index);
johnc@3463 1605 bool actual = _region_bm->at(index);
johnc@3463 1606 if (expected && !actual) {
johnc@3463 1607 if (_verbose) {
tonyp@3713 1608 gclog_or_tty->print_cr("Region %u: region bitmap mismatch: "
tonyp@3713 1609 "expected: %s, actual: %s",
tschatzl@7091 1610 hr->hrm_index(),
tonyp@3713 1611 BOOL_TO_STR(expected), BOOL_TO_STR(actual));
johnc@3463 1612 }
johnc@3463 1613 failures += 1;
johnc@3463 1614 }
johnc@3463 1615
johnc@3463 1616 // Verify that the card bit maps for the cards spanned by the current
johnc@3463 1617 // region match. We have an error if we have a set bit in the expected
johnc@3463 1618 // bit map and the corresponding bit in the actual bitmap is not set.
johnc@3463 1619
johnc@3463 1620 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(hr->bottom());
johnc@3463 1621 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(hr->top());
johnc@3463 1622
johnc@3463 1623 for (BitMap::idx_t i = start_idx; i < end_idx; i+=1) {
johnc@3463 1624 expected = _exp_card_bm->at(i);
johnc@3463 1625 actual = _card_bm->at(i);
johnc@3463 1626
johnc@3463 1627 if (expected && !actual) {
johnc@3463 1628 if (_verbose) {
tonyp@3713 1629 gclog_or_tty->print_cr("Region %u: card bitmap mismatch at " SIZE_FORMAT ": "
tonyp@3713 1630 "expected: %s, actual: %s",
tschatzl@7091 1631 hr->hrm_index(), i,
tonyp@3713 1632 BOOL_TO_STR(expected), BOOL_TO_STR(actual));
ysr@777 1633 }
johnc@3463 1634 failures += 1;
ysr@777 1635 }
ysr@777 1636 }
ysr@777 1637
johnc@3463 1638 if (failures > 0 && _verbose) {
johnc@3463 1639 gclog_or_tty->print_cr("Region " HR_FORMAT ", ntams: " PTR_FORMAT ", "
johnc@3463 1640 "marked_bytes: calc/actual " SIZE_FORMAT "/" SIZE_FORMAT,
drchase@6680 1641 HR_FORMAT_PARAMS(hr), p2i(hr->next_top_at_mark_start()),
johnc@3463 1642 _calc_cl.region_marked_bytes(), hr->next_marked_bytes());
johnc@3463 1643 }
johnc@3463 1644
johnc@3463 1645 _failures += failures;
johnc@3463 1646
johnc@3463 1647 // We could stop iteration over the heap when we
johnc@3731 1648 // find the first violating region by returning true.
ysr@777 1649 return false;
ysr@777 1650 }
ysr@777 1651 };
ysr@777 1652
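// Gang task used, when VerifyDuringGC is set, to verify the liveness
// counting data during the STW cleanup pause. Each worker recomputes the
// expected region and card bitmaps from the next marking bitmap and
// compares them with the actual ones, accumulating any failures.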
johnc@3463 1653 class G1ParVerifyFinalCountTask: public AbstractGangTask {
johnc@3463 1654 protected:
johnc@3463 1655 G1CollectedHeap* _g1h;
johnc@3463 1656 ConcurrentMark* _cm;
johnc@3463 1657 BitMap* _actual_region_bm;
johnc@3463 1658 BitMap* _actual_card_bm;
johnc@3463 1659
johnc@3463 1660 uint _n_workers;
johnc@3463 1661
johnc@3463 1662 BitMap* _expected_region_bm;
johnc@3463 1663 BitMap* _expected_card_bm;
johnc@3463 1664
johnc@3463 1665 int _failures;
johnc@3463 1666 bool _verbose;
johnc@3463 1667
johnc@3463 1668 public:
johnc@3463 1669 G1ParVerifyFinalCountTask(G1CollectedHeap* g1h,
johnc@3463 1670 BitMap* region_bm, BitMap* card_bm,
johnc@3463 1671 BitMap* expected_region_bm, BitMap* expected_card_bm)
johnc@3463 1672 : AbstractGangTask("G1 verify final counting"),
johnc@3463 1673 _g1h(g1h), _cm(_g1h->concurrent_mark()),
johnc@3463 1674 _actual_region_bm(region_bm), _actual_card_bm(card_bm),
johnc@3463 1675 _expected_region_bm(expected_region_bm), _expected_card_bm(expected_card_bm),
johnc@3463 1676 _failures(0), _verbose(false),
johnc@3463 1677 _n_workers(0) {
johnc@3463 1678 assert(VerifyDuringGC, "don't call this otherwise");
johnc@3463 1679
johnc@3463 1680 // Use the value already set as the number of active threads
johnc@3463 1681 // in the call to run_task().
johnc@3463 1682 if (G1CollectedHeap::use_parallel_gc_threads()) {
johnc@3463 1683 assert( _g1h->workers()->active_workers() > 0,
johnc@3463 1684 "Should have been previously set");
johnc@3463 1685 _n_workers = _g1h->workers()->active_workers();
johnc@3463 1686 } else {
johnc@3463 1687 _n_workers = 1;
johnc@3463 1688 }
johnc@3463 1689
johnc@3463 1690 assert(_expected_card_bm->size() == _actual_card_bm->size(), "sanity");
johnc@3463 1691 assert(_expected_region_bm->size() == _actual_region_bm->size(), "sanity");
johnc@3463 1692
johnc@3463 1693 _verbose = _cm->verbose_medium();
johnc@3463 1694 }
johnc@3463 1695
johnc@3463 1696 void work(uint worker_id) {
johnc@3463 1697 assert(worker_id < _n_workers, "invariant");
johnc@3463 1698
johnc@4123 1699 VerifyLiveObjectDataHRClosure verify_cl(_g1h,
johnc@3463 1700 _actual_region_bm, _actual_card_bm,
johnc@3463 1701 _expected_region_bm,
johnc@3463 1702 _expected_card_bm,
johnc@3463 1703 _verbose);
johnc@3463 1704
johnc@3463 1705 if (G1CollectedHeap::use_parallel_gc_threads()) {
johnc@3463 1706 _g1h->heap_region_par_iterate_chunked(&verify_cl,
johnc@3463 1707 worker_id,
johnc@3463 1708 _n_workers,
johnc@3463 1709 HeapRegion::VerifyCountClaimValue);
johnc@3463 1710 } else {
johnc@3463 1711 _g1h->heap_region_iterate(&verify_cl);
johnc@3463 1712 }
johnc@3463 1713
johnc@3463 1714 Atomic::add(verify_cl.failures(), &_failures);
johnc@3463 1715 }
johnc@3463 1716
johnc@3463 1717 int failures() const { return _failures; }
johnc@3463 1718 };
johnc@3463 1719
johnc@3731 1720 // Closure that finalizes the liveness counting data.
johnc@3731 1721 // Used during the cleanup pause.
johnc@3731 1722 // Sets the bits corresponding to the interval [NTAMS, top)
johnc@3731 1723 // (which contains the implicitly live objects) in the
johnc@3731 1724 // card liveness bitmap. Also sets the bit for each region,
johnc@3731 1725 // containing live data, in the region liveness bitmap.
johnc@3731 1726
johnc@3731 1727 class FinalCountDataUpdateClosure: public CMCountDataClosureBase {
johnc@3463 1728 public:
johnc@4123 1729 FinalCountDataUpdateClosure(G1CollectedHeap* g1h,
johnc@3463 1730 BitMap* region_bm,
johnc@3463 1731 BitMap* card_bm) :
johnc@4123 1732 CMCountDataClosureBase(g1h, region_bm, card_bm) { }
johnc@3463 1733
johnc@3463 1734 bool doHeapRegion(HeapRegion* hr) {
johnc@3463 1735
johnc@3463 1736 if (hr->continuesHumongous()) {
johnc@3463 1737 // We will ignore these here and process them when their
johnc@3463 1738 // associated "starts humongous" region is processed (see
johnc@3463 1739 // set_bit_for_region()). Note that we cannot rely on their
johnc@3463 1740 // associated "starts humongous" region to have its bit set to
johnc@3463 1741 // 1 since, due to the region chunking in the parallel region
johnc@3463 1742 // iteration, a "continues humongous" region might be visited
johnc@3463 1743 // before its associated "starts humongous".
johnc@3463 1744 return false;
johnc@3463 1745 }
johnc@3463 1746
johnc@3463 1747 HeapWord* ntams = hr->next_top_at_mark_start();
johnc@3463 1748 HeapWord* top = hr->top();
johnc@3463 1749
johnc@3731 1750 assert(hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions.");
johnc@3463 1751
johnc@3463 1752 // Mark the allocated-since-marking portion...
johnc@3463 1753 if (ntams < top) {
johnc@3463 1754 // This definitely means the region has live objects.
johnc@3463 1755 set_bit_for_region(hr);
johnc@4123 1756
johnc@4123 1757 // Now set the bits in the card bitmap for [ntams, top)
johnc@4123 1758 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
johnc@4123 1759 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
johnc@4123 1760
johnc@4123 1761 // Note: if we're looking at the last region in heap - top
johnc@4123 1762 // could actually be just beyond the end of the heap; end_idx
johnc@4123 1763 // will then correspond to a (non-existent) card that is also
johnc@4123 1764 // just beyond the heap.
johnc@4123 1765 if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
johnc@4123 1766 // top is not card aligned - increment to cover
johnc@4123 1767 // all the cards spanned by the [ntams, top) range
johnc@4123 1768 end_idx += 1;
johnc@4123 1769 }
johnc@4123 1770
johnc@4123 1771 assert(end_idx <= _card_bm->size(),
johnc@4123 1772 err_msg("oob: end_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT,
johnc@4123 1773 end_idx, _card_bm->size()));
johnc@4123 1774 assert(start_idx < _card_bm->size(),
johnc@4123 1775 err_msg("oob: start_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT,
johnc@4123 1776 start_idx, _card_bm->size()));
johnc@4123 1777
johnc@4123 1778 _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
coleenp@4037 1779 }
johnc@3463 1780
johnc@3463 1781 // Set the bit for the region if it contains live data
johnc@3463 1782 if (hr->next_marked_bytes() > 0) {
johnc@3463 1783 set_bit_for_region(hr);
johnc@3463 1784 }
johnc@3463 1785
johnc@3463 1786 return false;
johnc@3463 1787 }
johnc@3463 1788 };
ysr@777 1789
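// Gang task that finalizes the liveness counting data during the STW
// cleanup pause by applying FinalCountDataUpdateClosure to the heap
// regions, in parallel when running with parallel GC threads.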
ysr@777 1790 class G1ParFinalCountTask: public AbstractGangTask {
ysr@777 1791 protected:
ysr@777 1792 G1CollectedHeap* _g1h;
johnc@3463 1793 ConcurrentMark* _cm;
johnc@3463 1794 BitMap* _actual_region_bm;
johnc@3463 1795 BitMap* _actual_card_bm;
johnc@3463 1796
jmasa@3357 1797 uint _n_workers;
johnc@3463 1798
ysr@777 1799 public:
johnc@3463 1800 G1ParFinalCountTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm)
johnc@3463 1801 : AbstractGangTask("G1 final counting"),
johnc@3463 1802 _g1h(g1h), _cm(_g1h->concurrent_mark()),
johnc@3463 1803 _actual_region_bm(region_bm), _actual_card_bm(card_bm),
johnc@3463 1804 _n_workers(0) {
jmasa@3294 1805 // Use the value already set as the number of active threads
tonyp@3714 1806 // in the call to run_task().
jmasa@3294 1807 if (G1CollectedHeap::use_parallel_gc_threads()) {
jmasa@3294 1808 assert( _g1h->workers()->active_workers() > 0,
jmasa@3294 1809 "Should have been previously set");
jmasa@3294 1810 _n_workers = _g1h->workers()->active_workers();
tonyp@2973 1811 } else {
ysr@777 1812 _n_workers = 1;
tonyp@2973 1813 }
ysr@777 1814 }
ysr@777 1815
jmasa@3357 1816 void work(uint worker_id) {
johnc@3463 1817 assert(worker_id < _n_workers, "invariant");
johnc@3463 1818
johnc@4123 1819 FinalCountDataUpdateClosure final_update_cl(_g1h,
johnc@3463 1820 _actual_region_bm,
johnc@3463 1821 _actual_card_bm);
johnc@3463 1822
jmasa@2188 1823 if (G1CollectedHeap::use_parallel_gc_threads()) {
johnc@3463 1824 _g1h->heap_region_par_iterate_chunked(&final_update_cl,
johnc@3463 1825 worker_id,
johnc@3463 1826 _n_workers,
tonyp@790 1827 HeapRegion::FinalCountClaimValue);
ysr@777 1828 } else {
johnc@3463 1829 _g1h->heap_region_iterate(&final_update_cl);
ysr@777 1830 }
ysr@777 1831 }
ysr@777 1832 };
ysr@777 1833
ysr@777 1834 class G1ParNoteEndTask;
ysr@777 1835
ysr@777 1836 class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
ysr@777 1837 G1CollectedHeap* _g1;
ysr@777 1838 size_t _max_live_bytes;
tonyp@3713 1839 uint _regions_claimed;
ysr@777 1840 size_t _freed_bytes;
tonyp@2493 1841 FreeRegionList* _local_cleanup_list;
brutisso@6385 1842 HeapRegionSetCount _old_regions_removed;
brutisso@6385 1843 HeapRegionSetCount _humongous_regions_removed;
tonyp@2493 1844 HRRSCleanupTask* _hrrs_cleanup_task;
ysr@777 1845 double _claimed_region_time;
ysr@777 1846 double _max_region_time;
ysr@777 1847
ysr@777 1848 public:
ysr@777 1849 G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
tonyp@2493 1850 FreeRegionList* local_cleanup_list,
johnc@3292 1851 HRRSCleanupTask* hrrs_cleanup_task) :
vkempik@6552 1852 _g1(g1),
johnc@3292 1853 _max_live_bytes(0), _regions_claimed(0),
johnc@3292 1854 _freed_bytes(0),
johnc@3292 1855 _claimed_region_time(0.0), _max_region_time(0.0),
johnc@3292 1856 _local_cleanup_list(local_cleanup_list),
brutisso@6385 1857 _old_regions_removed(),
brutisso@6385 1858 _humongous_regions_removed(),
johnc@3292 1859 _hrrs_cleanup_task(hrrs_cleanup_task) { }
johnc@3292 1860
ysr@777 1861 size_t freed_bytes() { return _freed_bytes; }
brutisso@6385 1862 const HeapRegionSetCount& old_regions_removed() { return _old_regions_removed; }
brutisso@6385 1863 const HeapRegionSetCount& humongous_regions_removed() { return _humongous_regions_removed; }
ysr@777 1864
johnc@3292 1865 bool doHeapRegion(HeapRegion *hr) {
tonyp@3957 1866 if (hr->continuesHumongous()) {
tonyp@3957 1867 return false;
tonyp@3957 1868 }
johnc@3292 1869 // We use a claim value of zero here because all regions
johnc@3292 1870 // were claimed with value 1 in the FinalCount task.
tonyp@3957 1871 _g1->reset_gc_time_stamps(hr);
tonyp@3957 1872 double start = os::elapsedTime();
tonyp@3957 1873 _regions_claimed++;
tonyp@3957 1874 hr->note_end_of_marking();
tonyp@3957 1875 _max_live_bytes += hr->max_live_bytes();
brutisso@6385 1876
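    // A non-young region that is used but contains no live data at all
    // can be freed outright; anything else just has its remembered set
    // cleanup work collected into the HRRS cleanup task.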
brutisso@6385 1877 if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
brutisso@6385 1878 _freed_bytes += hr->used();
brutisso@6385 1879 hr->set_containing_set(NULL);
brutisso@6385 1880 if (hr->isHumongous()) {
brutisso@6385 1881 assert(hr->startsHumongous(), "we should only see starts humongous");
brutisso@6385 1882 _humongous_regions_removed.increment(1u, hr->capacity());
brutisso@6385 1883 _g1->free_humongous_region(hr, _local_cleanup_list, true);
brutisso@6385 1884 } else {
brutisso@6385 1885 _old_regions_removed.increment(1u, hr->capacity());
brutisso@6385 1886 _g1->free_region(hr, _local_cleanup_list, true);
brutisso@6385 1887 }
brutisso@6385 1888 } else {
brutisso@6385 1889 hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
brutisso@6385 1890 }
brutisso@6385 1891
tonyp@3957 1892 double region_time = (os::elapsedTime() - start);
tonyp@3957 1893 _claimed_region_time += region_time;
tonyp@3957 1894 if (region_time > _max_region_time) {
tonyp@3957 1895 _max_region_time = region_time;
johnc@3292 1896 }
johnc@3292 1897 return false;
johnc@3292 1898 }
ysr@777 1899
ysr@777 1900 size_t max_live_bytes() { return _max_live_bytes; }
tonyp@3713 1901 uint regions_claimed() { return _regions_claimed; }
ysr@777 1902 double claimed_region_time_sec() { return _claimed_region_time; }
ysr@777 1903 double max_region_time_sec() { return _max_region_time; }
ysr@777 1904 };
ysr@777 1905
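// Gang task that notes the end of marking in all heap regions, freeing
// wholly-garbage regions into per-worker local cleanup lists, and then
// merges those lists and the associated statistics into the global
// cleanup list under ParGCRareEvent_lock.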
ysr@777 1906 class G1ParNoteEndTask: public AbstractGangTask {
ysr@777 1907 friend class G1NoteEndOfConcMarkClosure;
tonyp@2472 1908
ysr@777 1909 protected:
ysr@777 1910 G1CollectedHeap* _g1h;
ysr@777 1911 size_t _max_live_bytes;
ysr@777 1912 size_t _freed_bytes;
tonyp@2472 1913 FreeRegionList* _cleanup_list;
tonyp@2472 1914
ysr@777 1915 public:
ysr@777 1916 G1ParNoteEndTask(G1CollectedHeap* g1h,
tonyp@2472 1917 FreeRegionList* cleanup_list) :
ysr@777 1918 AbstractGangTask("G1 note end"), _g1h(g1h),
tonyp@2472 1919 _max_live_bytes(0), _freed_bytes(0), _cleanup_list(cleanup_list) { }
ysr@777 1920
jmasa@3357 1921 void work(uint worker_id) {
ysr@777 1922 double start = os::elapsedTime();
tonyp@2493 1923 FreeRegionList local_cleanup_list("Local Cleanup List");
tonyp@2493 1924 HRRSCleanupTask hrrs_cleanup_task;
vkempik@6552 1925 G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list,
tonyp@2493 1926 &hrrs_cleanup_task);
jmasa@2188 1927 if (G1CollectedHeap::use_parallel_gc_threads()) {
jmasa@3357 1928 _g1h->heap_region_par_iterate_chunked(&g1_note_end, worker_id,
jmasa@3294 1929 _g1h->workers()->active_workers(),
tonyp@790 1930 HeapRegion::NoteEndClaimValue);
ysr@777 1931 } else {
ysr@777 1932 _g1h->heap_region_iterate(&g1_note_end);
ysr@777 1933 }
ysr@777 1934 assert(g1_note_end.complete(), "Shouldn't have yielded!");
ysr@777 1935
tonyp@2472 1936 // Now update the lists
brutisso@6385 1937 _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed());
ysr@777 1938 {
ysr@777 1939 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
brutisso@6385 1940 _g1h->decrement_summary_bytes(g1_note_end.freed_bytes());
ysr@777 1941 _max_live_bytes += g1_note_end.max_live_bytes();
ysr@777 1942 _freed_bytes += g1_note_end.freed_bytes();
tonyp@2472 1943
tonyp@2975 1944 // If we iterate over the global cleanup list at the end of
tonyp@2975 1945 // cleanup to do this printing we will not guarantee to only
tonyp@2975 1946 // generate output for the newly-reclaimed regions (the list
tonyp@2975 1947 // might not be empty at the beginning of cleanup; we might
tonyp@2975 1948 // still be working on its previous contents). So we do the
tonyp@2975 1949 // printing here, before we append the new regions to the global
tonyp@2975 1950 // cleanup list.
tonyp@2975 1951
tonyp@2975 1952 G1HRPrinter* hr_printer = _g1h->hr_printer();
tonyp@2975 1953 if (hr_printer->is_active()) {
brutisso@6385 1954 FreeRegionListIterator iter(&local_cleanup_list);
tonyp@2975 1955 while (iter.more_available()) {
tonyp@2975 1956 HeapRegion* hr = iter.get_next();
tonyp@2975 1957 hr_printer->cleanup(hr);
tonyp@2975 1958 }
tonyp@2975 1959 }
tonyp@2975 1960
jwilhelm@6422 1961 _cleanup_list->add_ordered(&local_cleanup_list);
tonyp@2493 1962 assert(local_cleanup_list.is_empty(), "post-condition");
tonyp@2493 1963
tonyp@2493 1964 HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
ysr@777 1965 }
ysr@777 1966 }
ysr@777 1967 size_t max_live_bytes() { return _max_live_bytes; }
ysr@777 1968 size_t freed_bytes() { return _freed_bytes; }
ysr@777 1969 };
ysr@777 1970
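// Gang task that scrubs the remembered sets, using the region and card
// liveness bitmaps to drop remembered set entries for areas that are
// known to contain no live data.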
ysr@777 1971 class G1ParScrubRemSetTask: public AbstractGangTask {
ysr@777 1972 protected:
ysr@777 1973 G1RemSet* _g1rs;
ysr@777 1974 BitMap* _region_bm;
ysr@777 1975 BitMap* _card_bm;
ysr@777 1976 public:
ysr@777 1977 G1ParScrubRemSetTask(G1CollectedHeap* g1h,
ysr@777 1978 BitMap* region_bm, BitMap* card_bm) :
ysr@777 1979 AbstractGangTask("G1 ScrubRS"), _g1rs(g1h->g1_rem_set()),
johnc@3463 1980 _region_bm(region_bm), _card_bm(card_bm) { }
ysr@777 1981
jmasa@3357 1982 void work(uint worker_id) {
jmasa@2188 1983 if (G1CollectedHeap::use_parallel_gc_threads()) {
jmasa@3357 1984 _g1rs->scrub_par(_region_bm, _card_bm, worker_id,
tonyp@790 1985 HeapRegion::ScrubRemSetClaimValue);
ysr@777 1986 } else {
ysr@777 1987 _g1rs->scrub(_region_bm, _card_bm);
ysr@777 1988 }
ysr@777 1989 }
ysr@777 1990
ysr@777 1991 };
ysr@777 1992
ysr@777 1993 void ConcurrentMark::cleanup() {
ysr@777 1994 // world is stopped at this checkpoint
ysr@777 1995 assert(SafepointSynchronize::is_at_safepoint(),
ysr@777 1996 "world should be stopped");
ysr@777 1997 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@777 1998
ysr@777 1999 // If a full collection has happened, we shouldn't do this.
ysr@777 2000 if (has_aborted()) {
ysr@777 2001 g1h->set_marking_complete(); // So bitmap clearing isn't confused
ysr@777 2002 return;
ysr@777 2003 }
ysr@777 2004
tonyp@2472 2005 g1h->verify_region_sets_optional();
tonyp@2472 2006
ysr@1280 2007 if (VerifyDuringGC) {
ysr@1280 2008 HandleMark hm; // handle scope
ysr@1280 2009 Universe::heap()->prepare_for_verify();
stefank@5018 2010 Universe::verify(VerifyOption_G1UsePrevMarking,
stefank@5018 2011 " VerifyDuringGC:(before)");
ysr@1280 2012 }
brutisso@7005 2013 g1h->check_bitmaps("Cleanup Start");
ysr@1280 2014
ysr@777 2015 G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
ysr@777 2016 g1p->record_concurrent_mark_cleanup_start();
ysr@777 2017
ysr@777 2018 double start = os::elapsedTime();
ysr@777 2019
tonyp@2493 2020 HeapRegionRemSet::reset_for_cleanup_tasks();
tonyp@2493 2021
jmasa@3357 2022 uint n_workers;
jmasa@3294 2023
ysr@777 2024 // Do counting once more with the world stopped for good measure.
johnc@3463 2025 G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm);
johnc@3463 2026
jmasa@2188 2027 if (G1CollectedHeap::use_parallel_gc_threads()) {
johnc@3463 2028 assert(g1h->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
tonyp@790 2029 "sanity check");
tonyp@790 2030
johnc@3338 2031 g1h->set_par_threads();
johnc@3338 2032 n_workers = g1h->n_par_threads();
jmasa@3357 2033 assert(g1h->n_par_threads() == n_workers,
johnc@3338 2034 "Should not have been reset");
ysr@777 2035 g1h->workers()->run_task(&g1_par_count_task);
jmasa@3294 2036 // Done with the parallel phase so reset to 0.
ysr@777 2037 g1h->set_par_threads(0);
tonyp@790 2038
johnc@3463 2039 assert(g1h->check_heap_region_claim_values(HeapRegion::FinalCountClaimValue),
tonyp@790 2040 "sanity check");
ysr@777 2041 } else {
johnc@3338 2042 n_workers = 1;
ysr@777 2043 g1_par_count_task.work(0);
ysr@777 2044 }
ysr@777 2045
johnc@3463 2046 if (VerifyDuringGC) {
johnc@3463 2047 // Verify that the counting data accumulated during marking matches
johnc@3463 2048 // that calculated by walking the marking bitmap.
johnc@3463 2049
johnc@3463 2050 // Bitmaps to hold expected values
mgerdin@6977 2051 BitMap expected_region_bm(_region_bm.size(), true);
mgerdin@6977 2052 BitMap expected_card_bm(_card_bm.size(), true);
johnc@3463 2053
johnc@3463 2054 G1ParVerifyFinalCountTask g1_par_verify_task(g1h,
johnc@3463 2055 &_region_bm,
johnc@3463 2056 &_card_bm,
johnc@3463 2057 &expected_region_bm,
johnc@3463 2058 &expected_card_bm);
johnc@3463 2059
johnc@3463 2060 if (G1CollectedHeap::use_parallel_gc_threads()) {
johnc@3463 2061 g1h->set_par_threads((int)n_workers);
johnc@3463 2062 g1h->workers()->run_task(&g1_par_verify_task);
johnc@3463 2063 // Done with the parallel phase so reset to 0.
johnc@3463 2064 g1h->set_par_threads(0);
johnc@3463 2065
johnc@3463 2066 assert(g1h->check_heap_region_claim_values(HeapRegion::VerifyCountClaimValue),
johnc@3463 2067 "sanity check");
johnc@3463 2068 } else {
johnc@3463 2069 g1_par_verify_task.work(0);
johnc@3463 2070 }
johnc@3463 2071
johnc@3463 2072 guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures");
johnc@3463 2073 }
johnc@3463 2074
ysr@777 2075 size_t start_used_bytes = g1h->used();
ysr@777 2076 g1h->set_marking_complete();
ysr@777 2077
ysr@777 2078 double count_end = os::elapsedTime();
ysr@777 2079 double this_final_counting_time = (count_end - start);
ysr@777 2080 _total_counting_time += this_final_counting_time;
ysr@777 2081
tonyp@2717 2082 if (G1PrintRegionLivenessInfo) {
tonyp@2717 2083 G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking");
tonyp@2717 2084 _g1h->heap_region_iterate(&cl);
tonyp@2717 2085 }
tonyp@2717 2086
ysr@777 2087 // Install newly created mark bitMap as "prev".
ysr@777 2088 swapMarkBitMaps();
ysr@777 2089
ysr@777 2090 g1h->reset_gc_time_stamp();
ysr@777 2091
ysr@777 2092 // Note end of marking in all heap regions.
tonyp@2472 2093 G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list);
jmasa@2188 2094 if (G1CollectedHeap::use_parallel_gc_threads()) {
jmasa@3294 2095 g1h->set_par_threads((int)n_workers);
ysr@777 2096 g1h->workers()->run_task(&g1_par_note_end_task);
ysr@777 2097 g1h->set_par_threads(0);
tonyp@790 2098
tonyp@790 2099 assert(g1h->check_heap_region_claim_values(HeapRegion::NoteEndClaimValue),
tonyp@790 2100 "sanity check");
ysr@777 2101 } else {
ysr@777 2102 g1_par_note_end_task.work(0);
ysr@777 2103 }
tonyp@3957 2104 g1h->check_gc_time_stamps();
tonyp@2472 2105
tonyp@2472 2106 if (!cleanup_list_is_empty()) {
tonyp@2472 2107 // The cleanup list is not empty, so we'll have to process it
tonyp@2472 2108 // concurrently. Notify anyone else that might be wanting free
tonyp@2472 2109 // regions that there will be more free regions coming soon.
tonyp@2472 2110 g1h->set_free_regions_coming();
tonyp@2472 2111 }
ysr@777 2112
ysr@777 2113 // Do the remembered set scrubbing before the call below, since it
ysr@777 2114 // affects the metric by which we sort the heap regions.
ysr@777 2115 if (G1ScrubRemSets) {
ysr@777 2116 double rs_scrub_start = os::elapsedTime();
ysr@777 2117 G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm);
jmasa@2188 2118 if (G1CollectedHeap::use_parallel_gc_threads()) {
jmasa@3294 2119 g1h->set_par_threads((int)n_workers);
ysr@777 2120 g1h->workers()->run_task(&g1_par_scrub_rs_task);
ysr@777 2121 g1h->set_par_threads(0);
tonyp@790 2122
tonyp@790 2123 assert(g1h->check_heap_region_claim_values(
tonyp@790 2124 HeapRegion::ScrubRemSetClaimValue),
tonyp@790 2125 "sanity check");
ysr@777 2126 } else {
ysr@777 2127 g1_par_scrub_rs_task.work(0);
ysr@777 2128 }
ysr@777 2129
ysr@777 2130 double rs_scrub_end = os::elapsedTime();
ysr@777 2131 double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start);
ysr@777 2132 _total_rs_scrub_time += this_rs_scrub_time;
ysr@777 2133 }
ysr@777 2134
ysr@777 2135 // this will also free any regions totally full of garbage objects,
ysr@777 2136 // and sort the regions.
jmasa@3294 2137 g1h->g1_policy()->record_concurrent_mark_cleanup_end((int)n_workers);
ysr@777 2138
ysr@777 2139 // Statistics.
ysr@777 2140 double end = os::elapsedTime();
ysr@777 2141 _cleanup_times.add((end - start) * 1000.0);
ysr@777 2142
brutisso@3710 2143 if (G1Log::fine()) {
ysr@777 2144 g1h->print_size_transition(gclog_or_tty,
ysr@777 2145 start_used_bytes,
ysr@777 2146 g1h->used(),
ysr@777 2147 g1h->capacity());
ysr@777 2148 }
ysr@777 2149
johnc@3175 2150 // Clean up will have freed any regions completely full of garbage.
johnc@3175 2151 // Update the soft reference policy with the new heap occupancy.
johnc@3175 2152 Universe::update_heap_info_at_gc();
johnc@3175 2153
johnc@1186 2154 if (VerifyDuringGC) {
ysr@1280 2155 HandleMark hm; // handle scope
ysr@1280 2156 Universe::heap()->prepare_for_verify();
stefank@5018 2157 Universe::verify(VerifyOption_G1UsePrevMarking,
stefank@5018 2158 " VerifyDuringGC:(after)");
ysr@777 2159 }
brutisso@7005 2160 g1h->check_bitmaps("Cleanup End");
tonyp@2472 2161
tonyp@2472 2162 g1h->verify_region_sets_optional();
stefank@6992 2163
stefank@6992 2164 // We need to make this be a "collection" so any collection pause that
stefank@6992 2165 // races with it goes around and waits for completeCleanup to finish.
stefank@6992 2166 g1h->increment_total_collections();
stefank@6992 2167
stefank@6992 2168 // Clean out dead classes and update Metaspace sizes.
stefank@6996 2169 if (ClassUnloadingWithConcurrentMark) {
stefank@6996 2170 ClassLoaderDataGraph::purge();
stefank@6996 2171 }
stefank@6992 2172 MetaspaceGC::compute_new_size();
stefank@6992 2173
stefank@6992 2174 // We reclaimed old regions so we should calculate the sizes to make
stefank@6992 2175 // sure we update the old gen/space data.
stefank@6992 2176 g1h->g1mm()->update_sizes();
stefank@6992 2177
sla@5237 2178 g1h->trace_heap_after_concurrent_cycle();
ysr@777 2179 }
ysr@777 2180
ysr@777 2181 void ConcurrentMark::completeCleanup() {
ysr@777 2182 if (has_aborted()) return;
ysr@777 2183
tonyp@2472 2184 G1CollectedHeap* g1h = G1CollectedHeap::heap();
tonyp@2472 2185
jwilhelm@6549 2186 _cleanup_list.verify_optional();
tonyp@2643 2187 FreeRegionList tmp_free_list("Tmp Free List");
tonyp@2472 2188
tonyp@2472 2189 if (G1ConcRegionFreeingVerbose) {
tonyp@2472 2190 gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
tonyp@3713 2191 "cleanup list has %u entries",
tonyp@2472 2192 _cleanup_list.length());
tonyp@2472 2193 }
tonyp@2472 2194
tschatzl@7051 2195 // No one else should be accessing the _cleanup_list at this point,
tschatzl@7051 2196 // so it is not necessary to take any locks
tonyp@2472 2197 while (!_cleanup_list.is_empty()) {
tschatzl@7050 2198 HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
jwilhelm@6422 2199 assert(hr != NULL, "Got NULL from a non-empty list");
tonyp@2849 2200 hr->par_clear();
jwilhelm@6422 2201 tmp_free_list.add_ordered(hr);
tonyp@2472 2202
tonyp@2472 2203 // Instead of adding one region at a time to the secondary_free_list,
tonyp@2472 2204 // we accumulate them in the local list and move them a few at a
tonyp@2472 2205 // time. This also cuts down on the number of notify_all() calls
tonyp@2472 2206 // we do during this process. We'll also append the local list when
tonyp@2472 2207 // _cleanup_list is empty (which means we just removed the last
tonyp@2472 2208 // region from the _cleanup_list).
tonyp@2643 2209 if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
tonyp@2472 2210 _cleanup_list.is_empty()) {
tonyp@2472 2211 if (G1ConcRegionFreeingVerbose) {
tonyp@2472 2212 gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
tonyp@3713 2213 "appending %u entries to the secondary_free_list, "
tonyp@3713 2214 "cleanup list still has %u entries",
tonyp@2643 2215 tmp_free_list.length(),
tonyp@2472 2216 _cleanup_list.length());
ysr@777 2217 }
tonyp@2472 2218
tonyp@2472 2219 {
tonyp@2472 2220 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
jwilhelm@6422 2221 g1h->secondary_free_list_add(&tmp_free_list);
tonyp@2472 2222 SecondaryFreeList_lock->notify_all();
tonyp@2472 2223 }
tonyp@2472 2224
tonyp@2472 2225 if (G1StressConcRegionFreeing) {
tonyp@2472 2226 for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
tonyp@2472 2227 os::sleep(Thread::current(), (jlong) 1, false);
tonyp@2472 2228 }
tonyp@2472 2229 }
ysr@777 2230 }
ysr@777 2231 }
tonyp@2643 2232 assert(tmp_free_list.is_empty(), "post-condition");
ysr@777 2233 }
ysr@777 2234
johnc@4555 2235 // Supporting Object and Oop closures for reference discovery
johnc@4555 2236 // and processing during marking
johnc@2494 2237
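// A NULL referent is treated as dead; anything outside the G1 reserved
// heap is treated as live, and for heap objects the marking information
// is consulted via is_obj_ill().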
johnc@2379 2238 bool G1CMIsAliveClosure::do_object_b(oop obj) {
johnc@2379 2239 HeapWord* addr = (HeapWord*)obj;
johnc@2379 2240 return addr != NULL &&
johnc@2379 2241 (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
johnc@2379 2242 }
ysr@777 2243
johnc@4555 2244 // 'Keep Alive' oop closure used by both serial and parallel reference processing.
johnc@4555 2245 // Uses the CMTask associated with a worker thread (for serial reference
johnc@4555 2246 // processing the CMTask for worker 0 is used) to preserve (mark) and
johnc@4555 2247 // trace referent objects.
johnc@4555 2248 //
johnc@4555 2249 // Using the CMTask and embedded local queues avoids having the worker
johnc@4555 2250 // threads operating on the global mark stack. This reduces the risk
johnc@4555 2251 // of overflowing the stack - which we would rather avoid at this late
johnc@4555 2252 // state. Also using the tasks' local queues removes the potential
johnc@4555 2253 // stage. Also, using the tasks' local queues removes the potential
johnc@4555 2254 // for the workers to interfere with each other, which could occur if
johnc@4555 2255 // they were operating on the global stack.
johnc@4555 2256 class G1CMKeepAliveAndDrainClosure: public OopClosure {
johnc@4787 2257 ConcurrentMark* _cm;
johnc@4787 2258 CMTask* _task;
johnc@4787 2259 int _ref_counter_limit;
johnc@4787 2260 int _ref_counter;
johnc@4787 2261 bool _is_serial;
johnc@2494 2262 public:
johnc@4787 2263 G1CMKeepAliveAndDrainClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
johnc@4787 2264 _cm(cm), _task(task), _is_serial(is_serial),
johnc@4787 2265 _ref_counter_limit(G1RefProcDrainInterval) {
johnc@2494 2266 assert(_ref_counter_limit > 0, "sanity");
johnc@4787 2267 assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
johnc@2494 2268 _ref_counter = _ref_counter_limit;
johnc@2494 2269 }
johnc@2494 2270
johnc@2494 2271 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
johnc@2494 2272 virtual void do_oop( oop* p) { do_oop_work(p); }
johnc@2494 2273
johnc@2494 2274 template <class T> void do_oop_work(T* p) {
johnc@2494 2275 if (!_cm->has_overflown()) {
johnc@2494 2276 oop obj = oopDesc::load_decode_heap_oop(p);
tonyp@2973 2277 if (_cm->verbose_high()) {
johnc@4173 2278 gclog_or_tty->print_cr("\t[%u] we're looking at location "
johnc@2494 2279 "*"PTR_FORMAT" = "PTR_FORMAT,
drchase@6680 2280 _task->worker_id(), p2i(p), p2i((void*) obj));
tonyp@2973 2281 }
johnc@2494 2282
johnc@2494 2283 _task->deal_with_reference(obj);
johnc@2494 2284 _ref_counter--;
johnc@2494 2285
johnc@2494 2286 if (_ref_counter == 0) {
johnc@4555 2287 // We have dealt with _ref_counter_limit references, pushing them
johnc@4555 2288 // and objects reachable from them on to the local stack (and
johnc@4555 2289 // possibly the global stack). Call CMTask::do_marking_step() to
johnc@4555 2290 // process these entries.
johnc@4555 2291 //
johnc@4555 2292 // We call CMTask::do_marking_step() in a loop, which we'll exit if
johnc@4555 2293 // there's nothing more to do (i.e. we're done with the entries that
johnc@4555 2294 // were pushed as a result of the CMTask::deal_with_reference() calls
johnc@4555 2295 // above) or we overflow.
johnc@4555 2296 //
johnc@4555 2297 // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
johnc@4555 2298 // flag while there may still be some work to do. (See the comment at
johnc@4555 2299 // the beginning of CMTask::do_marking_step() for those conditions -
johnc@4555 2300 // one of which is reaching the specified time target.) It is only
johnc@4555 2301 // when CMTask::do_marking_step() returns without setting the
johnc@4555 2302 // has_aborted() flag that the marking step has completed.
johnc@2494 2303 do {
johnc@2494 2304 double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
johnc@2494 2305 _task->do_marking_step(mark_step_duration_ms,
johnc@4787 2306 false /* do_termination */,
johnc@4787 2307 _is_serial);
johnc@2494 2308 } while (_task->has_aborted() && !_cm->has_overflown());
johnc@2494 2309 _ref_counter = _ref_counter_limit;
johnc@2494 2310 }
johnc@2494 2311 } else {
tonyp@2973 2312 if (_cm->verbose_high()) {
johnc@4173 2313 gclog_or_tty->print_cr("\t[%u] CM Overflow", _task->worker_id());
tonyp@2973 2314 }
johnc@2494 2315 }
johnc@2494 2316 }
johnc@2494 2317 };
johnc@2494 2318
johnc@4555 2319 // 'Drain' oop closure used by both serial and parallel reference processing.
johnc@4555 2320 // Uses the CMTask associated with a given worker thread (for serial
johnc@4555 2321 // reference processing the CMTask for worker 0 is used). Calls the
johnc@4555 2322 // do_marking_step routine, with an unbelievably large timeout value,
johnc@4555 2323 // to drain the marking data structures of the remaining entries
johnc@4555 2324 // added by the 'keep alive' oop closure above.
johnc@4555 2325
johnc@4555 2326 class G1CMDrainMarkingStackClosure: public VoidClosure {
johnc@2494 2327 ConcurrentMark* _cm;
johnc@4555 2328 CMTask* _task;
johnc@4787 2329 bool _is_serial;
johnc@2494 2330 public:
johnc@4787 2331 G1CMDrainMarkingStackClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
johnc@4787 2332 _cm(cm), _task(task), _is_serial(is_serial) {
johnc@4787 2333 assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
johnc@4555 2334 }
johnc@2494 2335
johnc@2494 2336 void do_void() {
johnc@2494 2337 do {
tonyp@2973 2338 if (_cm->verbose_high()) {
johnc@4787 2339 gclog_or_tty->print_cr("\t[%u] Drain: Calling do_marking_step - serial: %s",
johnc@4787 2340 _task->worker_id(), BOOL_TO_STR(_is_serial));
tonyp@2973 2341 }
johnc@2494 2342
johnc@4555 2343 // We call CMTask::do_marking_step() to completely drain the local
johnc@4555 2344 // and global marking stacks of entries pushed by the 'keep alive'
johnc@4555 2345 // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
johnc@4555 2346 //
johnc@4555 2347 // CMTask::do_marking_step() is called in a loop, which we'll exit
johnc@4555 2348 // if there's nothing more to do (i.e. we've completely drained the
johnc@4555 2349 // entries that were pushed as a result of applying the 'keep alive'
johnc@4555 2350 // closure to the entries on the discovered ref lists) or we overflow
johnc@4555 2351 // the global marking stack.
johnc@4555 2352 //
johnc@4555 2353 // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
johnc@4555 2354 // flag while there may still be some work to do. (See the comment at
johnc@4555 2355 // the beginning of CMTask::do_marking_step() for those conditions -
johnc@4555 2356 // one of which is reaching the specified time target.) It is only
johnc@4555 2357 // when CMTask::do_marking_step() returns without setting the
johnc@4555 2358 // has_aborted() flag that the marking step has completed.
johnc@2494 2359
johnc@2494 2360 _task->do_marking_step(1000000000.0 /* something very large */,
johnc@4787 2361 true /* do_termination */,
johnc@4787 2362 _is_serial);
johnc@2494 2363 } while (_task->has_aborted() && !_cm->has_overflown());
johnc@2494 2364 }
johnc@2494 2365 };
johnc@2494 2366
johnc@3175 2367 // Implementation of AbstractRefProcTaskExecutor for parallel
johnc@3175 2368 // reference processing at the end of G1 concurrent marking
johnc@3175 2369
johnc@3175 2370 class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
johnc@2494 2371 private:
johnc@2494 2372 G1CollectedHeap* _g1h;
johnc@2494 2373 ConcurrentMark* _cm;
johnc@2494 2374 WorkGang* _workers;
johnc@2494 2375 int _active_workers;
johnc@2494 2376
johnc@2494 2377 public:
johnc@3175 2378 G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
johnc@2494 2379 ConcurrentMark* cm,
johnc@2494 2380 WorkGang* workers,
johnc@2494 2381 int n_workers) :
johnc@3292 2382 _g1h(g1h), _cm(cm),
johnc@3292 2383 _workers(workers), _active_workers(n_workers) { }
johnc@2494 2384
johnc@2494 2385 // Executes the given task using concurrent marking worker threads.
johnc@2494 2386 virtual void execute(ProcessTask& task);
johnc@2494 2387 virtual void execute(EnqueueTask& task);
johnc@2494 2388 };
johnc@2494 2389
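// Gang task proxy that runs a reference-processing ProcessTask, giving
// each worker marking-aware 'is alive', 'keep alive' and 'drain'
// closures built around that worker's CMTask.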
johnc@3175 2390 class G1CMRefProcTaskProxy: public AbstractGangTask {
johnc@2494 2391 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
johnc@2494 2392 ProcessTask& _proc_task;
johnc@2494 2393 G1CollectedHeap* _g1h;
johnc@2494 2394 ConcurrentMark* _cm;
johnc@2494 2395
johnc@2494 2396 public:
johnc@3175 2397 G1CMRefProcTaskProxy(ProcessTask& proc_task,
johnc@2494 2398 G1CollectedHeap* g1h,
johnc@3292 2399 ConcurrentMark* cm) :
johnc@2494 2400 AbstractGangTask("Process reference objects in parallel"),
johnc@4555 2401 _proc_task(proc_task), _g1h(g1h), _cm(cm) {
johnc@4787 2402 ReferenceProcessor* rp = _g1h->ref_processor_cm();
johnc@4787 2403 assert(rp->processing_is_mt(), "shouldn't be here otherwise");
johnc@4787 2404 }
johnc@2494 2405
jmasa@3357 2406 virtual void work(uint worker_id) {
mdoerr@7020 2407 ResourceMark rm;
mdoerr@7020 2408 HandleMark hm;
johnc@4787 2409 CMTask* task = _cm->task(worker_id);
johnc@2494 2410 G1CMIsAliveClosure g1_is_alive(_g1h);
johnc@4787 2411 G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
johnc@4787 2412 G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */);
johnc@2494 2413
jmasa@3357 2414 _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
johnc@2494 2415 }
johnc@2494 2416 };
johnc@2494 2417
johnc@3175 2418 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
johnc@2494 2419 assert(_workers != NULL, "Need parallel worker threads.");
johnc@4555 2420 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
johnc@2494 2421
johnc@3292 2422 G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);
johnc@2494 2423
johnc@4788 2424 // We need to reset the concurrency level before each
johnc@4788 2425 // proxy task execution, so that the termination protocol
johnc@4788 2426 // and overflow handling in CMTask::do_marking_step() knows
johnc@4788 2427 // how many workers to wait for.
johnc@4788 2428 _cm->set_concurrency(_active_workers);
johnc@2494 2429 _g1h->set_par_threads(_active_workers);
johnc@2494 2430 _workers->run_task(&proc_task_proxy);
johnc@2494 2431 _g1h->set_par_threads(0);
johnc@2494 2432 }
johnc@2494 2433
johnc@3175 2434 class G1CMRefEnqueueTaskProxy: public AbstractGangTask {
johnc@2494 2435 typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
johnc@2494 2436 EnqueueTask& _enq_task;
johnc@2494 2437
johnc@2494 2438 public:
johnc@3175 2439 G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) :
johnc@2494 2440 AbstractGangTask("Enqueue reference objects in parallel"),
johnc@3292 2441 _enq_task(enq_task) { }
johnc@2494 2442
jmasa@3357 2443 virtual void work(uint worker_id) {
jmasa@3357 2444 _enq_task.work(worker_id);
johnc@2494 2445 }
johnc@2494 2446 };
johnc@2494 2447
johnc@3175 2448 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
johnc@2494 2449 assert(_workers != NULL, "Need parallel worker threads.");
johnc@4555 2450 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
johnc@2494 2451
johnc@3175 2452 G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);
johnc@2494 2453
johnc@4788 2454 // Not strictly necessary but...
johnc@4788 2455 //
johnc@4788 2456 // We need to reset the concurrency level before each
johnc@4788 2457 // proxy task execution, so that the termination protocol
johnc@4788 2458 // and overflow handling in CMTask::do_marking_step() know
johnc@4788 2459 // how many workers to wait for.
johnc@4788 2460 _cm->set_concurrency(_active_workers);
johnc@2494 2461 _g1h->set_par_threads(_active_workers);
johnc@2494 2462 _workers->run_task(&enq_task_proxy);
johnc@2494 2463 _g1h->set_par_threads(0);
johnc@2494 2464 }
johnc@2494 2465
stefank@6992 2466 void ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) {
stefank@6992 2467 G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes);
stefank@6992 2468 }
stefank@6992 2469
stefank@6992 2470 // Helper class to get rid of some boilerplate code.
stefank@6992 2471 class G1RemarkGCTraceTime : public GCTraceTime {
stefank@6992 2472 static bool doit_and_prepend(bool doit) {
stefank@6992 2473 if (doit) {
stefank@6992 2474 gclog_or_tty->put(' ');
stefank@6992 2475 }
stefank@6992 2476 return doit;
stefank@6992 2477 }
stefank@6992 2478
stefank@6992 2479 public:
stefank@6992 2480 G1RemarkGCTraceTime(const char* title, bool doit)
stefank@6992 2481 : GCTraceTime(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm(),
stefank@6992 2482 G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id()) {
stefank@6992 2483 }
stefank@6992 2484 };
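// Illustrative usage sketch (not part of this file's control flow): the
// boolean both gates the trace output and, via doit_and_prepend(), emits
// the leading space that keeps the nested timing aligned with the
// enclosing remark log line. The phase title below is a placeholder.
#if 0
{
  G1RemarkGCTraceTime t("Some Remark Sub-phase", G1Log::finer());
  // ... timed work ...
}
#endif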
stefank@6992 2485
ysr@777 2486 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
johnc@4788 2487 if (has_overflown()) {
johnc@4788 2488 // Skip processing the discovered references if we have
johnc@4788 2489 // overflown the global marking stack. Reference objects
johnc@4788 2490 // only get discovered once so it is OK to not
johnc@4788 2491 // de-populate the discovered reference lists. We could have,
johnc@4788 2492 // but the only benefit would be that, when marking restarts,
johnc@4788 2493 // fewer reference objects are discovered.
johnc@4788 2494 return;
johnc@4788 2495 }
johnc@4788 2496
ysr@777 2497 ResourceMark rm;
ysr@777 2498 HandleMark hm;
johnc@3171 2499
johnc@3171 2500 G1CollectedHeap* g1h = G1CollectedHeap::heap();
johnc@3171 2501
johnc@3171 2502 // Is alive closure.
johnc@3171 2503 G1CMIsAliveClosure g1_is_alive(g1h);
johnc@3171 2504
johnc@3171 2505 // Inner scope to exclude the cleaning of the string and symbol
johnc@3171 2506 // tables from the displayed time.
johnc@3171 2507 {
brutisso@3710 2508 if (G1Log::finer()) {
johnc@3171 2509 gclog_or_tty->put(' ');
johnc@3171 2510 }
brutisso@6904 2511 GCTraceTime t("GC ref-proc", G1Log::finer(), false, g1h->gc_timer_cm(), concurrent_gc_id());
johnc@3171 2512
johnc@3175 2513 ReferenceProcessor* rp = g1h->ref_processor_cm();
johnc@3171 2514
johnc@3171 2515 // See the comment in G1CollectedHeap::ref_processing_init()
johnc@3171 2516 // about how reference processing currently works in G1.
johnc@3171 2517
johnc@4555 2518 // Set the soft reference policy
johnc@3171 2519 rp->setup_policy(clear_all_soft_refs);
johnc@3171 2520 assert(_markStack.isEmpty(), "mark stack should be empty");
johnc@3171 2521
johnc@4787 2522 // Instances of the 'Keep Alive' and 'Complete GC' closures used
johnc@4787 2523 // in serial reference processing. Note these closures are also
johnc@4787 2524 // used for serially processing (by the current thread) the
johnc@4787 2525 // JNI references during parallel reference processing.
johnc@4787 2526 //
johnc@4787 2527 // These closures do not need to synchronize with the worker
johnc@4787 2528 // threads involved in parallel reference processing as these
johnc@4787 2529 // instances are executed serially by the current thread (e.g.
johnc@4787 2530 // reference processing is not multi-threaded and is thus
johnc@4787 2531 // performed by the current thread instead of a gang worker).
johnc@4787 2532 //
johnc@4787 2533 // The gang tasks involved in parallel reference processing create
johnc@4787 2534 // their own instances of these closures, which do their own
johnc@4787 2535 // synchronization among themselves.
johnc@4787 2536 G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
johnc@4787 2537 G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);
johnc@4787 2538
johnc@4787 2539 // We need at least one active thread. If reference processing
johnc@4787 2540 // is not multi-threaded we use the current (VMThread) thread,
johnc@4787 2541 // otherwise we use the work gang from the G1CollectedHeap and
johnc@4787 2542 // we utilize all the worker threads we can.
johnc@4787 2543 bool processing_is_mt = rp->processing_is_mt() && g1h->workers() != NULL;
johnc@4787 2544 uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U);
johnc@4173 2545 active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);
johnc@3171 2546
johnc@4787 2547 // Parallel processing task executor.
johnc@3292 2548 G1CMRefProcTaskExecutor par_task_executor(g1h, this,
johnc@3175 2549 g1h->workers(), active_workers);
johnc@4787 2550 AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
johnc@4555 2551
johnc@4788 2552 // Set the concurrency level. The phase was already set prior to
johnc@4788 2553 // executing the remark task.
johnc@4788 2554 set_concurrency(active_workers);
johnc@4788 2555
johnc@4555 2556 // Set the degree of MT processing here. If the discovery was done MT,
johnc@4555 2557 // the number of threads involved during discovery could differ from
johnc@4555 2558 // the number of active workers. This is OK as long as the discovered
johnc@4555 2559 // Reference lists are balanced (see balance_all_queues() and balance_queues()).
johnc@4555 2560 rp->set_active_mt_degree(active_workers);
johnc@4555 2561
johnc@4555 2562 // Process the weak references.
sla@5237 2563 const ReferenceProcessorStats& stats =
sla@5237 2564 rp->process_discovered_references(&g1_is_alive,
sla@5237 2565 &g1_keep_alive,
sla@5237 2566 &g1_drain_mark_stack,
sla@5237 2567 executor,
brutisso@6904 2568 g1h->gc_timer_cm(),
brutisso@6904 2569 concurrent_gc_id());
sla@5237 2570 g1h->gc_tracer_cm()->report_gc_reference_stats(stats);
johnc@4555 2571
johnc@4555 2572 // The do_oop work routines of the keep_alive and drain_marking_stack
johnc@4555 2573 // oop closures will set the has_overflown flag if we overflow the
johnc@4555 2574 // global marking stack.
johnc@3171 2575
johnc@3171 2576 assert(_markStack.overflow() || _markStack.isEmpty(),
johnc@3171 2577 "mark stack should be empty (unless it overflowed)");
johnc@4787 2578
johnc@3171 2579 if (_markStack.overflow()) {
johnc@4555 2580 // This should have been done already when we tried to push an
johnc@3171 2581 // entry onto the global mark stack. But let's do it again.
johnc@3171 2582 set_has_overflown();
johnc@3171 2583 }
johnc@3171 2584
johnc@4555 2585 assert(rp->num_q() == active_workers, "Discovered reference queues should match the number of active workers");
johnc@4555 2586
johnc@4555 2587 rp->enqueue_discovered_references(executor);
johnc@3171 2588
johnc@3171 2589 rp->verify_no_references_recorded();
johnc@3175 2590 assert(!rp->discovery_enabled(), "Post condition");
johnc@2494 2591 }
johnc@2494 2592
pliden@6399 2593 if (has_overflown()) {
pliden@6399 2594 // We cannot trust g1_is_alive if the marking stack overflowed
pliden@6399 2595 return;
pliden@6399 2596 }
pliden@6399 2597
stefank@6992 2598 assert(_markStack.isEmpty(), "Marking should have completed");
stefank@6992 2599
stefank@6992 2600 // Unload Klasses, Strings, Symbols, Code Cache, etc.
stefank@6992 2601 {
stefank@6996 2602 G1RemarkGCTraceTime trace("Unloading", G1Log::finer());
stefank@6996 2603
stefank@6996 2604 if (ClassUnloadingWithConcurrentMark) {
stefank@6996 2605 bool purged_classes;
stefank@6996 2606
stefank@6996 2607 {
stefank@6996 2608 G1RemarkGCTraceTime trace("System Dictionary Unloading", G1Log::finest());
stefank@6996 2609 purged_classes = SystemDictionary::do_unloading(&g1_is_alive);
stefank@6996 2610 }
stefank@6996 2611
stefank@6996 2612 {
stefank@6996 2613 G1RemarkGCTraceTime trace("Parallel Unloading", G1Log::finest());
stefank@6996 2614 weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
stefank@6996 2615 }
stefank@6996 2616 }
stefank@6996 2617
stefank@6996 2618 if (G1StringDedup::is_enabled()) {
stefank@6996 2619 G1RemarkGCTraceTime trace("String Deduplication Unlink", G1Log::finest());
stefank@6996 2620 G1StringDedup::unlink(&g1_is_alive);
stefank@6996 2621 }
stefank@6992 2622 }
ysr@777 2623 }
ysr@777 2624
ysr@777 2625 void ConcurrentMark::swapMarkBitMaps() {
ysr@777 2626 CMBitMapRO* temp = _prevMarkBitMap;
ysr@777 2627 _prevMarkBitMap = (CMBitMapRO*)_nextMarkBitMap;
ysr@777 2628 _nextMarkBitMap = (CMBitMap*) temp;
ysr@777 2629 }
ysr@777 2630
stefank@6992 2633 // Closure for iterating over objects, currently only used for
stefank@6992 2634 // processing SATB buffers.
stefank@6992 2635 class CMObjectClosure : public ObjectClosure {
stefank@6992 2636 private:
stefank@6992 2637 CMTask* _task;
stefank@6992 2638
stefank@6992 2639 public:
stefank@6992 2640 void do_object(oop obj) {
stefank@6992 2641 _task->deal_with_reference(obj);
stefank@6992 2642 }
stefank@6992 2643
stefank@6992 2644 CMObjectClosure(CMTask* task) : _task(task) { }
stefank@6992 2645 };
stefank@6992 2646
stefank@6992 2647 class G1RemarkThreadsClosure : public ThreadClosure {
stefank@6992 2648 CMObjectClosure _cm_obj;
stefank@6992 2649 G1CMOopClosure _cm_cl;
stefank@6992 2650 MarkingCodeBlobClosure _code_cl;
stefank@6992 2651 int _thread_parity;
stefank@6992 2652 bool _is_par;
stefank@6992 2653
stefank@6992 2654 public:
stefank@6992 2655 G1RemarkThreadsClosure(G1CollectedHeap* g1h, CMTask* task, bool is_par) :
stefank@6992 2656 _cm_obj(task), _cm_cl(g1h, g1h->concurrent_mark(), task), _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
stefank@6992 2657 _thread_parity(SharedHeap::heap()->strong_roots_parity()), _is_par(is_par) {}
stefank@6992 2658
stefank@6992 2659 void do_thread(Thread* thread) {
stefank@6992 2660 if (thread->is_Java_thread()) {
stefank@6992 2661 if (thread->claim_oops_do(_is_par, _thread_parity)) {
stefank@6992 2662 JavaThread* jt = (JavaThread*)thread;
stefank@6992 2663
stefank@6992 2664 // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking;
stefank@6992 2665 // however, the oops reachable from nmethods have very complex lifecycles:
stefank@6992 2666 // * Alive if on the stack of an executing method
stefank@6992 2667 // * Weakly reachable otherwise
stefank@6992 2668 // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver, should be
stefank@6992 2669 // live by the SATB invariant, but other oops recorded in nmethods may behave differently.
stefank@6992 2670 jt->nmethods_do(&_code_cl);
stefank@6992 2671
stefank@6992 2672 jt->satb_mark_queue().apply_closure_and_empty(&_cm_obj);
stefank@6992 2673 }
stefank@6992 2674 } else if (thread->is_VM_thread()) {
stefank@6992 2675 if (thread->claim_oops_do(_is_par, _thread_parity)) {
stefank@6992 2676 JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_obj);
stefank@6992 2677 }
stefank@6992 2678 }
stefank@6992 2679 }
stefank@6992 2680 };
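// A condensed sketch of the claim protocol assumed above (illustrative
// only; the real contract lives in Thread::claim_oops_do()): each root
// processing round advances a global parity value, and the first worker
// to stamp a thread with the current parity wins the right to process it.
#if 0
static void visit_once_sketch(Thread* t, bool is_par, int parity,
                              ThreadClosure* cl) {
  if (t->claim_oops_do(is_par, parity)) {
    cl->do_thread(t);  // exactly one worker gets here per thread per round
  }
}
#endif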
stefank@6992 2681
ysr@777 2682 class CMRemarkTask: public AbstractGangTask {
ysr@777 2683 private:
johnc@4787 2684 ConcurrentMark* _cm;
johnc@4787 2685 bool _is_serial;
ysr@777 2686 public:
jmasa@3357 2687 void work(uint worker_id) {
ysr@777 2688 // Since all available tasks are actually started, we should
ysr@777 2689 // only proceed if we're supposed to be active.
jmasa@3357 2690 if (worker_id < _cm->active_tasks()) {
jmasa@3357 2691 CMTask* task = _cm->task(worker_id);
ysr@777 2692 task->record_start_time();
stefank@6992 2693 {
stefank@6992 2694 ResourceMark rm;
stefank@6992 2695 HandleMark hm;
stefank@6992 2696
stefank@6992 2697 G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task, !_is_serial);
stefank@6992 2698 Threads::threads_do(&threads_f);
stefank@6992 2699 }
stefank@6992 2700
ysr@777 2701 do {
johnc@2494 2702 task->do_marking_step(1000000000.0 /* something very large */,
johnc@4787 2703 true /* do_termination */,
johnc@4787 2704 _is_serial);
ysr@777 2705 } while (task->has_aborted() && !_cm->has_overflown());
ysr@777 2706 // If we overflow, then we do not want to restart. We instead
ysr@777 2707 // want to abort remark and do concurrent marking again.
ysr@777 2708 task->record_end_time();
ysr@777 2709 }
ysr@777 2710 }
ysr@777 2711
johnc@4787 2712 CMRemarkTask(ConcurrentMark* cm, int active_workers, bool is_serial) :
johnc@4787 2713 AbstractGangTask("Par Remark"), _cm(cm), _is_serial(is_serial) {
johnc@3338 2714 _cm->terminator()->reset_for_reuse(active_workers);
jmasa@3294 2715 }
ysr@777 2716 };
ysr@777 2717
ysr@777 2718 void ConcurrentMark::checkpointRootsFinalWork() {
ysr@777 2719 ResourceMark rm;
ysr@777 2720 HandleMark hm;
ysr@777 2721 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@777 2722
stefank@6992 2723 G1RemarkGCTraceTime trace("Finalize Marking", G1Log::finer());
stefank@6992 2724
ysr@777 2725 g1h->ensure_parsability(false);
ysr@777 2726
jmasa@2188 2727 if (G1CollectedHeap::use_parallel_gc_threads()) {
jrose@1424 2728 G1CollectedHeap::StrongRootsScope srs(g1h);
jmasa@3294 2729 // this is remark, so we'll use up all active threads
jmasa@3357 2730 uint active_workers = g1h->workers()->active_workers();
jmasa@3294 2731 if (active_workers == 0) {
jmasa@3294 2732 assert(active_workers > 0, "Should have been set earlier");
jmasa@3357 2733 active_workers = (uint) ParallelGCThreads;
jmasa@3294 2734 g1h->workers()->set_active_workers(active_workers);
jmasa@3294 2735 }
johnc@4788 2736 set_concurrency_and_phase(active_workers, false /* concurrent */);
jmasa@3294 2737 // Leave _parallel_marking_threads at its
jmasa@3294 2738 // value originally calculated in the ConcurrentMark
jmasa@3294 2739 // constructor and pass values of the active workers
jmasa@3294 2740 // through the gang in the task.
ysr@777 2741
johnc@4787 2742 CMRemarkTask remarkTask(this, active_workers, false /* is_serial */);
johnc@4787 2743 // We will start all available threads, even if we decide that the
johnc@4787 2744 // active_workers will be fewer. The extra ones will just bail out
johnc@4787 2745 // immediately.
jmasa@3294 2746 g1h->set_par_threads(active_workers);
ysr@777 2747 g1h->workers()->run_task(&remarkTask);
ysr@777 2748 g1h->set_par_threads(0);
ysr@777 2749 } else {
jrose@1424 2750 G1CollectedHeap::StrongRootsScope srs(g1h);
jmasa@3357 2751 uint active_workers = 1;
johnc@4788 2752 set_concurrency_and_phase(active_workers, false /* concurrent */);
ysr@777 2753
johnc@4787 2754 // Note - if there's no work gang then the VMThread will be
johnc@4787 2755 // the thread to execute the remark - serially. We have
johnc@4787 2756 // to pass true for the is_serial parameter so that
johnc@4787 2757 // CMTask::do_marking_step() doesn't enter the sync
johnc@4787 2758 // barriers in the event of an overflow. Doing so will
johnc@4787 2759 // cause an assert that the current thread is not a
johnc@4787 2760 // concurrent GC thread.
johnc@4787 2761 CMRemarkTask remarkTask(this, active_workers, true /* is_serial */);
ysr@777 2762 remarkTask.work(0);
ysr@777 2763 }
tonyp@1458 2764 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
johnc@4789 2765 guarantee(has_overflown() ||
johnc@4789 2766 satb_mq_set.completed_buffers_num() == 0,
johnc@4789 2767 err_msg("Invariant: has_overflown = %s, num buffers = %d",
johnc@4789 2768 BOOL_TO_STR(has_overflown()),
johnc@4789 2769 satb_mq_set.completed_buffers_num()));
ysr@777 2770
ysr@777 2771 print_stats();
ysr@777 2772 }
ysr@777 2773
tonyp@1479 2774 #ifndef PRODUCT
tonyp@1479 2775
tonyp@1823 2776 class PrintReachableOopClosure: public OopClosure {
ysr@777 2777 private:
ysr@777 2778 G1CollectedHeap* _g1h;
ysr@777 2779 outputStream* _out;
johnc@2969 2780 VerifyOption _vo;
tonyp@1823 2781 bool _all;
ysr@777 2782
ysr@777 2783 public:
johnc@2969 2784 PrintReachableOopClosure(outputStream* out,
johnc@2969 2785 VerifyOption vo,
tonyp@1823 2786 bool all) :
tonyp@1479 2787 _g1h(G1CollectedHeap::heap()),
johnc@2969 2788 _out(out), _vo(vo), _all(all) { }
ysr@777 2789
ysr@1280 2790 void do_oop(narrowOop* p) { do_oop_work(p); }
ysr@1280 2791 void do_oop( oop* p) { do_oop_work(p); }
ysr@1280 2792
ysr@1280 2793 template <class T> void do_oop_work(T* p) {
ysr@1280 2794 oop obj = oopDesc::load_decode_heap_oop(p);
ysr@777 2795 const char* str = NULL;
ysr@777 2796 const char* str2 = "";
ysr@777 2797
tonyp@1823 2798 if (obj == NULL) {
tonyp@1823 2799 str = "";
tonyp@1823 2800 } else if (!_g1h->is_in_g1_reserved(obj)) {
tonyp@1823 2801 str = " O";
tonyp@1823 2802 } else {
ysr@777 2803 HeapRegion* hr = _g1h->heap_region_containing(obj);
tonyp@3957 2804 bool over_tams = _g1h->allocated_since_marking(obj, hr, _vo);
tonyp@3957 2805 bool marked = _g1h->is_marked(obj, _vo);
tonyp@1479 2806
tonyp@1479 2807 if (over_tams) {
tonyp@1823 2808 str = " >";
tonyp@1823 2809 if (marked) {
ysr@777 2810 str2 = " AND MARKED";
tonyp@1479 2811 }
tonyp@1823 2812 } else if (marked) {
tonyp@1823 2813 str = " M";
tonyp@1479 2814 } else {
tonyp@1823 2815 str = " NOT";
tonyp@1479 2816 }
ysr@777 2817 }
ysr@777 2818
tonyp@1823 2819 _out->print_cr(" "PTR_FORMAT": "PTR_FORMAT"%s%s",
drchase@6680 2820 p2i(p), p2i((void*) obj), str, str2);
ysr@777 2821 }
ysr@777 2822 };
ysr@777 2823
tonyp@1823 2824 class PrintReachableObjectClosure : public ObjectClosure {
ysr@777 2825 private:
johnc@2969 2826 G1CollectedHeap* _g1h;
johnc@2969 2827 outputStream* _out;
johnc@2969 2828 VerifyOption _vo;
johnc@2969 2829 bool _all;
johnc@2969 2830 HeapRegion* _hr;
ysr@777 2831
ysr@777 2832 public:
johnc@2969 2833 PrintReachableObjectClosure(outputStream* out,
johnc@2969 2834 VerifyOption vo,
tonyp@1823 2835 bool all,
tonyp@1823 2836 HeapRegion* hr) :
johnc@2969 2837 _g1h(G1CollectedHeap::heap()),
johnc@2969 2838 _out(out), _vo(vo), _all(all), _hr(hr) { }
tonyp@1823 2839
tonyp@1823 2840 void do_object(oop o) {
tonyp@3957 2841 bool over_tams = _g1h->allocated_since_marking(o, _hr, _vo);
tonyp@3957 2842 bool marked = _g1h->is_marked(o, _vo);
tonyp@1823 2843 bool print_it = _all || over_tams || marked;
tonyp@1823 2844
tonyp@1823 2845 if (print_it) {
tonyp@1823 2846 _out->print_cr(" "PTR_FORMAT"%s",
drchase@6680 2847 p2i((void *)o), (over_tams) ? " >" : (marked) ? " M" : "");
johnc@2969 2848 PrintReachableOopClosure oopCl(_out, _vo, _all);
coleenp@4037 2849 o->oop_iterate_no_header(&oopCl);
tonyp@1823 2850 }
ysr@777 2851 }
ysr@777 2852 };
ysr@777 2853
tonyp@1823 2854 class PrintReachableRegionClosure : public HeapRegionClosure {
ysr@777 2855 private:
tonyp@3957 2856 G1CollectedHeap* _g1h;
tonyp@3957 2857 outputStream* _out;
tonyp@3957 2858 VerifyOption _vo;
tonyp@3957 2859 bool _all;
ysr@777 2860
ysr@777 2861 public:
ysr@777 2862 bool doHeapRegion(HeapRegion* hr) {
ysr@777 2863 HeapWord* b = hr->bottom();
ysr@777 2864 HeapWord* e = hr->end();
ysr@777 2865 HeapWord* t = hr->top();
tonyp@3957 2866 HeapWord* p = _g1h->top_at_mark_start(hr, _vo);
ysr@777 2867 _out->print_cr("** ["PTR_FORMAT", "PTR_FORMAT"] top: "PTR_FORMAT" "
drchase@6680 2868 "TAMS: " PTR_FORMAT, p2i(b), p2i(e), p2i(t), p2i(p));
tonyp@1823 2869 _out->cr();
tonyp@1823 2870
tonyp@1823 2871 HeapWord* from = b;
tonyp@1823 2872 HeapWord* to = t;
tonyp@1823 2873
tonyp@1823 2874 if (to > from) {
drchase@6680 2875 _out->print_cr("Objects in [" PTR_FORMAT ", " PTR_FORMAT "]", p2i(from), p2i(to));
tonyp@1823 2876 _out->cr();
johnc@2969 2877 PrintReachableObjectClosure ocl(_out, _vo, _all, hr);
tonyp@1823 2878 hr->object_iterate_mem_careful(MemRegion(from, to), &ocl);
tonyp@1823 2879 _out->cr();
tonyp@1823 2880 }
ysr@777 2881
ysr@777 2882 return false;
ysr@777 2883 }
ysr@777 2884
johnc@2969 2885 PrintReachableRegionClosure(outputStream* out,
johnc@2969 2886 VerifyOption vo,
tonyp@1823 2887 bool all) :
tonyp@3957 2888 _g1h(G1CollectedHeap::heap()), _out(out), _vo(vo), _all(all) { }
ysr@777 2889 };
ysr@777 2890
tonyp@1823 2891 void ConcurrentMark::print_reachable(const char* str,
johnc@2969 2892 VerifyOption vo,
tonyp@1823 2893 bool all) {
tonyp@1823 2894 gclog_or_tty->cr();
tonyp@1823 2895 gclog_or_tty->print_cr("== Doing heap dump... ");
tonyp@1479 2896
tonyp@1479 2897 if (G1PrintReachableBaseFile == NULL) {
tonyp@1479 2898 gclog_or_tty->print_cr(" #### error: no base file defined");
tonyp@1479 2899 return;
tonyp@1479 2900 }
tonyp@1479 2901
tonyp@1479 2902 if (strlen(G1PrintReachableBaseFile) + 1 + strlen(str) >
tonyp@1479 2903 (JVM_MAXPATHLEN - 1)) {
tonyp@1479 2904 gclog_or_tty->print_cr(" #### error: file name too long");
tonyp@1479 2905 return;
tonyp@1479 2906 }
tonyp@1479 2907
tonyp@1479 2908 char file_name[JVM_MAXPATHLEN];
tonyp@1479 2909 sprintf(file_name, "%s.%s", G1PrintReachableBaseFile, str);
tonyp@1479 2910 gclog_or_tty->print_cr(" dumping to file %s", file_name);
tonyp@1479 2911
tonyp@1479 2912 fileStream fout(file_name);
tonyp@1479 2913 if (!fout.is_open()) {
tonyp@1479 2914 gclog_or_tty->print_cr(" #### error: could not open file");
tonyp@1479 2915 return;
tonyp@1479 2916 }
tonyp@1479 2917
tonyp@1479 2918 outputStream* out = &fout;
tonyp@3957 2919 out->print_cr("-- USING %s", _g1h->top_at_mark_start_str(vo));
tonyp@1479 2920 out->cr();
tonyp@1479 2921
tonyp@1823 2922 out->print_cr("--- ITERATING OVER REGIONS");
tonyp@1479 2923 out->cr();
johnc@2969 2924 PrintReachableRegionClosure rcl(out, vo, all);
ysr@777 2925 _g1h->heap_region_iterate(&rcl);
tonyp@1479 2926 out->cr();
tonyp@1479 2927
tonyp@1479 2928 gclog_or_tty->print_cr(" done");
tonyp@1823 2929 gclog_or_tty->flush();
ysr@777 2930 }
ysr@777 2931
tonyp@1479 2932 #endif // PRODUCT
tonyp@1479 2933
tonyp@3416 2934 void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
ysr@777 2935 // Note we are overriding the read-only view of the prev map here, via
ysr@777 2936 // the cast.
ysr@777 2937 ((CMBitMap*)_prevMarkBitMap)->clearRange(mr);
tonyp@3416 2938 }
tonyp@3416 2939
tonyp@3416 2940 void ConcurrentMark::clearRangeNextBitmap(MemRegion mr) {
ysr@777 2941 _nextMarkBitMap->clearRange(mr);
ysr@777 2942 }
ysr@777 2943
ysr@777 2944 HeapRegion*
johnc@4173 2945 ConcurrentMark::claim_region(uint worker_id) {
ysr@777 2946 // "checkpoint" the finger
ysr@777 2947 HeapWord* finger = _finger;
ysr@777 2948
ysr@777 2949 // _heap_end will not change underneath our feet; it only changes at
ysr@777 2950 // yield points.
ysr@777 2951 while (finger < _heap_end) {
tonyp@1458 2952 assert(_g1h->is_in_g1_reserved(finger), "invariant");
ysr@777 2953
tonyp@2968 2954 // Note on how this code handles humongous regions. In the
tonyp@2968 2955 // normal case the finger will reach the start of a "starts
tonyp@2968 2956 // humongous" (SH) region. Its end will either be the end of the
tonyp@2968 2957 // last "continues humongous" (CH) region in the sequence, or the
tonyp@2968 2958 // standard end of the SH region (if the SH is the only region in
tonyp@2968 2959 // the sequence). That way claim_region() will skip over the CH
tonyp@2968 2960 // regions. However, there is a subtle race between a CM thread
tonyp@2968 2961 // executing this method and a mutator thread doing a humongous
tonyp@2968 2962 // object allocation. The two are not mutually exclusive as the CM
tonyp@2968 2963 // thread does not need to hold the Heap_lock when it gets
tonyp@2968 2964 // here. So there is a chance that claim_region() will come across
tonyp@2968 2965 // a free region that's in the process of becoming a SH or a CH
tonyp@2968 2966 // region. In the former case, it will either
tonyp@2968 2967 // a) Miss the update to the region's end, in which case it will
tonyp@2968 2968 // visit every subsequent CH region, will find their bitmaps
tonyp@2968 2969 // empty, and do nothing, or
tonyp@2968 2970 // b) Will observe the update of the region's end (in which case
tonyp@2968 2971 // it will skip the subsequent CH regions).
tonyp@2968 2972 // If it comes across a region that suddenly becomes CH, the
tonyp@2968 2973 // scenario will be similar to b). So, the race between
tonyp@2968 2974 // claim_region() and a humongous object allocation might force us
tonyp@2968 2975 // to do a bit of unnecessary work (due to some unnecessary bitmap
tonyp@2968 2976 // iterations) but it should not introduce any correctness issues.
tschatzl@7051 2977 HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger);
tschatzl@7051 2978
tschatzl@7051 2979 // The call to heap_region_containing_raw() above may return NULL, as we always
tschatzl@7051 2980 // scan and claim up to the end of the heap. In this case, just jump to the next region.
tschatzl@7051 2981 HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
tonyp@2968 2982
tonyp@2968 2983 // Is the gap between reading the finger and doing the CAS too long?
tonyp@2968 2984 HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
tschatzl@7051 2985 if (res == finger && curr_region != NULL) {
ysr@777 2986 // we succeeded
tschatzl@7051 2987 HeapWord* bottom = curr_region->bottom();
tschatzl@7051 2988 HeapWord* limit = curr_region->next_top_at_mark_start();
tschatzl@7051 2989
tschatzl@7051 2990 if (verbose_low()) {
tschatzl@7051 2991 gclog_or_tty->print_cr("[%u] curr_region = "PTR_FORMAT" "
tschatzl@7051 2992 "["PTR_FORMAT", "PTR_FORMAT"), "
tschatzl@7051 2993 "limit = "PTR_FORMAT,
tschatzl@7051 2994 worker_id, p2i(curr_region), p2i(bottom), p2i(end), p2i(limit));
tschatzl@7051 2995 }
ysr@777 2996
ysr@777 2997 // notice that _finger == end cannot be guaranteed here since
ysr@777 2998 // someone else might have moved the finger even further
tonyp@1458 2999 assert(_finger >= end, "the finger should have moved forward");
ysr@777 3000
tonyp@2973 3001 if (verbose_low()) {
johnc@4173 3002 gclog_or_tty->print_cr("[%u] we were successful with region = "
drchase@6680 3003 PTR_FORMAT, worker_id, p2i(curr_region));
tonyp@2973 3004 }
ysr@777 3005
ysr@777 3006 if (limit > bottom) {
tonyp@2973 3007 if (verbose_low()) {
johnc@4173 3008 gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is not empty, "
drchase@6680 3009 "returning it ", worker_id, p2i(curr_region));
tonyp@2973 3010 }
ysr@777 3011 return curr_region;
ysr@777 3012 } else {
tonyp@1458 3013 assert(limit == bottom,
tonyp@1458 3014 "the region limit should be at bottom");
tonyp@2973 3015 if (verbose_low()) {
johnc@4173 3016 gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is empty, "
drchase@6680 3017 "returning NULL", worker_id, p2i(curr_region));
tonyp@2973 3018 }
ysr@777 3019 // we return NULL and the caller should try calling
ysr@777 3020 // claim_region() again.
ysr@777 3021 return NULL;
ysr@777 3022 }
ysr@777 3023 } else {
tonyp@1458 3024 assert(_finger > finger, "the finger should have moved forward");
tonyp@2973 3025 if (verbose_low()) {
tschatzl@7051 3026 if (curr_region == NULL) {
tschatzl@7051 3027 gclog_or_tty->print_cr("[%u] found uncommitted region, moving finger, "
tschatzl@7051 3028 "global finger = "PTR_FORMAT", "
tschatzl@7051 3029 "our finger = "PTR_FORMAT,
tschatzl@7051 3030 worker_id, p2i(_finger), p2i(finger));
tschatzl@7051 3031 } else {
tschatzl@7051 3032 gclog_or_tty->print_cr("[%u] somebody else moved the finger, "
tschatzl@7051 3033 "global finger = "PTR_FORMAT", "
tschatzl@7051 3034 "our finger = "PTR_FORMAT,
tschatzl@7051 3035 worker_id, p2i(_finger), p2i(finger));
tschatzl@7051 3036 }
tonyp@2973 3037 }
ysr@777 3038
ysr@777 3039 // read it again
ysr@777 3040 finger = _finger;
ysr@777 3041 }
ysr@777 3042 }
ysr@777 3043
ysr@777 3044 return NULL;
ysr@777 3045 }
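// A condensed, illustrative-only restatement of the lock-free claim
// protocol above (excluded from the build with #if 0). Each worker
// snapshots the global finger, computes the end of the region it would
// claim, and tries to advance the finger with a CAS; only the winner
// owns the region, everyone else re-reads the finger and retries.
#if 0
static HeapRegion* claim_sketch(HeapWord* volatile* finger_addr,
                                G1CollectedHeap* g1h) {
  HeapWord* finger = *finger_addr;  // snapshot the global finger
  HeapRegion* hr = g1h->heap_region_containing_raw(finger);
  HeapWord* end = (hr != NULL) ? hr->end() : finger + HeapRegion::GrainWords;
  HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, finger_addr, finger);
  return (res == finger) ? hr : NULL;  // NULL => lost the race, retry
}
#endif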
ysr@777 3046
tonyp@3416 3047 #ifndef PRODUCT
tonyp@3416 3048 enum VerifyNoCSetOopsPhase {
tonyp@3416 3049 VerifyNoCSetOopsStack,
tonyp@3416 3050 VerifyNoCSetOopsQueues,
tonyp@3416 3051 VerifyNoCSetOopsSATBCompleted,
tonyp@3416 3052 VerifyNoCSetOopsSATBThread
tonyp@3416 3053 };
tonyp@3416 3054
tonyp@3416 3055 class VerifyNoCSetOopsClosure : public OopClosure, public ObjectClosure {
tonyp@3416 3056 private:
tonyp@3416 3057 G1CollectedHeap* _g1h;
tonyp@3416 3058 VerifyNoCSetOopsPhase _phase;
tonyp@3416 3059 int _info;
tonyp@3416 3060
tonyp@3416 3061 const char* phase_str() {
tonyp@3416 3062 switch (_phase) {
tonyp@3416 3063 case VerifyNoCSetOopsStack: return "Stack";
tonyp@3416 3064 case VerifyNoCSetOopsQueues: return "Queue";
tonyp@3416 3065 case VerifyNoCSetOopsSATBCompleted: return "Completed SATB Buffers";
tonyp@3416 3066 case VerifyNoCSetOopsSATBThread: return "Thread SATB Buffers";
tonyp@3416 3067 default: ShouldNotReachHere();
tonyp@3416 3068 }
tonyp@3416 3069 return NULL;
ysr@777 3070 }
johnc@2190 3071
tonyp@3416 3072 void do_object_work(oop obj) {
tonyp@3416 3073 guarantee(!_g1h->obj_in_cs(obj),
tonyp@3416 3074 err_msg("obj: "PTR_FORMAT" in CSet, phase: %s, info: %d",
drchase@6680 3075 p2i((void*) obj), phase_str(), _info));
johnc@2190 3076 }
johnc@2190 3077
tonyp@3416 3078 public:
tonyp@3416 3079 VerifyNoCSetOopsClosure() : _g1h(G1CollectedHeap::heap()) { }
tonyp@3416 3080
tonyp@3416 3081 void set_phase(VerifyNoCSetOopsPhase phase, int info = -1) {
tonyp@3416 3082 _phase = phase;
tonyp@3416 3083 _info = info;
tonyp@3416 3084 }
tonyp@3416 3085
tonyp@3416 3086 virtual void do_oop(oop* p) {
tonyp@3416 3087 oop obj = oopDesc::load_decode_heap_oop(p);
tonyp@3416 3088 do_object_work(obj);
tonyp@3416 3089 }
tonyp@3416 3090
tonyp@3416 3091 virtual void do_oop(narrowOop* p) {
tonyp@3416 3092 // We should not come across narrow oops while scanning marking
tonyp@3416 3093 // stacks and SATB buffers.
tonyp@3416 3094 ShouldNotReachHere();
tonyp@3416 3095 }
tonyp@3416 3096
tonyp@3416 3097 virtual void do_object(oop obj) {
tonyp@3416 3098 do_object_work(obj);
tonyp@3416 3099 }
tonyp@3416 3100 };
tonyp@3416 3101
tonyp@3416 3102 void ConcurrentMark::verify_no_cset_oops(bool verify_stacks,
tonyp@3416 3103 bool verify_enqueued_buffers,
tonyp@3416 3104 bool verify_thread_buffers,
tonyp@3416 3105 bool verify_fingers) {
tonyp@3416 3106 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
tonyp@3416 3107 if (!G1CollectedHeap::heap()->mark_in_progress()) {
tonyp@3416 3108 return;
tonyp@3416 3109 }
tonyp@3416 3110
tonyp@3416 3111 VerifyNoCSetOopsClosure cl;
tonyp@3416 3112
tonyp@3416 3113 if (verify_stacks) {
tonyp@3416 3114 // Verify entries on the global mark stack
tonyp@3416 3115 cl.set_phase(VerifyNoCSetOopsStack);
tonyp@3416 3116 _markStack.oops_do(&cl);
tonyp@3416 3117
tonyp@3416 3118 // Verify entries on the task queues
johnc@4173 3119 for (uint i = 0; i < _max_worker_id; i += 1) {
tonyp@3416 3120 cl.set_phase(VerifyNoCSetOopsQueues, i);
johnc@4333 3121 CMTaskQueue* queue = _task_queues->queue(i);
tonyp@3416 3122 queue->oops_do(&cl);
tonyp@3416 3123 }
tonyp@3416 3124 }
tonyp@3416 3125
tonyp@3416 3126 SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
tonyp@3416 3127
tonyp@3416 3128 // Verify entries on the enqueued SATB buffers
tonyp@3416 3129 if (verify_enqueued_buffers) {
tonyp@3416 3130 cl.set_phase(VerifyNoCSetOopsSATBCompleted);
tonyp@3416 3131 satb_qs.iterate_completed_buffers_read_only(&cl);
tonyp@3416 3132 }
tonyp@3416 3133
tonyp@3416 3134 // Verify entries on the per-thread SATB buffers
tonyp@3416 3135 if (verify_thread_buffers) {
tonyp@3416 3136 cl.set_phase(VerifyNoCSetOopsSATBThread);
tonyp@3416 3137 satb_qs.iterate_thread_buffers_read_only(&cl);
tonyp@3416 3138 }
tonyp@3416 3139
tonyp@3416 3140 if (verify_fingers) {
tonyp@3416 3141 // Verify the global finger
tonyp@3416 3142 HeapWord* global_finger = finger();
tonyp@3416 3143 if (global_finger != NULL && global_finger < _heap_end) {
tonyp@3416 3144 // The global finger always points to a heap region boundary. We
tonyp@3416 3145 // use heap_region_containing_raw() to get the containing region
tonyp@3416 3146 // given that the global finger could be pointing to a free region
tonyp@3416 3147 // which subsequently becomes continues humongous. If that
tonyp@3416 3148 // happens, heap_region_containing() will return the bottom of the
tonyp@3416 3149 // corresponding starts humongous region and the check below will
tonyp@3416 3150 // not hold any more.
tschatzl@7051 3151 // Since we always iterate over all regions, we might get a NULL HeapRegion
tschatzl@7051 3152 // here.
tonyp@3416 3153 HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger);
tschatzl@7051 3154 guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
tonyp@3416 3155 err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT,
drchase@6680 3156 p2i(global_finger), HR_FORMAT_PARAMS(global_hr)));
tonyp@3416 3157 }
tonyp@3416 3158
tonyp@3416 3159 // Verify the task fingers
johnc@4173 3160 assert(parallel_marking_threads() <= _max_worker_id, "sanity");
tonyp@3416 3161 for (int i = 0; i < (int) parallel_marking_threads(); i += 1) {
tonyp@3416 3162 CMTask* task = _tasks[i];
tonyp@3416 3163 HeapWord* task_finger = task->finger();
tonyp@3416 3164 if (task_finger != NULL && task_finger < _heap_end) {
tonyp@3416 3165 // See above note on the global finger verification.
tonyp@3416 3166 HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger);
tschatzl@7051 3167 guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
tonyp@3416 3168 !task_hr->in_collection_set(),
tonyp@3416 3169 err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT,
drchase@6680 3170 p2i(task_finger), HR_FORMAT_PARAMS(task_hr)));
tonyp@3416 3171 }
tonyp@3416 3172 }
tonyp@3416 3173 }
ysr@777 3174 }
tonyp@3416 3175 #endif // PRODUCT
ysr@777 3176
johnc@3463 3177 // Aggregate the counting data that was constructed concurrently
johnc@3463 3178 // with marking.
johnc@3463 3179 class AggregateCountDataHRClosure: public HeapRegionClosure {
johnc@4123 3180 G1CollectedHeap* _g1h;
johnc@3463 3181 ConcurrentMark* _cm;
johnc@4123 3182 CardTableModRefBS* _ct_bs;
johnc@3463 3183 BitMap* _cm_card_bm;
johnc@4173 3184 uint _max_worker_id;
johnc@3463 3185
johnc@3463 3186 public:
johnc@4123 3187 AggregateCountDataHRClosure(G1CollectedHeap* g1h,
johnc@3463 3188 BitMap* cm_card_bm,
johnc@4173 3189 uint max_worker_id) :
johnc@4123 3190 _g1h(g1h), _cm(g1h->concurrent_mark()),
johnc@4123 3191 _ct_bs((CardTableModRefBS*) (g1h->barrier_set())),
johnc@4173 3192 _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { }
johnc@3463 3193
johnc@3463 3194 bool doHeapRegion(HeapRegion* hr) {
johnc@3463 3195 if (hr->continuesHumongous()) {
johnc@3463 3196 // We will ignore these here and process them when their
johnc@3463 3197 // associated "starts humongous" region is processed.
johnc@3463 3198 // Note that we cannot rely on their associated
johnc@3463 3199 // "starts humongous" region to have their bit set to 1
johnc@3463 3200 // since, due to the region chunking in the parallel region
johnc@3463 3201 // iteration, a "continues humongous" region might be visited
johnc@3463 3202 // before its associated "starts humongous".
johnc@3463 3203 return false;
johnc@3463 3204 }
johnc@3463 3205
johnc@3463 3206 HeapWord* start = hr->bottom();
johnc@3463 3207 HeapWord* limit = hr->next_top_at_mark_start();
johnc@3463 3208 HeapWord* end = hr->end();
johnc@3463 3209
johnc@3463 3210 assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(),
johnc@3463 3211 err_msg("Preconditions not met - "
johnc@3463 3212 "start: "PTR_FORMAT", limit: "PTR_FORMAT", "
johnc@3463 3213 "top: "PTR_FORMAT", end: "PTR_FORMAT,
drchase@6680 3214 p2i(start), p2i(limit), p2i(hr->top()), p2i(hr->end())));
johnc@3463 3215
johnc@3463 3216 assert(hr->next_marked_bytes() == 0, "Precondition");
johnc@3463 3217
johnc@3463 3218 if (start == limit) {
johnc@3463 3219 // NTAMS of this region has not been set so nothing to do.
johnc@3463 3220 return false;
johnc@3463 3221 }
johnc@3463 3222
johnc@4123 3223 // 'start' should be in the heap.
johnc@4123 3224 assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity");
johnc@4123 3225 // 'end' *may* be just beyond the end of the heap (if hr is the last region)
johnc@4123 3226 assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity");
johnc@3463 3227
johnc@3463 3228 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
johnc@3463 3229 BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit);
johnc@3463 3230 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(end);
johnc@3463 3231
johnc@4123 3232 // If ntams is not card aligned then we bump card bitmap index
johnc@4123 3233 // for limit so that we get the all the cards spanned by
johnc@4123 3234 // the object ending at ntams.
johnc@4123 3235 // Note: if this is the last region in the heap then ntams
johnc@4123 3236 // could actually be just beyond the end of the heap;
johnc@4123 3237 // limit_idx will then correspond to a (non-existent) card
johnc@4123 3238 // that is also outside the heap.
johnc@4123 3239 if (_g1h->is_in_g1_reserved(limit) && !_ct_bs->is_card_aligned(limit)) {
johnc@3463 3240 limit_idx += 1;
johnc@3463 3241 }
johnc@3463 3242
johnc@3463 3243 assert(limit_idx <= end_idx, "or else use atomics");
johnc@3463 3244
johnc@3463 3245 // Aggregate the "stripe" in the count data associated with hr.
tschatzl@7091 3246 uint hrm_index = hr->hrm_index();
johnc@3463 3247 size_t marked_bytes = 0;
johnc@3463 3248
johnc@4173 3249 for (uint i = 0; i < _max_worker_id; i += 1) {
johnc@3463 3250 size_t* marked_bytes_array = _cm->count_marked_bytes_array_for(i);
johnc@3463 3251 BitMap* task_card_bm = _cm->count_card_bitmap_for(i);
johnc@3463 3252
johnc@3463 3253 // Fetch the marked_bytes in this region for task i and
johnc@3463 3254 // add it to the running total for this region.
tschatzl@7091 3255 marked_bytes += marked_bytes_array[hrm_index];
johnc@3463 3256
johnc@4173 3257 // Now union the bitmaps[0,max_worker_id)[start_idx..limit_idx)
johnc@3463 3258 // into the global card bitmap.
johnc@3463 3259 BitMap::idx_t scan_idx = task_card_bm->get_next_one_offset(start_idx, limit_idx);
johnc@3463 3260
johnc@3463 3261 while (scan_idx < limit_idx) {
johnc@3463 3262 assert(task_card_bm->at(scan_idx) == true, "should be");
johnc@3463 3263 _cm_card_bm->set_bit(scan_idx);
johnc@3463 3264 assert(_cm_card_bm->at(scan_idx) == true, "should be");
johnc@3463 3265
johnc@3463 3266 // BitMap::get_next_one_offset() can handle the case when
johnc@3463 3267 // its left_offset parameter is greater than its right_offset
johnc@4123 3268 // parameter. It does, however, have an early exit if
johnc@3463 3269 // left_offset == right_offset. So let's limit the value
johnc@3463 3270 // passed in for left offset here.
johnc@3463 3271 BitMap::idx_t next_idx = MIN2(scan_idx + 1, limit_idx);
johnc@3463 3272 scan_idx = task_card_bm->get_next_one_offset(next_idx, limit_idx);
johnc@3463 3273 }
johnc@3463 3274 }
johnc@3463 3275
johnc@3463 3276 // Update the marked bytes for this region.
johnc@3463 3277 hr->add_to_marked_bytes(marked_bytes);
johnc@3463 3278
johnc@3463 3279 // Next heap region
johnc@3463 3280 return false;
johnc@3463 3281 }
johnc@3463 3282 };
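// Illustrative-only sketch of the bitmap union performed in the loop
// above: set bits from each per-worker card bitmap are copied into the
// global card bitmap one at a time via get_next_one_offset(). The helper
// name union_card_bitmap() is hypothetical.
#if 0
static void union_card_bitmap(BitMap* global_bm, BitMap* task_bm,
                              BitMap::idx_t start_idx, BitMap::idx_t limit_idx) {
  BitMap::idx_t idx = task_bm->get_next_one_offset(start_idx, limit_idx);
  while (idx < limit_idx) {
    global_bm->set_bit(idx);  // one writer per region stripe, no atomics needed
    idx = task_bm->get_next_one_offset(MIN2(idx + 1, limit_idx), limit_idx);
  }
}
#endif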
johnc@3463 3283
johnc@3463 3284 class G1AggregateCountDataTask: public AbstractGangTask {
johnc@3463 3285 protected:
johnc@3463 3286 G1CollectedHeap* _g1h;
johnc@3463 3287 ConcurrentMark* _cm;
johnc@3463 3288 BitMap* _cm_card_bm;
johnc@4173 3289 uint _max_worker_id;
johnc@3463 3290 int _active_workers;
johnc@3463 3291
johnc@3463 3292 public:
johnc@3463 3293 G1AggregateCountDataTask(G1CollectedHeap* g1h,
johnc@3463 3294 ConcurrentMark* cm,
johnc@3463 3295 BitMap* cm_card_bm,
johnc@4173 3296 uint max_worker_id,
johnc@3463 3297 int n_workers) :
johnc@3463 3298 AbstractGangTask("Count Aggregation"),
johnc@3463 3299 _g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm),
johnc@4173 3300 _max_worker_id(max_worker_id),
johnc@3463 3301 _active_workers(n_workers) { }
johnc@3463 3302
johnc@3463 3303 void work(uint worker_id) {
johnc@4173 3304 AggregateCountDataHRClosure cl(_g1h, _cm_card_bm, _max_worker_id);
johnc@3463 3305
johnc@3463 3306 if (G1CollectedHeap::use_parallel_gc_threads()) {
johnc@3463 3307 _g1h->heap_region_par_iterate_chunked(&cl, worker_id,
johnc@3463 3308 _active_workers,
johnc@3463 3309 HeapRegion::AggregateCountClaimValue);
johnc@3463 3310 } else {
johnc@3463 3311 _g1h->heap_region_iterate(&cl);
johnc@3463 3312 }
johnc@3463 3313 }
johnc@3463 3314 };
johnc@3463 3315
johnc@3463 3316
johnc@3463 3317 void ConcurrentMark::aggregate_count_data() {
johnc@3463 3318 int n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
johnc@3463 3319 _g1h->workers()->active_workers() :
johnc@3463 3320 1);
johnc@3463 3321
johnc@3463 3322 G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm,
johnc@4173 3323 _max_worker_id, n_workers);
johnc@3463 3324
johnc@3463 3325 if (G1CollectedHeap::use_parallel_gc_threads()) {
johnc@3463 3326 assert(_g1h->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
johnc@3463 3327 "sanity check");
johnc@3463 3328 _g1h->set_par_threads(n_workers);
johnc@3463 3329 _g1h->workers()->run_task(&g1_par_agg_task);
johnc@3463 3330 _g1h->set_par_threads(0);
johnc@3463 3331
johnc@3463 3332 assert(_g1h->check_heap_region_claim_values(HeapRegion::AggregateCountClaimValue),
johnc@3463 3333 "sanity check");
johnc@3463 3334 _g1h->reset_heap_region_claim_values();
johnc@3463 3335 } else {
johnc@3463 3336 g1_par_agg_task.work(0);
johnc@3463 3337 }
jcoomes@7159 3338 _g1h->allocation_context_stats().update_at_remark();
johnc@3463 3339 }
johnc@3463 3340
johnc@3463 3341 // Clear the per-worker arrays used to store the per-region counting data
johnc@3463 3342 void ConcurrentMark::clear_all_count_data() {
johnc@3463 3343 // Clear the global card bitmap - it will be filled during
johnc@3463 3344 // liveness count aggregation (during remark) and the
johnc@3463 3345 // final counting task.
johnc@3463 3346 _card_bm.clear();
johnc@3463 3347
johnc@3463 3348 // Clear the global region bitmap - it will be filled as part
johnc@3463 3349 // of the final counting task.
johnc@3463 3350 _region_bm.clear();
johnc@3463 3351
tonyp@3713 3352 uint max_regions = _g1h->max_regions();
johnc@4173 3353 assert(_max_worker_id > 0, "uninitialized");
johnc@4173 3354
johnc@4173 3355 for (uint i = 0; i < _max_worker_id; i += 1) {
johnc@3463 3356 BitMap* task_card_bm = count_card_bitmap_for(i);
johnc@3463 3357 size_t* marked_bytes_array = count_marked_bytes_array_for(i);
johnc@3463 3358
johnc@3463 3359 assert(task_card_bm->size() == _card_bm.size(), "size mismatch");
johnc@3463 3360 assert(marked_bytes_array != NULL, "uninitialized");
johnc@3463 3361
tonyp@3713 3362 memset(marked_bytes_array, 0, (size_t) max_regions * sizeof(size_t));
johnc@3463 3363 task_card_bm->clear();
johnc@3463 3364 }
johnc@3463 3365 }
johnc@3463 3366
ysr@777 3367 void ConcurrentMark::print_stats() {
ysr@777 3368 if (verbose_stats()) {
ysr@777 3369 gclog_or_tty->print_cr("---------------------------------------------------------------------");
ysr@777 3370 for (size_t i = 0; i < _active_tasks; ++i) {
ysr@777 3371 _tasks[i]->print_stats();
ysr@777 3372 gclog_or_tty->print_cr("---------------------------------------------------------------------");
ysr@777 3373 }
ysr@777 3374 }
ysr@777 3375 }
ysr@777 3376
ysr@777 3377 // abandon current marking iteration due to a Full GC
ysr@777 3378 void ConcurrentMark::abort() {
tschatzl@7016 3379 // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
tschatzl@7016 3380 // concurrent bitmap clearing.
ysr@777 3381 _nextMarkBitMap->clearAll();
brutisso@7005 3382
brutisso@7005 3383 // Note we cannot clear the previous marking bitmap here
brutisso@7005 3384 // since VerifyDuringGC verifies the objects marked during
brutisso@7005 3385 // a full GC against the previous bitmap.
brutisso@7005 3386
johnc@3463 3387 // Clear the liveness counting data
johnc@3463 3388 clear_all_count_data();
ysr@777 3389 // Empty mark stack
johnc@4386 3390 reset_marking_state();
johnc@4173 3391 for (uint i = 0; i < _max_worker_id; ++i) {
ysr@777 3392 _tasks[i]->clear_region_fields();
johnc@2190 3393 }
pliden@6692 3394 _first_overflow_barrier_sync.abort();
pliden@6692 3395 _second_overflow_barrier_sync.abort();
brutisso@6904 3396 const GCId& gc_id = _g1h->gc_tracer_cm()->gc_id();
brutisso@6904 3397 if (!gc_id.is_undefined()) {
brutisso@6904 3398 // We can do multiple full GCs before ConcurrentMarkThread::run() gets a chance
brutisso@6904 3399 // to detect that it was aborted. Only keep track of the first GC id that was aborted.
brutisso@6904 3400 _aborted_gc_id = gc_id;
brutisso@6904 3401 }
ysr@777 3402 _has_aborted = true;
ysr@777 3403
ysr@777 3404 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
ysr@777 3405 satb_mq_set.abandon_partial_marking();
tonyp@1752 3406 // This can be called either during or outside marking, we'll read
tonyp@1752 3407 // the expected_active value from the SATB queue set.
tonyp@1752 3408 satb_mq_set.set_active_all_threads(
tonyp@1752 3409 false, /* new active value */
tonyp@1752 3410 satb_mq_set.is_active() /* expected_active */);
sla@5237 3411
sla@5237 3412 _g1h->trace_heap_after_concurrent_cycle();
sla@5237 3413 _g1h->register_concurrent_cycle_end();
ysr@777 3414 }
ysr@777 3415
brutisso@6904 3416 const GCId& ConcurrentMark::concurrent_gc_id() {
brutisso@6904 3417 if (has_aborted()) {
brutisso@6904 3418 return _aborted_gc_id;
brutisso@6904 3419 }
brutisso@6904 3420 return _g1h->gc_tracer_cm()->gc_id();
brutisso@6904 3421 }
brutisso@6904 3422
ysr@777 3423 static void print_ms_time_info(const char* prefix, const char* name,
ysr@777 3424 NumberSeq& ns) {
ysr@777 3425 gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
ysr@777 3426 prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
ysr@777 3427 if (ns.num() > 0) {
ysr@777 3428 gclog_or_tty->print_cr("%s [std. dev = %8.2f ms, max = %8.2f ms]",
ysr@777 3429 prefix, ns.sd(), ns.maximum());
ysr@777 3430 }
ysr@777 3431 }
ysr@777 3432
ysr@777 3433 void ConcurrentMark::print_summary_info() {
ysr@777 3434 gclog_or_tty->print_cr(" Concurrent marking:");
ysr@777 3435 print_ms_time_info(" ", "init marks", _init_times);
ysr@777 3436 print_ms_time_info(" ", "remarks", _remark_times);
ysr@777 3437 {
ysr@777 3438 print_ms_time_info(" ", "final marks", _remark_mark_times);
ysr@777 3439 print_ms_time_info(" ", "weak refs", _remark_weak_ref_times);
ysr@777 3440
ysr@777 3441 }
ysr@777 3442 print_ms_time_info(" ", "cleanups", _cleanup_times);
ysr@777 3443 gclog_or_tty->print_cr(" Final counting total time = %8.2f s (avg = %8.2f ms).",
ysr@777 3444 _total_counting_time,
ysr@777 3445 (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 /
ysr@777 3446 (double)_cleanup_times.num()
ysr@777 3447 : 0.0));
ysr@777 3448 if (G1ScrubRemSets) {
ysr@777 3449 gclog_or_tty->print_cr(" RS scrub total time = %8.2f s (avg = %8.2f ms).",
ysr@777 3450 _total_rs_scrub_time,
ysr@777 3451 (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 /
ysr@777 3452 (double)_cleanup_times.num()
ysr@777 3453 : 0.0));
ysr@777 3454 }
ysr@777 3455 gclog_or_tty->print_cr(" Total stop_world time = %8.2f s.",
ysr@777 3456 (_init_times.sum() + _remark_times.sum() +
ysr@777 3457 _cleanup_times.sum())/1000.0);
ysr@777 3458 gclog_or_tty->print_cr(" Total concurrent time = %8.2f s "
johnc@3463 3459 "(%8.2f s marking).",
ysr@777 3460 cmThread()->vtime_accum(),
johnc@3463 3461 cmThread()->vtime_mark_accum());
ysr@777 3462 }
ysr@777 3463
tonyp@1454 3464 void ConcurrentMark::print_worker_threads_on(outputStream* st) const {
johnc@4549 3465 if (use_parallel_marking_threads()) {
johnc@4549 3466 _parallel_workers->print_worker_threads_on(st);
johnc@4549 3467 }
tonyp@1454 3468 }
tonyp@1454 3469
stefank@4904 3470 void ConcurrentMark::print_on_error(outputStream* st) const {
stefank@4904 3471 st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT,
drchase@6680 3472 p2i(_prevMarkBitMap), p2i(_nextMarkBitMap));
stefank@4904 3473 _prevMarkBitMap->print_on_error(st, " Prev Bits: ");
stefank@4904 3474 _nextMarkBitMap->print_on_error(st, " Next Bits: ");
stefank@4904 3475 }
stefank@4904 3476
ysr@777 3477 // We take a break if someone is trying to stop the world.
jmasa@3357 3478 bool ConcurrentMark::do_yield_check(uint worker_id) {
pliden@6906 3479 if (SuspendibleThreadSet::should_yield()) {
jmasa@3357 3480 if (worker_id == 0) {
ysr@777 3481 _g1h->g1_policy()->record_concurrent_pause();
tonyp@2973 3482 }
pliden@6906 3483 SuspendibleThreadSet::yield();
ysr@777 3484 return true;
ysr@777 3485 } else {
ysr@777 3486 return false;
ysr@777 3487 }
ysr@777 3488 }
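// Illustrative sketch of the cooperative-yield contract assumed by
// do_yield_check(): concurrent workers poll should_yield() between
// bounded chunks of work and park in yield() for the duration of the
// safepoint. The loop below is hypothetical, not code from this file.
#if 0
static void concurrent_loop_sketch() {
  bool more_work = true;
  while (more_work) {
    more_work = do_bounded_chunk_of_work();  // hypothetical work step
    if (SuspendibleThreadSet::should_yield()) {
      SuspendibleThreadSet::yield();  // blocks until the STW pause is over
    }
  }
}
#endif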
ysr@777 3489
ysr@777 3490 #ifndef PRODUCT
ysr@777 3491 // for debugging purposes
ysr@777 3492 void ConcurrentMark::print_finger() {
ysr@777 3493 gclog_or_tty->print_cr("heap ["PTR_FORMAT", "PTR_FORMAT"), global finger = "PTR_FORMAT,
drchase@6680 3494 p2i(_heap_start), p2i(_heap_end), p2i(_finger));
johnc@4173 3495 for (uint i = 0; i < _max_worker_id; ++i) {
drchase@6680 3496 gclog_or_tty->print(" %u: " PTR_FORMAT, i, p2i(_tasks[i]->finger()));
ysr@777 3497 }
drchase@6680 3498 gclog_or_tty->cr();
ysr@777 3499 }
ysr@777 3500 #endif
ysr@777 3501
tonyp@2968 3502 void CMTask::scan_object(oop obj) {
tonyp@2968 3503 assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant");
tonyp@2968 3504
tonyp@2968 3505 if (_cm->verbose_high()) {
johnc@4173 3506 gclog_or_tty->print_cr("[%u] we're scanning object "PTR_FORMAT,
drchase@6680 3507 _worker_id, p2i((void*) obj));
tonyp@2968 3508 }
tonyp@2968 3509
tonyp@2968 3510 size_t obj_size = obj->size();
tonyp@2968 3511 _words_scanned += obj_size;
tonyp@2968 3512
tonyp@2968 3513 obj->oop_iterate(_cm_oop_closure);
tonyp@2968 3514 statsOnly( ++_objs_scanned );
tonyp@2968 3515 check_limits();
tonyp@2968 3516 }
tonyp@2968 3517
ysr@777 3518 // Closure for iteration over bitmaps
ysr@777 3519 class CMBitMapClosure : public BitMapClosure {
ysr@777 3520 private:
ysr@777 3521 // the bitmap that is being iterated over
ysr@777 3522 CMBitMap* _nextMarkBitMap;
ysr@777 3523 ConcurrentMark* _cm;
ysr@777 3524 CMTask* _task;
ysr@777 3525
ysr@777 3526 public:
tonyp@3691 3527 CMBitMapClosure(CMTask *task, ConcurrentMark* cm, CMBitMap* nextMarkBitMap) :
tonyp@3691 3528 _task(task), _cm(cm), _nextMarkBitMap(nextMarkBitMap) { }
ysr@777 3529
ysr@777 3530 bool do_bit(size_t offset) {
ysr@777 3531 HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset);
tonyp@1458 3532 assert(_nextMarkBitMap->isMarked(addr), "invariant");
tonyp@1458 3533 assert(addr < _cm->finger(), "invariant");
ysr@777 3534
tonyp@3691 3535 statsOnly( _task->increase_objs_found_on_bitmap() );
tonyp@3691 3536 assert(addr >= _task->finger(), "invariant");
tonyp@3691 3537
tonyp@3691 3538 // We move that task's local finger along.
tonyp@3691 3539 _task->move_finger_to(addr);
ysr@777 3540
ysr@777 3541 _task->scan_object(oop(addr));
ysr@777 3542 // we only partially drain the local queue and global stack
ysr@777 3543 _task->drain_local_queue(true);
ysr@777 3544 _task->drain_global_stack(true);
ysr@777 3545
ysr@777 3546 // if the has_aborted flag has been raised, we need to bail out of
ysr@777 3547 // the iteration
ysr@777 3548 return !_task->has_aborted();
ysr@777 3549 }
ysr@777 3550 };
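// Illustrative sketch of how a closure like the one above is typically
// driven (an assumption based on the BitMapClosure contract, not a call
// made at this point in the file): iteration visits set bits in address
// order and stops early once do_bit() returns false, which is how an
// aborted task breaks out of a bitmap scan.
#if 0
static bool scan_sketch(CMBitMap* bm, CMBitMapClosure* cl,
                        HeapWord* from, HeapWord* to) {
  // Returns false if cl->do_bit() aborted the iteration early.
  return bm->iterate(cl, MemRegion(from, to));
}
#endif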
ysr@777 3551
tonyp@2968 3552 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
tonyp@2968 3553 ConcurrentMark* cm,
tonyp@2968 3554 CMTask* task)
tonyp@2968 3555 : _g1h(g1h), _cm(cm), _task(task) {
tonyp@2968 3556 assert(_ref_processor == NULL, "should be initialized to NULL");
tonyp@2968 3557
tonyp@2968 3558 if (G1UseConcMarkReferenceProcessing) {
johnc@3175 3559 _ref_processor = g1h->ref_processor_cm();
tonyp@2968 3560 assert(_ref_processor != NULL, "should not be NULL");
ysr@777 3561 }
tonyp@2968 3562 }
ysr@777 3563
ysr@777 3564 void CMTask::setup_for_region(HeapRegion* hr) {
tonyp@1458 3565 assert(hr != NULL,
brutisso@7049 3566 "claim_region() should have filtered out NULL regions");
tonyp@1458 3567 assert(!hr->continuesHumongous(),
tonyp@1458 3568 "claim_region() should have filtered out continues humongous regions");
ysr@777 3569
tonyp@2973 3570 if (_cm->verbose_low()) {
johnc@4173 3571 gclog_or_tty->print_cr("[%u] setting up for region "PTR_FORMAT,
drchase@6680 3572 _worker_id, p2i(hr));
tonyp@2973 3573 }
ysr@777 3574
ysr@777 3575 _curr_region = hr;
ysr@777 3576 _finger = hr->bottom();
ysr@777 3577 update_region_limit();
ysr@777 3578 }
ysr@777 3579
ysr@777 3580 void CMTask::update_region_limit() {
ysr@777 3581 HeapRegion* hr = _curr_region;
ysr@777 3582 HeapWord* bottom = hr->bottom();
ysr@777 3583 HeapWord* limit = hr->next_top_at_mark_start();
ysr@777 3584
ysr@777 3585 if (limit == bottom) {
tonyp@2973 3586 if (_cm->verbose_low()) {
johnc@4173 3587 gclog_or_tty->print_cr("[%u] found an empty region "
ysr@777 3588 "["PTR_FORMAT", "PTR_FORMAT")",
drchase@6680 3589 _worker_id, p2i(bottom), p2i(limit));
tonyp@2973 3590 }
ysr@777 3591 // The region was collected underneath our feet.
ysr@777 3592 // We set the finger to bottom to ensure that the bitmap
ysr@777 3593 // iteration that will follow this will not do anything.
ysr@777 3594 // (this is not a condition that holds when we set the region up,
ysr@777 3595 // as the region is not supposed to be empty in the first place)
ysr@777 3596 _finger = bottom;
ysr@777 3597 } else if (limit >= _region_limit) {
tonyp@1458 3598 assert(limit >= _finger, "peace of mind");
ysr@777 3599 } else {
tonyp@1458 3600 assert(limit < _region_limit, "only way to get here");
ysr@777 3601 // This can happen under some pretty unusual circumstances. An
ysr@777 3602 // evacuation pause empties the region underneath our feet (NTAMS
ysr@777 3603 // at bottom). We then do some allocation in the region (NTAMS
ysr@777 3604 // stays at bottom), followed by the region being used as a GC
ysr@777 3605 // alloc region (NTAMS will move to top() and the objects
ysr@777 3606 // originally below it will be grayed). All objects now marked in
ysr@777 3607 // the region are explicitly grayed, if below the global finger,
ysr@777 3608 // and we do not need in fact to scan anything else. So, we simply
ysr@777 3609 // set _finger to be limit to ensure that the bitmap iteration
ysr@777 3610 // doesn't do anything.
ysr@777 3611 _finger = limit;
ysr@777 3612 }
ysr@777 3613
ysr@777 3614 _region_limit = limit;
ysr@777 3615 }
ysr@777 3616
ysr@777 3617 void CMTask::giveup_current_region() {
tonyp@1458 3618 assert(_curr_region != NULL, "invariant");
tonyp@2973 3619 if (_cm->verbose_low()) {
johnc@4173 3620 gclog_or_tty->print_cr("[%u] giving up region "PTR_FORMAT,
drchase@6680 3621 _worker_id, p2i(_curr_region));
tonyp@2973 3622 }
ysr@777 3623 clear_region_fields();
ysr@777 3624 }
ysr@777 3625
ysr@777 3626 void CMTask::clear_region_fields() {
ysr@777 3627 // Values for these three fields that indicate that we're not
ysr@777 3628 // holding on to a region.
ysr@777 3629 _curr_region = NULL;
ysr@777 3630 _finger = NULL;
ysr@777 3631 _region_limit = NULL;
ysr@777 3632 }
ysr@777 3633
tonyp@2968 3634 void CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
tonyp@2968 3635 if (cm_oop_closure == NULL) {
tonyp@2968 3636 assert(_cm_oop_closure != NULL, "invariant");
tonyp@2968 3637 } else {
tonyp@2968 3638 assert(_cm_oop_closure == NULL, "invariant");
tonyp@2968 3639 }
tonyp@2968 3640 _cm_oop_closure = cm_oop_closure;
tonyp@2968 3641 }
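// Usage sketch (illustrative only): the closure is installed for the
// duration of a marking step and cleared afterwards; the asserts above
// enforce strict NULL <-> non-NULL alternation so a stale closure can
// never be left behind.
#if 0
static void marking_step_sketch(CMTask* task, G1CMOopClosure* cl) {
  task->set_cm_oop_closure(cl);    // install before the step
  // ... marking work that ends up in scan_object() / oop_iterate() ...
  task->set_cm_oop_closure(NULL);  // uninstall once the step is done
}
#endif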
tonyp@2968 3642
ysr@777 3643 void CMTask::reset(CMBitMap* nextMarkBitMap) {
tonyp@1458 3644 guarantee(nextMarkBitMap != NULL, "invariant");
ysr@777 3645
tonyp@2973 3646 if (_cm->verbose_low()) {
johnc@4173 3647 gclog_or_tty->print_cr("[%u] resetting", _worker_id);
tonyp@2973 3648 }
ysr@777 3649
ysr@777 3650 _nextMarkBitMap = nextMarkBitMap;
ysr@777 3651 clear_region_fields();
ysr@777 3652
ysr@777 3653 _calls = 0;
ysr@777 3654 _elapsed_time_ms = 0.0;
ysr@777 3655 _termination_time_ms = 0.0;
ysr@777 3656 _termination_start_time_ms = 0.0;
ysr@777 3657
ysr@777 3658 #if _MARKING_STATS_
ysr@777 3659 _local_pushes = 0;
ysr@777 3660 _local_pops = 0;
ysr@777 3661 _local_max_size = 0;
ysr@777 3662 _objs_scanned = 0;
ysr@777 3663 _global_pushes = 0;
ysr@777 3664 _global_pops = 0;
ysr@777 3665 _global_max_size = 0;
ysr@777 3666 _global_transfers_to = 0;
ysr@777 3667 _global_transfers_from = 0;
ysr@777 3668 _regions_claimed = 0;
ysr@777 3669 _objs_found_on_bitmap = 0;
ysr@777 3670 _satb_buffers_processed = 0;
ysr@777 3671 _steal_attempts = 0;
ysr@777 3672 _steals = 0;
ysr@777 3673 _aborted = 0;
ysr@777 3674 _aborted_overflow = 0;
ysr@777 3675 _aborted_cm_aborted = 0;
ysr@777 3676 _aborted_yield = 0;
ysr@777 3677 _aborted_timed_out = 0;
ysr@777 3678 _aborted_satb = 0;
ysr@777 3679 _aborted_termination = 0;
ysr@777 3680 #endif // _MARKING_STATS_
ysr@777 3681 }
ysr@777 3682
ysr@777 3683 bool CMTask::should_exit_termination() {
ysr@777 3684 regular_clock_call();
ysr@777 3685 // This is called when we are in the termination protocol. We should
ysr@777 3686 // quit if, for some reason, this task wants to abort or the global
ysr@777 3687 // stack is not empty (this means that we can get work from it).
ysr@777 3688 return !_cm->mark_stack_empty() || has_aborted();
ysr@777 3689 }
ysr@777 3690
ysr@777 3691 void CMTask::reached_limit() {
tonyp@1458 3692 assert(_words_scanned >= _words_scanned_limit ||
tonyp@1458 3693 _refs_reached >= _refs_reached_limit ,
tonyp@1458 3694 "shouldn't have been called otherwise");
ysr@777 3695 regular_clock_call();
ysr@777 3696 }
ysr@777 3697
ysr@777 3698 void CMTask::regular_clock_call() {
tonyp@2973 3699 if (has_aborted()) return;
ysr@777 3700
ysr@777 3701 // First, we need to recalculate the words scanned and refs reached
ysr@777 3702 // limits for the next clock call.
ysr@777 3703 recalculate_limits();
ysr@777 3704
ysr@777 3705 // During the regular clock call we do the following
ysr@777 3706
ysr@777 3707 // (1) If an overflow has been flagged, then we abort.
ysr@777 3708 if (_cm->has_overflown()) {
ysr@777 3709 set_has_aborted();
ysr@777 3710 return;
ysr@777 3711 }
ysr@777 3712
ysr@777 3713 // If we are not concurrent (i.e. we're doing remark) we don't need
ysr@777 3714 // to check anything else. The other steps are only needed during
ysr@777 3715 // the concurrent marking phase.
tonyp@2973 3716 if (!concurrent()) return;
ysr@777 3717
ysr@777 3718 // (2) If marking has been aborted for Full GC, then we also abort.
ysr@777 3719 if (_cm->has_aborted()) {
ysr@777 3720 set_has_aborted();
ysr@777 3721 statsOnly( ++_aborted_cm_aborted );
ysr@777 3722 return;
ysr@777 3723 }
ysr@777 3724
ysr@777 3725 double curr_time_ms = os::elapsedVTime() * 1000.0;
ysr@777 3726
ysr@777 3727 // (3) If marking stats are enabled, then we update the step history.
ysr@777 3728 #if _MARKING_STATS_
tonyp@2973 3729 if (_words_scanned >= _words_scanned_limit) {
ysr@777 3730 ++_clock_due_to_scanning;
tonyp@2973 3731 }
tonyp@2973 3732 if (_refs_reached >= _refs_reached_limit) {
ysr@777 3733 ++_clock_due_to_marking;
tonyp@2973 3734 }
ysr@777 3735
ysr@777 3736 double last_interval_ms = curr_time_ms - _interval_start_time_ms;
ysr@777 3737 _interval_start_time_ms = curr_time_ms;
ysr@777 3738 _all_clock_intervals_ms.add(last_interval_ms);
ysr@777 3739
ysr@777 3740 if (_cm->verbose_medium()) {
johnc@4173 3741 gclog_or_tty->print_cr("[%u] regular clock, interval = %1.2lfms, "
tschatzl@7094 3742 "scanned = "SIZE_FORMAT"%s, refs reached = "SIZE_FORMAT"%s",
johnc@4173 3743 _worker_id, last_interval_ms,
tonyp@2973 3744 _words_scanned,
tonyp@2973 3745 (_words_scanned >= _words_scanned_limit) ? " (*)" : "",
tonyp@2973 3746 _refs_reached,
tonyp@2973 3747 (_refs_reached >= _refs_reached_limit) ? " (*)" : "");
ysr@777 3748 }
ysr@777 3749 #endif // _MARKING_STATS_
ysr@777 3750
ysr@777 3751 // (4) We check whether we should yield. If we have to, then we abort.
pliden@6906 3752 if (SuspendibleThreadSet::should_yield()) {
ysr@777 3753 // We should yield. To do this we abort the task. The caller is
ysr@777 3754 // responsible for yielding.
ysr@777 3755 set_has_aborted();
ysr@777 3756 statsOnly( ++_aborted_yield );
ysr@777 3757 return;
ysr@777 3758 }
ysr@777 3759
ysr@777 3760 // (5) We check whether we've reached our time quota. If we have,
ysr@777 3761 // then we abort.
ysr@777 3762 double elapsed_time_ms = curr_time_ms - _start_time_ms;
ysr@777 3763 if (elapsed_time_ms > _time_target_ms) {
ysr@777 3764 set_has_aborted();
johnc@2494 3765 _has_timed_out = true;
ysr@777 3766 statsOnly( ++_aborted_timed_out );
ysr@777 3767 return;
ysr@777 3768 }
ysr@777 3769
ysr@777 3770 // (6) Finally, we check whether there are enough completed SATB
ysr@777 3771 // buffers available for processing. If there are, we abort.
ysr@777 3772 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
ysr@777 3773 if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
tonyp@2973 3774 if (_cm->verbose_low()) {
johnc@4173 3775 gclog_or_tty->print_cr("[%u] aborting to deal with pending SATB buffers",
johnc@4173 3776 _worker_id);
tonyp@2973 3777 }
ysr@777 3778 // we do need to process SATB buffers, so we'll abort and restart
ysr@777 3779 // the marking task to do so
ysr@777 3780 set_has_aborted();
ysr@777 3781 statsOnly( ++_aborted_satb );
ysr@777 3782 return;
ysr@777 3783 }
ysr@777 3784 }
ysr@777 3785
ysr@777 3786 void CMTask::recalculate_limits() {
ysr@777 3787 _real_words_scanned_limit = _words_scanned + words_scanned_period;
ysr@777 3788 _words_scanned_limit = _real_words_scanned_limit;
ysr@777 3789
ysr@777 3790 _real_refs_reached_limit = _refs_reached + refs_reached_period;
ysr@777 3791 _refs_reached_limit = _real_refs_reached_limit;
ysr@777 3792 }
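// Worked example for the work-based clock (numbers purely illustrative):
// if words_scanned_period were 12*1024 and the task has scanned 30000
// words so far, the new _words_scanned_limit becomes 30000 + 12288 =
// 42288; once _words_scanned crosses it, reached_limit() fires and
// regular_clock_call() re-checks all of the abort conditions.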
ysr@777 3793
ysr@777 3794 void CMTask::decrease_limits() {
ysr@777 3795 // This is called when we believe that we're going to do an infrequent
ysr@777 3796 // operation which will increase the per-byte scanned cost (i.e. move
ysr@777 3797 // entries to/from the global stack). It basically tries to decrease the
ysr@777 3798 // scanning limit so that the clock is called earlier.
ysr@777 3799
tonyp@2973 3800 if (_cm->verbose_medium()) {
johnc@4173 3801 gclog_or_tty->print_cr("[%u] decreasing limits", _worker_id);
tonyp@2973 3802 }
ysr@777 3803
ysr@777 3804 _words_scanned_limit = _real_words_scanned_limit -
ysr@777 3805 3 * words_scanned_period / 4;
ysr@777 3806 _refs_reached_limit = _real_refs_reached_limit -
ysr@777 3807 3 * refs_reached_period / 4;
ysr@777 3808 }
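// Effect of the adjustment above: the next clock trigger is pulled
// forward by 3/4 of a period, so after an expensive operation only
// about a quarter of the usual scanning/marking work happens before
// regular_clock_call() runs again. E.g. (illustrative numbers), with a
// period of 12*1024 words the limit drops by 9*1024 words.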
ysr@777 3809
ysr@777 3810 void CMTask::move_entries_to_global_stack() {
ysr@777 3811 // local array where we'll store the entries that will be popped
ysr@777 3812 // from the local queue
ysr@777 3813 oop buffer[global_stack_transfer_size];
ysr@777 3814
ysr@777 3815 int n = 0;
ysr@777 3816 oop obj;
ysr@777 3817 while (n < global_stack_transfer_size && _task_queue->pop_local(obj)) {
ysr@777 3818 buffer[n] = obj;
ysr@777 3819 ++n;
ysr@777 3820 }
ysr@777 3821
ysr@777 3822 if (n > 0) {
ysr@777 3823 // we popped at least one entry from the local queue
ysr@777 3824
ysr@777 3825 statsOnly( ++_global_transfers_to; _local_pops += n );
ysr@777 3826
ysr@777 3827 if (!_cm->mark_stack_push(buffer, n)) {
tonyp@2973 3828 if (_cm->verbose_low()) {
johnc@4173 3829 gclog_or_tty->print_cr("[%u] aborting due to global stack overflow",
johnc@4173 3830 _worker_id);
tonyp@2973 3831 }
ysr@777 3832 set_has_aborted();
ysr@777 3833 } else {
ysr@777 3834 // the transfer was successful
ysr@777 3835
tonyp@2973 3836 if (_cm->verbose_medium()) {
johnc@4173 3837 gclog_or_tty->print_cr("[%u] pushed %d entries to the global stack",
johnc@4173 3838 _worker_id, n);
tonyp@2973 3839 }
ysr@777 3840 statsOnly( int tmp_size = _cm->mark_stack_size();
tonyp@2973 3841 if (tmp_size > _global_max_size) {
ysr@777 3842 _global_max_size = tmp_size;
tonyp@2973 3843 }
ysr@777 3844 _global_pushes += n );
ysr@777 3845 }
ysr@777 3846 }
ysr@777 3847
ysr@777 3848 // this operation was quite expensive, so decrease the limits
ysr@777 3849 decrease_limits();
ysr@777 3850 }
ysr@777 3851
ysr@777 3852 void CMTask::get_entries_from_global_stack() {
ysr@777 3853 // local array where we'll store the entries that will be popped
ysr@777 3854 // from the global stack.
ysr@777 3855 oop buffer[global_stack_transfer_size];
ysr@777 3856 int n;
ysr@777 3857 _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n);
tonyp@1458 3858 assert(n <= global_stack_transfer_size,
tonyp@1458 3859 "we should not pop more than the given limit");
ysr@777 3860 if (n > 0) {
ysr@777 3861 // yes, we did actually pop at least one entry
ysr@777 3862
ysr@777 3863 statsOnly( ++_global_transfers_from; _global_pops += n );
tonyp@2973 3864 if (_cm->verbose_medium()) {
johnc@4173 3865 gclog_or_tty->print_cr("[%u] popped %d entries from the global stack",
johnc@4173 3866 _worker_id, n);
tonyp@2973 3867 }
ysr@777 3868 for (int i = 0; i < n; ++i) {
ysr@777 3869 bool success = _task_queue->push(buffer[i]);
ysr@777 3870 // We only call this when the local queue is empty or under a
ysr@777 3871 // given target limit. So, we do not expect this push to fail.
tonyp@1458 3872 assert(success, "invariant");
ysr@777 3873 }
ysr@777 3874
ysr@777 3875 statsOnly( int tmp_size = _task_queue->size();
tonyp@2973 3876 if (tmp_size > _local_max_size) {
ysr@777 3877 _local_max_size = tmp_size;
tonyp@2973 3878 }
ysr@777 3879 _local_pushes += n );
ysr@777 3880 }
ysr@777 3881
ysr@777 3882 // this operation was quite expensive, so decrease the limits
ysr@777 3883 decrease_limits();
ysr@777 3884 }
ysr@777 3885
ysr@777 3886 void CMTask::drain_local_queue(bool partially) {
tonyp@2973 3887 if (has_aborted()) return;
ysr@777 3888
ysr@777 3889 // Decide what the target size is, depending on whether we're going to
ysr@777 3890 // drain it partially (so that other tasks can steal if they run out
ysr@777 3891 // of things to do) or totally (at the very end).
ysr@777 3892 size_t target_size;
tonyp@2973 3893 if (partially) {
ysr@777 3894 target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
tonyp@2973 3895 } else {
ysr@777 3896 target_size = 0;
tonyp@2973 3897 }
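// For example (values hypothetical): with max_elems() == 16*1024 and
// GCDrainStackTargetSize == 64, a partial drain stops once the queue
// shrinks to MIN2(16384/3, 64) == 64 entries, leaving work available
// for stealing, whereas a total drain (target_size == 0) empties it.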
ysr@777 3898
ysr@777 3899 if (_task_queue->size() > target_size) {
tonyp@2973 3900 if (_cm->verbose_high()) {
drchase@6680 3901 gclog_or_tty->print_cr("[%u] draining local queue, target size = " SIZE_FORMAT,
johnc@4173 3902 _worker_id, target_size);
tonyp@2973 3903 }
ysr@777 3904
ysr@777 3905 oop obj;
ysr@777 3906 bool ret = _task_queue->pop_local(obj);
ysr@777 3907 while (ret) {
ysr@777 3908 statsOnly( ++_local_pops );
ysr@777 3909
tonyp@2973 3910 if (_cm->verbose_high()) {
johnc@4173 3911 gclog_or_tty->print_cr("[%u] popped "PTR_FORMAT, _worker_id,
drchase@6680 3912 p2i((void*) obj));
tonyp@2973 3913 }
ysr@777 3914
tonyp@1458 3915 assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" );
tonyp@2643 3916 assert(!_g1h->is_on_master_free_list(
tonyp@2472 3917 _g1h->heap_region_containing((HeapWord*) obj)), "invariant");
ysr@777 3918
ysr@777 3919 scan_object(obj);
ysr@777 3920
tonyp@2973 3921 if (_task_queue->size() <= target_size || has_aborted()) {
ysr@777 3922 ret = false;
tonyp@2973 3923 } else {
ysr@777 3924 ret = _task_queue->pop_local(obj);
tonyp@2973 3925 }
ysr@777 3926 }
ysr@777 3927
tonyp@2973 3928 if (_cm->verbose_high()) {
johnc@4173 3929 gclog_or_tty->print_cr("[%u] drained local queue, size = %d",
johnc@4173 3930 _worker_id, _task_queue->size());
tonyp@2973 3931 }
ysr@777 3932 }
ysr@777 3933 }
ysr@777 3934
ysr@777 3935 void CMTask::drain_global_stack(bool partially) {
tonyp@2973 3936 if (has_aborted()) return;
ysr@777 3937
ysr@777 3938 // We have a policy to drain the local queue before we attempt to
ysr@777 3939 // drain the global stack.
tonyp@1458 3940 assert(partially || _task_queue->size() == 0, "invariant");
ysr@777 3941
ysr@777 3942 // Decide what the target size is, depending on whether we're going to
ysr@777 3943 // drain it partially (so that other tasks can steal if they run out
ysr@777 3944 // of things to do) or totally (at the very end). Notice that,
ysr@777 3945 // because we move entries from the global stack in chunks or
ysr@777 3946 // because another task might be doing the same, we might in fact
ysr@777 3947 // drop below the target. But, this is not a problem.
ysr@777 3948 size_t target_size;
tonyp@2973 3949 if (partially) {
ysr@777 3950 target_size = _cm->partial_mark_stack_size_target();
tonyp@2973 3951 } else {
ysr@777 3952 target_size = 0;
tonyp@2973 3953 }
ysr@777 3954
ysr@777 3955 if (_cm->mark_stack_size() > target_size) {
tonyp@2973 3956 if (_cm->verbose_low()) {
drchase@6680 3957 gclog_or_tty->print_cr("[%u] draining global_stack, target size " SIZE_FORMAT,
johnc@4173 3958 _worker_id, target_size);
tonyp@2973 3959 }
ysr@777 3960
ysr@777 3961 while (!has_aborted() && _cm->mark_stack_size() > target_size) {
ysr@777 3962 get_entries_from_global_stack();
ysr@777 3963 drain_local_queue(partially);
ysr@777 3964 }
ysr@777 3965
tonyp@2973 3966 if (_cm->verbose_low()) {
drchase@6680 3967 gclog_or_tty->print_cr("[%u] drained global stack, size = " SIZE_FORMAT,
johnc@4173 3968 _worker_id, _cm->mark_stack_size());
tonyp@2973 3969 }
ysr@777 3970 }
ysr@777 3971 }
ysr@777 3972
ysr@777 3973 // The SATB queue makes several assumptions about whether to call the
ysr@777 3974 // par or non-par versions of the methods. This is why some of the code
ysr@777 3975 // is replicated. We should really get rid of the single-threaded
ysr@777 3976 // version of the code to simplify things.
ysr@777 3977 void CMTask::drain_satb_buffers() {
tonyp@2973 3978 if (has_aborted()) return;
ysr@777 3979
ysr@777 3980 // We set this so that the regular clock knows that we're in the
ysr@777 3981 // middle of draining buffers and doesn't set the abort flag when it
ysr@777 3982 // notices that SATB buffers are available for draining. It'd be
ysr@777 3983 // very counterproductive if it did that. :-)
ysr@777 3984 _draining_satb_buffers = true;
ysr@777 3985
ysr@777 3986 CMObjectClosure oc(this);
ysr@777 3987 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
tonyp@2973 3988 if (G1CollectedHeap::use_parallel_gc_threads()) {
johnc@4173 3989 satb_mq_set.set_par_closure(_worker_id, &oc);
tonyp@2973 3990 } else {
ysr@777 3991 satb_mq_set.set_closure(&oc);
tonyp@2973 3992 }
ysr@777 3993
ysr@777 3994 // This keeps claiming and applying the closure to completed buffers
ysr@777 3995 // until we run out of buffers or we need to abort.
jmasa@2188 3996 if (G1CollectedHeap::use_parallel_gc_threads()) {
ysr@777 3997 while (!has_aborted() &&
johnc@4173 3998 satb_mq_set.par_apply_closure_to_completed_buffer(_worker_id)) {
tonyp@2973 3999 if (_cm->verbose_medium()) {
johnc@4173 4000 gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id);
tonyp@2973 4001 }
ysr@777 4002 statsOnly( ++_satb_buffers_processed );
ysr@777 4003 regular_clock_call();
ysr@777 4004 }
ysr@777 4005 } else {
ysr@777 4006 while (!has_aborted() &&
ysr@777 4007 satb_mq_set.apply_closure_to_completed_buffer()) {
tonyp@2973 4008 if (_cm->verbose_medium()) {
johnc@4173 4009 gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id);
tonyp@2973 4010 }
ysr@777 4011 statsOnly( ++_satb_buffers_processed );
ysr@777 4012 regular_clock_call();
ysr@777 4013 }
ysr@777 4014 }
ysr@777 4015
ysr@777 4016 _draining_satb_buffers = false;
ysr@777 4017
tonyp@1458 4018 assert(has_aborted() ||
tonyp@1458 4019 concurrent() ||
tonyp@1458 4020 satb_mq_set.completed_buffers_num() == 0, "invariant");
ysr@777 4021
tonyp@2973 4022 if (G1CollectedHeap::use_parallel_gc_threads()) {
johnc@4173 4023 satb_mq_set.set_par_closure(_worker_id, NULL);
tonyp@2973 4024 } else {
ysr@777 4025 satb_mq_set.set_closure(NULL);
tonyp@2973 4026 }
ysr@777 4027
ysr@777 4028 // again, this was a potentially expensive operation, so decrease the
ysr@777 4029 // limits to get the regular clock call early
ysr@777 4030 decrease_limits();
ysr@777 4031 }
ysr@777 4032
ysr@777 4033 void CMTask::print_stats() {
johnc@4173 4034 gclog_or_tty->print_cr("Marking Stats, task = %u, calls = %d",
johnc@4173 4035 _worker_id, _calls);
ysr@777 4036 gclog_or_tty->print_cr(" Elapsed time = %1.2lfms, Termination time = %1.2lfms",
ysr@777 4037 _elapsed_time_ms, _termination_time_ms);
ysr@777 4038 gclog_or_tty->print_cr(" Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
ysr@777 4039 _step_times_ms.num(), _step_times_ms.avg(),
ysr@777 4040 _step_times_ms.sd());
ysr@777 4041 gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms",
ysr@777 4042 _step_times_ms.maximum(), _step_times_ms.sum());
ysr@777 4043
ysr@777 4044 #if _MARKING_STATS_
ysr@777 4045 gclog_or_tty->print_cr(" Clock Intervals (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
ysr@777 4046 _all_clock_intervals_ms.num(), _all_clock_intervals_ms.avg(),
ysr@777 4047 _all_clock_intervals_ms.sd());
ysr@777 4048 gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms",
ysr@777 4049 _all_clock_intervals_ms.maximum(),
ysr@777 4050 _all_clock_intervals_ms.sum());
ysr@777 4051 gclog_or_tty->print_cr(" Clock Causes (cum): scanning = %d, marking = %d",
ysr@777 4052 _clock_due_to_scanning, _clock_due_to_marking);
ysr@777 4053 gclog_or_tty->print_cr(" Objects: scanned = %d, found on the bitmap = %d",
ysr@777 4054 _objs_scanned, _objs_found_on_bitmap);
ysr@777 4055 gclog_or_tty->print_cr(" Local Queue: pushes = %d, pops = %d, max size = %d",
ysr@777 4056 _local_pushes, _local_pops, _local_max_size);
ysr@777 4057 gclog_or_tty->print_cr(" Global Stack: pushes = %d, pops = %d, max size = %d",
ysr@777 4058 _global_pushes, _global_pops, _global_max_size);
ysr@777 4059 gclog_or_tty->print_cr(" transfers to = %d, transfers from = %d",
ysr@777 4060 _global_transfers_to, _global_transfers_from);
tonyp@3691 4061 gclog_or_tty->print_cr(" Regions: claimed = %d", _regions_claimed);
ysr@777 4062 gclog_or_tty->print_cr(" SATB buffers: processed = %d", _satb_buffers_processed);
ysr@777 4063 gclog_or_tty->print_cr(" Steals: attempts = %d, successes = %d",
ysr@777 4064 _steal_attempts, _steals);
ysr@777 4065 gclog_or_tty->print_cr(" Aborted: %d, due to", _aborted);
ysr@777 4066 gclog_or_tty->print_cr(" overflow: %d, global abort: %d, yield: %d",
ysr@777 4067 _aborted_overflow, _aborted_cm_aborted, _aborted_yield);
ysr@777 4068 gclog_or_tty->print_cr(" time out: %d, SATB: %d, termination: %d",
ysr@777 4069 _aborted_timed_out, _aborted_satb, _aborted_termination);
ysr@777 4070 #endif // _MARKING_STATS_
ysr@777 4071 }
ysr@777 4072
ysr@777 4073 /*****************************************************************************
ysr@777 4074
johnc@4787 4075 The do_marking_step(time_target_ms, ...) method is the building
johnc@4787 4076 block of the parallel marking framework. It can be called in parallel
ysr@777 4077 with other invocations of do_marking_step() on different tasks
ysr@777 4078 (but only one per task, obviously) and concurrently with the
ysr@777 4079 mutator threads, or during remark, hence it eliminates the need
ysr@777 4080 for two versions of the code. When called during remark, it will
ysr@777 4081 pick up from where the task left off during the concurrent marking
ysr@777 4082 phase. Interestingly, tasks are also claimable during evacuation
ysr@777 4083 pauses, since do_marking_step() ensures that it aborts before
ysr@777 4084 it needs to yield.
ysr@777 4085
johnc@4787 4086 The data structures that it uses to do marking work are the
ysr@777 4087 following:
ysr@777 4088
ysr@777 4089 (1) Marking Bitmap. If there are gray objects that appear only
ysr@777 4090 on the bitmap (this happens either when dealing with an overflow
ysr@777 4091 or when the initial marking phase has simply marked the roots
ysr@777 4092 and didn't push them on the stack), then tasks claim heap
ysr@777 4093 regions whose bitmap they then scan to find gray objects. A
ysr@777 4094 global finger indicates where the end of the last claimed region
ysr@777 4095 is. A local finger indicates how far into the region a task has
ysr@777 4096 scanned. The two fingers are used to determine how to gray an
ysr@777 4097 object (i.e. whether simply marking it is OK, as it will be
ysr@777 4098 visited by a task in the future, or whether it also needs to be
ysr@777 4099 pushed on a stack).
ysr@777 4100
ysr@777 4101 (2) Local Queue. The local queue of the task which is accessed
ysr@777 4102 reasonably efficiently by the task. Other tasks can steal from
ysr@777 4103 it when they run out of work. Throughout the marking phase, a
ysr@777 4104 task attempts to keep its local queue short but not totally
ysr@777 4105 empty, so that entries are available for stealing by other
ysr@777 4106 tasks. Only when there is no more work will a task totally
ysr@777 4107 drain its local queue.
ysr@777 4108
ysr@777 4109 (3) Global Mark Stack. This handles local queue overflow. During
ysr@777 4110 marking, entries are only moved between it and the local queues
ysr@777 4111 in sets, as access to it requires a mutex and finer-grained
ysr@777 4112 interaction with it might cause contention. If it
ysr@777 4113 overflows, then the marking phase should restart and iterate
ysr@777 4114 over the bitmap to identify gray objects. Throughout the marking
ysr@777 4115 phase, tasks attempt to keep the global mark stack at a small
ysr@777 4116 length but not totally empty, so that entries are available for
ysr@777 4117 popping by other tasks. Only when there is no more work will
ysr@777 4118 tasks totally drain the global mark stack.
ysr@777 4119
tonyp@3691 4120 (4) SATB Buffer Queue. This is where completed SATB buffers are
ysr@777 4121 made available. Buffers are regularly removed from this queue
ysr@777 4122 and scanned for roots, so that the queue doesn't get too
ysr@777 4123 long. During remark, all completed buffers are processed, as
ysr@777 4124 well as the filled-in parts of any uncompleted buffers.
ysr@777 4125
ysr@777 4126 The do_marking_step() method tries to abort when the time target
ysr@777 4127 has been reached. There are a few other cases when the
ysr@777 4128 do_marking_step() method also aborts:
ysr@777 4129
ysr@777 4130 (1) When the marking phase has been aborted (after a Full GC).
ysr@777 4131
tonyp@3691 4132 (2) When a global overflow (on the global stack) has been
tonyp@3691 4133 triggered. Before the task aborts, it will actually sync up with
tonyp@3691 4134 the other tasks to ensure that all the marking data structures
johnc@4788 4135 (local queues, stacks, fingers etc.) are re-initialized so that
tonyp@3691 4136 when do_marking_step() completes, the marking phase can
tonyp@3691 4137 immediately restart.
ysr@777 4138
ysr@777 4139 (3) When enough completed SATB buffers are available. The
ysr@777 4140 do_marking_step() method only tries to drain SATB buffers right
ysr@777 4141 at the beginning. So, if enough buffers are available, the
ysr@777 4142 marking step aborts and the SATB buffers are processed at
ysr@777 4143 the beginning of the next invocation.
ysr@777 4144
ysr@777 4145 (4) To yield. When we have to yield, we abort and yield
ysr@777 4146 right at the end of do_marking_step(). This saves us a lot
ysr@777 4147 of hassle as, by yielding, we might allow a Full GC. If this
ysr@777 4148 happens then objects will be compacted underneath our feet, the
ysr@777 4149 heap might shrink, etc. We save checking for this by just
ysr@777 4150 aborting and doing the yield right at the end.
ysr@777 4151
ysr@777 4152 From the above it follows that the do_marking_step() method should
ysr@777 4153 be called in a loop (or, otherwise, regularly) until it completes.
ysr@777 4154
ysr@777 4155 If a marking step completes without its has_aborted() flag being
ysr@777 4156 true, it means it has completed the current marking phase (and
ysr@777 4157 also all other marking tasks have done so and have all synced up).
ysr@777 4158
ysr@777 4159 A method called regular_clock_call() is invoked "regularly" (in
ysr@777 4160 sub-ms intervals) throughout marking. It is this clock method that
ysr@777 4161 checks all the abort conditions which were mentioned above and
ysr@777 4162 decides when the task should abort. A work-based scheme is used to
ysr@777 4163 trigger this clock method: it is called when the number of object
ysr@777 4164 words the marking phase has scanned or the number of references it
ysr@777 4165 has visited reaches a given limit. Additional invocations of
ysr@777 4166 the clock method have been planted in a few other strategic places
ysr@777 4167 too. The initial reason for the clock method was to avoid calling
ysr@777 4168 vtime too regularly, as it is quite expensive. So, once it was in
ysr@777 4169 place, it was natural to piggy-back all the other conditions on it
ysr@777 4170 too and not constantly check them throughout the code.
ysr@777 4171
johnc@4787 4172 If do_termination is true then do_marking_step will enter its
johnc@4787 4173 termination protocol.
johnc@4787 4174
johnc@4787 4175 The value of is_serial must be true when do_marking_step is being
johnc@4787 4176 called serially (i.e. by the VMThread) and do_marking_step should
johnc@4787 4177 skip any synchronization in the termination and overflow code.
johnc@4787 4178 Examples include the serial remark code and the serial reference
johnc@4787 4179 processing closures.
johnc@4787 4180
johnc@4787 4181 The value of is_serial must be false when do_marking_step is
johnc@4787 4182 being called by any of the worker threads in a work gang.
johnc@4787 4183 Examples include the concurrent marking code (CMMarkingTask),
johnc@4787 4184 the MT remark code, and the MT reference processing closures.
johnc@4787 4185
ysr@777 4186 *****************************************************************************/
ysr@777 4187
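// A minimal sketch of the retry loop described above (illustrative
// only; the real callers are the concurrent marking task, the remark
// code and the reference processing closures):
//
//   do {
//     task->do_marking_step(target_ms,
//                           true  /* do_termination */,
//                           false /* is_serial */);
//   } while (task->has_aborted() && !cm->has_aborted());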
johnc@2494 4188 void CMTask::do_marking_step(double time_target_ms,
johnc@4787 4189 bool do_termination,
johnc@4787 4190 bool is_serial) {
tonyp@1458 4191 assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
tonyp@1458 4192 assert(concurrent() == _cm->concurrent(), "they should be the same");
tonyp@1458 4193
ysr@777 4194 G1CollectorPolicy* g1_policy = _g1h->g1_policy();
tonyp@1458 4195 assert(_task_queues != NULL, "invariant");
tonyp@1458 4196 assert(_task_queue != NULL, "invariant");
johnc@4173 4197 assert(_task_queues->queue(_worker_id) == _task_queue, "invariant");
tonyp@1458 4198
tonyp@1458 4199 assert(!_claimed,
tonyp@1458 4200 "only one thread should claim this task at any one time");
ysr@777 4201
ysr@777 4202 // OK, this doesn't safeguard against all possible scenarios, as it is
ysr@777 4203 // possible for two threads to set the _claimed flag at the same
ysr@777 4204 // time. But it is only for debugging purposes anyway and it will
ysr@777 4205 // catch most problems.
ysr@777 4206 _claimed = true;
ysr@777 4207
ysr@777 4208 _start_time_ms = os::elapsedVTime() * 1000.0;
ysr@777 4209 statsOnly( _interval_start_time_ms = _start_time_ms );
ysr@777 4210
johnc@4787 4211 // If do_stealing is true then do_marking_step will attempt to
johnc@4787 4212 // steal work from the other CMTasks. It only makes sense to
johnc@4787 4213 // enable stealing when the termination protocol is enabled
johnc@4787 4214 // and do_marking_step() is not being called serially.
johnc@4787 4215 bool do_stealing = do_termination && !is_serial;
johnc@4787 4216
ysr@777 4217 double diff_prediction_ms =
ysr@777 4218 g1_policy->get_new_prediction(&_marking_step_diffs_ms);
ysr@777 4219 _time_target_ms = time_target_ms - diff_prediction_ms;
ysr@777 4220
ysr@777 4221 // set up the variables that are used in the work-based scheme to
ysr@777 4222 // call the regular clock method
ysr@777 4223 _words_scanned = 0;
ysr@777 4224 _refs_reached = 0;
ysr@777 4225 recalculate_limits();
ysr@777 4226
ysr@777 4227 // clear all flags
ysr@777 4228 clear_has_aborted();
johnc@2494 4229 _has_timed_out = false;
ysr@777 4230 _draining_satb_buffers = false;
ysr@777 4231
ysr@777 4232 ++_calls;
ysr@777 4233
tonyp@2973 4234 if (_cm->verbose_low()) {
johnc@4173 4235 gclog_or_tty->print_cr("[%u] >>>>>>>>>> START, call = %d, "
ysr@777 4236 "target = %1.2lfms >>>>>>>>>>",
johnc@4173 4237 _worker_id, _calls, _time_target_ms);
tonyp@2973 4238 }
ysr@777 4239
ysr@777 4240 // Set up the bitmap and oop closures. Anything that uses them is
ysr@777 4241 // eventually called from this method, so it is OK to allocate these
ysr@777 4242 // statically.
ysr@777 4243 CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap);
tonyp@2968 4244 G1CMOopClosure cm_oop_closure(_g1h, _cm, this);
tonyp@2968 4245 set_cm_oop_closure(&cm_oop_closure);
ysr@777 4246
ysr@777 4247 if (_cm->has_overflown()) {
tonyp@3691 4248 // This can happen if the mark stack overflows during a GC pause
tonyp@3691 4249 // and this task, after a yield point, restarts. We have to abort
tonyp@3691 4250 // as we need to get into the overflow protocol which happens
tonyp@3691 4251 // right at the end of this task.
ysr@777 4252 set_has_aborted();
ysr@777 4253 }
ysr@777 4254
ysr@777 4255 // First drain any available SATB buffers. After this, we will not
ysr@777 4256 // look at SATB buffers before the next invocation of this method.
ysr@777 4257 // If enough completed SATB buffers are queued up, the regular clock
ysr@777 4258 // will abort this task so that it restarts.
ysr@777 4259 drain_satb_buffers();
ysr@777 4260 // ...then partially drain the local queue and the global stack
ysr@777 4261 drain_local_queue(true);
ysr@777 4262 drain_global_stack(true);
ysr@777 4263
ysr@777 4264 do {
ysr@777 4265 if (!has_aborted() && _curr_region != NULL) {
ysr@777 4266 // This means that we're already holding on to a region.
tonyp@1458 4267 assert(_finger != NULL, "if region is not NULL, then the finger "
tonyp@1458 4268 "should not be NULL either");
ysr@777 4269
ysr@777 4270 // We might have restarted this task after an evacuation pause
ysr@777 4271 // which might have evacuated the region we're holding on to
ysr@777 4272 // underneath our feet. Let's read its limit again to make sure
ysr@777 4273 // that we do not iterate over a region of the heap that
ysr@777 4274 // contains garbage (update_region_limit() will also move
ysr@777 4275 // _finger to the start of the region if it is found empty).
ysr@777 4276 update_region_limit();
ysr@777 4277 // We will start from _finger not from the start of the region,
ysr@777 4278 // as we might be restarting this task after aborting half-way
ysr@777 4279 // through scanning this region. In this case, _finger points to
ysr@777 4280 // the address where we last found a marked object. If this is a
ysr@777 4281 // fresh region, _finger points to start().
ysr@777 4282 MemRegion mr = MemRegion(_finger, _region_limit);
ysr@777 4283
tonyp@2973 4284 if (_cm->verbose_low()) {
johnc@4173 4285 gclog_or_tty->print_cr("[%u] we're scanning part "
ysr@777 4286 "["PTR_FORMAT", "PTR_FORMAT") "
johnc@4580 4287 "of region "HR_FORMAT,
drchase@6680 4288 _worker_id, p2i(_finger), p2i(_region_limit),
johnc@4580 4289 HR_FORMAT_PARAMS(_curr_region));
tonyp@2973 4290 }
ysr@777 4291
johnc@4580 4292 assert(!_curr_region->isHumongous() || mr.start() == _curr_region->bottom(),
johnc@4580 4293 "humongous regions should go around loop once only");
johnc@4580 4294
johnc@4580 4295 // Some special cases:
johnc@4580 4296 // If the memory region is empty, we can just give up the region.
johnc@4580 4297 // If the current region is humongous then we only need to check
johnc@4580 4298 // the bitmap for the bit associated with the start of the object,
johnc@4580 4299 // scan the object if it's live, and give up the region.
johnc@4580 4300 // Otherwise, let's iterate over the bitmap of the part of the region
johnc@4580 4301 // that is left.
johnc@4575 4302 // If the iteration is successful, give up the region.
johnc@4580 4303 if (mr.is_empty()) {
johnc@4580 4304 giveup_current_region();
johnc@4580 4305 regular_clock_call();
johnc@4580 4306 } else if (_curr_region->isHumongous() && mr.start() == _curr_region->bottom()) {
johnc@4580 4307 if (_nextMarkBitMap->isMarked(mr.start())) {
johnc@4580 4308 // The object is marked - apply the closure
johnc@4580 4309 BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start());
johnc@4580 4310 bitmap_closure.do_bit(offset);
johnc@4580 4311 }
johnc@4580 4312 // Even if this task aborted while scanning the humongous object
johnc@4580 4313 // we can (and should) give up the current region.
johnc@4580 4314 giveup_current_region();
johnc@4580 4315 regular_clock_call();
johnc@4580 4316 } else if (_nextMarkBitMap->iterate(&bitmap_closure, mr)) {
ysr@777 4317 giveup_current_region();
ysr@777 4318 regular_clock_call();
ysr@777 4319 } else {
tonyp@1458 4320 assert(has_aborted(), "currently the only way to do so");
ysr@777 4321 // The only way to abort the bitmap iteration is to return
ysr@777 4322 // false from the do_bit() method. However, inside the
ysr@777 4323 // do_bit() method we move the _finger to point to the
ysr@777 4324 // object currently being looked at. So, if we bail out, we
ysr@777 4325 // have definitely set _finger to something non-null.
tonyp@1458 4326 assert(_finger != NULL, "invariant");
ysr@777 4327
ysr@777 4328 // Region iteration was actually aborted. So now _finger
ysr@777 4329 // points to the address of the object we last scanned. If we
ysr@777 4330 // leave it there, when we restart this task, we will rescan
ysr@777 4331 // the object. It is easy to avoid this. We move the finger by
ysr@777 4332 // enough to point to the next possible object header (the
ysr@777 4333 // bitmap knows by how much we need to move it as it knows its
ysr@777 4334 // granularity).
apetrusenko@1749 4335 assert(_finger < _region_limit, "invariant");
tamao@4733 4336 HeapWord* new_finger = _nextMarkBitMap->nextObject(_finger);
apetrusenko@1749 4337 // Check if bitmap iteration was aborted while scanning the last object
apetrusenko@1749 4338 if (new_finger >= _region_limit) {
tonyp@3691 4339 giveup_current_region();
apetrusenko@1749 4340 } else {
tonyp@3691 4341 move_finger_to(new_finger);
apetrusenko@1749 4342 }
ysr@777 4343 }
ysr@777 4344 }
ysr@777 4345 // At this point we have either completed iterating over the
ysr@777 4346 // region we were holding on to, or we have aborted.
ysr@777 4347
ysr@777 4348 // We then partially drain the local queue and the global stack.
ysr@777 4349 // (Do we really need this?)
ysr@777 4350 drain_local_queue(true);
ysr@777 4351 drain_global_stack(true);
ysr@777 4352
ysr@777 4353 // Read the note on the claim_region() method on why it might
ysr@777 4354 // return NULL with potentially more regions available for
ysr@777 4355 // claiming and why we have to check out_of_regions() to determine
ysr@777 4356 // whether we're done or not.
ysr@777 4357 while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
ysr@777 4358 // We are going to try to claim a new region. We should have
ysr@777 4359 // given up on the previous one.
tonyp@1458 4360 // Separated the asserts so that we know which one fires.
tonyp@1458 4361 assert(_curr_region == NULL, "invariant");
tonyp@1458 4362 assert(_finger == NULL, "invariant");
tonyp@1458 4363 assert(_region_limit == NULL, "invariant");
tonyp@2973 4364 if (_cm->verbose_low()) {
johnc@4173 4365 gclog_or_tty->print_cr("[%u] trying to claim a new region", _worker_id);
tonyp@2973 4366 }
johnc@4173 4367 HeapRegion* claimed_region = _cm->claim_region(_worker_id);
ysr@777 4368 if (claimed_region != NULL) {
ysr@777 4369 // Yes, we managed to claim one
ysr@777 4370 statsOnly( ++_regions_claimed );
ysr@777 4371
tonyp@2973 4372 if (_cm->verbose_low()) {
johnc@4173 4373 gclog_or_tty->print_cr("[%u] we successfully claimed "
ysr@777 4374 "region "PTR_FORMAT,
drchase@6680 4375 _worker_id, p2i(claimed_region));
tonyp@2973 4376 }
ysr@777 4377
ysr@777 4378 setup_for_region(claimed_region);
tonyp@1458 4379 assert(_curr_region == claimed_region, "invariant");
ysr@777 4380 }
ysr@777 4381 // It is important to call the regular clock here. It might take
ysr@777 4382 // a while to claim a region if, for example, we hit a large
ysr@777 4383 // block of empty regions. So we need to call the regular clock
ysr@777 4384 // method once round the loop to make sure it's called
ysr@777 4385 // frequently enough.
ysr@777 4386 regular_clock_call();
ysr@777 4387 }
ysr@777 4388
ysr@777 4389 if (!has_aborted() && _curr_region == NULL) {
tonyp@1458 4390 assert(_cm->out_of_regions(),
tonyp@1458 4391 "at this point we should be out of regions");
ysr@777 4392 }
ysr@777 4393 } while ( _curr_region != NULL && !has_aborted());
ysr@777 4394
ysr@777 4395 if (!has_aborted()) {
ysr@777 4396 // We cannot check whether the global stack is empty, since other
tonyp@3691 4397 // tasks might be pushing objects to it concurrently.
tonyp@1458 4398 assert(_cm->out_of_regions(),
tonyp@1458 4399 "at this point we should be out of regions");
ysr@777 4400
tonyp@2973 4401 if (_cm->verbose_low()) {
johnc@4173 4402 gclog_or_tty->print_cr("[%u] all regions claimed", _worker_id);
tonyp@2973 4403 }
ysr@777 4404
ysr@777 4405 // Try to reduce the number of available SATB buffers so that
ysr@777 4406 // remark has less work to do.
ysr@777 4407 drain_satb_buffers();
ysr@777 4408 }
ysr@777 4409
ysr@777 4410 // Since we've done everything else, we can now totally drain the
ysr@777 4411 // local queue and global stack.
ysr@777 4412 drain_local_queue(false);
ysr@777 4413 drain_global_stack(false);
ysr@777 4414
ysr@777 4415 // Attempt at work stealing from other task's queues.
johnc@2494 4416 if (do_stealing && !has_aborted()) {
ysr@777 4417 // We have not aborted. This means that we have finished all that
ysr@777 4418 // we could. Let's try to do some stealing...
ysr@777 4419
ysr@777 4420 // We cannot check whether the global stack is empty, since other
tonyp@3691 4421 // tasks might be pushing objects to it concurrently.
tonyp@1458 4422 assert(_cm->out_of_regions() && _task_queue->size() == 0,
tonyp@1458 4423 "only way to reach here");
ysr@777 4424
tonyp@2973 4425 if (_cm->verbose_low()) {
johnc@4173 4426 gclog_or_tty->print_cr("[%u] starting to steal", _worker_id);
tonyp@2973 4427 }
ysr@777 4428
ysr@777 4429 while (!has_aborted()) {
ysr@777 4430 oop obj;
ysr@777 4431 statsOnly( ++_steal_attempts );
ysr@777 4432
johnc@4173 4433 if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) {
tonyp@2973 4434 if (_cm->verbose_medium()) {
johnc@4173 4435 gclog_or_tty->print_cr("[%u] stolen "PTR_FORMAT" successfully",
drchase@6680 4436 _worker_id, p2i((void*) obj));
tonyp@2973 4437 }
ysr@777 4438
ysr@777 4439 statsOnly( ++_steals );
ysr@777 4440
tonyp@1458 4441 assert(_nextMarkBitMap->isMarked((HeapWord*) obj),
tonyp@1458 4442 "any stolen object should be marked");
ysr@777 4443 scan_object(obj);
ysr@777 4444
ysr@777 4445 // And since we're towards the end, let's totally drain the
ysr@777 4446 // local queue and global stack.
ysr@777 4447 drain_local_queue(false);
ysr@777 4448 drain_global_stack(false);
ysr@777 4449 } else {
ysr@777 4450 break;
ysr@777 4451 }
ysr@777 4452 }
ysr@777 4453 }
ysr@777 4454
tonyp@2848 4455 // If we are about to wrap up and go into termination, check if we
tonyp@2848 4456 // should raise the overflow flag.
tonyp@2848 4457 if (do_termination && !has_aborted()) {
tonyp@2848 4458 if (_cm->force_overflow()->should_force()) {
tonyp@2848 4459 _cm->set_has_overflown();
tonyp@2848 4460 regular_clock_call();
tonyp@2848 4461 }
tonyp@2848 4462 }
tonyp@2848 4463
ysr@777 4464 // We still haven't aborted. Now, let's try to get into the
ysr@777 4465 // termination protocol.
johnc@2494 4466 if (do_termination && !has_aborted()) {
ysr@777 4467 // We cannot check whether the global stack is empty, since other
tonyp@3691 4468 // tasks might be concurrently pushing objects on it.
tonyp@1458 4469 // Separated the asserts so that we know which one fires.
tonyp@1458 4470 assert(_cm->out_of_regions(), "only way to reach here");
tonyp@1458 4471 assert(_task_queue->size() == 0, "only way to reach here");
ysr@777 4472
tonyp@2973 4473 if (_cm->verbose_low()) {
johnc@4173 4474 gclog_or_tty->print_cr("[%u] starting termination protocol", _worker_id);
tonyp@2973 4475 }
ysr@777 4476
ysr@777 4477 _termination_start_time_ms = os::elapsedVTime() * 1000.0;
johnc@4787 4478
ysr@777 4479 // The CMTask class also extends the TerminatorTerminator class,
ysr@777 4480 // hence its should_exit_termination() method will also decide
ysr@777 4481 // whether to exit the termination protocol or not.
johnc@4787 4482 bool finished = (is_serial ||
johnc@4787 4483 _cm->terminator()->offer_termination(this));
ysr@777 4484 double termination_end_time_ms = os::elapsedVTime() * 1000.0;
ysr@777 4485 _termination_time_ms +=
ysr@777 4486 termination_end_time_ms - _termination_start_time_ms;
ysr@777 4487
ysr@777 4488 if (finished) {
ysr@777 4489 // We're all done.
ysr@777 4490
johnc@4173 4491 if (_worker_id == 0) {
ysr@777 4492 // let's allow task 0 to do this
ysr@777 4493 if (concurrent()) {
tonyp@1458 4494 assert(_cm->concurrent_marking_in_progress(), "invariant");
ysr@777 4495 // we need to set this to false before the next
ysr@777 4496 // safepoint. This way we ensure that the marking phase
ysr@777 4497 // doesn't observe any more heap expansions.
ysr@777 4498 _cm->clear_concurrent_marking_in_progress();
ysr@777 4499 }
ysr@777 4500 }
ysr@777 4501
ysr@777 4502 // We can now guarantee that the global stack is empty, since
tonyp@1458 4503 // all other tasks have finished. We separated the guarantees so
tonyp@1458 4504 // that, if a condition is false, we can immediately find out
tonyp@1458 4505 // which one.
tonyp@1458 4506 guarantee(_cm->out_of_regions(), "only way to reach here");
tonyp@1458 4507 guarantee(_cm->mark_stack_empty(), "only way to reach here");
tonyp@1458 4508 guarantee(_task_queue->size() == 0, "only way to reach here");
tonyp@1458 4509 guarantee(!_cm->has_overflown(), "only way to reach here");
tonyp@1458 4510 guarantee(!_cm->mark_stack_overflow(), "only way to reach here");
ysr@777 4511
tonyp@2973 4512 if (_cm->verbose_low()) {
johnc@4173 4513 gclog_or_tty->print_cr("[%u] all tasks terminated", _worker_id);
tonyp@2973 4514 }
ysr@777 4515 } else {
ysr@777 4516 // Apparently there's more work to do. Let's abort this task. The
ysr@777 4517 // caller will restart it and we can hopefully find more things to do.
ysr@777 4518
tonyp@2973 4519 if (_cm->verbose_low()) {
johnc@4173 4520 gclog_or_tty->print_cr("[%u] apparently there is more work to do",
johnc@4173 4521 _worker_id);
tonyp@2973 4522 }
ysr@777 4523
ysr@777 4524 set_has_aborted();
ysr@777 4525 statsOnly( ++_aborted_termination );
ysr@777 4526 }
ysr@777 4527 }
ysr@777 4528
ysr@777 4529 // Mainly for debugging purposes to make sure that a pointer to the
ysr@777 4530 // closure which was statically allocated in this frame doesn't
ysr@777 4531 // escape it by accident.
tonyp@2968 4532 set_cm_oop_closure(NULL);
ysr@777 4533 double end_time_ms = os::elapsedVTime() * 1000.0;
ysr@777 4534 double elapsed_time_ms = end_time_ms - _start_time_ms;
ysr@777 4535 // Update the step history.
ysr@777 4536 _step_times_ms.add(elapsed_time_ms);
ysr@777 4537
ysr@777 4538 if (has_aborted()) {
ysr@777 4539 // The task was aborted for some reason.
ysr@777 4540
ysr@777 4541 statsOnly( ++_aborted );
ysr@777 4542
johnc@2494 4543 if (_has_timed_out) {
ysr@777 4544 double diff_ms = elapsed_time_ms - _time_target_ms;
ysr@777 4545 // Keep statistics of how well we did with respect to hitting
ysr@777 4546 // our target only if we actually timed out (if we aborted for
ysr@777 4547 // other reasons, then the results might get skewed).
ysr@777 4548 _marking_step_diffs_ms.add(diff_ms);
ysr@777 4549 }
ysr@777 4550
ysr@777 4551 if (_cm->has_overflown()) {
ysr@777 4552 // This is the interesting one. We aborted because a global
ysr@777 4553 // overflow was raised. This means we have to restart the
ysr@777 4554 // marking phase and start iterating over regions. However, in
ysr@777 4555 // order to do this we have to make sure that all tasks stop
ysr@777 4556 // what they are doing and re-initialise in a safe manner. We
ysr@777 4557 // will achieve this with the use of two barrier sync points.
ysr@777 4558
tonyp@2973 4559 if (_cm->verbose_low()) {
johnc@4173 4560 gclog_or_tty->print_cr("[%u] detected overflow", _worker_id);
tonyp@2973 4561 }
ysr@777 4562
johnc@4787 4563 if (!is_serial) {
johnc@4787 4564 // We only need to enter the sync barrier if being called
johnc@4787 4565 // from a parallel context
johnc@4787 4566 _cm->enter_first_sync_barrier(_worker_id);
johnc@4787 4567
johnc@4787 4568 // When we exit this sync barrier we know that all tasks have
johnc@4787 4569 // stopped doing marking work. So, it's now safe to
johnc@4787 4570 // re-initialise our data structures. At the end of this method,
johnc@4787 4571 // task 0 will clear the global data structures.
johnc@4787 4572 }
ysr@777 4573
ysr@777 4574 statsOnly( ++_aborted_overflow );
ysr@777 4575
ysr@777 4576 // We clear the local state of this task...
ysr@777 4577 clear_region_fields();
ysr@777 4578
johnc@4787 4579 if (!is_serial) {
johnc@4787 4580 // ...and enter the second barrier.
johnc@4787 4581 _cm->enter_second_sync_barrier(_worker_id);
johnc@4787 4582 }
johnc@4788 4583 // At this point, if we're in the concurrent phase of
johnc@4788 4584 // marking, everything has been re-initialized and we're
ysr@777 4585 // ready to restart.
ysr@777 4586 }
ysr@777 4587
ysr@777 4588 if (_cm->verbose_low()) {
johnc@4173 4589 gclog_or_tty->print_cr("[%u] <<<<<<<<<< ABORTING, target = %1.2lfms, "
ysr@777 4590 "elapsed = %1.2lfms <<<<<<<<<<",
johnc@4173 4591 _worker_id, _time_target_ms, elapsed_time_ms);
tonyp@2973 4592 if (_cm->has_aborted()) {
johnc@4173 4593 gclog_or_tty->print_cr("[%u] ========== MARKING ABORTED ==========",
johnc@4173 4594 _worker_id);
tonyp@2973 4595 }
ysr@777 4596 }
ysr@777 4597 } else {
tonyp@2973 4598 if (_cm->verbose_low()) {
johnc@4173 4599 gclog_or_tty->print_cr("[%u] <<<<<<<<<< FINISHED, target = %1.2lfms, "
ysr@777 4600 "elapsed = %1.2lfms <<<<<<<<<<",
johnc@4173 4601 _worker_id, _time_target_ms, elapsed_time_ms);
tonyp@2973 4602 }
ysr@777 4603 }
ysr@777 4604
ysr@777 4605 _claimed = false;
ysr@777 4606 }
ysr@777 4607
johnc@4173 4608 CMTask::CMTask(uint worker_id,
ysr@777 4609 ConcurrentMark* cm,
johnc@3463 4610 size_t* marked_bytes,
johnc@3463 4611 BitMap* card_bm,
ysr@777 4612 CMTaskQueue* task_queue,
ysr@777 4613 CMTaskQueueSet* task_queues)
ysr@777 4614 : _g1h(G1CollectedHeap::heap()),
johnc@4173 4615 _worker_id(worker_id), _cm(cm),
ysr@777 4616 _claimed(false),
ysr@777 4617 _nextMarkBitMap(NULL), _hash_seed(17),
ysr@777 4618 _task_queue(task_queue),
ysr@777 4619 _task_queues(task_queues),
tonyp@2968 4620 _cm_oop_closure(NULL),
johnc@3463 4621 _marked_bytes_array(marked_bytes),
johnc@3463 4622 _card_bm(card_bm) {
tonyp@1458 4623 guarantee(task_queue != NULL, "invariant");
tonyp@1458 4624 guarantee(task_queues != NULL, "invariant");
ysr@777 4625
ysr@777 4626 statsOnly( _clock_due_to_scanning = 0;
ysr@777 4627 _clock_due_to_marking = 0 );
ysr@777 4628
ysr@777 4629 _marking_step_diffs_ms.add(0.5);
ysr@777 4630 }
tonyp@2717 4631
tonyp@2717 4632 // These are formatting macros that are used below to ensure
tonyp@2717 4633 // consistent formatting. The *_H_* versions are used to format the
tonyp@2717 4634 // header for a particular value and they should be kept consistent
tonyp@2717 4635 // with the corresponding macro. Also note that most of the macros add
tonyp@2717 4636 // the necessary white space (as a prefix) which makes them a bit
tonyp@2717 4637 // easier to compose.
tonyp@2717 4638
tonyp@2717 4639 // All the output lines are prefixed with this string to be able to
tonyp@2717 4640 // identify them easily in a large log file.
tonyp@2717 4641 #define G1PPRL_LINE_PREFIX "###"
tonyp@2717 4642
tonyp@2717 4643 #define G1PPRL_ADDR_BASE_FORMAT " "PTR_FORMAT"-"PTR_FORMAT
tonyp@2717 4644 #ifdef _LP64
tonyp@2717 4645 #define G1PPRL_ADDR_BASE_H_FORMAT " %37s"
tonyp@2717 4646 #else // _LP64
tonyp@2717 4647 #define G1PPRL_ADDR_BASE_H_FORMAT " %21s"
tonyp@2717 4648 #endif // _LP64
tonyp@2717 4649
tonyp@2717 4650 // For per-region info
tonyp@2717 4651 #define G1PPRL_TYPE_FORMAT " %-4s"
tonyp@2717 4652 #define G1PPRL_TYPE_H_FORMAT " %4s"
tonyp@2717 4653 #define G1PPRL_BYTE_FORMAT " "SIZE_FORMAT_W(9)
tonyp@2717 4654 #define G1PPRL_BYTE_H_FORMAT " %9s"
tonyp@2717 4655 #define G1PPRL_DOUBLE_FORMAT " %14.1f"
tonyp@2717 4656 #define G1PPRL_DOUBLE_H_FORMAT " %14s"
tonyp@2717 4657
tonyp@2717 4658 // For summary info
tonyp@2717 4659 #define G1PPRL_SUM_ADDR_FORMAT(tag) " "tag":"G1PPRL_ADDR_BASE_FORMAT
tonyp@2717 4660 #define G1PPRL_SUM_BYTE_FORMAT(tag) " "tag": "SIZE_FORMAT
tonyp@2717 4661 #define G1PPRL_SUM_MB_FORMAT(tag) " "tag": %1.2f MB"
tonyp@2717 4662 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag)" / %1.2f %%"
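// Put together, each per-region line of the liveness output looks
// roughly like this (all values and the type string invented for
// illustration):
//
//   ### OLD 0x00000000f0000000-0x00000000f0100000 1048576 524288 ...
//
// i.e. the "###" prefix, the region type, its address range, and the
// byte counts, gc efficiency, remset and code-root sizes formatted by
// the macros above.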
tonyp@2717 4663
tonyp@2717 4664 G1PrintRegionLivenessInfoClosure::
tonyp@2717 4665 G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name)
tonyp@2717 4666 : _out(out),
tonyp@2717 4667 _total_used_bytes(0), _total_capacity_bytes(0),
tonyp@2717 4668 _total_prev_live_bytes(0), _total_next_live_bytes(0),
tonyp@2717 4669 _hum_used_bytes(0), _hum_capacity_bytes(0),
tschatzl@5122 4670 _hum_prev_live_bytes(0), _hum_next_live_bytes(0),
johnc@5548 4671 _total_remset_bytes(0), _total_strong_code_roots_bytes(0) {
tonyp@2717 4672 G1CollectedHeap* g1h = G1CollectedHeap::heap();
tonyp@2717 4673 MemRegion g1_reserved = g1h->g1_reserved();
tonyp@2717 4674 double now = os::elapsedTime();
tonyp@2717 4675
tonyp@2717 4676 // Print the header of the output.
tonyp@2717 4677 _out->cr();
tonyp@2717 4678 _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
tonyp@2717 4679 _out->print_cr(G1PPRL_LINE_PREFIX" HEAP"
tonyp@2717 4680 G1PPRL_SUM_ADDR_FORMAT("reserved")
tonyp@2717 4681 G1PPRL_SUM_BYTE_FORMAT("region-size"),
drchase@6680 4682 p2i(g1_reserved.start()), p2i(g1_reserved.end()),
johnc@3182 4683 HeapRegion::GrainBytes);
tonyp@2717 4684 _out->print_cr(G1PPRL_LINE_PREFIX);
tonyp@2717 4685 _out->print_cr(G1PPRL_LINE_PREFIX
tschatzl@5122 4686 G1PPRL_TYPE_H_FORMAT
tschatzl@5122 4687 G1PPRL_ADDR_BASE_H_FORMAT
tschatzl@5122 4688 G1PPRL_BYTE_H_FORMAT
tschatzl@5122 4689 G1PPRL_BYTE_H_FORMAT
tschatzl@5122 4690 G1PPRL_BYTE_H_FORMAT
tschatzl@5122 4691 G1PPRL_DOUBLE_H_FORMAT
johnc@5548 4692 G1PPRL_BYTE_H_FORMAT
tschatzl@5122 4693 G1PPRL_BYTE_H_FORMAT,
tschatzl@5122 4694 "type", "address-range",
johnc@5548 4695 "used", "prev-live", "next-live", "gc-eff",
johnc@5548 4696 "remset", "code-roots");
johnc@3173 4697 _out->print_cr(G1PPRL_LINE_PREFIX
tschatzl@5122 4698 G1PPRL_TYPE_H_FORMAT
tschatzl@5122 4699 G1PPRL_ADDR_BASE_H_FORMAT
tschatzl@5122 4700 G1PPRL_BYTE_H_FORMAT
tschatzl@5122 4701 G1PPRL_BYTE_H_FORMAT
tschatzl@5122 4702 G1PPRL_BYTE_H_FORMAT
tschatzl@5122 4703 G1PPRL_DOUBLE_H_FORMAT
johnc@5548 4704 G1PPRL_BYTE_H_FORMAT
tschatzl@5122 4705 G1PPRL_BYTE_H_FORMAT,
tschatzl@5122 4706 "", "",
johnc@5548 4707 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
johnc@5548 4708 "(bytes)", "(bytes)");
tonyp@2717 4709 }
tonyp@2717 4710
tonyp@2717 4711 // It takes as a parameter a reference to one of the _hum_* fields,
tonyp@2717 4712 // deduces the corresponding value for a region in a humongous region
tonyp@2717 4713 // series (either the region size, or what's left if the _hum_* field
tonyp@2717 4714 // is < the region size), and updates the _hum_* field accordingly.
tonyp@2717 4715 size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) {
tonyp@2717 4716 size_t bytes = 0;
tonyp@2717 4717 // The > 0 check is to deal with the prev and next live bytes which
tonyp@2717 4718 // could be 0.
tonyp@2717 4719 if (*hum_bytes > 0) {
johnc@3182 4720 bytes = MIN2(HeapRegion::GrainBytes, *hum_bytes);
tonyp@2717 4721 *hum_bytes -= bytes;
tonyp@2717 4722 }
tonyp@2717 4723 return bytes;
tonyp@2717 4724 }
tonyp@2717 4725
tonyp@2717 4726 // It deduces the values for a region in a humongous region series
tonyp@2717 4727 // from the _hum_* fields and updates those accordingly. It assumes
tonyp@2717 4728 // that the _hum_* fields have already been set up from the "starts
tonyp@2717 4729 // humongous" region and we visit the regions in address order.
tonyp@2717 4730 void G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* used_bytes,
tonyp@2717 4731 size_t* capacity_bytes,
tonyp@2717 4732 size_t* prev_live_bytes,
tonyp@2717 4733 size_t* next_live_bytes) {
tonyp@2717 4734 assert(_hum_used_bytes > 0 && _hum_capacity_bytes > 0, "pre-condition");
tonyp@2717 4735 *used_bytes = get_hum_bytes(&_hum_used_bytes);
tonyp@2717 4736 *capacity_bytes = get_hum_bytes(&_hum_capacity_bytes);
tonyp@2717 4737 *prev_live_bytes = get_hum_bytes(&_hum_prev_live_bytes);
tonyp@2717 4738 *next_live_bytes = get_hum_bytes(&_hum_next_live_bytes);
tonyp@2717 4739 }
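// Worked example (sizes hypothetical): a humongous object of 2.5M with
// HeapRegion::GrainBytes == 1M spans three regions. The "starts
// humongous" region seeds _hum_used_bytes with 2.5M, and the three
// doHeapRegion() calls then peel off 1M, 1M and 0.5M respectively via
// get_hum_bytes(), so the per-region lines and the totals both add up
// to the object's real footprint.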
tonyp@2717 4740
tonyp@2717 4741 bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
brutisso@7195 4742 const char* type = r->get_type_str();
tonyp@2717 4743 HeapWord* bottom = r->bottom();
tonyp@2717 4744 HeapWord* end = r->end();
tonyp@2717 4745 size_t capacity_bytes = r->capacity();
tonyp@2717 4746 size_t used_bytes = r->used();
tonyp@2717 4747 size_t prev_live_bytes = r->live_bytes();
tonyp@2717 4748 size_t next_live_bytes = r->next_live_bytes();
tonyp@2717 4749 double gc_eff = r->gc_efficiency();
tschatzl@5122 4750 size_t remset_bytes = r->rem_set()->mem_size();
johnc@5548 4751 size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();
johnc@5548 4752
brutisso@7195 4753 if (r->startsHumongous()) {
tonyp@2717 4754 assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 &&
tonyp@2717 4755 _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0,
tonyp@2717 4756 "they should have been zeroed after the last time we used them");
tonyp@2717 4757 // Set up the _hum_* fields.
tonyp@2717 4758 _hum_capacity_bytes = capacity_bytes;
tonyp@2717 4759 _hum_used_bytes = used_bytes;
tonyp@2717 4760 _hum_prev_live_bytes = prev_live_bytes;
tonyp@2717 4761 _hum_next_live_bytes = next_live_bytes;
tonyp@2717 4762 get_hum_bytes(&used_bytes, &capacity_bytes,
tonyp@2717 4763 &prev_live_bytes, &next_live_bytes);
tonyp@2717 4764 end = bottom + HeapRegion::GrainWords;
tonyp@2717 4765 } else if (r->continuesHumongous()) {
tonyp@2717 4766 get_hum_bytes(&used_bytes, &capacity_bytes,
tonyp@2717 4767 &prev_live_bytes, &next_live_bytes);
tonyp@2717 4768 assert(end == bottom + HeapRegion::GrainWords, "invariant");
tonyp@2717 4769 }
tonyp@2717 4770
tonyp@2717 4771 _total_used_bytes += used_bytes;
tonyp@2717 4772 _total_capacity_bytes += capacity_bytes;
tonyp@2717 4773 _total_prev_live_bytes += prev_live_bytes;
tonyp@2717 4774 _total_next_live_bytes += next_live_bytes;
tschatzl@5122 4775 _total_remset_bytes += remset_bytes;
johnc@5548 4776 _total_strong_code_roots_bytes += strong_code_roots_bytes;
tonyp@2717 4777
tonyp@2717 4778 // Print a line for this particular region.
tonyp@2717 4779 _out->print_cr(G1PPRL_LINE_PREFIX
tonyp@2717 4780 G1PPRL_TYPE_FORMAT
tonyp@2717 4781 G1PPRL_ADDR_BASE_FORMAT
tonyp@2717 4782 G1PPRL_BYTE_FORMAT
tonyp@2717 4783 G1PPRL_BYTE_FORMAT
tonyp@2717 4784 G1PPRL_BYTE_FORMAT
tschatzl@5122 4785 G1PPRL_DOUBLE_FORMAT
johnc@5548 4786 G1PPRL_BYTE_FORMAT
tschatzl@5122 4787 G1PPRL_BYTE_FORMAT,
drchase@6680 4788 type, p2i(bottom), p2i(end),
johnc@5548 4789 used_bytes, prev_live_bytes, next_live_bytes, gc_eff,
johnc@5548 4790 remset_bytes, strong_code_roots_bytes);
tonyp@2717 4791
tonyp@2717 4792 return false;
tonyp@2717 4793 }
tonyp@2717 4794
tonyp@2717 4795 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
tschatzl@5122 4796 // add the static memory usages to the remembered set sizes
tschatzl@5122 4797 _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size();
tonyp@2717 4798 // Print the footer of the output.
tonyp@2717 4799 _out->print_cr(G1PPRL_LINE_PREFIX);
tonyp@2717 4800 _out->print_cr(G1PPRL_LINE_PREFIX
tonyp@2717 4801 " SUMMARY"
tonyp@2717 4802 G1PPRL_SUM_MB_FORMAT("capacity")
tonyp@2717 4803 G1PPRL_SUM_MB_PERC_FORMAT("used")
tonyp@2717 4804 G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
tschatzl@5122 4805 G1PPRL_SUM_MB_PERC_FORMAT("next-live")
johnc@5548 4806 G1PPRL_SUM_MB_FORMAT("remset")
johnc@5548 4807 G1PPRL_SUM_MB_FORMAT("code-roots"),
tonyp@2717 4808 bytes_to_mb(_total_capacity_bytes),
tonyp@2717 4809 bytes_to_mb(_total_used_bytes),
tonyp@2717 4810 perc(_total_used_bytes, _total_capacity_bytes),
tonyp@2717 4811 bytes_to_mb(_total_prev_live_bytes),
tonyp@2717 4812 perc(_total_prev_live_bytes, _total_capacity_bytes),
tonyp@2717 4813 bytes_to_mb(_total_next_live_bytes),
tschatzl@5122 4814 perc(_total_next_live_bytes, _total_capacity_bytes),
johnc@5548 4815 bytes_to_mb(_total_remset_bytes),
johnc@5548 4816 bytes_to_mb(_total_strong_code_roots_bytes));
tonyp@2717 4817 _out->cr();
tonyp@2717 4818 }
