src/share/vm/gc_implementation/g1/concurrentMark.cpp

author      brutisso
date        Mon, 16 Jan 2012 11:21:21 +0100
changeset   3455:851b58c26def
parent      3454:2e966d967c5c
child       3457:0b3d1ec6eaee
permissions -rw-r--r--

7130334: G1: Change comments and error messages that refer to CMS in g1/concurrentMark.cpp/hpp
Summary: Removed references to CMS in the concurrentMark.cpp/hpp files.
Reviewed-by: tonyp, jmasa, johnc

ysr@777 1 /*
johnc@3412 2 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "classfile/symbolTable.hpp"
tonyp@2968 27 #include "gc_implementation/g1/concurrentMark.inline.hpp"
stefank@2314 28 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
stefank@2314 29 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
stefank@2314 30 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
tonyp@3114 31 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
tonyp@2968 32 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
stefank@2314 33 #include "gc_implementation/g1/g1RemSet.hpp"
tonyp@3416 34 #include "gc_implementation/g1/heapRegion.inline.hpp"
stefank@2314 35 #include "gc_implementation/g1/heapRegionRemSet.hpp"
stefank@2314 36 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
kamg@2445 37 #include "gc_implementation/shared/vmGCOperations.hpp"
stefank@2314 38 #include "memory/genOopClosures.inline.hpp"
stefank@2314 39 #include "memory/referencePolicy.hpp"
stefank@2314 40 #include "memory/resourceArea.hpp"
stefank@2314 41 #include "oops/oop.inline.hpp"
stefank@2314 42 #include "runtime/handles.inline.hpp"
stefank@2314 43 #include "runtime/java.hpp"
ysr@777 44
brutisso@3455 45 // Concurrent marking bit map wrapper
ysr@777 46
johnc@3292 47 CMBitMapRO::CMBitMapRO(ReservedSpace rs, int shifter) :
ysr@777 48 _bm((uintptr_t*)NULL,0),
ysr@777 49 _shifter(shifter) {
ysr@777 50 _bmStartWord = (HeapWord*)(rs.base());
ysr@777 51 _bmWordSize = rs.size()/HeapWordSize; // rs.size() is in bytes
ysr@777 52 ReservedSpace brs(ReservedSpace::allocation_align_size_up(
ysr@777 53 (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
ysr@777 54
brutisso@3455 55 guarantee(brs.is_reserved(), "couldn't allocate concurrent marking bit map");
ysr@777 56 // For now we'll just commit all of the bit map up front.
ysr@777 57 // Later on we'll try to be more parsimonious with swap.
ysr@777 58 guarantee(_virtual_space.initialize(brs, brs.size()),
brutisso@3455 59 "couldn't reseve backing store for concurrent marking bit map");
ysr@777 60 assert(_virtual_space.committed_size() == brs.size(),
brutisso@3455 61 "didn't reserve backing store for all of concurrent marking bit map?");
ysr@777 62 _bm.set_map((uintptr_t*)_virtual_space.low());
ysr@777 63 assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
ysr@777 64 _bmWordSize, "inconsistency in bit map sizing");
ysr@777 65 _bm.set_size(_bmWordSize >> _shifter);
ysr@777 66 }
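// Added note (sizing illustrated, with assumed example values): one bit
// in the map covers 2^_shifter heap words, so the backing store needs
// (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1 bytes. For a 1 GB
// heap on a 64-bit VM (2^27 heap words) with _shifter == 0 (the common
// case, since MinObjAlignment is one word), that is 2^27 bits, i.e. a
// 16 MB bit map.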
ysr@777 67
ysr@777 68 HeapWord* CMBitMapRO::getNextMarkedWordAddress(HeapWord* addr,
ysr@777 69 HeapWord* limit) const {
ysr@777 70 // First we must round addr *up* to a possible object boundary.
ysr@777 71 addr = (HeapWord*)align_size_up((intptr_t)addr,
ysr@777 72 HeapWordSize << _shifter);
ysr@777 73 size_t addrOffset = heapWordToOffset(addr);
tonyp@2973 74 if (limit == NULL) {
tonyp@2973 75 limit = _bmStartWord + _bmWordSize;
tonyp@2973 76 }
ysr@777 77 size_t limitOffset = heapWordToOffset(limit);
ysr@777 78 size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
ysr@777 79 HeapWord* nextAddr = offsetToHeapWord(nextOffset);
ysr@777 80 assert(nextAddr >= addr, "get_next_one postcondition");
ysr@777 81 assert(nextAddr == limit || isMarked(nextAddr),
ysr@777 82 "get_next_one postcondition");
ysr@777 83 return nextAddr;
ysr@777 84 }
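// Added sketch (illustrative, not code from this file): callers walk
// the live objects in a range by repeatedly jumping to the next marked
// bit, e.g.:
//
//   HeapWord* cur = bm->getNextMarkedWordAddress(start, end);
//   while (cur < end) {
//     oop obj = oop(cur);
//     // ... process obj ...
//     cur = bm->getNextMarkedWordAddress(cur + obj->size(), end);
//   }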
ysr@777 85
ysr@777 86 HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(HeapWord* addr,
ysr@777 87 HeapWord* limit) const {
ysr@777 88 size_t addrOffset = heapWordToOffset(addr);
tonyp@2973 89 if (limit == NULL) {
tonyp@2973 90 limit = _bmStartWord + _bmWordSize;
tonyp@2973 91 }
ysr@777 92 size_t limitOffset = heapWordToOffset(limit);
ysr@777 93 size_t nextOffset = _bm.get_next_zero_offset(addrOffset, limitOffset);
ysr@777 94 HeapWord* nextAddr = offsetToHeapWord(nextOffset);
ysr@777 95 assert(nextAddr >= addr, "get_next_one postcondition");
ysr@777 96 assert(nextAddr == limit || !isMarked(nextAddr),
ysr@777 97 "get_next_one postcondition");
ysr@777 98 return nextAddr;
ysr@777 99 }
ysr@777 100
ysr@777 101 int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const {
ysr@777 102 assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
ysr@777 103 return (int) (diff >> _shifter);
ysr@777 104 }
ysr@777 105
ysr@777 106 void CMBitMapRO::mostly_disjoint_range_union(BitMap* from_bitmap,
ysr@777 107 size_t from_start_index,
ysr@777 108 HeapWord* to_start_word,
ysr@777 109 size_t word_num) {
ysr@777 110 _bm.mostly_disjoint_range_union(from_bitmap,
ysr@777 111 from_start_index,
ysr@777 112 heapWordToOffset(to_start_word),
ysr@777 113 word_num);
ysr@777 114 }
ysr@777 115
ysr@777 116 #ifndef PRODUCT
ysr@777 117 bool CMBitMapRO::covers(ReservedSpace rs) const {
ysr@777 118 // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
kvn@1080 119 assert(((size_t)_bm.size() * (size_t)(1 << _shifter)) == _bmWordSize,
ysr@777 120 "size inconsistency");
ysr@777 121 return _bmStartWord == (HeapWord*)(rs.base()) &&
ysr@777 122 _bmWordSize == rs.size()>>LogHeapWordSize;
ysr@777 123 }
ysr@777 124 #endif
ysr@777 125
ysr@777 126 void CMBitMap::clearAll() {
ysr@777 127 _bm.clear();
ysr@777 128 return;
ysr@777 129 }
ysr@777 130
ysr@777 131 void CMBitMap::markRange(MemRegion mr) {
ysr@777 132 mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
ysr@777 133 assert(!mr.is_empty(), "unexpected empty region");
ysr@777 134 assert((offsetToHeapWord(heapWordToOffset(mr.end())) ==
ysr@777 135 ((HeapWord *) mr.end())),
ysr@777 136 "markRange memory region end is not card aligned");
ysr@777 137 // convert address range into offset range
ysr@777 138 _bm.at_put_range(heapWordToOffset(mr.start()),
ysr@777 139 heapWordToOffset(mr.end()), true);
ysr@777 140 }
ysr@777 141
ysr@777 142 void CMBitMap::clearRange(MemRegion mr) {
ysr@777 143 mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
ysr@777 144 assert(!mr.is_empty(), "unexpected empty region");
ysr@777 145 // convert address range into offset range
ysr@777 146 _bm.at_put_range(heapWordToOffset(mr.start()),
ysr@777 147 heapWordToOffset(mr.end()), false);
ysr@777 148 }
ysr@777 149
ysr@777 150 MemRegion CMBitMap::getAndClearMarkedRegion(HeapWord* addr,
ysr@777 151 HeapWord* end_addr) {
ysr@777 152 HeapWord* start = getNextMarkedWordAddress(addr);
ysr@777 153 start = MIN2(start, end_addr);
ysr@777 154 HeapWord* end = getNextUnmarkedWordAddress(start);
ysr@777 155 end = MIN2(end, end_addr);
ysr@777 156 assert(start <= end, "Consistency check");
ysr@777 157 MemRegion mr(start, end);
ysr@777 158 if (!mr.is_empty()) {
ysr@777 159 clearRange(mr);
ysr@777 160 }
ysr@777 161 return mr;
ysr@777 162 }
ysr@777 163
ysr@777 164 CMMarkStack::CMMarkStack(ConcurrentMark* cm) :
ysr@777 165 _base(NULL), _cm(cm)
ysr@777 166 #ifdef ASSERT
ysr@777 167 , _drain_in_progress(false)
ysr@777 168 , _drain_in_progress_yields(false)
ysr@777 169 #endif
ysr@777 170 {}
ysr@777 171
ysr@777 172 void CMMarkStack::allocate(size_t size) {
ysr@777 173 _base = NEW_C_HEAP_ARRAY(oop, size);
tonyp@2973 174 if (_base == NULL) {
tonyp@3416 175 vm_exit_during_initialization("Failed to allocate CM mark stack");
tonyp@2973 176 }
ysr@777 177 _index = 0;
ysr@777 178 _capacity = (jint) size;
tonyp@3416 179 _saved_index = -1;
ysr@777 180 NOT_PRODUCT(_max_depth = 0);
ysr@777 181 }
ysr@777 182
ysr@777 183 CMMarkStack::~CMMarkStack() {
tonyp@2973 184 if (_base != NULL) {
tonyp@2973 185 FREE_C_HEAP_ARRAY(oop, _base);
tonyp@2973 186 }
ysr@777 187 }
ysr@777 188
ysr@777 189 void CMMarkStack::par_push(oop ptr) {
ysr@777 190 while (true) {
ysr@777 191 if (isFull()) {
ysr@777 192 _overflow = true;
ysr@777 193 return;
ysr@777 194 }
ysr@777 195 // Otherwise...
ysr@777 196 jint index = _index;
ysr@777 197 jint next_index = index+1;
ysr@777 198 jint res = Atomic::cmpxchg(next_index, &_index, index);
ysr@777 199 if (res == index) {
ysr@777 200 _base[index] = ptr;
ysr@777 201 // Note that we don't maintain this atomically. We could, but it
ysr@777 202 // doesn't seem necessary.
ysr@777 203 NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
ysr@777 204 return;
ysr@777 205 }
ysr@777 206 // Otherwise, we need to try again.
ysr@777 207 }
ysr@777 208 }
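// Added note on the protocol above: a pusher first claims slot 'index'
// by advancing _index with a CAS, and only then stores the oop into
// the claimed slot; a pusher that loses the CAS race simply retries
// with the updated _index. This keeps single-element pushes lock-free;
// par_adjoin_arr() below uses the same claim-then-fill protocol for
// multiple elements.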
ysr@777 209
ysr@777 210 void CMMarkStack::par_adjoin_arr(oop* ptr_arr, int n) {
ysr@777 211 while (true) {
ysr@777 212 if (isFull()) {
ysr@777 213 _overflow = true;
ysr@777 214 return;
ysr@777 215 }
ysr@777 216 // Otherwise...
ysr@777 217 jint index = _index;
ysr@777 218 jint next_index = index + n;
ysr@777 219 if (next_index > _capacity) {
ysr@777 220 _overflow = true;
ysr@777 221 return;
ysr@777 222 }
ysr@777 223 jint res = Atomic::cmpxchg(next_index, &_index, index);
ysr@777 224 if (res == index) {
ysr@777 225 for (int i = 0; i < n; i++) {
ysr@777 226 int ind = index + i;
ysr@777 227 assert(ind < _capacity, "By overflow test above.");
ysr@777 228 _base[ind] = ptr_arr[i];
ysr@777 229 }
ysr@777 230 NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
ysr@777 231 return;
ysr@777 232 }
ysr@777 233 // Otherwise, we need to try again.
ysr@777 234 }
ysr@777 235 }
ysr@777 236
ysr@777 237
ysr@777 238 void CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
ysr@777 239 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
ysr@777 240 jint start = _index;
ysr@777 241 jint next_index = start + n;
ysr@777 242 if (next_index > _capacity) {
ysr@777 243 _overflow = true;
ysr@777 244 return;
ysr@777 245 }
ysr@777 246 // Otherwise.
ysr@777 247 _index = next_index;
ysr@777 248 for (int i = 0; i < n; i++) {
ysr@777 249 int ind = start + i;
tonyp@1458 250 assert(ind < _capacity, "By overflow test above.");
ysr@777 251 _base[ind] = ptr_arr[i];
ysr@777 252 }
ysr@777 253 }
ysr@777 254
ysr@777 255
ysr@777 256 bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
ysr@777 257 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
ysr@777 258 jint index = _index;
ysr@777 259 if (index == 0) {
ysr@777 260 *n = 0;
ysr@777 261 return false;
ysr@777 262 } else {
ysr@777 263 int k = MIN2(max, index);
ysr@777 264 jint new_ind = index - k;
ysr@777 265 for (int j = 0; j < k; j++) {
ysr@777 266 ptr_arr[j] = _base[new_ind + j];
ysr@777 267 }
ysr@777 268 _index = new_ind;
ysr@777 269 *n = k;
ysr@777 270 return true;
ysr@777 271 }
ysr@777 272 }
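// Added note: par_push_arr() and par_pop_arr() above trade the CAS loop
// for a single mutex; judging by the lock's name (ParGCRareEvent_lock),
// these transfers are expected to be rare enough that serializing them
// is acceptable, while the hot single-element and adjoin paths stay
// lock-free.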
ysr@777 273
ysr@777 274 CMRegionStack::CMRegionStack() : _base(NULL) {}
ysr@777 275
ysr@777 276 void CMRegionStack::allocate(size_t size) {
ysr@777 277 _base = NEW_C_HEAP_ARRAY(MemRegion, size);
tonyp@2973 278 if (_base == NULL) {
tonyp@2973 279 vm_exit_during_initialization("Failed to allocate CM region mark stack");
tonyp@2973 280 }
ysr@777 281 _index = 0;
ysr@777 282 _capacity = (jint) size;
ysr@777 283 }
ysr@777 284
ysr@777 285 CMRegionStack::~CMRegionStack() {
tonyp@2973 286 if (_base != NULL) {
tonyp@2973 287 FREE_C_HEAP_ARRAY(MemRegion, _base);
tonyp@2973 288 }
ysr@777 289 }
ysr@777 290
johnc@2190 291 void CMRegionStack::push_lock_free(MemRegion mr) {
tonyp@3416 292 guarantee(false, "push_lock_free(): don't call this any more");
tonyp@3416 293
ysr@777 294 assert(mr.word_size() > 0, "Precondition");
ysr@777 295 while (true) {
johnc@2190 296 jint index = _index;
johnc@2190 297
johnc@2190 298 if (index >= _capacity) {
ysr@777 299 _overflow = true;
ysr@777 300 return;
ysr@777 301 }
ysr@777 302 // Otherwise...
ysr@777 303 jint next_index = index+1;
ysr@777 304 jint res = Atomic::cmpxchg(next_index, &_index, index);
ysr@777 305 if (res == index) {
ysr@777 306 _base[index] = mr;
ysr@777 307 return;
ysr@777 308 }
ysr@777 309 // Otherwise, we need to try again.
ysr@777 310 }
ysr@777 311 }
ysr@777 312
johnc@2190 313 // Lock-free pop of the region stack. Called during the concurrent
johnc@2190 314 // marking / remark phases. Should only be called in tandem with
johnc@2190 315 // other lock-free pops.
johnc@2190 316 MemRegion CMRegionStack::pop_lock_free() {
tonyp@3416 317 guarantee(false, "pop_lock_free(): don't call this any more");
tonyp@3416 318
ysr@777 319 while (true) {
ysr@777 320 jint index = _index;
ysr@777 321
ysr@777 322 if (index == 0) {
ysr@777 323 return MemRegion();
ysr@777 324 }
johnc@2190 325 // Otherwise...
ysr@777 326 jint next_index = index-1;
ysr@777 327 jint res = Atomic::cmpxchg(next_index, &_index, index);
ysr@777 328 if (res == index) {
ysr@777 329 MemRegion mr = _base[next_index];
ysr@777 330 if (mr.start() != NULL) {
tonyp@1458 331 assert(mr.end() != NULL, "invariant");
tonyp@1458 332 assert(mr.word_size() > 0, "invariant");
ysr@777 333 return mr;
ysr@777 334 } else {
ysr@777 335 // that entry was invalidated... let's skip it
tonyp@1458 336 assert(mr.end() == NULL, "invariant");
ysr@777 337 }
ysr@777 338 }
ysr@777 339 // Otherwise, we need to try again.
ysr@777 340 }
ysr@777 341 }
johnc@2190 342
johnc@2190 343 #if 0
johnc@2190 344 // The routines that manipulate the region stack with a lock are
johnc@2190 345 // not currently used. They should be retained, however, as a
johnc@2190 346 // diagnostic aid.
tonyp@1793 347
tonyp@1793 348 void CMRegionStack::push_with_lock(MemRegion mr) {
tonyp@1793 349 assert(mr.word_size() > 0, "Precondition");
tonyp@1793 350 MutexLockerEx x(CMRegionStack_lock, Mutex::_no_safepoint_check_flag);
tonyp@1793 351
tonyp@1793 352 if (isFull()) {
tonyp@1793 353 _overflow = true;
tonyp@1793 354 return;
tonyp@1793 355 }
tonyp@1793 356
tonyp@1793 357 _base[_index] = mr;
tonyp@1793 358 _index += 1;
tonyp@1793 359 }
tonyp@1793 360
tonyp@1793 361 MemRegion CMRegionStack::pop_with_lock() {
tonyp@1793 362 MutexLockerEx x(CMRegionStack_lock, Mutex::_no_safepoint_check_flag);
tonyp@1793 363
tonyp@1793 364 while (true) {
tonyp@1793 365 if (_index == 0) {
tonyp@1793 366 return MemRegion();
tonyp@1793 367 }
tonyp@1793 368 _index -= 1;
tonyp@1793 369
tonyp@1793 370 MemRegion mr = _base[_index];
tonyp@1793 371 if (mr.start() != NULL) {
tonyp@1793 372 assert(mr.end() != NULL, "invariant");
tonyp@1793 373 assert(mr.word_size() > 0, "invariant");
tonyp@1793 374 return mr;
tonyp@1793 375 } else {
tonyp@1793 376 // that entry was invalidated... let's skip it
tonyp@1793 377 assert(mr.end() == NULL, "invariant");
tonyp@1793 378 }
tonyp@1793 379 }
tonyp@1793 380 }
johnc@2190 381 #endif
ysr@777 382
ysr@777 383 bool CMRegionStack::invalidate_entries_into_cset() {
tonyp@3416 384 guarantee(false, "invalidate_entries_into_cset(): don't call this any more");
tonyp@3416 385
ysr@777 386 bool result = false;
ysr@777 387 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@777 388 for (int i = 0; i < _oops_do_bound; ++i) {
ysr@777 389 MemRegion mr = _base[i];
ysr@777 390 if (mr.start() != NULL) {
tonyp@1458 391 assert(mr.end() != NULL, "invariant");
tonyp@1458 392 assert(mr.word_size() > 0, "invariant");
ysr@777 393 HeapRegion* hr = g1h->heap_region_containing(mr.start());
tonyp@1458 394 assert(hr != NULL, "invariant");
ysr@777 395 if (hr->in_collection_set()) {
ysr@777 396 // The region points into the collection set
ysr@777 397 _base[i] = MemRegion();
ysr@777 398 result = true;
ysr@777 399 }
ysr@777 400 } else {
ysr@777 401 // that entry was invalidated... let's skip it
tonyp@1458 402 assert(mr.end() == NULL, "invariant");
ysr@777 403 }
ysr@777 404 }
ysr@777 405 return result;
ysr@777 406 }
ysr@777 407
ysr@777 408 template<class OopClosureClass>
ysr@777 409 bool CMMarkStack::drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after) {
ysr@777 410 assert(!_drain_in_progress || !_drain_in_progress_yields || yield_after
ysr@777 411 || SafepointSynchronize::is_at_safepoint(),
ysr@777 412 "Drain recursion must be yield-safe.");
ysr@777 413 bool res = true;
ysr@777 414 debug_only(_drain_in_progress = true);
ysr@777 415 debug_only(_drain_in_progress_yields = yield_after);
ysr@777 416 while (!isEmpty()) {
ysr@777 417 oop newOop = pop();
ysr@777 418 assert(G1CollectedHeap::heap()->is_in_reserved(newOop), "Bad pop");
ysr@777 419 assert(newOop->is_oop(), "Expected an oop");
ysr@777 420 assert(bm == NULL || bm->isMarked((HeapWord*)newOop),
ysr@777 421 "only grey objects on this stack");
ysr@777 422 newOop->oop_iterate(cl);
ysr@777 423 if (yield_after && _cm->do_yield_check()) {
tonyp@2973 424 res = false;
tonyp@2973 425 break;
ysr@777 426 }
ysr@777 427 }
ysr@777 428 debug_only(_drain_in_progress = false);
ysr@777 429 return res;
ysr@777 430 }
ysr@777 431
tonyp@3416 432 void CMMarkStack::note_start_of_gc() {
tonyp@3416 433 assert(_saved_index == -1,
tonyp@3416 434 "note_start_of_gc()/end_of_gc() bracketed incorrectly");
tonyp@3416 435 _saved_index = _index;
tonyp@3416 436 }
tonyp@3416 437
tonyp@3416 438 void CMMarkStack::note_end_of_gc() {
tonyp@3416 439 // This is intentionally a guarantee, instead of an assert. If we
tonyp@3416 440 // accidentally add something to the mark stack during GC, it
tonyp@3416 441 // will be a correctness issue, so it's better if we crash. We'll
tonyp@3416 442 // only check this once per GC anyway, so it won't be a performance
tonyp@3416 443 // issue in any way.
tonyp@3416 444 guarantee(_saved_index == _index,
tonyp@3416 445 err_msg("saved index: %d index: %d", _saved_index, _index));
tonyp@3416 446 _saved_index = -1;
tonyp@3416 447 }
tonyp@3416 448
ysr@777 449 void CMMarkStack::oops_do(OopClosure* f) {
tonyp@3416 450 assert(_saved_index == _index,
tonyp@3416 451 err_msg("saved index: %d index: %d", _saved_index, _index));
tonyp@3416 452 for (int i = 0; i < _index; i += 1) {
ysr@777 453 f->do_oop(&_base[i]);
ysr@777 454 }
ysr@777 455 }
ysr@777 456
ysr@777 457 bool ConcurrentMark::not_yet_marked(oop obj) const {
ysr@777 458 return (_g1h->is_obj_ill(obj)
ysr@777 459 || (_g1h->is_in_permanent(obj)
ysr@777 460 && !nextMarkBitMap()->isMarked((HeapWord*)obj)));
ysr@777 461 }
ysr@777 462
ysr@777 463 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
ysr@777 464 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
ysr@777 465 #endif // _MSC_VER
ysr@777 466
jmasa@3357 467 uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
jmasa@3357 468 return MAX2((n_par_threads + 2) / 4, 1U);
jmasa@3294 469 }
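// Added note (worked out from the integer arithmetic above): this maps
// n_par_threads of 1-5 to 1 marking thread, 6-9 to 2, 10-13 to 3, and
// so on -- roughly a quarter of the parallel GC threads, but never
// fewer than one.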
jmasa@3294 470
ysr@777 471 ConcurrentMark::ConcurrentMark(ReservedSpace rs,
ysr@777 472 int max_regions) :
ysr@777 473 _markBitMap1(rs, MinObjAlignment - 1),
ysr@777 474 _markBitMap2(rs, MinObjAlignment - 1),
ysr@777 475
ysr@777 476 _parallel_marking_threads(0),
jmasa@3294 477 _max_parallel_marking_threads(0),
ysr@777 478 _sleep_factor(0.0),
ysr@777 479 _marking_task_overhead(1.0),
ysr@777 480 _cleanup_sleep_factor(0.0),
ysr@777 481 _cleanup_task_overhead(1.0),
tonyp@2472 482 _cleanup_list("Cleanup List"),
ysr@777 483 _region_bm(max_regions, false /* in_resource_area*/),
ysr@777 484 _card_bm((rs.size() + CardTableModRefBS::card_size - 1) >>
ysr@777 485 CardTableModRefBS::card_shift,
ysr@777 486 false /* in_resource_area*/),
ysr@777 487 _prevMarkBitMap(&_markBitMap1),
ysr@777 488 _nextMarkBitMap(&_markBitMap2),
ysr@777 489 _at_least_one_mark_complete(false),
ysr@777 490
ysr@777 491 _markStack(this),
ysr@777 492 _regionStack(),
ysr@777 493 // _finger set in set_non_marking_state
ysr@777 494
jmasa@3357 495 _max_task_num(MAX2((uint)ParallelGCThreads, 1U)),
ysr@777 496 // _active_tasks set in set_non_marking_state
ysr@777 497 // _tasks set inside the constructor
ysr@777 498 _task_queues(new CMTaskQueueSet((int) _max_task_num)),
ysr@777 499 _terminator(ParallelTaskTerminator((int) _max_task_num, _task_queues)),
ysr@777 500
ysr@777 501 _has_overflown(false),
ysr@777 502 _concurrent(false),
tonyp@1054 503 _has_aborted(false),
tonyp@1054 504 _restart_for_overflow(false),
tonyp@1054 505 _concurrent_marking_in_progress(false),
tonyp@1054 506 _should_gray_objects(false),
ysr@777 507
ysr@777 508 // _verbose_level set below
ysr@777 509
ysr@777 510 _init_times(),
ysr@777 511 _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
ysr@777 512 _cleanup_times(),
ysr@777 513 _total_counting_time(0.0),
ysr@777 514 _total_rs_scrub_time(0.0),
tonyp@2973 515 _parallel_workers(NULL) {
tonyp@2973 516 CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
tonyp@2973 517 if (verbose_level < no_verbose) {
ysr@777 518 verbose_level = no_verbose;
tonyp@2973 519 }
tonyp@2973 520 if (verbose_level > high_verbose) {
ysr@777 521 verbose_level = high_verbose;
tonyp@2973 522 }
ysr@777 523 _verbose_level = verbose_level;
ysr@777 524
tonyp@2973 525 if (verbose_low()) {
ysr@777 526 gclog_or_tty->print_cr("[global] init, heap start = "PTR_FORMAT", "
ysr@777 527 "heap end = "PTR_FORMAT, _heap_start, _heap_end);
tonyp@2973 528 }
ysr@777 529
jmasa@1719 530 _markStack.allocate(MarkStackSize);
johnc@1186 531 _regionStack.allocate(G1MarkRegionStackSize);
ysr@777 532
ysr@777 533 // Create & start a ConcurrentMark thread.
ysr@1280 534 _cmThread = new ConcurrentMarkThread(this);
ysr@1280 535 assert(cmThread() != NULL, "CM Thread should have been created");
ysr@1280 536 assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
ysr@1280 537
ysr@777 538 _g1h = G1CollectedHeap::heap();
ysr@777 539 assert(CGC_lock != NULL, "Where's the CGC_lock?");
ysr@777 540 assert(_markBitMap1.covers(rs), "_markBitMap1 inconsistency");
ysr@777 541 assert(_markBitMap2.covers(rs), "_markBitMap2 inconsistency");
ysr@777 542
ysr@777 543 SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
tonyp@1717 544 satb_qs.set_buffer_size(G1SATBBufferSize);
ysr@777 545
ysr@777 546 _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_task_num);
ysr@777 547 _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_task_num);
ysr@777 548
ysr@777 549 // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
ysr@777 550 _active_tasks = _max_task_num;
ysr@777 551 for (int i = 0; i < (int) _max_task_num; ++i) {
ysr@777 552 CMTaskQueue* task_queue = new CMTaskQueue();
ysr@777 553 task_queue->initialize();
ysr@777 554 _task_queues->register_queue(i, task_queue);
ysr@777 555
ysr@777 556 _tasks[i] = new CMTask(i, this, task_queue, _task_queues);
ysr@777 557 _accum_task_vtime[i] = 0.0;
ysr@777 558 }
ysr@777 559
jmasa@1719 560 if (ConcGCThreads > ParallelGCThreads) {
jmasa@1719 561 vm_exit_during_initialization("Can't have more ConcGCThreads "
ysr@777 562 "than ParallelGCThreads.");
ysr@777 563 }
ysr@777 564 if (ParallelGCThreads == 0) {
ysr@777 565 // if we are not running with any parallel GC threads we will not
ysr@777 566 // spawn any marking threads either
jmasa@3294 567 _parallel_marking_threads = 0;
jmasa@3294 568 _max_parallel_marking_threads = 0;
jmasa@3294 569 _sleep_factor = 0.0;
jmasa@3294 570 _marking_task_overhead = 1.0;
ysr@777 571 } else {
jmasa@1719 572 if (ConcGCThreads > 0) {
jmasa@1719 573 // notice that ConcGCThreads overrides G1MarkingOverheadPercent
ysr@777 574 // if both are set
ysr@777 575
jmasa@3357 576 _parallel_marking_threads = (uint) ConcGCThreads;
jmasa@3294 577 _max_parallel_marking_threads = _parallel_marking_threads;
ysr@777 578 _sleep_factor = 0.0;
ysr@777 579 _marking_task_overhead = 1.0;
johnc@1186 580 } else if (G1MarkingOverheadPercent > 0) {
ysr@777 581 // we will calculate the number of parallel marking threads
ysr@777 582 // based on a target overhead with respect to the soft real-time
ysr@777 583 // goal
ysr@777 584
johnc@1186 585 double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
ysr@777 586 double overall_cm_overhead =
johnc@1186 587 (double) MaxGCPauseMillis * marking_overhead /
johnc@1186 588 (double) GCPauseIntervalMillis;
ysr@777 589 double cpu_ratio = 1.0 / (double) os::processor_count();
ysr@777 590 double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
ysr@777 591 double marking_task_overhead =
ysr@777 592 overall_cm_overhead / marking_thread_num *
ysr@777 593 (double) os::processor_count();
ysr@777 594 double sleep_factor =
ysr@777 595 (1.0 - marking_task_overhead) / marking_task_overhead;
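// Added worked example (assumed flag values, not defaults): with
// G1MarkingOverheadPercent = 10, MaxGCPauseMillis = 200,
// GCPauseIntervalMillis = 1000 and 8 processors:
//   marking_overhead      = 0.10
//   overall_cm_overhead   = 200 * 0.10 / 1000  = 0.02
//   cpu_ratio             = 1 / 8              = 0.125
//   marking_thread_num    = ceil(0.02 / 0.125) = 1
//   marking_task_overhead = 0.02 / 1 * 8       = 0.16
//   sleep_factor          = (1 - 0.16) / 0.16  = 5.25
// i.e. one marking thread that sleeps about 5.25x as long as it runs.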
ysr@777 596
jmasa@3357 597 _parallel_marking_threads = (uint) marking_thread_num;
jmasa@3294 598 _max_parallel_marking_threads = _parallel_marking_threads;
ysr@777 599 _sleep_factor = sleep_factor;
ysr@777 600 _marking_task_overhead = marking_task_overhead;
ysr@777 601 } else {
jmasa@3357 602 _parallel_marking_threads = scale_parallel_threads((uint)ParallelGCThreads);
jmasa@3294 603 _max_parallel_marking_threads = _parallel_marking_threads;
ysr@777 604 _sleep_factor = 0.0;
ysr@777 605 _marking_task_overhead = 1.0;
ysr@777 606 }
ysr@777 607
tonyp@2973 608 if (parallel_marking_threads() > 1) {
ysr@777 609 _cleanup_task_overhead = 1.0;
tonyp@2973 610 } else {
ysr@777 611 _cleanup_task_overhead = marking_task_overhead();
tonyp@2973 612 }
ysr@777 613 _cleanup_sleep_factor =
ysr@777 614 (1.0 - cleanup_task_overhead()) / cleanup_task_overhead();
ysr@777 615
ysr@777 616 #if 0
ysr@777 617 gclog_or_tty->print_cr("Marking Threads %d", parallel_marking_threads());
ysr@777 618 gclog_or_tty->print_cr("CM Marking Task Overhead %1.4lf", marking_task_overhead());
ysr@777 619 gclog_or_tty->print_cr("CM Sleep Factor %1.4lf", sleep_factor());
ysr@777 620 gclog_or_tty->print_cr("CL Marking Task Overhead %1.4lf", cleanup_task_overhead());
ysr@777 621 gclog_or_tty->print_cr("CL Sleep Factor %1.4lf", cleanup_sleep_factor());
ysr@777 622 #endif
ysr@777 623
tonyp@1458 624 guarantee(parallel_marking_threads() > 0, "peace of mind");
jmasa@2188 625 _parallel_workers = new FlexibleWorkGang("G1 Parallel Marking Threads",
jmasa@3357 626 _max_parallel_marking_threads, false, true);
jmasa@2188 627 if (_parallel_workers == NULL) {
ysr@777 628 vm_exit_during_initialization("Failed necessary allocation.");
jmasa@2188 629 } else {
jmasa@2188 630 _parallel_workers->initialize_workers();
jmasa@2188 631 }
ysr@777 632 }
ysr@777 633
ysr@777 634 // so that the call below can read a sensible value
ysr@777 635 _heap_start = (HeapWord*) rs.base();
ysr@777 636 set_non_marking_state();
ysr@777 637 }
ysr@777 638
ysr@777 639 void ConcurrentMark::update_g1_committed(bool force) {
ysr@777 640 // If concurrent marking is not in progress, then we do not need to
ysr@777 641 // update _heap_end. This has a subtle and important
ysr@777 642 // side-effect. Imagine that two evacuation pauses happen between
ysr@777 643 // marking completion and remark. The first one can grow the
ysr@777 644 // heap (hence now the finger is below the heap end). Then, the
ysr@777 645 // second one could unnecessarily push regions on the region
ysr@777 646 // stack. This causes the invariant that the region stack is empty
ysr@777 647 // at the beginning of remark to be false. By ensuring that we do
ysr@777 648 // not observe heap expansions after marking is complete, then we do
ysr@777 649 // not have this problem.
tonyp@2973 650 if (!concurrent_marking_in_progress() && !force) return;
ysr@777 651
ysr@777 652 MemRegion committed = _g1h->g1_committed();
tonyp@1458 653 assert(committed.start() == _heap_start, "start shouldn't change");
ysr@777 654 HeapWord* new_end = committed.end();
ysr@777 655 if (new_end > _heap_end) {
ysr@777 656 // The heap has been expanded.
ysr@777 657
ysr@777 658 _heap_end = new_end;
ysr@777 659 }
ysr@777 660 // Notice that the heap can also shrink. However, this only happens
ysr@777 661 // during a Full GC (at least currently) and the entire marking
ysr@777 662 // phase will bail out and the task will not be restarted. So, let's
ysr@777 663 // do nothing.
ysr@777 664 }
ysr@777 665
ysr@777 666 void ConcurrentMark::reset() {
ysr@777 667 // Starting values for these two. This should be called in a STW
ysr@777 668 // phase. CM will be notified of any future g1_committed
ysr@777 669 // expansions at the end of evacuation pauses, when tasks are
ysr@777 670 // inactive.
ysr@777 671 MemRegion committed = _g1h->g1_committed();
ysr@777 672 _heap_start = committed.start();
ysr@777 673 _heap_end = committed.end();
ysr@777 674
tonyp@1458 675 // Separated the asserts so that we know which one fires.
tonyp@1458 676 assert(_heap_start != NULL, "heap bounds should look ok");
tonyp@1458 677 assert(_heap_end != NULL, "heap bounds should look ok");
tonyp@1458 678 assert(_heap_start < _heap_end, "heap bounds should look ok");
ysr@777 679
ysr@777 680 // reset all the marking data structures and any necessary flags
ysr@777 681 clear_marking_state();
ysr@777 682
tonyp@2973 683 if (verbose_low()) {
ysr@777 684 gclog_or_tty->print_cr("[global] resetting");
tonyp@2973 685 }
ysr@777 686
ysr@777 687 // We do reset all of them, since different phases will use
ysr@777 688 // different number of active threads. So, it's easiest to have all
ysr@777 689 // of them ready.
johnc@2190 690 for (int i = 0; i < (int) _max_task_num; ++i) {
ysr@777 691 _tasks[i]->reset(_nextMarkBitMap);
johnc@2190 692 }
ysr@777 693
ysr@777 694 // we need this to make sure that the flag is on during the evac
ysr@777 695 // pause with initial mark piggy-backed
ysr@777 696 set_concurrent_marking_in_progress();
ysr@777 697 }
ysr@777 698
jmasa@3357 699 void ConcurrentMark::set_phase(uint active_tasks, bool concurrent) {
tonyp@1458 700 assert(active_tasks <= _max_task_num, "we should not have more");
ysr@777 701
ysr@777 702 _active_tasks = active_tasks;
ysr@777 703 // Need to update the three data structures below according to the
ysr@777 704 // number of active threads for this phase.
ysr@777 705 _terminator = ParallelTaskTerminator((int) active_tasks, _task_queues);
ysr@777 706 _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
ysr@777 707 _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
ysr@777 708
ysr@777 709 _concurrent = concurrent;
ysr@777 710 // We propagate this to all tasks, not just the active ones.
ysr@777 711 for (int i = 0; i < (int) _max_task_num; ++i)
ysr@777 712 _tasks[i]->set_concurrent(concurrent);
ysr@777 713
ysr@777 714 if (concurrent) {
ysr@777 715 set_concurrent_marking_in_progress();
ysr@777 716 } else {
ysr@777 717 // We currently assume that the concurrent flag has been set to
ysr@777 718 // false before we start remark. At this point we should also be
ysr@777 719 // in a STW phase.
tonyp@1458 720 assert(!concurrent_marking_in_progress(), "invariant");
tonyp@1458 721 assert(_finger == _heap_end, "only way to get here");
ysr@777 722 update_g1_committed(true);
ysr@777 723 }
ysr@777 724 }
ysr@777 725
ysr@777 726 void ConcurrentMark::set_non_marking_state() {
ysr@777 727 // We set the global marking state to some default values when we're
ysr@777 728 // not doing marking.
ysr@777 729 clear_marking_state();
ysr@777 730 _active_tasks = 0;
ysr@777 731 clear_concurrent_marking_in_progress();
ysr@777 732 }
ysr@777 733
ysr@777 734 ConcurrentMark::~ConcurrentMark() {
stefank@3364 735 // The ConcurrentMark instance is never freed.
stefank@3364 736 ShouldNotReachHere();
ysr@777 737 }
ysr@777 738
ysr@777 739 void ConcurrentMark::clearNextBitmap() {
tonyp@1794 740 G1CollectedHeap* g1h = G1CollectedHeap::heap();
tonyp@1794 741 G1CollectorPolicy* g1p = g1h->g1_policy();
tonyp@1794 742
tonyp@1794 743 // Make sure that the concurrent mark thread still appears to be
tonyp@1794 744 // in the current cycle.
tonyp@1794 745 guarantee(cmThread()->during_cycle(), "invariant");
tonyp@1794 746
tonyp@1794 747 // We are finishing up the current cycle by clearing the next
tonyp@1794 748 // marking bitmap and getting it ready for the next cycle. During
tonyp@1794 749 // this time no other cycle can start. So, let's make sure that this
tonyp@1794 750 // is the case.
tonyp@1794 751 guarantee(!g1h->mark_in_progress(), "invariant");
tonyp@1794 752
tonyp@1794 753 // clear the mark bitmap (no grey objects to start with).
tonyp@1794 754 // We need to do this in chunks and offer to yield in between
tonyp@1794 755 // each chunk.
tonyp@1794 756 HeapWord* start = _nextMarkBitMap->startWord();
tonyp@1794 757 HeapWord* end = _nextMarkBitMap->endWord();
tonyp@1794 758 HeapWord* cur = start;
tonyp@1794 759 size_t chunkSize = M;
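// Added note: with chunkSize == M the loop below clears at most 2^20
// heap words per iteration -- 128 KB of bit map when _shifter == 0 --
// before offering to yield, which bounds how long this clearing can
// delay a pending safepoint.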
tonyp@1794 760 while (cur < end) {
tonyp@1794 761 HeapWord* next = cur + chunkSize;
tonyp@2973 762 if (next > end) {
tonyp@1794 763 next = end;
tonyp@2973 764 }
tonyp@1794 765 MemRegion mr(cur, next);
tonyp@1794 766 _nextMarkBitMap->clearRange(mr);
tonyp@1794 767 cur = next;
tonyp@1794 768 do_yield_check();
tonyp@1794 769
tonyp@1794 770 // Repeat the asserts from above. We'll do them as asserts here to
tonyp@1794 771 // minimize their overhead on the product. However, we'll have
tonyp@1794 772 // them as guarantees at the beginning / end of the bitmap
tonyp@1794 773 // clearing to get some checking in the product.
tonyp@1794 774 assert(cmThread()->during_cycle(), "invariant");
tonyp@1794 775 assert(!g1h->mark_in_progress(), "invariant");
tonyp@1794 776 }
tonyp@1794 777
tonyp@1794 778 // Repeat the asserts from above.
tonyp@1794 779 guarantee(cmThread()->during_cycle(), "invariant");
tonyp@1794 780 guarantee(!g1h->mark_in_progress(), "invariant");
ysr@777 781 }
ysr@777 782
ysr@777 783 class NoteStartOfMarkHRClosure: public HeapRegionClosure {
ysr@777 784 public:
ysr@777 785 bool doHeapRegion(HeapRegion* r) {
ysr@777 786 if (!r->continuesHumongous()) {
tonyp@3416 787 r->note_start_of_marking();
ysr@777 788 }
ysr@777 789 return false;
ysr@777 790 }
ysr@777 791 };
ysr@777 792
ysr@777 793 void ConcurrentMark::checkpointRootsInitialPre() {
ysr@777 794 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@777 795 G1CollectorPolicy* g1p = g1h->g1_policy();
ysr@777 796
ysr@777 797 _has_aborted = false;
ysr@777 798
jcoomes@1902 799 #ifndef PRODUCT
tonyp@1479 800 if (G1PrintReachableAtInitialMark) {
tonyp@1823 801 print_reachable("at-cycle-start",
johnc@2969 802 VerifyOption_G1UsePrevMarking, true /* all */);
tonyp@1479 803 }
jcoomes@1902 804 #endif
ysr@777 805
ysr@777 806 // Initialise marking structures. This has to be done in a STW phase.
ysr@777 807 reset();
tonyp@3416 808
tonyp@3416 809 // For each region note start of marking.
tonyp@3416 810 NoteStartOfMarkHRClosure startcl;
tonyp@3416 811 g1h->heap_region_iterate(&startcl);
ysr@777 812 }
ysr@777 813
ysr@777 814
ysr@777 815 void ConcurrentMark::checkpointRootsInitialPost() {
ysr@777 816 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@777 817
tonyp@2848 818 // If we force an overflow during remark, the remark operation will
tonyp@2848 819 // actually abort and we'll restart concurrent marking. If we always
tonyp@2848 820 // force an overflow during remark we'll never actually complete the
tonyp@2848 821 // marking phase. So, we initialize this here, at the start of the
tonyp@2848 822 // cycle, so that the remaining overflow number will decrease at
tonyp@2848 823 // every remark and we'll eventually not need to cause one.
tonyp@2848 824 force_overflow_stw()->init();
tonyp@2848 825
johnc@3175 826 // Start Concurrent Marking weak-reference discovery.
johnc@3175 827 ReferenceProcessor* rp = g1h->ref_processor_cm();
johnc@3175 828 // enable ("weak") refs discovery
johnc@3175 829 rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
ysr@892 830 rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
ysr@777 831
ysr@777 832 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
tonyp@1752 833 // This is the start of the marking cycle. We expect all
tonyp@1752 834 // threads to have SATB queues with active set to false.
tonyp@1752 835 satb_mq_set.set_active_all_threads(true, /* new active value */
tonyp@1752 836 false /* expected_active */);
ysr@777 837
ysr@777 838 // update_g1_committed() will be called at the end of an evac pause
ysr@777 839 // when marking is on. So, it's also called at the end of the
ysr@777 840 // initial-mark pause to update the heap end, if the heap expands
ysr@777 841 // during it. No need to call it here.
ysr@777 842 }
ysr@777 843
ysr@777 844 /*
tonyp@2848 845 * Notice that in the next two methods, we actually leave the STS
tonyp@2848 846 * during the barrier sync and join it immediately afterwards. If we
tonyp@2848 847 * do not do this, the following deadlock can occur: one thread could
tonyp@2848 848 * be in the barrier sync code, waiting for the other thread to also
tonyp@2848 849 * sync up, whereas another one could be trying to yield, while also
tonyp@2848 850 * waiting for the other threads to sync up too.
tonyp@2848 851 *
tonyp@2848 852 * Note, however, that this code is also used during remark and in
tonyp@2848 853 * this case we should not attempt to leave / enter the STS, otherwise
tonyp@2848 854 * we'll either hit an assert (debug / fastdebug) or deadlock
tonyp@2848 855 * (product). So we should only leave / enter the STS if we are
tonyp@2848 856 * operating concurrently.
tonyp@2848 857 *
tonyp@2848 858 * Because the thread that does the sync barrier has left the STS, it
tonyp@2848 859 * is possible for it to be suspended while a Full GC or an evacuation
tonyp@2848 860 * pause occurs. This is actually safe, since entering the sync
tonyp@2848 861 * barrier is one of the last things do_marking_step() does, and it
tonyp@2848 862 * doesn't manipulate any data structures afterwards.
tonyp@2848 863 */
ysr@777 864
ysr@777 865 void ConcurrentMark::enter_first_sync_barrier(int task_num) {
tonyp@2973 866 if (verbose_low()) {
ysr@777 867 gclog_or_tty->print_cr("[%d] entering first barrier", task_num);
tonyp@2973 868 }
ysr@777 869
tonyp@2848 870 if (concurrent()) {
tonyp@2848 871 ConcurrentGCThread::stsLeave();
tonyp@2848 872 }
ysr@777 873 _first_overflow_barrier_sync.enter();
tonyp@2848 874 if (concurrent()) {
tonyp@2848 875 ConcurrentGCThread::stsJoin();
tonyp@2848 876 }
ysr@777 877 // at this point everyone should have synced up and not be doing any
ysr@777 878 // more work
ysr@777 879
tonyp@2973 880 if (verbose_low()) {
ysr@777 881 gclog_or_tty->print_cr("[%d] leaving first barrier", task_num);
tonyp@2973 882 }
ysr@777 883
ysr@777 884 // let task 0 do this
ysr@777 885 if (task_num == 0) {
ysr@777 886 // task 0 is responsible for clearing the global data structures
tonyp@2848 887 // We should be here because of an overflow. During STW we should
tonyp@2848 888 // not clear the overflow flag since we rely on it being true when
tonyp@2848 889 // we exit this method to abort the pause and restart concurrent
tonyp@2848 890 // marking.
tonyp@2848 891 clear_marking_state(concurrent() /* clear_overflow */);
tonyp@2848 892 force_overflow()->update();
ysr@777 893
ysr@777 894 if (PrintGC) {
ysr@777 895 gclog_or_tty->date_stamp(PrintGCDateStamps);
ysr@777 896 gclog_or_tty->stamp(PrintGCTimeStamps);
ysr@777 897 gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
ysr@777 898 }
ysr@777 899 }
ysr@777 900
ysr@777 901 // after this, each task should reset its own data structures and
ysr@777 902 // then go into the second barrier
ysr@777 903 }
ysr@777 904
ysr@777 905 void ConcurrentMark::enter_second_sync_barrier(int task_num) {
tonyp@2973 906 if (verbose_low()) {
ysr@777 907 gclog_or_tty->print_cr("[%d] entering second barrier", task_num);
tonyp@2973 908 }
ysr@777 909
tonyp@2848 910 if (concurrent()) {
tonyp@2848 911 ConcurrentGCThread::stsLeave();
tonyp@2848 912 }
ysr@777 913 _second_overflow_barrier_sync.enter();
tonyp@2848 914 if (concurrent()) {
tonyp@2848 915 ConcurrentGCThread::stsJoin();
tonyp@2848 916 }
ysr@777 917 // at this point everything should be re-initialised and ready to go
ysr@777 918
tonyp@2973 919 if (verbose_low()) {
ysr@777 920 gclog_or_tty->print_cr("[%d] leaving second barrier", task_num);
tonyp@2973 921 }
ysr@777 922 }
ysr@777 923
tonyp@2848 924 #ifndef PRODUCT
tonyp@2848 925 void ForceOverflowSettings::init() {
tonyp@2848 926 _num_remaining = G1ConcMarkForceOverflow;
tonyp@2848 927 _force = false;
tonyp@2848 928 update();
tonyp@2848 929 }
tonyp@2848 930
tonyp@2848 931 void ForceOverflowSettings::update() {
tonyp@2848 932 if (_num_remaining > 0) {
tonyp@2848 933 _num_remaining -= 1;
tonyp@2848 934 _force = true;
tonyp@2848 935 } else {
tonyp@2848 936 _force = false;
tonyp@2848 937 }
tonyp@2848 938 }
tonyp@2848 939
tonyp@2848 940 bool ForceOverflowSettings::should_force() {
tonyp@2848 941 if (_force) {
tonyp@2848 942 _force = false;
tonyp@2848 943 return true;
tonyp@2848 944 } else {
tonyp@2848 945 return false;
tonyp@2848 946 }
tonyp@2848 947 }
tonyp@2848 948 #endif // !PRODUCT
tonyp@2848 949
ysr@777 950 void ConcurrentMark::grayRegionIfNecessary(MemRegion mr) {
tonyp@3416 951 guarantee(false, "grayRegionIfNecessary(): don't call this any more");
tonyp@3416 952
ysr@777 953 // The objects on the region have already been marked "in bulk" by
ysr@777 954 // the caller. We only need to decide whether to push the region on
ysr@777 955 // the region stack or not.
ysr@777 956
tonyp@2973 957 if (!concurrent_marking_in_progress() || !_should_gray_objects) {
ysr@777 958 // We're done with marking and waiting for remark. We do not need to
ysr@777 959 // push anything else on the region stack.
ysr@777 960 return;
tonyp@2973 961 }
ysr@777 962
ysr@777 963 HeapWord* finger = _finger;
ysr@777 964
tonyp@2973 965 if (verbose_low()) {
ysr@777 966 gclog_or_tty->print_cr("[global] attempting to push "
ysr@777 967 "region ["PTR_FORMAT", "PTR_FORMAT"), finger is at "
ysr@777 968 PTR_FORMAT, mr.start(), mr.end(), finger);
tonyp@2973 969 }
ysr@777 970
ysr@777 971 if (mr.start() < finger) {
ysr@777 972 // The finger is always heap region aligned and it is not possible
ysr@777 973 // for mr to span heap regions.
tonyp@1458 974 assert(mr.end() <= finger, "invariant");
tonyp@1458 975
tonyp@1458 976 // Separated the asserts so that we know which one fires.
tonyp@1458 977 assert(mr.start() <= mr.end(),
tonyp@1458 978 "region boundaries should fall within the committed space");
tonyp@1458 979 assert(_heap_start <= mr.start(),
tonyp@1458 980 "region boundaries should fall within the committed space");
tonyp@1458 981 assert(mr.end() <= _heap_end,
tonyp@1458 982 "region boundaries should fall within the committed space");
tonyp@2973 983 if (verbose_low()) {
ysr@777 984 gclog_or_tty->print_cr("[global] region ["PTR_FORMAT", "PTR_FORMAT") "
ysr@777 985 "below the finger, pushing it",
ysr@777 986 mr.start(), mr.end());
tonyp@2973 987 }
ysr@777 988
johnc@2190 989 if (!region_stack_push_lock_free(mr)) {
tonyp@2973 990 if (verbose_low()) {
ysr@777 991 gclog_or_tty->print_cr("[global] region stack has overflown.");
tonyp@2973 992 }
ysr@777 993 }
ysr@777 994 }
ysr@777 995 }
ysr@777 996
ysr@777 997 void ConcurrentMark::markAndGrayObjectIfNecessary(oop p) {
tonyp@3416 998 guarantee(false, "markAndGrayObjectIfNecessary(): don't call this any more");
tonyp@3416 999
ysr@777 1000 // The object is not marked by the caller. We need to at least mark
ysr@777 1001 // it and maybe push it on the stack.
ysr@777 1002
ysr@777 1003 HeapWord* addr = (HeapWord*)p;
ysr@777 1004 if (!_nextMarkBitMap->isMarked(addr)) {
ysr@777 1005 // We definitely need to mark it, irrespective of whether we bail out
ysr@777 1006 // because we're done with marking.
ysr@777 1007 if (_nextMarkBitMap->parMark(addr)) {
tonyp@2973 1008 if (!concurrent_marking_in_progress() || !_should_gray_objects) {
ysr@777 1009 // If we're done with concurrent marking and we're waiting for
ysr@777 1010 // remark, then we're not pushing anything on the stack.
ysr@777 1011 return;
tonyp@2973 1012 }
ysr@777 1013
ysr@777 1014 // No OrderAccess::store_load() is needed. It is implicit in the
ysr@777 1015 // CAS done in parMark(addr) above
ysr@777 1016 HeapWord* finger = _finger;
ysr@777 1017
ysr@777 1018 if (addr < finger) {
ysr@777 1019 if (!mark_stack_push(oop(addr))) {
tonyp@2973 1020 if (verbose_low()) {
ysr@777 1021 gclog_or_tty->print_cr("[global] global stack overflow "
ysr@777 1022 "during parMark");
tonyp@2973 1023 }
ysr@777 1024 }
ysr@777 1025 }
ysr@777 1026 }
ysr@777 1027 }
ysr@777 1028 }
ysr@777 1029
ysr@777 1030 class CMConcurrentMarkingTask: public AbstractGangTask {
ysr@777 1031 private:
ysr@777 1032 ConcurrentMark* _cm;
ysr@777 1033 ConcurrentMarkThread* _cmt;
ysr@777 1034
ysr@777 1035 public:
jmasa@3357 1036 void work(uint worker_id) {
tonyp@1458 1037 assert(Thread::current()->is_ConcurrentGC_thread(),
tonyp@1458 1038 "this should only be done by a conc GC thread");
johnc@2316 1039 ResourceMark rm;
ysr@777 1040
ysr@777 1041 double start_vtime = os::elapsedVTime();
ysr@777 1042
ysr@777 1043 ConcurrentGCThread::stsJoin();
ysr@777 1044
jmasa@3357 1045 assert(worker_id < _cm->active_tasks(), "invariant");
jmasa@3357 1046 CMTask* the_task = _cm->task(worker_id);
ysr@777 1047 the_task->record_start_time();
ysr@777 1048 if (!_cm->has_aborted()) {
ysr@777 1049 do {
ysr@777 1050 double start_vtime_sec = os::elapsedVTime();
ysr@777 1051 double start_time_sec = os::elapsedTime();
johnc@2494 1052 double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
johnc@2494 1053
johnc@2494 1054 the_task->do_marking_step(mark_step_duration_ms,
johnc@2494 1055 true /* do_stealing */,
johnc@2494 1056 true /* do_termination */);
johnc@2494 1057
ysr@777 1058 double end_time_sec = os::elapsedTime();
ysr@777 1059 double end_vtime_sec = os::elapsedVTime();
ysr@777 1060 double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
ysr@777 1061 double elapsed_time_sec = end_time_sec - start_time_sec;
ysr@777 1062 _cm->clear_has_overflown();
ysr@777 1063
jmasa@3357 1064 bool ret = _cm->do_yield_check(worker_id);
ysr@777 1065
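// Added note: the sleep below throttles a marking task to the
// configured overhead. For instance, with the sleep_factor of 5.25
// from the worked example in the ConcurrentMark constructor, 10 ms
// of marking vtime would be followed by roughly 52 ms of sleep.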
ysr@777 1066 jlong sleep_time_ms;
ysr@777 1067 if (!_cm->has_aborted() && the_task->has_aborted()) {
ysr@777 1068 sleep_time_ms =
ysr@777 1069 (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
ysr@777 1070 ConcurrentGCThread::stsLeave();
ysr@777 1071 os::sleep(Thread::current(), sleep_time_ms, false);
ysr@777 1072 ConcurrentGCThread::stsJoin();
ysr@777 1073 }
ysr@777 1074 double end_time2_sec = os::elapsedTime();
ysr@777 1075 double elapsed_time2_sec = end_time2_sec - start_time_sec;
ysr@777 1076
ysr@777 1077 #if 0
ysr@777 1078 gclog_or_tty->print_cr("CM: elapsed %1.4lf ms, sleep %1.4lf ms, "
ysr@777 1079 "overhead %1.4lf",
ysr@777 1080 elapsed_vtime_sec * 1000.0, (double) sleep_time_ms,
ysr@777 1081 the_task->conc_overhead(os::elapsedTime()) * 8.0);
ysr@777 1082 gclog_or_tty->print_cr("elapsed time %1.4lf ms, time 2: %1.4lf ms",
ysr@777 1083 elapsed_time_sec * 1000.0, elapsed_time2_sec * 1000.0);
ysr@777 1084 #endif
ysr@777 1085 } while (!_cm->has_aborted() && the_task->has_aborted());
ysr@777 1086 }
ysr@777 1087 the_task->record_end_time();
tonyp@1458 1088 guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");
ysr@777 1089
ysr@777 1090 ConcurrentGCThread::stsLeave();
ysr@777 1091
ysr@777 1092 double end_vtime = os::elapsedVTime();
jmasa@3357 1093 _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
ysr@777 1094 }
ysr@777 1095
ysr@777 1096 CMConcurrentMarkingTask(ConcurrentMark* cm,
ysr@777 1097 ConcurrentMarkThread* cmt) :
ysr@777 1098 AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }
ysr@777 1099
ysr@777 1100 ~CMConcurrentMarkingTask() { }
ysr@777 1101 };
ysr@777 1102
jmasa@3294 1103 // Calculates the number of active workers for a concurrent
jmasa@3294 1104 // phase.
jmasa@3357 1105 uint ConcurrentMark::calc_parallel_marking_threads() {
johnc@3338 1106 if (G1CollectedHeap::use_parallel_gc_threads()) {
jmasa@3357 1107 uint n_conc_workers = 0;
jmasa@3294 1108 if (!UseDynamicNumberOfGCThreads ||
jmasa@3294 1109 (!FLAG_IS_DEFAULT(ConcGCThreads) &&
jmasa@3294 1110 !ForceDynamicNumberOfGCThreads)) {
jmasa@3294 1111 n_conc_workers = max_parallel_marking_threads();
jmasa@3294 1112 } else {
jmasa@3294 1113 n_conc_workers =
jmasa@3294 1114 AdaptiveSizePolicy::calc_default_active_workers(
jmasa@3294 1115 max_parallel_marking_threads(),
jmasa@3294 1116 1, /* Minimum workers */
jmasa@3294 1117 parallel_marking_threads(),
jmasa@3294 1118 Threads::number_of_non_daemon_threads());
jmasa@3294 1119 // Don't scale down "n_conc_workers" by scale_parallel_threads() because
jmasa@3294 1120 // that scaling has already gone into "_max_parallel_marking_threads".
jmasa@3294 1121 }
johnc@3338 1122 assert(n_conc_workers > 0, "Always need at least 1");
johnc@3338 1123 return n_conc_workers;
jmasa@3294 1124 }
johnc@3338 1125 // If we are not running with any parallel GC threads we will not
johnc@3338 1126 // have spawned any marking threads either. Hence the number of
johnc@3338 1127 // concurrent workers should be 0.
johnc@3338 1128 return 0;
jmasa@3294 1129 }
jmasa@3294 1130
ysr@777 1131 void ConcurrentMark::markFromRoots() {
ysr@777 1132 // we might be tempted to assert that:
ysr@777 1133 // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
ysr@777 1134 // "inconsistent argument?");
ysr@777 1135 // However that wouldn't be right, because it's possible that
ysr@777 1136 // a safepoint is indeed in progress as a younger generation
ysr@777 1137 // stop-the-world GC happens even as we mark in this generation.
ysr@777 1138
ysr@777 1139 _restart_for_overflow = false;
tonyp@2848 1140 force_overflow_conc()->init();
jmasa@3294 1141
jmasa@3294 1142 // _g1h has _n_par_threads
jmasa@3294 1143 _parallel_marking_threads = calc_parallel_marking_threads();
jmasa@3294 1144 assert(parallel_marking_threads() <= max_parallel_marking_threads(),
jmasa@3294 1145 "Maximum number of marking threads exceeded");
johnc@3338 1146
jmasa@3357 1147 uint active_workers = MAX2(1U, parallel_marking_threads());
johnc@3338 1148
johnc@3338 1149 // Parallel task terminator is set in "set_phase()"
johnc@3338 1150 set_phase(active_workers, true /* concurrent */);
ysr@777 1151
ysr@777 1152 CMConcurrentMarkingTask markingTask(this, cmThread());
tonyp@2973 1153 if (parallel_marking_threads() > 0) {
johnc@3338 1154 _parallel_workers->set_active_workers((int)active_workers);
johnc@3338 1155 // Don't set _n_par_threads because it affects MT in process_strong_roots()
johnc@3338 1156 // and the decisions on that MT processing are made elsewhere.
johnc@3338 1157 assert(_parallel_workers->active_workers() > 0, "Should have been set");
ysr@777 1158 _parallel_workers->run_task(&markingTask);
tonyp@2973 1159 } else {
ysr@777 1160 markingTask.work(0);
tonyp@2973 1161 }
ysr@777 1162 print_stats();
ysr@777 1163 }
ysr@777 1164
ysr@777 1165 void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
ysr@777 1166 // world is stopped at this checkpoint
ysr@777 1167 assert(SafepointSynchronize::is_at_safepoint(),
ysr@777 1168 "world should be stopped");
johnc@3175 1169
ysr@777 1170 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@777 1171
ysr@777 1172 // If a full collection has happened, we shouldn't do this.
ysr@777 1173 if (has_aborted()) {
ysr@777 1174 g1h->set_marking_complete(); // So bitmap clearing isn't confused
ysr@777 1175 return;
ysr@777 1176 }
ysr@777 1177
kamg@2445 1178 SvcGCMarker sgcm(SvcGCMarker::OTHER);
kamg@2445 1179
ysr@1280 1180 if (VerifyDuringGC) {
ysr@1280 1181 HandleMark hm; // handle scope
ysr@1280 1182 gclog_or_tty->print(" VerifyDuringGC:(before)");
ysr@1280 1183 Universe::heap()->prepare_for_verify();
johnc@2969 1184 Universe::verify(/* allow dirty */ true,
johnc@2969 1185 /* silent */ false,
johnc@2969 1186 /* option */ VerifyOption_G1UsePrevMarking);
ysr@1280 1187 }
ysr@1280 1188
ysr@777 1189 G1CollectorPolicy* g1p = g1h->g1_policy();
ysr@777 1190 g1p->record_concurrent_mark_remark_start();
ysr@777 1191
ysr@777 1192 double start = os::elapsedTime();
ysr@777 1193
ysr@777 1194 checkpointRootsFinalWork();
ysr@777 1195
ysr@777 1196 double mark_work_end = os::elapsedTime();
ysr@777 1197
ysr@777 1198 weakRefsWork(clear_all_soft_refs);
ysr@777 1199
ysr@777 1200 if (has_overflown()) {
ysr@777 1201 // Oops. We overflowed. Restart concurrent marking.
ysr@777 1202 _restart_for_overflow = true;
ysr@777 1203 // Clear the flag. We do not need it any more.
ysr@777 1204 clear_has_overflown();
tonyp@2973 1205 if (G1TraceMarkStackOverflow) {
ysr@777 1206 gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
tonyp@2973 1207 }
ysr@777 1208 } else {
tonyp@2469 1209 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
ysr@777 1210 // We're done with marking.
tonyp@1752 1211 // This is the end of the marking cycle. We expect all
tonyp@1752 1212 // threads to have SATB queues with active set to true.
tonyp@2469 1213 satb_mq_set.set_active_all_threads(false, /* new active value */
tonyp@2469 1214 true /* expected_active */);
tonyp@1246 1215
tonyp@1246 1216 if (VerifyDuringGC) {
ysr@1280 1217 HandleMark hm; // handle scope
ysr@1280 1218 gclog_or_tty->print(" VerifyDuringGC:(after)");
ysr@1280 1219 Universe::heap()->prepare_for_verify();
johnc@2969 1220 Universe::verify(/* allow dirty */ true,
johnc@2969 1221 /* silent */ false,
johnc@2969 1222 /* option */ VerifyOption_G1UseNextMarking);
tonyp@1246 1223 }
johnc@2494 1224 assert(!restart_for_overflow(), "sanity");
johnc@2494 1225 }
johnc@2494 1226
johnc@2494 1227 // Reset the marking state if marking completed
johnc@2494 1228 if (!restart_for_overflow()) {
johnc@2494 1229 set_non_marking_state();
ysr@777 1230 }
ysr@777 1231
ysr@777 1232 #if VERIFY_OBJS_PROCESSED
ysr@777 1233 _scan_obj_cl.objs_processed = 0;
ysr@777 1234 ThreadLocalObjQueue::objs_enqueued = 0;
ysr@777 1235 #endif
ysr@777 1236
ysr@777 1237 // Statistics
ysr@777 1238 double now = os::elapsedTime();
ysr@777 1239 _remark_mark_times.add((mark_work_end - start) * 1000.0);
ysr@777 1240 _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
ysr@777 1241 _remark_times.add((now - start) * 1000.0);
ysr@777 1242
ysr@777 1243 g1p->record_concurrent_mark_remark_end();
ysr@777 1244 }
ysr@777 1245
ysr@777 1246 #define CARD_BM_TEST_MODE 0
ysr@777 1247
ysr@777 1248 class CalcLiveObjectsClosure: public HeapRegionClosure {
ysr@777 1249
ysr@777 1250 CMBitMapRO* _bm;
ysr@777 1251 ConcurrentMark* _cm;
ysr@777 1252 bool _changed;
ysr@777 1253 bool _yield;
ysr@777 1254 size_t _words_done;
ysr@777 1255 size_t _tot_live;
ysr@777 1256 size_t _tot_used;
ysr@777 1257 size_t _regions_done;
ysr@777 1258 double _start_vtime_sec;
ysr@777 1259
ysr@777 1260 BitMap* _region_bm;
ysr@777 1261 BitMap* _card_bm;
ysr@777 1262 intptr_t _bottom_card_num;
ysr@777 1263 bool _final;
ysr@777 1264
ysr@777 1265 void mark_card_num_range(intptr_t start_card_num, intptr_t last_card_num) {
ysr@777 1266 for (intptr_t i = start_card_num; i <= last_card_num; i++) {
ysr@777 1267 #if CARD_BM_TEST_MODE
tonyp@1458 1268 guarantee(_card_bm->at(i - _bottom_card_num), "Should already be set.");
ysr@777 1269 #else
ysr@777 1270 _card_bm->par_at_put(i - _bottom_card_num, 1);
ysr@777 1271 #endif
ysr@777 1272 }
ysr@777 1273 }
ysr@777 1274
ysr@777 1275 public:
ysr@777 1276 CalcLiveObjectsClosure(bool final,
ysr@777 1277 CMBitMapRO *bm, ConcurrentMark *cm,
tonyp@1371 1278 BitMap* region_bm, BitMap* card_bm) :
ysr@777 1279 _bm(bm), _cm(cm), _changed(false), _yield(true),
ysr@777 1280 _words_done(0), _tot_live(0), _tot_used(0),
tonyp@1371 1281 _region_bm(region_bm), _card_bm(card_bm), _final(final),
ysr@777 1282 _regions_done(0), _start_vtime_sec(0.0)
ysr@777 1283 {
ysr@777 1284 _bottom_card_num =
ysr@777 1285 intptr_t(uintptr_t(G1CollectedHeap::heap()->reserved_region().start()) >>
ysr@777 1286 CardTableModRefBS::card_shift);
ysr@777 1287 }
ysr@777 1288
tonyp@1264 1289 // It takes a region that's not empty (i.e., it has at least one
tonyp@1264 1290 // live object in it) and sets its corresponding bit on the region
tonyp@1264 1291 // bitmap to 1. If the region is "starts humongous" it will also set
tonyp@1264 1292 // to 1 the bits on the region bitmap that correspond to its
tonyp@1264 1293 // associated "continues humongous" regions.
tonyp@1264 1294 void set_bit_for_region(HeapRegion* hr) {
tonyp@1264 1295 assert(!hr->continuesHumongous(), "should have filtered those out");
tonyp@1264 1296
tonyp@1264 1297 size_t index = hr->hrs_index();
tonyp@1264 1298 if (!hr->startsHumongous()) {
tonyp@1264 1299 // Normal (non-humongous) case: just set the bit.
tonyp@1264 1300 _region_bm->par_at_put((BitMap::idx_t) index, true);
tonyp@1264 1301 } else {
tonyp@1264 1302 // Starts humongous case: calculate how many regions are part of
tonyp@1264 1303 // this humongous region and then set the bit range. It might
tonyp@1264 1304 // have been a bit more efficient to look at the object that
tonyp@1264 1305 // spans these humongous regions to calculate their number from
tonyp@1264 1306 // the object's size. However, it's a good idea to calculate
tonyp@1264 1307 // this based on the metadata itself, and not the region
tonyp@1264 1308 // contents, so that this code is not aware of what goes into
tonyp@1264 1309 // the humongous regions (in case this changes in the future).
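      // For example, a humongous object that starts in region 5 and
      // spans three regions results in a single
      // par_at_put_range(5, 8, true) call covering bits 5, 6 and 7.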
tonyp@1264 1310 G1CollectedHeap* g1h = G1CollectedHeap::heap();
tonyp@1264 1311 size_t end_index = index + 1;
tonyp@1266 1312 while (end_index < g1h->n_regions()) {
tonyp@1266 1313 HeapRegion* chr = g1h->region_at(end_index);
tonyp@2973 1314 if (!chr->continuesHumongous()) break;
tonyp@1264 1315 end_index += 1;
tonyp@1264 1316 }
tonyp@1264 1317 _region_bm->par_at_put_range((BitMap::idx_t) index,
tonyp@1264 1318 (BitMap::idx_t) end_index, true);
tonyp@1264 1319 }
tonyp@1264 1320 }
tonyp@1264 1321
ysr@777 1322 bool doHeapRegion(HeapRegion* hr) {
tonyp@2973 1323 if (!_final && _regions_done == 0) {
ysr@777 1324 _start_vtime_sec = os::elapsedVTime();
tonyp@2973 1325 }
ysr@777 1326
iveresov@1074 1327 if (hr->continuesHumongous()) {
tonyp@1264 1328 // We will ignore these here and process them when their
tonyp@1264 1329 // associated "starts humongous" region is processed (see
tonyp@1264 1330 // set_bit_for_heap_region()). Note that we cannot rely on their
tonyp@1264 1331 // associated "starts humongous" region to have their bit set to
tonyp@1264 1332 // 1 since, due to the region chunking in the parallel region
tonyp@1264 1333 // iteration, a "continues humongous" region might be visited
tonyp@1264 1334 	    // before its associated "starts humongous" region.
iveresov@1074 1335 return false;
iveresov@1074 1336 }
ysr@777 1337
ysr@777 1338 HeapWord* nextTop = hr->next_top_at_mark_start();
ysr@777 1339 HeapWord* start = hr->top_at_conc_mark_count();
ysr@777 1340 assert(hr->bottom() <= start && start <= hr->end() &&
ysr@777 1341 hr->bottom() <= nextTop && nextTop <= hr->end() &&
ysr@777 1342 start <= nextTop,
ysr@777 1343 "Preconditions.");
ysr@777 1344 	    // Record the number of words we'll examine.
ysr@777 1345 size_t words_done = (nextTop - start);
ysr@777 1346 // Find the first marked object at or after "start".
ysr@777 1347 start = _bm->getNextMarkedWordAddress(start, nextTop);
ysr@777 1348 size_t marked_bytes = 0;
ysr@777 1349
ysr@777 1350 // Below, the term "card num" means the result of shifting an address
ysr@777 1351 // by the card shift -- address 0 corresponds to card number 0. One
ysr@777 1352 // must subtract the card num of the bottom of the heap to obtain a
ysr@777 1353 // card table index.
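    // For example, assuming the usual 512-byte cards (card_shift == 9),
    // an address of 0x8005200 has card num 0x40029; if the heap starts at
    // 0x8000000 (bottom card num 0x40000), its card table index is 0x29.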
ysr@777 1354 // The first card num of the sequence of live cards currently being
ysr@777 1355 // constructed. -1 ==> no sequence.
ysr@777 1356 intptr_t start_card_num = -1;
ysr@777 1357 // The last card num of the sequence of live cards currently being
ysr@777 1358 // constructed. -1 ==> no sequence.
ysr@777 1359 intptr_t last_card_num = -1;
ysr@777 1360
ysr@777 1361 while (start < nextTop) {
ysr@777 1362 if (_yield && _cm->do_yield_check()) {
ysr@777 1363 // We yielded. It might be for a full collection, in which case
ysr@777 1364 // all bets are off; terminate the traversal.
ysr@777 1365 if (_cm->has_aborted()) {
ysr@777 1366 _changed = false;
ysr@777 1367 return true;
ysr@777 1368 } else {
ysr@777 1369 // Otherwise, it might be a collection pause, and the region
ysr@777 1370 // we're looking at might be in the collection set. We'll
ysr@777 1371 // abandon this region.
ysr@777 1372 return false;
ysr@777 1373 }
ysr@777 1374 }
ysr@777 1375 oop obj = oop(start);
ysr@777 1376 int obj_sz = obj->size();
ysr@777 1377 // The card num of the start of the current object.
ysr@777 1378 intptr_t obj_card_num =
ysr@777 1379 intptr_t(uintptr_t(start) >> CardTableModRefBS::card_shift);
ysr@777 1380
ysr@777 1381 HeapWord* obj_last = start + obj_sz - 1;
ysr@777 1382 intptr_t obj_last_card_num =
ysr@777 1383 intptr_t(uintptr_t(obj_last) >> CardTableModRefBS::card_shift);
ysr@777 1384
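      // Live objects are coalesced into runs of consecutive cards: the
      // current run [start_card_num, last_card_num] is only flushed via
      // mark_card_num_range() when the next object starts more than one
      // card past the end of the run.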
ysr@777 1385 if (obj_card_num != last_card_num) {
ysr@777 1386 if (start_card_num == -1) {
ysr@777 1387 assert(last_card_num == -1, "Both or neither.");
ysr@777 1388 start_card_num = obj_card_num;
ysr@777 1389 } else {
ysr@777 1390 assert(last_card_num != -1, "Both or neither.");
ysr@777 1391 assert(obj_card_num >= last_card_num, "Inv");
ysr@777 1392 if ((obj_card_num - last_card_num) > 1) {
ysr@777 1393 // Mark the last run, and start a new one.
ysr@777 1394 mark_card_num_range(start_card_num, last_card_num);
ysr@777 1395 start_card_num = obj_card_num;
ysr@777 1396 }
ysr@777 1397 }
ysr@777 1398 #if CARD_BM_TEST_MODE
ysr@777 1399 /*
ysr@777 1400 gclog_or_tty->print_cr("Setting bits from %d/%d.",
ysr@777 1401 obj_card_num - _bottom_card_num,
ysr@777 1402 obj_last_card_num - _bottom_card_num);
ysr@777 1403 */
ysr@777 1404 for (intptr_t j = obj_card_num; j <= obj_last_card_num; j++) {
ysr@777 1405 _card_bm->par_at_put(j - _bottom_card_num, 1);
ysr@777 1406 }
ysr@777 1407 #endif
ysr@777 1408 }
ysr@777 1409 // In any case, we set the last card num.
ysr@777 1410 last_card_num = obj_last_card_num;
ysr@777 1411
apetrusenko@1465 1412 marked_bytes += (size_t)obj_sz * HeapWordSize;
ysr@777 1413 // Find the next marked object after this one.
ysr@777 1414 start = _bm->getNextMarkedWordAddress(start + 1, nextTop);
ysr@777 1415 _changed = true;
ysr@777 1416 }
ysr@777 1417 // Handle the last range, if any.
tonyp@2973 1418 if (start_card_num != -1) {
ysr@777 1419 mark_card_num_range(start_card_num, last_card_num);
tonyp@2973 1420 }
ysr@777 1421 if (_final) {
ysr@777 1422 // Mark the allocated-since-marking portion...
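      // Under the snapshot-at-the-beginning invariant, anything allocated
      // at or above nextTop (NTAMS) is implicitly live, so its cards are
      // recorded here even though the objects carry no marks on the bitmap.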
ysr@777 1423 HeapWord* tp = hr->top();
ysr@777 1424 if (nextTop < tp) {
ysr@777 1425 start_card_num =
ysr@777 1426 intptr_t(uintptr_t(nextTop) >> CardTableModRefBS::card_shift);
ysr@777 1427 last_card_num =
ysr@777 1428 intptr_t(uintptr_t(tp) >> CardTableModRefBS::card_shift);
ysr@777 1429 mark_card_num_range(start_card_num, last_card_num);
ysr@777 1430 // This definitely means the region has live objects.
tonyp@1264 1431 set_bit_for_region(hr);
ysr@777 1432 }
ysr@777 1433 }
ysr@777 1434
ysr@777 1435 hr->add_to_marked_bytes(marked_bytes);
ysr@777 1436 // Update the live region bitmap.
ysr@777 1437 if (marked_bytes > 0) {
tonyp@1264 1438 set_bit_for_region(hr);
ysr@777 1439 }
ysr@777 1440 hr->set_top_at_conc_mark_count(nextTop);
ysr@777 1441 _tot_live += hr->next_live_bytes();
ysr@777 1442 _tot_used += hr->used();
ysr@777 1443 _words_done = words_done;
ysr@777 1444
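    // Throttle the concurrent (non-final) pass: roughly every tenth
    // region we check the vtime consumed and, once more than 10ms has
    // been used, sleep for that amount scaled by cleanup_sleep_factor
    // (e.g. a factor of 1.0 sleeps about as long as we just worked).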
ysr@777 1445 if (!_final) {
ysr@777 1446 ++_regions_done;
ysr@777 1447 if (_regions_done % 10 == 0) {
ysr@777 1448 double end_vtime_sec = os::elapsedVTime();
ysr@777 1449 double elapsed_vtime_sec = end_vtime_sec - _start_vtime_sec;
ysr@777 1450 if (elapsed_vtime_sec > (10.0 / 1000.0)) {
ysr@777 1451 jlong sleep_time_ms =
ysr@777 1452 (jlong) (elapsed_vtime_sec * _cm->cleanup_sleep_factor() * 1000.0);
ysr@777 1453 os::sleep(Thread::current(), sleep_time_ms, false);
ysr@777 1454 _start_vtime_sec = end_vtime_sec;
ysr@777 1455 }
ysr@777 1456 }
ysr@777 1457 }
ysr@777 1458
ysr@777 1459 return false;
ysr@777 1460 }
ysr@777 1461
ysr@777 1462 bool changed() { return _changed; }
ysr@777 1463 void reset() { _changed = false; _words_done = 0; }
ysr@777 1464 void no_yield() { _yield = false; }
ysr@777 1465 size_t words_done() { return _words_done; }
ysr@777 1466 size_t tot_live() { return _tot_live; }
ysr@777 1467 size_t tot_used() { return _tot_used; }
ysr@777 1468 };
ysr@777 1469
ysr@777 1470
ysr@777 1471 void ConcurrentMark::calcDesiredRegions() {
ysr@777 1472 _region_bm.clear();
ysr@777 1473 _card_bm.clear();
ysr@777 1474 CalcLiveObjectsClosure calccl(false /*final*/,
ysr@777 1475 nextMarkBitMap(), this,
tonyp@1371 1476 &_region_bm, &_card_bm);
ysr@777 1477 G1CollectedHeap *g1h = G1CollectedHeap::heap();
ysr@777 1478 g1h->heap_region_iterate(&calccl);
ysr@777 1479
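  // Iterate to a fixed point: a pass may be cut short by a yield (for a
  // pause or a full GC), so we repeat until a complete pass over the heap
  // counts nothing new.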
ysr@777 1480 do {
ysr@777 1481 calccl.reset();
ysr@777 1482 g1h->heap_region_iterate(&calccl);
ysr@777 1483 } while (calccl.changed());
ysr@777 1484 }
ysr@777 1485
ysr@777 1486 class G1ParFinalCountTask: public AbstractGangTask {
ysr@777 1487 protected:
ysr@777 1488 G1CollectedHeap* _g1h;
ysr@777 1489 CMBitMap* _bm;
jmasa@3357 1490 uint _n_workers;
ysr@777 1491 size_t *_live_bytes;
ysr@777 1492 size_t *_used_bytes;
ysr@777 1493 BitMap* _region_bm;
ysr@777 1494 BitMap* _card_bm;
ysr@777 1495 public:
ysr@777 1496 G1ParFinalCountTask(G1CollectedHeap* g1h, CMBitMap* bm,
tonyp@2973 1497 BitMap* region_bm, BitMap* card_bm)
tonyp@2973 1498 : AbstractGangTask("G1 final counting"), _g1h(g1h),
jmasa@3294 1499 _bm(bm), _region_bm(region_bm), _card_bm(card_bm),
jmasa@3294 1500 _n_workers(0)
jmasa@3294 1501 {
jmasa@3294 1502 // Use the value already set as the number of active threads
jmasa@3294 1503 // in the call to run_task(). Needed for the allocation of
jmasa@3294 1504 // _live_bytes and _used_bytes.
jmasa@3294 1505 if (G1CollectedHeap::use_parallel_gc_threads()) {
jmasa@3294 1506 assert( _g1h->workers()->active_workers() > 0,
jmasa@3294 1507 "Should have been previously set");
jmasa@3294 1508 _n_workers = _g1h->workers()->active_workers();
tonyp@2973 1509 } else {
ysr@777 1510 _n_workers = 1;
tonyp@2973 1511 }
jmasa@3294 1512
ysr@777 1513 _live_bytes = NEW_C_HEAP_ARRAY(size_t, _n_workers);
ysr@777 1514 _used_bytes = NEW_C_HEAP_ARRAY(size_t, _n_workers);
ysr@777 1515 }
ysr@777 1516
ysr@777 1517 ~G1ParFinalCountTask() {
ysr@777 1518 FREE_C_HEAP_ARRAY(size_t, _live_bytes);
ysr@777 1519 FREE_C_HEAP_ARRAY(size_t, _used_bytes);
ysr@777 1520 }
ysr@777 1521
jmasa@3357 1522 void work(uint worker_id) {
ysr@777 1523 CalcLiveObjectsClosure calccl(true /*final*/,
ysr@777 1524 _bm, _g1h->concurrent_mark(),
tonyp@1371 1525 _region_bm, _card_bm);
ysr@777 1526 calccl.no_yield();
jmasa@2188 1527 if (G1CollectedHeap::use_parallel_gc_threads()) {
jmasa@3357 1528 _g1h->heap_region_par_iterate_chunked(&calccl, worker_id,
jmasa@3294 1529 (int) _n_workers,
tonyp@790 1530 HeapRegion::FinalCountClaimValue);
ysr@777 1531 } else {
ysr@777 1532 _g1h->heap_region_iterate(&calccl);
ysr@777 1533 }
ysr@777 1534 assert(calccl.complete(), "Shouldn't have yielded!");
ysr@777 1535
jmasa@3357 1536 assert(worker_id < _n_workers, "invariant");
jmasa@3357 1537 _live_bytes[worker_id] = calccl.tot_live();
jmasa@3357 1538 _used_bytes[worker_id] = calccl.tot_used();
ysr@777 1539 }
ysr@777 1540 size_t live_bytes() {
ysr@777 1541 size_t live_bytes = 0;
jmasa@3357 1542 for (uint i = 0; i < _n_workers; ++i)
ysr@777 1543 live_bytes += _live_bytes[i];
ysr@777 1544 return live_bytes;
ysr@777 1545 }
ysr@777 1546 size_t used_bytes() {
ysr@777 1547 size_t used_bytes = 0;
jmasa@3357 1548 for (uint i = 0; i < _n_workers; ++i)
ysr@777 1549 used_bytes += _used_bytes[i];
ysr@777 1550 return used_bytes;
ysr@777 1551 }
ysr@777 1552 };
ysr@777 1553
ysr@777 1554 class G1ParNoteEndTask;
ysr@777 1555
ysr@777 1556 class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
ysr@777 1557 G1CollectedHeap* _g1;
ysr@777 1558 int _worker_num;
ysr@777 1559 size_t _max_live_bytes;
ysr@777 1560 size_t _regions_claimed;
ysr@777 1561 size_t _freed_bytes;
tonyp@2493 1562 FreeRegionList* _local_cleanup_list;
tonyp@3268 1563 OldRegionSet* _old_proxy_set;
tonyp@2493 1564 HumongousRegionSet* _humongous_proxy_set;
tonyp@2493 1565 HRRSCleanupTask* _hrrs_cleanup_task;
ysr@777 1566 double _claimed_region_time;
ysr@777 1567 double _max_region_time;
ysr@777 1568
ysr@777 1569 public:
ysr@777 1570 G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
tonyp@2493 1571 int worker_num,
tonyp@2493 1572 FreeRegionList* local_cleanup_list,
tonyp@3268 1573 OldRegionSet* old_proxy_set,
tonyp@2493 1574 HumongousRegionSet* humongous_proxy_set,
johnc@3292 1575 HRRSCleanupTask* hrrs_cleanup_task) :
johnc@3292 1576 _g1(g1), _worker_num(worker_num),
johnc@3292 1577 _max_live_bytes(0), _regions_claimed(0),
johnc@3292 1578 _freed_bytes(0),
johnc@3292 1579 _claimed_region_time(0.0), _max_region_time(0.0),
johnc@3292 1580 _local_cleanup_list(local_cleanup_list),
johnc@3292 1581 _old_proxy_set(old_proxy_set),
johnc@3292 1582 _humongous_proxy_set(humongous_proxy_set),
johnc@3292 1583 _hrrs_cleanup_task(hrrs_cleanup_task) { }
johnc@3292 1584
ysr@777 1585 size_t freed_bytes() { return _freed_bytes; }
ysr@777 1586
johnc@3292 1587 bool doHeapRegion(HeapRegion *hr) {
johnc@3292 1588 // We use a claim value of zero here because all regions
johnc@3292 1589 // were claimed with value 1 in the FinalCount task.
johnc@3292 1590 hr->reset_gc_time_stamp();
johnc@3292 1591 if (!hr->continuesHumongous()) {
johnc@3292 1592 double start = os::elapsedTime();
johnc@3292 1593 _regions_claimed++;
johnc@3292 1594 hr->note_end_of_marking();
johnc@3292 1595 _max_live_bytes += hr->max_live_bytes();
johnc@3292 1596 _g1->free_region_if_empty(hr,
johnc@3292 1597 &_freed_bytes,
johnc@3292 1598 _local_cleanup_list,
johnc@3292 1599 _old_proxy_set,
johnc@3292 1600 _humongous_proxy_set,
johnc@3292 1601 _hrrs_cleanup_task,
johnc@3292 1602 true /* par */);
johnc@3292 1603 double region_time = (os::elapsedTime() - start);
johnc@3292 1604 _claimed_region_time += region_time;
johnc@3292 1605 if (region_time > _max_region_time) {
johnc@3292 1606 _max_region_time = region_time;
johnc@3292 1607 }
johnc@3292 1608 }
johnc@3292 1609 return false;
johnc@3292 1610 }
ysr@777 1611
ysr@777 1612 size_t max_live_bytes() { return _max_live_bytes; }
ysr@777 1613 size_t regions_claimed() { return _regions_claimed; }
ysr@777 1614 double claimed_region_time_sec() { return _claimed_region_time; }
ysr@777 1615 double max_region_time_sec() { return _max_region_time; }
ysr@777 1616 };
ysr@777 1617
ysr@777 1618 class G1ParNoteEndTask: public AbstractGangTask {
ysr@777 1619 friend class G1NoteEndOfConcMarkClosure;
tonyp@2472 1620
ysr@777 1621 protected:
ysr@777 1622 G1CollectedHeap* _g1h;
ysr@777 1623 size_t _max_live_bytes;
ysr@777 1624 size_t _freed_bytes;
tonyp@2472 1625 FreeRegionList* _cleanup_list;
tonyp@2472 1626
ysr@777 1627 public:
ysr@777 1628 G1ParNoteEndTask(G1CollectedHeap* g1h,
tonyp@2472 1629 FreeRegionList* cleanup_list) :
ysr@777 1630 AbstractGangTask("G1 note end"), _g1h(g1h),
tonyp@2472 1631 _max_live_bytes(0), _freed_bytes(0), _cleanup_list(cleanup_list) { }
ysr@777 1632
jmasa@3357 1633 void work(uint worker_id) {
ysr@777 1634 double start = os::elapsedTime();
tonyp@2493 1635 FreeRegionList local_cleanup_list("Local Cleanup List");
tonyp@3268 1636 OldRegionSet old_proxy_set("Local Cleanup Old Proxy Set");
tonyp@2493 1637 HumongousRegionSet humongous_proxy_set("Local Cleanup Humongous Proxy Set");
tonyp@2493 1638 HRRSCleanupTask hrrs_cleanup_task;
jmasa@3357 1639 G1NoteEndOfConcMarkClosure g1_note_end(_g1h, worker_id, &local_cleanup_list,
tonyp@3268 1640 &old_proxy_set,
tonyp@2493 1641 &humongous_proxy_set,
tonyp@2493 1642 &hrrs_cleanup_task);
jmasa@2188 1643 if (G1CollectedHeap::use_parallel_gc_threads()) {
jmasa@3357 1644 _g1h->heap_region_par_iterate_chunked(&g1_note_end, worker_id,
jmasa@3294 1645 _g1h->workers()->active_workers(),
tonyp@790 1646 HeapRegion::NoteEndClaimValue);
ysr@777 1647 } else {
ysr@777 1648 _g1h->heap_region_iterate(&g1_note_end);
ysr@777 1649 }
ysr@777 1650 assert(g1_note_end.complete(), "Shouldn't have yielded!");
ysr@777 1651
tonyp@2472 1652 // Now update the lists
tonyp@2472 1653 _g1h->update_sets_after_freeing_regions(g1_note_end.freed_bytes(),
tonyp@2472 1654 NULL /* free_list */,
tonyp@3268 1655 &old_proxy_set,
tonyp@2493 1656 &humongous_proxy_set,
tonyp@2472 1657 true /* par */);
ysr@777 1658 {
ysr@777 1659 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
ysr@777 1660 _max_live_bytes += g1_note_end.max_live_bytes();
ysr@777 1661 _freed_bytes += g1_note_end.freed_bytes();
tonyp@2472 1662
tonyp@2975 1663 // If we iterate over the global cleanup list at the end of
tonyp@2975 1664 	      // cleanup to do this printing we cannot guarantee that we only
tonyp@2975 1665 // generate output for the newly-reclaimed regions (the list
tonyp@2975 1666 // might not be empty at the beginning of cleanup; we might
tonyp@2975 1667 // still be working on its previous contents). So we do the
tonyp@2975 1668 // printing here, before we append the new regions to the global
tonyp@2975 1669 // cleanup list.
tonyp@2975 1670
tonyp@2975 1671 G1HRPrinter* hr_printer = _g1h->hr_printer();
tonyp@2975 1672 if (hr_printer->is_active()) {
tonyp@2975 1673 HeapRegionLinkedListIterator iter(&local_cleanup_list);
tonyp@2975 1674 while (iter.more_available()) {
tonyp@2975 1675 HeapRegion* hr = iter.get_next();
tonyp@2975 1676 hr_printer->cleanup(hr);
tonyp@2975 1677 }
tonyp@2975 1678 }
tonyp@2975 1679
tonyp@2493 1680 _cleanup_list->add_as_tail(&local_cleanup_list);
tonyp@2493 1681 assert(local_cleanup_list.is_empty(), "post-condition");
tonyp@2493 1682
tonyp@2493 1683 HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
ysr@777 1684 }
ysr@777 1685 double end = os::elapsedTime();
ysr@777 1686 if (G1PrintParCleanupStats) {
ysr@777 1687 gclog_or_tty->print(" Worker thread %d [%8.3f..%8.3f = %8.3f ms] "
jmasa@3357 1688 "claimed %u regions (tot = %8.3f ms, max = %8.3f ms).\n",
jmasa@3357 1689 worker_id, start, end, (end-start)*1000.0,
ysr@777 1690 g1_note_end.regions_claimed(),
ysr@777 1691 g1_note_end.claimed_region_time_sec()*1000.0,
ysr@777 1692 g1_note_end.max_region_time_sec()*1000.0);
ysr@777 1693 }
ysr@777 1694 }
ysr@777 1695 size_t max_live_bytes() { return _max_live_bytes; }
ysr@777 1696 size_t freed_bytes() { return _freed_bytes; }
ysr@777 1697 };
ysr@777 1698
ysr@777 1699 class G1ParScrubRemSetTask: public AbstractGangTask {
ysr@777 1700 protected:
ysr@777 1701 G1RemSet* _g1rs;
ysr@777 1702 BitMap* _region_bm;
ysr@777 1703 BitMap* _card_bm;
ysr@777 1704 public:
ysr@777 1705 G1ParScrubRemSetTask(G1CollectedHeap* g1h,
ysr@777 1706 BitMap* region_bm, BitMap* card_bm) :
ysr@777 1707 AbstractGangTask("G1 ScrubRS"), _g1rs(g1h->g1_rem_set()),
ysr@777 1708 _region_bm(region_bm), _card_bm(card_bm)
ysr@777 1709 {}
ysr@777 1710
jmasa@3357 1711 void work(uint worker_id) {
jmasa@2188 1712 if (G1CollectedHeap::use_parallel_gc_threads()) {
jmasa@3357 1713 _g1rs->scrub_par(_region_bm, _card_bm, worker_id,
tonyp@790 1714 HeapRegion::ScrubRemSetClaimValue);
ysr@777 1715 } else {
ysr@777 1716 _g1rs->scrub(_region_bm, _card_bm);
ysr@777 1717 }
ysr@777 1718 }
ysr@777 1719
ysr@777 1720 };
ysr@777 1721
ysr@777 1722 void ConcurrentMark::cleanup() {
ysr@777 1723 // world is stopped at this checkpoint
ysr@777 1724 assert(SafepointSynchronize::is_at_safepoint(),
ysr@777 1725 "world should be stopped");
ysr@777 1726 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@777 1727
ysr@777 1728 // If a full collection has happened, we shouldn't do this.
ysr@777 1729 if (has_aborted()) {
ysr@777 1730 g1h->set_marking_complete(); // So bitmap clearing isn't confused
ysr@777 1731 return;
ysr@777 1732 }
ysr@777 1733
tonyp@3268 1734 HRSPhaseSetter x(HRSPhaseCleanup);
tonyp@2472 1735 g1h->verify_region_sets_optional();
tonyp@2472 1736
ysr@1280 1737 if (VerifyDuringGC) {
ysr@1280 1738 HandleMark hm; // handle scope
ysr@1280 1739 gclog_or_tty->print(" VerifyDuringGC:(before)");
ysr@1280 1740 Universe::heap()->prepare_for_verify();
johnc@2969 1741 Universe::verify(/* allow dirty */ true,
johnc@2969 1742 /* silent */ false,
johnc@2969 1743 /* option */ VerifyOption_G1UsePrevMarking);
ysr@1280 1744 }
ysr@1280 1745
ysr@777 1746 G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
ysr@777 1747 g1p->record_concurrent_mark_cleanup_start();
ysr@777 1748
ysr@777 1749 double start = os::elapsedTime();
ysr@777 1750
tonyp@2493 1751 HeapRegionRemSet::reset_for_cleanup_tasks();
tonyp@2493 1752
jmasa@3357 1753 uint n_workers;
jmasa@3294 1754
ysr@777 1755 // Do counting once more with the world stopped for good measure.
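  // The concurrent pass may have been interrupted by yields and pauses,
  // so this final pass (final == true, with yielding disabled) brings the
  // liveness counts fully up to date.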
ysr@777 1756 G1ParFinalCountTask g1_par_count_task(g1h, nextMarkBitMap(),
ysr@777 1757 &_region_bm, &_card_bm);
jmasa@2188 1758 if (G1CollectedHeap::use_parallel_gc_threads()) {
tonyp@790 1759 assert(g1h->check_heap_region_claim_values(
tonyp@790 1760 HeapRegion::InitialClaimValue),
tonyp@790 1761 "sanity check");
tonyp@790 1762
johnc@3338 1763 g1h->set_par_threads();
johnc@3338 1764 n_workers = g1h->n_par_threads();
jmasa@3357 1765 assert(g1h->n_par_threads() == n_workers,
johnc@3338 1766 "Should not have been reset");
ysr@777 1767 g1h->workers()->run_task(&g1_par_count_task);
jmasa@3294 1768 // Done with the parallel phase so reset to 0.
ysr@777 1769 g1h->set_par_threads(0);
tonyp@790 1770
tonyp@790 1771 assert(g1h->check_heap_region_claim_values(
tonyp@790 1772 HeapRegion::FinalCountClaimValue),
tonyp@790 1773 "sanity check");
ysr@777 1774 } else {
johnc@3338 1775 n_workers = 1;
ysr@777 1776 g1_par_count_task.work(0);
ysr@777 1777 }
ysr@777 1778
ysr@777 1779 size_t known_garbage_bytes =
ysr@777 1780 g1_par_count_task.used_bytes() - g1_par_count_task.live_bytes();
ysr@777 1781 g1p->set_known_garbage_bytes(known_garbage_bytes);
ysr@777 1782
ysr@777 1783 size_t start_used_bytes = g1h->used();
ysr@777 1784 _at_least_one_mark_complete = true;
ysr@777 1785 g1h->set_marking_complete();
ysr@777 1786
tonyp@3114 1787 ergo_verbose4(ErgoConcCycles,
tonyp@3114 1788 "finish cleanup",
tonyp@3114 1789 ergo_format_byte("occupancy")
tonyp@3114 1790 ergo_format_byte("capacity")
tonyp@3114 1791 ergo_format_byte_perc("known garbage"),
tonyp@3114 1792 start_used_bytes, g1h->capacity(),
tonyp@3114 1793 known_garbage_bytes,
tonyp@3114 1794 ((double) known_garbage_bytes / (double) g1h->capacity()) * 100.0);
tonyp@3114 1795
ysr@777 1796 double count_end = os::elapsedTime();
ysr@777 1797 double this_final_counting_time = (count_end - start);
ysr@777 1798 if (G1PrintParCleanupStats) {
ysr@777 1799 gclog_or_tty->print_cr("Cleanup:");
ysr@777 1800 gclog_or_tty->print_cr(" Finalize counting: %8.3f ms",
ysr@777 1801 this_final_counting_time*1000.0);
ysr@777 1802 }
ysr@777 1803 _total_counting_time += this_final_counting_time;
ysr@777 1804
tonyp@2717 1805 if (G1PrintRegionLivenessInfo) {
tonyp@2717 1806 G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking");
tonyp@2717 1807 _g1h->heap_region_iterate(&cl);
tonyp@2717 1808 }
tonyp@2717 1809
ysr@777 1810 // Install newly created mark bitMap as "prev".
ysr@777 1811 swapMarkBitMaps();
ysr@777 1812
ysr@777 1813 g1h->reset_gc_time_stamp();
ysr@777 1814
ysr@777 1815 // Note end of marking in all heap regions.
ysr@777 1816 double note_end_start = os::elapsedTime();
tonyp@2472 1817 G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list);
jmasa@2188 1818 if (G1CollectedHeap::use_parallel_gc_threads()) {
jmasa@3294 1819 g1h->set_par_threads((int)n_workers);
ysr@777 1820 g1h->workers()->run_task(&g1_par_note_end_task);
ysr@777 1821 g1h->set_par_threads(0);
tonyp@790 1822
tonyp@790 1823 assert(g1h->check_heap_region_claim_values(HeapRegion::NoteEndClaimValue),
tonyp@790 1824 "sanity check");
ysr@777 1825 } else {
ysr@777 1826 g1_par_note_end_task.work(0);
ysr@777 1827 }
tonyp@2472 1828
tonyp@2472 1829 if (!cleanup_list_is_empty()) {
tonyp@2472 1830 // The cleanup list is not empty, so we'll have to process it
tonyp@2472 1831 // concurrently. Notify anyone else that might be wanting free
tonyp@2472 1832 // regions that there will be more free regions coming soon.
tonyp@2472 1833 g1h->set_free_regions_coming();
tonyp@2472 1834 }
ysr@777 1835 double note_end_end = os::elapsedTime();
ysr@777 1836 if (G1PrintParCleanupStats) {
ysr@777 1837 gclog_or_tty->print_cr(" note end of marking: %8.3f ms.",
ysr@777 1838 (note_end_end - note_end_start)*1000.0);
ysr@777 1839 }
ysr@777 1840
ysr@777 1841 	  // Scrub the remembered sets before the record_concurrent_mark_cleanup_end()
ysr@777 1842 	  // call below, since it affects the metric by which we sort the heap regions.
ysr@777 1843 if (G1ScrubRemSets) {
ysr@777 1844 double rs_scrub_start = os::elapsedTime();
ysr@777 1845 G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm);
jmasa@2188 1846 if (G1CollectedHeap::use_parallel_gc_threads()) {
jmasa@3294 1847 g1h->set_par_threads((int)n_workers);
ysr@777 1848 g1h->workers()->run_task(&g1_par_scrub_rs_task);
ysr@777 1849 g1h->set_par_threads(0);
tonyp@790 1850
tonyp@790 1851 assert(g1h->check_heap_region_claim_values(
tonyp@790 1852 HeapRegion::ScrubRemSetClaimValue),
tonyp@790 1853 "sanity check");
ysr@777 1854 } else {
ysr@777 1855 g1_par_scrub_rs_task.work(0);
ysr@777 1856 }
ysr@777 1857
ysr@777 1858 double rs_scrub_end = os::elapsedTime();
ysr@777 1859 double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start);
ysr@777 1860 _total_rs_scrub_time += this_rs_scrub_time;
ysr@777 1861 }
ysr@777 1862
ysr@777 1863 // this will also free any regions totally full of garbage objects,
ysr@777 1864 // and sort the regions.
jmasa@3294 1865 g1h->g1_policy()->record_concurrent_mark_cleanup_end((int)n_workers);
ysr@777 1866
ysr@777 1867 // Statistics.
ysr@777 1868 double end = os::elapsedTime();
ysr@777 1869 _cleanup_times.add((end - start) * 1000.0);
ysr@777 1870
ysr@777 1871 if (PrintGC || PrintGCDetails) {
ysr@777 1872 g1h->print_size_transition(gclog_or_tty,
ysr@777 1873 start_used_bytes,
ysr@777 1874 g1h->used(),
ysr@777 1875 g1h->capacity());
ysr@777 1876 }
ysr@777 1877
ysr@777 1878 size_t cleaned_up_bytes = start_used_bytes - g1h->used();
ysr@777 1879 g1p->decrease_known_garbage_bytes(cleaned_up_bytes);
ysr@777 1880
johnc@3175 1881 // Clean up will have freed any regions completely full of garbage.
johnc@3175 1882 // Update the soft reference policy with the new heap occupancy.
johnc@3175 1883 Universe::update_heap_info_at_gc();
johnc@3175 1884
ysr@777 1885 	  // We need to make this count as a "collection" so that any collection
ysr@777 1886 	  // pause that races with it goes around and waits for completeCleanup to finish.
ysr@777 1887 g1h->increment_total_collections();
ysr@777 1888
johnc@1186 1889 if (VerifyDuringGC) {
ysr@1280 1890 HandleMark hm; // handle scope
ysr@1280 1891 gclog_or_tty->print(" VerifyDuringGC:(after)");
ysr@1280 1892 Universe::heap()->prepare_for_verify();
johnc@2969 1893 Universe::verify(/* allow dirty */ true,
johnc@2969 1894 /* silent */ false,
johnc@2969 1895 /* option */ VerifyOption_G1UsePrevMarking);
ysr@777 1896 }
tonyp@2472 1897
tonyp@2472 1898 g1h->verify_region_sets_optional();
ysr@777 1899 }
ysr@777 1900
ysr@777 1901 void ConcurrentMark::completeCleanup() {
ysr@777 1902 if (has_aborted()) return;
ysr@777 1903
tonyp@2472 1904 G1CollectedHeap* g1h = G1CollectedHeap::heap();
tonyp@2472 1905
tonyp@2472 1906 _cleanup_list.verify_optional();
tonyp@2643 1907 FreeRegionList tmp_free_list("Tmp Free List");
tonyp@2472 1908
tonyp@2472 1909 if (G1ConcRegionFreeingVerbose) {
tonyp@2472 1910 gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
tonyp@2472 1911 "cleanup list has "SIZE_FORMAT" entries",
tonyp@2472 1912 _cleanup_list.length());
tonyp@2472 1913 }
tonyp@2472 1914
tonyp@2472 1915 	  // No one else should be accessing the _cleanup_list at this point,
tonyp@2472 1916 // so it's not necessary to take any locks
tonyp@2472 1917 while (!_cleanup_list.is_empty()) {
tonyp@2472 1918 HeapRegion* hr = _cleanup_list.remove_head();
tonyp@2472 1919 assert(hr != NULL, "the list was not empty");
tonyp@2849 1920 hr->par_clear();
tonyp@2643 1921 tmp_free_list.add_as_tail(hr);
tonyp@2472 1922
tonyp@2472 1923 // Instead of adding one region at a time to the secondary_free_list,
tonyp@2472 1924 // we accumulate them in the local list and move them a few at a
tonyp@2472 1925 // time. This also cuts down on the number of notify_all() calls
tonyp@2472 1926 // we do during this process. We'll also append the local list when
tonyp@2472 1927 // _cleanup_list is empty (which means we just removed the last
tonyp@2472 1928 // region from the _cleanup_list).
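      // For example, with G1SecondaryFreeListAppendLength == 5 the lock is
      // taken and notify_all() issued once per five reclaimed regions
      // rather than once per region.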
tonyp@2643 1929 if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
tonyp@2472 1930 _cleanup_list.is_empty()) {
tonyp@2472 1931 if (G1ConcRegionFreeingVerbose) {
tonyp@2472 1932 gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
tonyp@2472 1933 "appending "SIZE_FORMAT" entries to the "
tonyp@2472 1934 	                               "secondary_free_list, cleanup list still has "
tonyp@2472 1935 SIZE_FORMAT" entries",
tonyp@2643 1936 tmp_free_list.length(),
tonyp@2472 1937 _cleanup_list.length());
ysr@777 1938 }
tonyp@2472 1939
tonyp@2472 1940 {
tonyp@2472 1941 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
tonyp@2643 1942 g1h->secondary_free_list_add_as_tail(&tmp_free_list);
tonyp@2472 1943 SecondaryFreeList_lock->notify_all();
tonyp@2472 1944 }
tonyp@2472 1945
tonyp@2472 1946 if (G1StressConcRegionFreeing) {
tonyp@2472 1947 for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
tonyp@2472 1948 os::sleep(Thread::current(), (jlong) 1, false);
tonyp@2472 1949 }
tonyp@2472 1950 }
ysr@777 1951 }
ysr@777 1952 }
tonyp@2643 1953 assert(tmp_free_list.is_empty(), "post-condition");
ysr@777 1954 }
ysr@777 1955
johnc@2494 1956 	// Support closures for reference processing in G1
johnc@2494 1957
johnc@2379 1958 bool G1CMIsAliveClosure::do_object_b(oop obj) {
johnc@2379 1959 HeapWord* addr = (HeapWord*)obj;
johnc@2379 1960 return addr != NULL &&
johnc@2379 1961 (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
johnc@2379 1962 }
ysr@777 1963
ysr@777 1964 class G1CMKeepAliveClosure: public OopClosure {
ysr@777 1965 G1CollectedHeap* _g1;
ysr@777 1966 ConcurrentMark* _cm;
ysr@777 1967 CMBitMap* _bitMap;
ysr@777 1968 public:
ysr@777 1969 G1CMKeepAliveClosure(G1CollectedHeap* g1, ConcurrentMark* cm,
ysr@777 1970 CMBitMap* bitMap) :
ysr@777 1971 _g1(g1), _cm(cm),
ysr@777 1972 _bitMap(bitMap) {}
ysr@777 1973
ysr@1280 1974 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
ysr@1280 1975 virtual void do_oop( oop* p) { do_oop_work(p); }
ysr@1280 1976
ysr@1280 1977 template <class T> void do_oop_work(T* p) {
johnc@2494 1978 oop obj = oopDesc::load_decode_heap_oop(p);
johnc@2494 1979 HeapWord* addr = (HeapWord*)obj;
johnc@2494 1980
tonyp@2973 1981 if (_cm->verbose_high()) {
johnc@2494 1982 gclog_or_tty->print_cr("\t[0] we're looking at location "
tonyp@2973 1983 "*"PTR_FORMAT" = "PTR_FORMAT,
tonyp@2973 1984 p, (void*) obj);
tonyp@2973 1985 }
johnc@2494 1986
johnc@2494 1987 if (_g1->is_in_g1_reserved(addr) && _g1->is_obj_ill(obj)) {
ysr@777 1988 _bitMap->mark(addr);
johnc@2494 1989 _cm->mark_stack_push(obj);
ysr@777 1990 }
ysr@777 1991 }
ysr@777 1992 };
ysr@777 1993
ysr@777 1994 class G1CMDrainMarkingStackClosure: public VoidClosure {
ysr@777 1995 CMMarkStack* _markStack;
ysr@777 1996 CMBitMap* _bitMap;
ysr@777 1997 G1CMKeepAliveClosure* _oopClosure;
ysr@777 1998 public:
ysr@777 1999 G1CMDrainMarkingStackClosure(CMBitMap* bitMap, CMMarkStack* markStack,
ysr@777 2000 G1CMKeepAliveClosure* oopClosure) :
ysr@777 2001 _bitMap(bitMap),
ysr@777 2002 _markStack(markStack),
ysr@777 2003 _oopClosure(oopClosure)
ysr@777 2004 {}
ysr@777 2005
ysr@777 2006 void do_void() {
ysr@777 2007 _markStack->drain((OopClosure*)_oopClosure, _bitMap, false);
ysr@777 2008 }
ysr@777 2009 };
ysr@777 2010
johnc@2494 2011 // 'Keep Alive' closure used by parallel reference processing.
johnc@2494 2012 // An instance of this closure is used in the parallel reference processing
johnc@2494 2013 // code rather than an instance of G1CMKeepAliveClosure. We could have used
johnc@2494 2014 // the G1CMKeepAliveClosure as it is MT-safe. Also reference objects are
johnc@2494 2015 	// placed on the discovered ref lists only once, so we can mark and push with no
johnc@2494 2016 // need to check whether the object has already been marked. Using the
johnc@2494 2017 // G1CMKeepAliveClosure would mean, however, having all the worker threads
johnc@2494 2018 // operating on the global mark stack. This means that an individual
johnc@2494 2019 // worker would be doing lock-free pushes while it processes its own
johnc@2494 2020 	// discovered ref list followed by a drain call. If the discovered ref lists
johnc@2494 2021 // are unbalanced then this could cause interference with the other
johnc@2494 2022 // workers. Using a CMTask (and its embedded local data structures)
johnc@2494 2023 // avoids that potential interference.
johnc@2494 2024 class G1CMParKeepAliveAndDrainClosure: public OopClosure {
johnc@2494 2025 ConcurrentMark* _cm;
johnc@2494 2026 CMTask* _task;
johnc@2494 2027 int _ref_counter_limit;
johnc@2494 2028 int _ref_counter;
johnc@2494 2029 public:
johnc@3292 2030 G1CMParKeepAliveAndDrainClosure(ConcurrentMark* cm, CMTask* task) :
johnc@3292 2031 _cm(cm), _task(task),
johnc@3292 2032 _ref_counter_limit(G1RefProcDrainInterval) {
johnc@2494 2033 assert(_ref_counter_limit > 0, "sanity");
johnc@2494 2034 _ref_counter = _ref_counter_limit;
johnc@2494 2035 }
johnc@2494 2036
johnc@2494 2037 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
johnc@2494 2038 virtual void do_oop( oop* p) { do_oop_work(p); }
johnc@2494 2039
johnc@2494 2040 template <class T> void do_oop_work(T* p) {
johnc@2494 2041 if (!_cm->has_overflown()) {
johnc@2494 2042 oop obj = oopDesc::load_decode_heap_oop(p);
tonyp@2973 2043 if (_cm->verbose_high()) {
johnc@2494 2044 gclog_or_tty->print_cr("\t[%d] we're looking at location "
johnc@2494 2045 "*"PTR_FORMAT" = "PTR_FORMAT,
johnc@2494 2046 _task->task_id(), p, (void*) obj);
tonyp@2973 2047 }
johnc@2494 2048
johnc@2494 2049 _task->deal_with_reference(obj);
johnc@2494 2050 _ref_counter--;
johnc@2494 2051
johnc@2494 2052 if (_ref_counter == 0) {
johnc@2494 2053 // We have dealt with _ref_counter_limit references, pushing them and objects
johnc@2494 2054 // reachable from them on to the local stack (and possibly the global stack).
johnc@2494 2055 // Call do_marking_step() to process these entries. We call the routine in a
johnc@2494 2056 // loop, which we'll exit if there's nothing more to do (i.e. we're done
johnc@2494 2057 // with the entries that we've pushed as a result of the deal_with_reference
johnc@2494 2058 // calls above) or we overflow.
johnc@2494 2059 // Note: CMTask::do_marking_step() can set the CMTask::has_aborted() flag
johnc@2494 2060 // while there may still be some work to do. (See the comment at the
johnc@2494 2061 // beginning of CMTask::do_marking_step() for those conditions - one of which
johnc@2494 2062 // is reaching the specified time target.) It is only when
johnc@2494 2063 // CMTask::do_marking_step() returns without setting the has_aborted() flag
johnc@2494 2064 // that the marking has completed.
johnc@2494 2065 do {
johnc@2494 2066 double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
johnc@2494 2067 _task->do_marking_step(mark_step_duration_ms,
johnc@2494 2068 false /* do_stealing */,
johnc@2494 2069 false /* do_termination */);
johnc@2494 2070 } while (_task->has_aborted() && !_cm->has_overflown());
johnc@2494 2071 _ref_counter = _ref_counter_limit;
johnc@2494 2072 }
johnc@2494 2073 } else {
tonyp@2973 2074 if (_cm->verbose_high()) {
johnc@2494 2075 gclog_or_tty->print_cr("\t[%d] CM Overflow", _task->task_id());
tonyp@2973 2076 }
johnc@2494 2077 }
johnc@2494 2078 }
johnc@2494 2079 };
johnc@2494 2080
johnc@2494 2081 class G1CMParDrainMarkingStackClosure: public VoidClosure {
johnc@2494 2082 ConcurrentMark* _cm;
johnc@2494 2083 CMTask* _task;
johnc@2494 2084 public:
johnc@2494 2085 G1CMParDrainMarkingStackClosure(ConcurrentMark* cm, CMTask* task) :
johnc@2494 2086 _cm(cm), _task(task)
johnc@2494 2087 {}
johnc@2494 2088
johnc@2494 2089 void do_void() {
johnc@2494 2090 do {
tonyp@2973 2091 if (_cm->verbose_high()) {
tonyp@2973 2092 gclog_or_tty->print_cr("\t[%d] Drain: Calling do marking_step",
tonyp@2973 2093 _task->task_id());
tonyp@2973 2094 }
johnc@2494 2095
johnc@2494 2096 // We call CMTask::do_marking_step() to completely drain the local and
johnc@2494 2097 // global marking stacks. The routine is called in a loop, which we'll
johnc@2494 2098 	      // exit if there's nothing more to do (i.e. we've completely drained the
johnc@2494 2099 // entries that were pushed as a result of applying the
johnc@2494 2100 // G1CMParKeepAliveAndDrainClosure to the entries on the discovered ref
johnc@2494 2101 // lists above) or we overflow the global marking stack.
johnc@2494 2102 // Note: CMTask::do_marking_step() can set the CMTask::has_aborted() flag
johnc@2494 2103 // while there may still be some work to do. (See the comment at the
johnc@2494 2104 // beginning of CMTask::do_marking_step() for those conditions - one of which
johnc@2494 2105 // is reaching the specified time target.) It is only when
johnc@2494 2106 // CMTask::do_marking_step() returns without setting the has_aborted() flag
johnc@2494 2107 // that the marking has completed.
johnc@2494 2108
johnc@2494 2109 _task->do_marking_step(1000000000.0 /* something very large */,
johnc@2494 2110 true /* do_stealing */,
johnc@2494 2111 true /* do_termination */);
johnc@2494 2112 } while (_task->has_aborted() && !_cm->has_overflown());
johnc@2494 2113 }
johnc@2494 2114 };
johnc@2494 2115
johnc@3175 2116 // Implementation of AbstractRefProcTaskExecutor for parallel
johnc@3175 2117 // reference processing at the end of G1 concurrent marking
johnc@3175 2118
johnc@3175 2119 class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
johnc@2494 2120 private:
johnc@2494 2121 G1CollectedHeap* _g1h;
johnc@2494 2122 ConcurrentMark* _cm;
johnc@2494 2123 WorkGang* _workers;
johnc@2494 2124 int _active_workers;
johnc@2494 2125
johnc@2494 2126 public:
johnc@3175 2127 G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
johnc@2494 2128 ConcurrentMark* cm,
johnc@2494 2129 WorkGang* workers,
johnc@2494 2130 int n_workers) :
johnc@3292 2131 _g1h(g1h), _cm(cm),
johnc@3292 2132 _workers(workers), _active_workers(n_workers) { }
johnc@2494 2133
johnc@2494 2134 // Executes the given task using concurrent marking worker threads.
johnc@2494 2135 virtual void execute(ProcessTask& task);
johnc@2494 2136 virtual void execute(EnqueueTask& task);
johnc@2494 2137 };
johnc@2494 2138
johnc@3175 2139 class G1CMRefProcTaskProxy: public AbstractGangTask {
johnc@2494 2140 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
johnc@2494 2141 ProcessTask& _proc_task;
johnc@2494 2142 G1CollectedHeap* _g1h;
johnc@2494 2143 ConcurrentMark* _cm;
johnc@2494 2144
johnc@2494 2145 public:
johnc@3175 2146 G1CMRefProcTaskProxy(ProcessTask& proc_task,
johnc@2494 2147 G1CollectedHeap* g1h,
johnc@3292 2148 ConcurrentMark* cm) :
johnc@2494 2149 AbstractGangTask("Process reference objects in parallel"),
johnc@3292 2150 _proc_task(proc_task), _g1h(g1h), _cm(cm) { }
johnc@2494 2151
jmasa@3357 2152 virtual void work(uint worker_id) {
jmasa@3357 2153 CMTask* marking_task = _cm->task(worker_id);
johnc@2494 2154 G1CMIsAliveClosure g1_is_alive(_g1h);
johnc@3292 2155 G1CMParKeepAliveAndDrainClosure g1_par_keep_alive(_cm, marking_task);
johnc@2494 2156 G1CMParDrainMarkingStackClosure g1_par_drain(_cm, marking_task);
johnc@2494 2157
jmasa@3357 2158 _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
johnc@2494 2159 }
johnc@2494 2160 };
johnc@2494 2161
johnc@3175 2162 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
johnc@2494 2163 assert(_workers != NULL, "Need parallel worker threads.");
johnc@2494 2164
johnc@3292 2165 G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);
johnc@2494 2166
johnc@2494 2167 // We need to reset the phase for each task execution so that
johnc@2494 2168 // the termination protocol of CMTask::do_marking_step works.
johnc@2494 2169 _cm->set_phase(_active_workers, false /* concurrent */);
johnc@2494 2170 _g1h->set_par_threads(_active_workers);
johnc@2494 2171 _workers->run_task(&proc_task_proxy);
johnc@2494 2172 _g1h->set_par_threads(0);
johnc@2494 2173 }
johnc@2494 2174
johnc@3175 2175 class G1CMRefEnqueueTaskProxy: public AbstractGangTask {
johnc@2494 2176 typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
johnc@2494 2177 EnqueueTask& _enq_task;
johnc@2494 2178
johnc@2494 2179 public:
johnc@3175 2180 G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) :
johnc@2494 2181 AbstractGangTask("Enqueue reference objects in parallel"),
johnc@3292 2182 _enq_task(enq_task) { }
johnc@2494 2183
jmasa@3357 2184 virtual void work(uint worker_id) {
jmasa@3357 2185 _enq_task.work(worker_id);
johnc@2494 2186 }
johnc@2494 2187 };
johnc@2494 2188
johnc@3175 2189 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
johnc@2494 2190 assert(_workers != NULL, "Need parallel worker threads.");
johnc@2494 2191
johnc@3175 2192 G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);
johnc@2494 2193
johnc@2494 2194 _g1h->set_par_threads(_active_workers);
johnc@2494 2195 _workers->run_task(&enq_task_proxy);
johnc@2494 2196 _g1h->set_par_threads(0);
johnc@2494 2197 }
johnc@2494 2198
ysr@777 2199 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
ysr@777 2200 ResourceMark rm;
ysr@777 2201 HandleMark hm;
johnc@3171 2202
johnc@3171 2203 G1CollectedHeap* g1h = G1CollectedHeap::heap();
johnc@3171 2204
johnc@3171 2205 // Is alive closure.
johnc@3171 2206 G1CMIsAliveClosure g1_is_alive(g1h);
johnc@3171 2207
johnc@3171 2208 // Inner scope to exclude the cleaning of the string and symbol
johnc@3171 2209 // tables from the displayed time.
johnc@3171 2210 {
johnc@3171 2211 bool verbose = PrintGC && PrintGCDetails;
johnc@3171 2212 if (verbose) {
johnc@3171 2213 gclog_or_tty->put(' ');
johnc@3171 2214 }
johnc@3171 2215 TraceTime t("GC ref-proc", verbose, false, gclog_or_tty);
johnc@3171 2216
johnc@3175 2217 ReferenceProcessor* rp = g1h->ref_processor_cm();
johnc@3171 2218
johnc@3171 2219 // See the comment in G1CollectedHeap::ref_processing_init()
johnc@3171 2220 // about how reference processing currently works in G1.
johnc@3171 2221
johnc@3171 2222 // Process weak references.
johnc@3171 2223 rp->setup_policy(clear_all_soft_refs);
johnc@3171 2224 assert(_markStack.isEmpty(), "mark stack should be empty");
johnc@3171 2225
johnc@3171 2226 G1CMKeepAliveClosure g1_keep_alive(g1h, this, nextMarkBitMap());
johnc@3171 2227 G1CMDrainMarkingStackClosure
johnc@3171 2228 g1_drain_mark_stack(nextMarkBitMap(), &_markStack, &g1_keep_alive);
johnc@3171 2229
johnc@3171 2230 // We use the work gang from the G1CollectedHeap and we utilize all
johnc@3171 2231 // the worker threads.
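    // The count is clamped below to the range [1, _max_task_num], since
    // each worker needs its own CMTask.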
jmasa@3357 2232 uint active_workers = g1h->workers() ? g1h->workers()->active_workers() : 1U;
jmasa@3357 2233 active_workers = MAX2(MIN2(active_workers, _max_task_num), 1U);
johnc@3171 2234
johnc@3292 2235 G1CMRefProcTaskExecutor par_task_executor(g1h, this,
johnc@3175 2236 g1h->workers(), active_workers);
johnc@3171 2237
johnc@3171 2238 if (rp->processing_is_mt()) {
johnc@3171 2239 // Set the degree of MT here. If the discovery is done MT, there
johnc@3171 2240 // may have been a different number of threads doing the discovery
johnc@3171 2241 // and a different number of discovered lists may have Ref objects.
johnc@3171 2242 // That is OK as long as the Reference lists are balanced (see
johnc@3171 2243 // balance_all_queues() and balance_queues()).
johnc@3171 2244 rp->set_active_mt_degree(active_workers);
johnc@3171 2245
johnc@3171 2246 rp->process_discovered_references(&g1_is_alive,
johnc@2494 2247 &g1_keep_alive,
johnc@2494 2248 &g1_drain_mark_stack,
johnc@2494 2249 &par_task_executor);
johnc@2494 2250
johnc@3171 2251 // The work routines of the parallel keep_alive and drain_marking_stack
johnc@3171 2252 // will set the has_overflown flag if we overflow the global marking
johnc@3171 2253 // stack.
johnc@3171 2254 } else {
johnc@3171 2255 rp->process_discovered_references(&g1_is_alive,
johnc@3171 2256 &g1_keep_alive,
johnc@3171 2257 &g1_drain_mark_stack,
johnc@3171 2258 NULL);
johnc@3171 2259 }
johnc@3171 2260
johnc@3171 2261 assert(_markStack.overflow() || _markStack.isEmpty(),
johnc@3171 2262 "mark stack should be empty (unless it overflowed)");
johnc@3171 2263 if (_markStack.overflow()) {
johnc@3171 2264 // Should have been done already when we tried to push an
johnc@3171 2265 // entry on to the global mark stack. But let's do it again.
johnc@3171 2266 set_has_overflown();
johnc@3171 2267 }
johnc@3171 2268
johnc@3171 2269 if (rp->processing_is_mt()) {
johnc@3171 2270 assert(rp->num_q() == active_workers, "why not");
johnc@3171 2271 rp->enqueue_discovered_references(&par_task_executor);
johnc@3171 2272 } else {
johnc@3171 2273 rp->enqueue_discovered_references();
johnc@3171 2274 }
johnc@3171 2275
johnc@3171 2276 rp->verify_no_references_recorded();
johnc@3175 2277 assert(!rp->discovery_enabled(), "Post condition");
johnc@2494 2278 }
johnc@2494 2279
coleenp@2497 2280 // Now clean up stale oops in StringTable
johnc@2379 2281 StringTable::unlink(&g1_is_alive);
coleenp@2497 2282 // Clean up unreferenced symbols in symbol table.
coleenp@2497 2283 SymbolTable::unlink();
ysr@777 2284 }
ysr@777 2285
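// Called during cleanup: the "next" bitmap, which now holds complete
// liveness information, becomes "prev"; the old "prev" storage will be
// cleared and reused as "next" by the following marking cycle.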
ysr@777 2286 void ConcurrentMark::swapMarkBitMaps() {
ysr@777 2287 CMBitMapRO* temp = _prevMarkBitMap;
ysr@777 2288 _prevMarkBitMap = (CMBitMapRO*)_nextMarkBitMap;
ysr@777 2289 _nextMarkBitMap = (CMBitMap*) temp;
ysr@777 2290 }
ysr@777 2291
ysr@777 2292 class CMRemarkTask: public AbstractGangTask {
ysr@777 2293 private:
ysr@777 2294 ConcurrentMark *_cm;
ysr@777 2295
ysr@777 2296 public:
jmasa@3357 2297 void work(uint worker_id) {
ysr@777 2298 // Since all available tasks are actually started, we should
ysr@777 2299 	    // only proceed if we're supposed to be active.
jmasa@3357 2300 if (worker_id < _cm->active_tasks()) {
jmasa@3357 2301 CMTask* task = _cm->task(worker_id);
ysr@777 2302 task->record_start_time();
ysr@777 2303 do {
johnc@2494 2304 task->do_marking_step(1000000000.0 /* something very large */,
johnc@2494 2305 true /* do_stealing */,
johnc@2494 2306 true /* do_termination */);
ysr@777 2307 } while (task->has_aborted() && !_cm->has_overflown());
ysr@777 2308 // If we overflow, then we do not want to restart. We instead
ysr@777 2309 // want to abort remark and do concurrent marking again.
ysr@777 2310 task->record_end_time();
ysr@777 2311 }
ysr@777 2312 }
ysr@777 2313
johnc@3338 2314 CMRemarkTask(ConcurrentMark* cm, int active_workers) :
jmasa@3294 2315 AbstractGangTask("Par Remark"), _cm(cm) {
johnc@3338 2316 _cm->terminator()->reset_for_reuse(active_workers);
jmasa@3294 2317 }
ysr@777 2318 };
ysr@777 2319
ysr@777 2320 void ConcurrentMark::checkpointRootsFinalWork() {
ysr@777 2321 ResourceMark rm;
ysr@777 2322 HandleMark hm;
ysr@777 2323 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@777 2324
ysr@777 2325 g1h->ensure_parsability(false);
ysr@777 2326
jmasa@2188 2327 if (G1CollectedHeap::use_parallel_gc_threads()) {
jrose@1424 2328 G1CollectedHeap::StrongRootsScope srs(g1h);
jmasa@3294 2329 // this is remark, so we'll use up all active threads
jmasa@3357 2330 uint active_workers = g1h->workers()->active_workers();
jmasa@3294 2331 if (active_workers == 0) {
jmasa@3294 2332 assert(active_workers > 0, "Should have been set earlier");
jmasa@3357 2333 active_workers = (uint) ParallelGCThreads;
jmasa@3294 2334 g1h->workers()->set_active_workers(active_workers);
jmasa@3294 2335 }
johnc@2494 2336 set_phase(active_workers, false /* concurrent */);
jmasa@3294 2337 	    // Leave _parallel_marking_threads at its
jmasa@3294 2338 // value originally calculated in the ConcurrentMark
jmasa@3294 2339 // constructor and pass values of the active workers
jmasa@3294 2340 // through the gang in the task.
ysr@777 2341
johnc@3338 2342 CMRemarkTask remarkTask(this, active_workers);
jmasa@3294 2343 g1h->set_par_threads(active_workers);
ysr@777 2344 g1h->workers()->run_task(&remarkTask);
ysr@777 2345 g1h->set_par_threads(0);
ysr@777 2346 } else {
jrose@1424 2347 G1CollectedHeap::StrongRootsScope srs(g1h);
ysr@777 2348 // this is remark, so we'll use up all available threads
jmasa@3357 2349 uint active_workers = 1;
johnc@2494 2350 set_phase(active_workers, false /* concurrent */);
ysr@777 2351
johnc@3338 2352 CMRemarkTask remarkTask(this, active_workers);
ysr@777 2353 // We will start all available threads, even if we decide that the
ysr@777 2354 // active_workers will be fewer. The extra ones will just bail out
ysr@777 2355 // immediately.
ysr@777 2356 remarkTask.work(0);
ysr@777 2357 }
tonyp@1458 2358 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
tonyp@1458 2359 guarantee(satb_mq_set.completed_buffers_num() == 0, "invariant");
ysr@777 2360
ysr@777 2361 print_stats();
ysr@777 2362
ysr@777 2363 #if VERIFY_OBJS_PROCESSED
ysr@777 2364 if (_scan_obj_cl.objs_processed != ThreadLocalObjQueue::objs_enqueued) {
ysr@777 2365 gclog_or_tty->print_cr("Processed = %d, enqueued = %d.",
ysr@777 2366 _scan_obj_cl.objs_processed,
ysr@777 2367 ThreadLocalObjQueue::objs_enqueued);
ysr@777 2368 guarantee(_scan_obj_cl.objs_processed ==
ysr@777 2369 ThreadLocalObjQueue::objs_enqueued,
ysr@777 2370 "Different number of objs processed and enqueued.");
ysr@777 2371 }
ysr@777 2372 #endif
ysr@777 2373 }
ysr@777 2374
tonyp@1479 2375 #ifndef PRODUCT
tonyp@1479 2376
tonyp@1823 2377 class PrintReachableOopClosure: public OopClosure {
ysr@777 2378 private:
ysr@777 2379 G1CollectedHeap* _g1h;
ysr@777 2380 outputStream* _out;
johnc@2969 2381 VerifyOption _vo;
tonyp@1823 2382 bool _all;
ysr@777 2383
ysr@777 2384 public:
johnc@2969 2385 PrintReachableOopClosure(outputStream* out,
johnc@2969 2386 VerifyOption vo,
tonyp@1823 2387 bool all) :
tonyp@1479 2388 _g1h(G1CollectedHeap::heap()),
johnc@2969 2389 _out(out), _vo(vo), _all(all) { }
ysr@777 2390
ysr@1280 2391 void do_oop(narrowOop* p) { do_oop_work(p); }
ysr@1280 2392 void do_oop( oop* p) { do_oop_work(p); }
ysr@1280 2393
ysr@1280 2394 template <class T> void do_oop_work(T* p) {
ysr@1280 2395 oop obj = oopDesc::load_decode_heap_oop(p);
ysr@777 2396 const char* str = NULL;
ysr@777 2397 const char* str2 = "";
ysr@777 2398
tonyp@1823 2399 if (obj == NULL) {
tonyp@1823 2400 str = "";
tonyp@1823 2401 } else if (!_g1h->is_in_g1_reserved(obj)) {
tonyp@1823 2402 str = " O";
tonyp@1823 2403 } else {
ysr@777 2404 HeapRegion* hr = _g1h->heap_region_containing(obj);
tonyp@1458 2405 guarantee(hr != NULL, "invariant");
tonyp@1479 2406 bool over_tams = false;
johnc@2969 2407 bool marked = false;
johnc@2969 2408
johnc@2969 2409 switch (_vo) {
johnc@2969 2410 case VerifyOption_G1UsePrevMarking:
johnc@2969 2411 over_tams = hr->obj_allocated_since_prev_marking(obj);
johnc@2969 2412 marked = _g1h->isMarkedPrev(obj);
johnc@2969 2413 break;
johnc@2969 2414 case VerifyOption_G1UseNextMarking:
johnc@2969 2415 over_tams = hr->obj_allocated_since_next_marking(obj);
johnc@2969 2416 marked = _g1h->isMarkedNext(obj);
johnc@2969 2417 break;
johnc@2969 2418 case VerifyOption_G1UseMarkWord:
johnc@2969 2419 marked = obj->is_gc_marked();
johnc@2969 2420 break;
johnc@2969 2421 default:
johnc@2969 2422 ShouldNotReachHere();
tonyp@1479 2423 }
tonyp@1479 2424
tonyp@1479 2425 if (over_tams) {
tonyp@1823 2426 str = " >";
tonyp@1823 2427 if (marked) {
ysr@777 2428 str2 = " AND MARKED";
tonyp@1479 2429 }
tonyp@1823 2430 } else if (marked) {
tonyp@1823 2431 str = " M";
tonyp@1479 2432 } else {
tonyp@1823 2433 str = " NOT";
tonyp@1479 2434 }
ysr@777 2435 }
ysr@777 2436
tonyp@1823 2437 _out->print_cr(" "PTR_FORMAT": "PTR_FORMAT"%s%s",
ysr@777 2438 p, (void*) obj, str, str2);
ysr@777 2439 }
ysr@777 2440 };
ysr@777 2441
tonyp@1823 2442 class PrintReachableObjectClosure : public ObjectClosure {
ysr@777 2443 private:
johnc@2969 2444 G1CollectedHeap* _g1h;
johnc@2969 2445 outputStream* _out;
johnc@2969 2446 VerifyOption _vo;
johnc@2969 2447 bool _all;
johnc@2969 2448 HeapRegion* _hr;
ysr@777 2449
ysr@777 2450 public:
johnc@2969 2451 PrintReachableObjectClosure(outputStream* out,
johnc@2969 2452 VerifyOption vo,
tonyp@1823 2453 bool all,
tonyp@1823 2454 HeapRegion* hr) :
johnc@2969 2455 _g1h(G1CollectedHeap::heap()),
johnc@2969 2456 _out(out), _vo(vo), _all(all), _hr(hr) { }
tonyp@1823 2457
tonyp@1823 2458 void do_object(oop o) {
johnc@2969 2459 bool over_tams = false;
johnc@2969 2460 bool marked = false;
johnc@2969 2461
johnc@2969 2462 switch (_vo) {
johnc@2969 2463 case VerifyOption_G1UsePrevMarking:
johnc@2969 2464 over_tams = _hr->obj_allocated_since_prev_marking(o);
johnc@2969 2465 marked = _g1h->isMarkedPrev(o);
johnc@2969 2466 break;
johnc@2969 2467 case VerifyOption_G1UseNextMarking:
johnc@2969 2468 over_tams = _hr->obj_allocated_since_next_marking(o);
johnc@2969 2469 marked = _g1h->isMarkedNext(o);
johnc@2969 2470 break;
johnc@2969 2471 case VerifyOption_G1UseMarkWord:
johnc@2969 2472 marked = o->is_gc_marked();
johnc@2969 2473 break;
johnc@2969 2474 default:
johnc@2969 2475 ShouldNotReachHere();
tonyp@1823 2476 }
tonyp@1823 2477 bool print_it = _all || over_tams || marked;
tonyp@1823 2478
tonyp@1823 2479 if (print_it) {
tonyp@1823 2480 _out->print_cr(" "PTR_FORMAT"%s",
tonyp@1823 2481 o, (over_tams) ? " >" : (marked) ? " M" : "");
johnc@2969 2482 PrintReachableOopClosure oopCl(_out, _vo, _all);
tonyp@1823 2483 o->oop_iterate(&oopCl);
tonyp@1823 2484 }
ysr@777 2485 }
ysr@777 2486 };
ysr@777 2487
tonyp@1823 2488 class PrintReachableRegionClosure : public HeapRegionClosure {
ysr@777 2489 private:
ysr@777 2490 outputStream* _out;
johnc@2969 2491 VerifyOption _vo;
tonyp@1823 2492 bool _all;
ysr@777 2493
ysr@777 2494 public:
ysr@777 2495 bool doHeapRegion(HeapRegion* hr) {
ysr@777 2496 HeapWord* b = hr->bottom();
ysr@777 2497 HeapWord* e = hr->end();
ysr@777 2498 HeapWord* t = hr->top();
tonyp@1479 2499 HeapWord* p = NULL;
johnc@2969 2500
johnc@2969 2501 switch (_vo) {
johnc@2969 2502 case VerifyOption_G1UsePrevMarking:
johnc@2969 2503 p = hr->prev_top_at_mark_start();
johnc@2969 2504 break;
johnc@2969 2505 case VerifyOption_G1UseNextMarking:
johnc@2969 2506 p = hr->next_top_at_mark_start();
johnc@2969 2507 break;
johnc@2969 2508 case VerifyOption_G1UseMarkWord:
johnc@2969 2509 // When we are verifying marking using the mark word
johnc@2969 2510 // TAMS has no relevance.
johnc@2969 2511 assert(p == NULL, "post-condition");
johnc@2969 2512 break;
johnc@2969 2513 default:
johnc@2969 2514 ShouldNotReachHere();
tonyp@1479 2515 }
ysr@777 2516 _out->print_cr("** ["PTR_FORMAT", "PTR_FORMAT"] top: "PTR_FORMAT" "
tonyp@1479 2517 "TAMS: "PTR_FORMAT, b, e, t, p);
tonyp@1823 2518 _out->cr();
tonyp@1823 2519
tonyp@1823 2520 HeapWord* from = b;
tonyp@1823 2521 HeapWord* to = t;
tonyp@1823 2522
tonyp@1823 2523 if (to > from) {
tonyp@1823 2524 _out->print_cr("Objects in ["PTR_FORMAT", "PTR_FORMAT"]", from, to);
tonyp@1823 2525 _out->cr();
johnc@2969 2526 PrintReachableObjectClosure ocl(_out, _vo, _all, hr);
tonyp@1823 2527 hr->object_iterate_mem_careful(MemRegion(from, to), &ocl);
tonyp@1823 2528 _out->cr();
tonyp@1823 2529 }
ysr@777 2530
ysr@777 2531 return false;
ysr@777 2532 }
ysr@777 2533
johnc@2969 2534 PrintReachableRegionClosure(outputStream* out,
johnc@2969 2535 VerifyOption vo,
tonyp@1823 2536 bool all) :
johnc@2969 2537 _out(out), _vo(vo), _all(all) { }
ysr@777 2538 };
ysr@777 2539
johnc@2969 2540 static const char* verify_option_to_tams(VerifyOption vo) {
johnc@2969 2541 switch (vo) {
johnc@2969 2542 case VerifyOption_G1UsePrevMarking:
johnc@2969 2543 return "PTAMS";
johnc@2969 2544 case VerifyOption_G1UseNextMarking:
johnc@2969 2545 return "NTAMS";
johnc@2969 2546 default:
johnc@2969 2547 return "NONE";
johnc@2969 2548 }
johnc@2969 2549 }
johnc@2969 2550
tonyp@1823 2551 void ConcurrentMark::print_reachable(const char* str,
johnc@2969 2552 VerifyOption vo,
tonyp@1823 2553 bool all) {
tonyp@1823 2554 gclog_or_tty->cr();
tonyp@1823 2555 gclog_or_tty->print_cr("== Doing heap dump... ");
tonyp@1479 2556
tonyp@1479 2557 if (G1PrintReachableBaseFile == NULL) {
tonyp@1479 2558 gclog_or_tty->print_cr(" #### error: no base file defined");
tonyp@1479 2559 return;
tonyp@1479 2560 }
tonyp@1479 2561
tonyp@1479 2562 if (strlen(G1PrintReachableBaseFile) + 1 + strlen(str) >
tonyp@1479 2563 (JVM_MAXPATHLEN - 1)) {
tonyp@1479 2564 gclog_or_tty->print_cr(" #### error: file name too long");
tonyp@1479 2565 return;
tonyp@1479 2566 }
tonyp@1479 2567
tonyp@1479 2568 char file_name[JVM_MAXPATHLEN];
tonyp@1479 2569 sprintf(file_name, "%s.%s", G1PrintReachableBaseFile, str);
tonyp@1479 2570 gclog_or_tty->print_cr(" dumping to file %s", file_name);
tonyp@1479 2571
tonyp@1479 2572 fileStream fout(file_name);
tonyp@1479 2573 if (!fout.is_open()) {
tonyp@1479 2574 gclog_or_tty->print_cr(" #### error: could not open file");
tonyp@1479 2575 return;
tonyp@1479 2576 }
tonyp@1479 2577
tonyp@1479 2578 outputStream* out = &fout;
johnc@2969 2579 out->print_cr("-- USING %s", verify_option_to_tams(vo));
tonyp@1479 2580 out->cr();
tonyp@1479 2581
tonyp@1823 2582 out->print_cr("--- ITERATING OVER REGIONS");
tonyp@1479 2583 out->cr();
johnc@2969 2584 PrintReachableRegionClosure rcl(out, vo, all);
ysr@777 2585 _g1h->heap_region_iterate(&rcl);
tonyp@1479 2586 out->cr();
tonyp@1479 2587
tonyp@1479 2588 gclog_or_tty->print_cr(" done");
tonyp@1823 2589 gclog_or_tty->flush();
ysr@777 2590 }
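// --- Editor's sketch (illustration only, not part of the original file) ---
// print_reachable() above checks the combined name length before the
// sprintf() into its fixed JVM_MAXPATHLEN buffer, so the copy cannot
// overflow. A minimal standalone variant of the same pattern using
// snprintf(), which keeps the truncation check next to the formatting
// call; the function and parameter names here are invented:

#include <cstddef>
#include <cstdio>

namespace illustration {
  // Builds "<base>.<suffix>" into buf; returns false rather than
  // silently truncating the file name.
  static bool build_dump_file_name(char* buf, size_t buf_size,
                                   const char* base, const char* suffix) {
    int n = snprintf(buf, buf_size, "%s.%s", base, suffix);
    // snprintf() reports how long the full result would have been;
    // n >= buf_size means it did not fit and was truncated.
    return n >= 0 && (size_t) n < buf_size;
  }
}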
ysr@777 2591
tonyp@1479 2592 #endif // PRODUCT
tonyp@1479 2593
ysr@777 2594 // This note is for drainAllSATBBuffers and the code between here and it.
ysr@777 2595 // In the future we could reuse a task to do this work during an
ysr@777 2596 // evacuation pause (since now tasks are not active and can be claimed
ysr@777 2597 // during an evacuation pause). This was a late change to the code and
ysr@777 2598 // is currently not being taken advantage of.
ysr@777 2599
ysr@777 2600 class CMGlobalObjectClosure : public ObjectClosure {
ysr@777 2601 private:
ysr@777 2602 ConcurrentMark* _cm;
ysr@777 2603
ysr@777 2604 public:
ysr@777 2605 void do_object(oop obj) {
ysr@777 2606 _cm->deal_with_reference(obj);
ysr@777 2607 }
ysr@777 2608
ysr@777 2609 CMGlobalObjectClosure(ConcurrentMark* cm) : _cm(cm) { }
ysr@777 2610 };
ysr@777 2611
ysr@777 2612 void ConcurrentMark::deal_with_reference(oop obj) {
tonyp@2968 2613 if (verbose_high()) {
ysr@777 2614 gclog_or_tty->print_cr("[global] we're dealing with reference "PTR_FORMAT,
ysr@777 2615 (void*) obj);
tonyp@2968 2616 }
ysr@777 2617
ysr@777 2618 HeapWord* objAddr = (HeapWord*) obj;
ysr@1280 2619 assert(obj->is_oop_or_null(true /* ignore mark word */), "Error");
ysr@777 2620 if (_g1h->is_in_g1_reserved(objAddr)) {
tonyp@2968 2621 assert(obj != NULL, "null check is implicit");
tonyp@2968 2622 if (!_nextMarkBitMap->isMarked(objAddr)) {
tonyp@2968 2623 // Only get the containing region if the object is not marked on the
tonyp@2968 2624 // bitmap (otherwise, it's a waste of time since we won't do
tonyp@2968 2625 // anything with it).
tonyp@2968 2626 HeapRegion* hr = _g1h->heap_region_containing_raw(obj);
tonyp@2968 2627 if (!hr->obj_allocated_since_next_marking(obj)) {
tonyp@2968 2628 if (verbose_high()) {
tonyp@2968 2629 gclog_or_tty->print_cr("[global] "PTR_FORMAT" is not considered "
tonyp@2968 2630 "marked", (void*) obj);
tonyp@2968 2631 }
tonyp@2968 2632
tonyp@2968 2633 // we need to mark it first
tonyp@2968 2634 if (_nextMarkBitMap->parMark(objAddr)) {
tonyp@2968 2635           // No OrderAccess::store_load() is needed. It is implicit in the
tonyp@2968 2636 // CAS done in parMark(objAddr) above
tonyp@2968 2637 HeapWord* finger = _finger;
tonyp@2968 2638 if (objAddr < finger) {
tonyp@2968 2639 if (verbose_high()) {
tonyp@2968 2640 gclog_or_tty->print_cr("[global] below the global finger "
tonyp@2968 2641 "("PTR_FORMAT"), pushing it", finger);
tonyp@2968 2642 }
tonyp@2968 2643 if (!mark_stack_push(obj)) {
tonyp@2968 2644 if (verbose_low()) {
tonyp@2968 2645 gclog_or_tty->print_cr("[global] global stack overflow during "
tonyp@2968 2646 "deal_with_reference");
tonyp@2968 2647 }
tonyp@2968 2648 }
ysr@777 2649 }
ysr@777 2650 }
ysr@777 2651 }
ysr@777 2652 }
ysr@777 2653 }
ysr@777 2654 }
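// --- Editor's sketch (illustration only, not part of the original file) ---
// The core of deal_with_reference() above is: (1) atomically set the mark
// bit, and (2) push the object only if it lies below the global finger,
// since anything at or above the finger will still be visited by the
// ongoing bitmap scan. A minimal standalone model of that decision,
// assuming C++11 atomics; all names here are invented:

#include <atomic>
#include <cstddef>

namespace illustration {
  struct MarkState {
    std::atomic<bool>*  mark_bits;  // one flag per object slot
    std::atomic<size_t> finger;     // scan frontier, in slot indices

    // Returns true iff the caller must also push the object on a mark stack.
    bool mark_and_decide_push(size_t slot) {
      bool expected = false;
      // The CAS ensures exactly one thread "wins" the marking of this object.
      if (!mark_bits[slot].compare_exchange_strong(expected, true)) {
        return false;               // already marked by someone else
      }
      // Objects at or above the finger will be found by the bitmap scan;
      // only objects already behind the frontier need an explicit push.
      return slot < finger.load(std::memory_order_acquire);
    }
  };
}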
ysr@777 2655
ysr@777 2656 void ConcurrentMark::drainAllSATBBuffers() {
tonyp@3416 2657 guarantee(false, "drainAllSATBBuffers(): don't call this any more");
tonyp@3416 2658
ysr@777 2659 CMGlobalObjectClosure oc(this);
ysr@777 2660 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
ysr@777 2661 satb_mq_set.set_closure(&oc);
ysr@777 2662
ysr@777 2663 while (satb_mq_set.apply_closure_to_completed_buffer()) {
tonyp@2973 2664 if (verbose_medium()) {
ysr@777 2665 gclog_or_tty->print_cr("[global] processed an SATB buffer");
tonyp@2973 2666 }
ysr@777 2667 }
ysr@777 2668
ysr@777 2669 // no need to check whether we should do this, as this is only
ysr@777 2670 // called during an evacuation pause
ysr@777 2671 satb_mq_set.iterate_closure_all_threads();
ysr@777 2672
ysr@777 2673 satb_mq_set.set_closure(NULL);
tonyp@1458 2674 assert(satb_mq_set.completed_buffers_num() == 0, "invariant");
ysr@777 2675 }
ysr@777 2676
ysr@777 2677 void ConcurrentMark::clear(oop p) {
ysr@777 2678 assert(p != NULL && p->is_oop(), "expected an oop");
ysr@777 2679 HeapWord* addr = (HeapWord*)p;
ysr@777 2680   assert(addr >= _nextMarkBitMap->startWord() &&
ysr@777 2681          addr < _nextMarkBitMap->endWord(), "in a region");
ysr@777 2682
ysr@777 2683 _nextMarkBitMap->clear(addr);
ysr@777 2684 }
ysr@777 2685
tonyp@3416 2686 void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
ysr@777 2687 // Note we are overriding the read-only view of the prev map here, via
ysr@777 2688 // the cast.
ysr@777 2689 ((CMBitMap*)_prevMarkBitMap)->clearRange(mr);
tonyp@3416 2690 }
tonyp@3416 2691
tonyp@3416 2692 void ConcurrentMark::clearRangeNextBitmap(MemRegion mr) {
ysr@777 2693 _nextMarkBitMap->clearRange(mr);
ysr@777 2694 }
ysr@777 2695
tonyp@3416 2696 void ConcurrentMark::clearRangeBothBitmaps(MemRegion mr) {
tonyp@3416 2697 clearRangePrevBitmap(mr);
tonyp@3416 2698 clearRangeNextBitmap(mr);
tonyp@3416 2699 }
tonyp@3416 2700
ysr@777 2701 HeapRegion*
ysr@777 2702 ConcurrentMark::claim_region(int task_num) {
ysr@777 2703 // "checkpoint" the finger
ysr@777 2704 HeapWord* finger = _finger;
ysr@777 2705
ysr@777 2706 // _heap_end will not change underneath our feet; it only changes at
ysr@777 2707 // yield points.
ysr@777 2708 while (finger < _heap_end) {
tonyp@1458 2709 assert(_g1h->is_in_g1_reserved(finger), "invariant");
ysr@777 2710
tonyp@2968 2711 // Note on how this code handles humongous regions. In the
tonyp@2968 2712 // normal case the finger will reach the start of a "starts
tonyp@2968 2713 // humongous" (SH) region. Its end will either be the end of the
tonyp@2968 2714 // last "continues humongous" (CH) region in the sequence, or the
tonyp@2968 2715 // standard end of the SH region (if the SH is the only region in
tonyp@2968 2716 // the sequence). That way claim_region() will skip over the CH
tonyp@2968 2717 // regions. However, there is a subtle race between a CM thread
tonyp@2968 2718 // executing this method and a mutator thread doing a humongous
tonyp@2968 2719 // object allocation. The two are not mutually exclusive as the CM
tonyp@2968 2720 // thread does not need to hold the Heap_lock when it gets
tonyp@2968 2721 // here. So there is a chance that claim_region() will come across
tonyp@2968 2722     // a free region that's in the process of becoming a SH or a CH
tonyp@2968 2723 // region. In the former case, it will either
tonyp@2968 2724 // a) Miss the update to the region's end, in which case it will
tonyp@2968 2725 // visit every subsequent CH region, will find their bitmaps
tonyp@2968 2726 // empty, and do nothing, or
tonyp@2968 2727     //  b) Observe the update of the region's end (in which case
tonyp@2968 2728 // it will skip the subsequent CH regions).
tonyp@2968 2729 // If it comes across a region that suddenly becomes CH, the
tonyp@2968 2730 // scenario will be similar to b). So, the race between
tonyp@2968 2731 // claim_region() and a humongous object allocation might force us
tonyp@2968 2732 // to do a bit of unnecessary work (due to some unnecessary bitmap
tonyp@2968 2733     // iterations) but it should not introduce any correctness issues.
tonyp@2968 2734 HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger);
ysr@777 2735 HeapWord* bottom = curr_region->bottom();
ysr@777 2736 HeapWord* end = curr_region->end();
ysr@777 2737 HeapWord* limit = curr_region->next_top_at_mark_start();
ysr@777 2738
tonyp@2968 2739 if (verbose_low()) {
ysr@777 2740 gclog_or_tty->print_cr("[%d] curr_region = "PTR_FORMAT" "
ysr@777 2741 "["PTR_FORMAT", "PTR_FORMAT"), "
ysr@777 2742 "limit = "PTR_FORMAT,
ysr@777 2743 task_num, curr_region, bottom, end, limit);
tonyp@2968 2744 }
tonyp@2968 2745
tonyp@2968 2746 // Is the gap between reading the finger and doing the CAS too long?
tonyp@2968 2747 HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
ysr@777 2748 if (res == finger) {
ysr@777 2749 // we succeeded
ysr@777 2750
ysr@777 2751       // notice that _finger == end cannot be guaranteed here since
ysr@777 2752 // someone else might have moved the finger even further
tonyp@1458 2753 assert(_finger >= end, "the finger should have moved forward");
ysr@777 2754
tonyp@2973 2755 if (verbose_low()) {
ysr@777 2756 gclog_or_tty->print_cr("[%d] we were successful with region = "
ysr@777 2757 PTR_FORMAT, task_num, curr_region);
tonyp@2973 2758 }
ysr@777 2759
ysr@777 2760 if (limit > bottom) {
tonyp@2973 2761 if (verbose_low()) {
ysr@777 2762 gclog_or_tty->print_cr("[%d] region "PTR_FORMAT" is not empty, "
ysr@777 2763 "returning it ", task_num, curr_region);
tonyp@2973 2764 }
ysr@777 2765 return curr_region;
ysr@777 2766 } else {
tonyp@1458 2767 assert(limit == bottom,
tonyp@1458 2768 "the region limit should be at bottom");
tonyp@2973 2769 if (verbose_low()) {
ysr@777 2770 gclog_or_tty->print_cr("[%d] region "PTR_FORMAT" is empty, "
ysr@777 2771 "returning NULL", task_num, curr_region);
tonyp@2973 2772 }
ysr@777 2773 // we return NULL and the caller should try calling
ysr@777 2774 // claim_region() again.
ysr@777 2775 return NULL;
ysr@777 2776 }
ysr@777 2777 } else {
tonyp@1458 2778 assert(_finger > finger, "the finger should have moved forward");
tonyp@2973 2779 if (verbose_low()) {
ysr@777 2780 gclog_or_tty->print_cr("[%d] somebody else moved the finger, "
ysr@777 2781 "global finger = "PTR_FORMAT", "
ysr@777 2782 "our finger = "PTR_FORMAT,
ysr@777 2783 task_num, _finger, finger);
tonyp@2973 2784 }
ysr@777 2785
ysr@777 2786 // read it again
ysr@777 2787 finger = _finger;
ysr@777 2788 }
ysr@777 2789 }
ysr@777 2790
ysr@777 2791 return NULL;
ysr@777 2792 }
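// --- Editor's sketch (illustration only, not part of the original file) ---
// claim_region() above hands out regions by CAS-ing a shared "finger"
// forward: a worker reads the finger, computes the end of the region the
// finger points into, and tries to install that end as the new finger.
// Losing the CAS simply means another worker claimed first, so we reload
// and retry. A standalone model over fixed-size regions, assuming C++11
// atomics (the real code derives the region end via
// heap_region_containing_raw(); the fixed REGION_WORDS here is a
// simplification, and all names are invented):

#include <atomic>
#include <cstddef>

namespace illustration {
  const size_t REGION_WORDS = 512;   // fixed region size for the sketch

  struct RegionClaimer {
    std::atomic<size_t> finger;      // word index of the next unclaimed region
    size_t              heap_end;    // word index one past the heap

    // Returns the start of a claimed region, or heap_end when none is left.
    size_t claim(size_t* region_end_out) {
      size_t f = finger.load();
      while (f < heap_end) {
        size_t end = f + REGION_WORDS;        // end of the region f points into
        if (finger.compare_exchange_strong(f, end)) {
          *region_end_out = end;              // success: [f, end) is ours
          return f;
        }
        // CAS failure reloaded f with the current finger; just retry.
      }
      return heap_end;
    }
  };
}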
ysr@777 2793
johnc@2190 2794 bool ConcurrentMark::invalidate_aborted_regions_in_cset() {
tonyp@3416 2795 guarantee(false, "invalidate_aborted_regions_in_cset(): "
tonyp@3416 2796 "don't call this any more");
tonyp@3416 2797
johnc@2190 2798 bool result = false;
johnc@2190 2799 for (int i = 0; i < (int)_max_task_num; ++i) {
johnc@2190 2800 CMTask* the_task = _tasks[i];
johnc@2190 2801 MemRegion mr = the_task->aborted_region();
johnc@2190 2802 if (mr.start() != NULL) {
johnc@2190 2803 assert(mr.end() != NULL, "invariant");
johnc@2190 2804 assert(mr.word_size() > 0, "invariant");
johnc@2190 2805 HeapRegion* hr = _g1h->heap_region_containing(mr.start());
johnc@2190 2806 assert(hr != NULL, "invariant");
johnc@2190 2807 if (hr->in_collection_set()) {
johnc@2190 2808 // The region points into the collection set
johnc@2190 2809 the_task->set_aborted_region(MemRegion());
johnc@2190 2810 result = true;
johnc@2190 2811 }
johnc@2190 2812 }
johnc@2190 2813 }
johnc@2190 2814 return result;
johnc@2190 2815 }
johnc@2190 2816
johnc@2190 2817 bool ConcurrentMark::has_aborted_regions() {
johnc@2190 2818 for (int i = 0; i < (int)_max_task_num; ++i) {
johnc@2190 2819 CMTask* the_task = _tasks[i];
johnc@2190 2820 MemRegion mr = the_task->aborted_region();
johnc@2190 2821 if (mr.start() != NULL) {
johnc@2190 2822 assert(mr.end() != NULL, "invariant");
johnc@2190 2823 assert(mr.word_size() > 0, "invariant");
johnc@2190 2824 return true;
johnc@2190 2825 }
johnc@2190 2826 }
johnc@2190 2827 return false;
johnc@2190 2828 }
johnc@2190 2829
ysr@777 2830 void ConcurrentMark::oops_do(OopClosure* cl) {
tonyp@2973 2831 if (_markStack.size() > 0 && verbose_low()) {
ysr@777 2832 gclog_or_tty->print_cr("[global] scanning the global marking stack, "
ysr@777 2833 "size = %d", _markStack.size());
tonyp@2973 2834 }
ysr@777 2835 // we first iterate over the contents of the mark stack...
ysr@777 2836 _markStack.oops_do(cl);
ysr@777 2837
ysr@777 2838 for (int i = 0; i < (int)_max_task_num; ++i) {
ysr@777 2839 OopTaskQueue* queue = _task_queues->queue((int)i);
ysr@777 2840
tonyp@2973 2841 if (queue->size() > 0 && verbose_low()) {
ysr@777 2842 gclog_or_tty->print_cr("[global] scanning task queue of task %d, "
ysr@777 2843 "size = %d", i, queue->size());
tonyp@2973 2844 }
ysr@777 2845
ysr@777 2846     // ...then over the contents of all the task queues.
ysr@777 2847 queue->oops_do(cl);
ysr@777 2848 }
tonyp@3416 2849 }
tonyp@3416 2850
tonyp@3416 2851 #ifndef PRODUCT
tonyp@3416 2852 enum VerifyNoCSetOopsPhase {
tonyp@3416 2853 VerifyNoCSetOopsStack,
tonyp@3416 2854 VerifyNoCSetOopsQueues,
tonyp@3416 2855 VerifyNoCSetOopsSATBCompleted,
tonyp@3416 2856 VerifyNoCSetOopsSATBThread
tonyp@3416 2857 };
tonyp@3416 2858
tonyp@3416 2859 class VerifyNoCSetOopsClosure : public OopClosure, public ObjectClosure {
tonyp@3416 2860 private:
tonyp@3416 2861 G1CollectedHeap* _g1h;
tonyp@3416 2862 VerifyNoCSetOopsPhase _phase;
tonyp@3416 2863 int _info;
tonyp@3416 2864
tonyp@3416 2865 const char* phase_str() {
tonyp@3416 2866 switch (_phase) {
tonyp@3416 2867 case VerifyNoCSetOopsStack: return "Stack";
tonyp@3416 2868 case VerifyNoCSetOopsQueues: return "Queue";
tonyp@3416 2869 case VerifyNoCSetOopsSATBCompleted: return "Completed SATB Buffers";
tonyp@3416 2870 case VerifyNoCSetOopsSATBThread: return "Thread SATB Buffers";
tonyp@3416 2871 default: ShouldNotReachHere();
tonyp@3416 2872 }
tonyp@3416 2873 return NULL;
ysr@777 2874 }
johnc@2190 2875
tonyp@3416 2876 void do_object_work(oop obj) {
tonyp@3416 2877 guarantee(!_g1h->obj_in_cs(obj),
tonyp@3416 2878 err_msg("obj: "PTR_FORMAT" in CSet, phase: %s, info: %d",
tonyp@3416 2879 (void*) obj, phase_str(), _info));
johnc@2190 2880 }
johnc@2190 2881
tonyp@3416 2882 public:
tonyp@3416 2883 VerifyNoCSetOopsClosure() : _g1h(G1CollectedHeap::heap()) { }
tonyp@3416 2884
tonyp@3416 2885 void set_phase(VerifyNoCSetOopsPhase phase, int info = -1) {
tonyp@3416 2886 _phase = phase;
tonyp@3416 2887 _info = info;
tonyp@3416 2888 }
tonyp@3416 2889
tonyp@3416 2890 virtual void do_oop(oop* p) {
tonyp@3416 2891 oop obj = oopDesc::load_decode_heap_oop(p);
tonyp@3416 2892 do_object_work(obj);
tonyp@3416 2893 }
tonyp@3416 2894
tonyp@3416 2895 virtual void do_oop(narrowOop* p) {
tonyp@3416 2896 // We should not come across narrow oops while scanning marking
tonyp@3416 2897 // stacks and SATB buffers.
tonyp@3416 2898 ShouldNotReachHere();
tonyp@3416 2899 }
tonyp@3416 2900
tonyp@3416 2901 virtual void do_object(oop obj) {
tonyp@3416 2902 do_object_work(obj);
tonyp@3416 2903 }
tonyp@3416 2904 };
tonyp@3416 2905
tonyp@3416 2906 void ConcurrentMark::verify_no_cset_oops(bool verify_stacks,
tonyp@3416 2907 bool verify_enqueued_buffers,
tonyp@3416 2908 bool verify_thread_buffers,
tonyp@3416 2909 bool verify_fingers) {
tonyp@3416 2910 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
tonyp@3416 2911 if (!G1CollectedHeap::heap()->mark_in_progress()) {
tonyp@3416 2912 return;
tonyp@3416 2913 }
tonyp@3416 2914
tonyp@3416 2915 VerifyNoCSetOopsClosure cl;
tonyp@3416 2916
tonyp@3416 2917 if (verify_stacks) {
tonyp@3416 2918 // Verify entries on the global mark stack
tonyp@3416 2919 cl.set_phase(VerifyNoCSetOopsStack);
tonyp@3416 2920 _markStack.oops_do(&cl);
tonyp@3416 2921
tonyp@3416 2922 // Verify entries on the task queues
tonyp@3416 2923 for (int i = 0; i < (int) _max_task_num; i += 1) {
tonyp@3416 2924 cl.set_phase(VerifyNoCSetOopsQueues, i);
tonyp@3416 2925 OopTaskQueue* queue = _task_queues->queue(i);
tonyp@3416 2926 queue->oops_do(&cl);
tonyp@3416 2927 }
tonyp@3416 2928 }
tonyp@3416 2929
tonyp@3416 2930 SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
tonyp@3416 2931
tonyp@3416 2932 // Verify entries on the enqueued SATB buffers
tonyp@3416 2933 if (verify_enqueued_buffers) {
tonyp@3416 2934 cl.set_phase(VerifyNoCSetOopsSATBCompleted);
tonyp@3416 2935 satb_qs.iterate_completed_buffers_read_only(&cl);
tonyp@3416 2936 }
tonyp@3416 2937
tonyp@3416 2938 // Verify entries on the per-thread SATB buffers
tonyp@3416 2939 if (verify_thread_buffers) {
tonyp@3416 2940 cl.set_phase(VerifyNoCSetOopsSATBThread);
tonyp@3416 2941 satb_qs.iterate_thread_buffers_read_only(&cl);
tonyp@3416 2942 }
tonyp@3416 2943
tonyp@3416 2944 if (verify_fingers) {
tonyp@3416 2945 // Verify the global finger
tonyp@3416 2946 HeapWord* global_finger = finger();
tonyp@3416 2947 if (global_finger != NULL && global_finger < _heap_end) {
tonyp@3416 2948 // The global finger always points to a heap region boundary. We
tonyp@3416 2949 // use heap_region_containing_raw() to get the containing region
tonyp@3416 2950 // given that the global finger could be pointing to a free region
tonyp@3416 2951 // which subsequently becomes continues humongous. If that
tonyp@3416 2952 // happens, heap_region_containing() will return the bottom of the
tonyp@3416 2953 // corresponding starts humongous region and the check below will
tonyp@3416 2954 // not hold any more.
tonyp@3416 2955 HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger);
tonyp@3416 2956 guarantee(global_finger == global_hr->bottom(),
tonyp@3416 2957 err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT,
tonyp@3416 2958 global_finger, HR_FORMAT_PARAMS(global_hr)));
tonyp@3416 2959 }
tonyp@3416 2960
tonyp@3416 2961 // Verify the task fingers
tonyp@3416 2962 assert(parallel_marking_threads() <= _max_task_num, "sanity");
tonyp@3416 2963 for (int i = 0; i < (int) parallel_marking_threads(); i += 1) {
tonyp@3416 2964 CMTask* task = _tasks[i];
tonyp@3416 2965 HeapWord* task_finger = task->finger();
tonyp@3416 2966 if (task_finger != NULL && task_finger < _heap_end) {
tonyp@3416 2967 // See above note on the global finger verification.
tonyp@3416 2968 HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger);
tonyp@3416 2969 guarantee(task_finger == task_hr->bottom() ||
tonyp@3416 2970 !task_hr->in_collection_set(),
tonyp@3416 2971 err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT,
tonyp@3416 2972 task_finger, HR_FORMAT_PARAMS(task_hr)));
tonyp@3416 2973 }
tonyp@3416 2974 }
tonyp@3416 2975 }
ysr@777 2976 }
tonyp@3416 2977 #endif // PRODUCT
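// --- Editor's sketch (illustration only, not part of the original file) ---
// VerifyNoCSetOopsClosure above reuses a single closure across several
// verification phases and tags each failure with the phase name plus an
// index, so a guarantee() failure immediately identifies *which* data
// structure held the bad oop. The same pattern in miniature, standalone
// and with invented names:

#include <cassert>
#include <cstdio>

namespace illustration {
  enum CheckPhase { CheckStack, CheckQueue };

  struct PhasedChecker {
    CheckPhase phase;
    int        info;

    void set_phase(CheckPhase p, int i = -1) { phase = p; info = i; }

    // in_forbidden_set stands in for the obj_in_cs() predicate.
    void check(const void* p, bool in_forbidden_set) {
      if (in_forbidden_set) {
        fprintf(stderr, "bad pointer %p, phase: %s, info: %d\n",
                p, phase == CheckStack ? "Stack" : "Queue", info);
        assert(false && "pointer in forbidden set");
      }
    }
  };
}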
ysr@777 2978
tonyp@2848 2979 void ConcurrentMark::clear_marking_state(bool clear_overflow) {
ysr@777 2980 _markStack.setEmpty();
ysr@777 2981 _markStack.clear_overflow();
ysr@777 2982 _regionStack.setEmpty();
ysr@777 2983 _regionStack.clear_overflow();
tonyp@2848 2984 if (clear_overflow) {
tonyp@2848 2985 clear_has_overflown();
tonyp@2848 2986 } else {
tonyp@2848 2987 assert(has_overflown(), "pre-condition");
tonyp@2848 2988 }
ysr@777 2989 _finger = _heap_start;
ysr@777 2990
ysr@777 2991 for (int i = 0; i < (int)_max_task_num; ++i) {
ysr@777 2992 OopTaskQueue* queue = _task_queues->queue(i);
ysr@777 2993 queue->set_empty();
johnc@2240 2994 // Clear any partial regions from the CMTasks
johnc@2240 2995 _tasks[i]->clear_aborted_region();
ysr@777 2996 }
ysr@777 2997 }
ysr@777 2998
ysr@777 2999 void ConcurrentMark::print_stats() {
ysr@777 3000 if (verbose_stats()) {
ysr@777 3001 gclog_or_tty->print_cr("---------------------------------------------------------------------");
ysr@777 3002 for (size_t i = 0; i < _active_tasks; ++i) {
ysr@777 3003 _tasks[i]->print_stats();
ysr@777 3004 gclog_or_tty->print_cr("---------------------------------------------------------------------");
ysr@777 3005 }
ysr@777 3006 }
ysr@777 3007 }
ysr@777 3008
johnc@3296 3009 // Closures used by ConcurrentMark::complete_marking_in_collection_set().
johnc@3296 3010
johnc@3296 3011 class CSetMarkOopClosure: public OopClosure {
johnc@3296 3012 friend class CSetMarkBitMapClosure;
ysr@777 3013
ysr@777 3014 G1CollectedHeap* _g1h;
ysr@777 3015 CMBitMap* _bm;
ysr@777 3016 ConcurrentMark* _cm;
ysr@777 3017 oop* _ms;
ysr@777 3018 jint* _array_ind_stack;
ysr@777 3019 int _ms_size;
ysr@777 3020 int _ms_ind;
ysr@777 3021 int _array_increment;
jmasa@3357 3022 uint _worker_id;
ysr@777 3023
ysr@777 3024 bool push(oop obj, int arr_ind = 0) {
ysr@777 3025 if (_ms_ind == _ms_size) {
ysr@777 3026 gclog_or_tty->print_cr("Mark stack is full.");
ysr@777 3027 return false;
ysr@777 3028 }
ysr@777 3029 _ms[_ms_ind] = obj;
tonyp@2973 3030 if (obj->is_objArray()) {
tonyp@2973 3031 _array_ind_stack[_ms_ind] = arr_ind;
tonyp@2973 3032 }
ysr@777 3033 _ms_ind++;
ysr@777 3034 return true;
ysr@777 3035 }
ysr@777 3036
ysr@777 3037 oop pop() {
tonyp@2973 3038 if (_ms_ind == 0) {
tonyp@2973 3039 return NULL;
tonyp@2973 3040 } else {
ysr@777 3041 _ms_ind--;
ysr@777 3042 return _ms[_ms_ind];
ysr@777 3043 }
ysr@777 3044 }
ysr@777 3045
ysr@1280 3046 template <class T> bool drain() {
ysr@777 3047 while (_ms_ind > 0) {
ysr@777 3048 oop obj = pop();
ysr@777 3049 assert(obj != NULL, "Since index was non-zero.");
ysr@777 3050 if (obj->is_objArray()) {
ysr@777 3051 jint arr_ind = _array_ind_stack[_ms_ind];
ysr@777 3052 objArrayOop aobj = objArrayOop(obj);
ysr@777 3053 jint len = aobj->length();
ysr@777 3054 jint next_arr_ind = arr_ind + _array_increment;
ysr@777 3055 if (next_arr_ind < len) {
ysr@777 3056 push(obj, next_arr_ind);
ysr@777 3057 }
ysr@777 3058         // Now process this portion of the array.
ysr@777 3059 int lim = MIN2(next_arr_ind, len);
ysr@777 3060 for (int j = arr_ind; j < lim; j++) {
apetrusenko@1347 3061 do_oop(aobj->objArrayOopDesc::obj_at_addr<T>(j));
ysr@777 3062 }
ysr@777 3063 } else {
ysr@777 3064 obj->oop_iterate(this);
ysr@777 3065 }
ysr@777 3066 if (abort()) return false;
ysr@777 3067 }
ysr@777 3068 return true;
ysr@777 3069 }
ysr@777 3070
ysr@777 3071 public:
jmasa@3357 3072 CSetMarkOopClosure(ConcurrentMark* cm, int ms_size, uint worker_id) :
ysr@777 3073 _g1h(G1CollectedHeap::heap()),
ysr@777 3074 _cm(cm),
ysr@777 3075 _bm(cm->nextMarkBitMap()),
ysr@777 3076 _ms_size(ms_size), _ms_ind(0),
ysr@777 3077 _ms(NEW_C_HEAP_ARRAY(oop, ms_size)),
ysr@777 3078 _array_ind_stack(NEW_C_HEAP_ARRAY(jint, ms_size)),
johnc@3296 3079 _array_increment(MAX2(ms_size/8, 16)),
jmasa@3357 3080 _worker_id(worker_id) { }
johnc@3296 3081
johnc@3296 3082 ~CSetMarkOopClosure() {
ysr@777 3083 FREE_C_HEAP_ARRAY(oop, _ms);
ysr@777 3084 FREE_C_HEAP_ARRAY(jint, _array_ind_stack);
ysr@777 3085 }
ysr@777 3086
ysr@1280 3087 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
ysr@1280 3088 virtual void do_oop( oop* p) { do_oop_work(p); }
ysr@1280 3089
ysr@1280 3090 template <class T> void do_oop_work(T* p) {
ysr@1280 3091 T heap_oop = oopDesc::load_heap_oop(p);
ysr@1280 3092 if (oopDesc::is_null(heap_oop)) return;
ysr@1280 3093 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
ysr@777 3094 if (obj->is_forwarded()) {
ysr@777 3095 // If the object has already been forwarded, we have to make sure
ysr@777 3096 // that it's marked. So follow the forwarding pointer. Note that
ysr@777 3097 // this does the right thing for self-forwarding pointers in the
ysr@777 3098 // evacuation failure case.
ysr@777 3099 obj = obj->forwardee();
ysr@777 3100 }
ysr@777 3101 HeapRegion* hr = _g1h->heap_region_containing(obj);
ysr@777 3102 if (hr != NULL) {
ysr@777 3103 if (hr->in_collection_set()) {
ysr@777 3104 if (_g1h->is_obj_ill(obj)) {
johnc@3296 3105 if (_bm->parMark((HeapWord*)obj)) {
johnc@3296 3106 if (!push(obj)) {
johnc@3296 3107 gclog_or_tty->print_cr("Setting abort in CSetMarkOopClosure because push failed.");
johnc@3296 3108 set_abort();
johnc@3296 3109 }
ysr@777 3110 }
ysr@777 3111 }
ysr@777 3112 } else {
ysr@777 3113 // Outside the collection set; we need to gray it
ysr@777 3114 _cm->deal_with_reference(obj);
ysr@777 3115 }
ysr@777 3116 }
ysr@777 3117 }
ysr@777 3118 };
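// --- Editor's sketch (illustration only, not part of the original file) ---
// The push()/pop()/drain() trio above bounds the work done per object
// array by re-pushing large arrays together with a saved index, so one
// huge objArray cannot monopolize the mark stack or the scanning loop.
// A standalone model of that chunking discipline (INCREMENT mirrors the
// MAX2(ms_size/8, 16) floor above; all names are invented):

#include <algorithm>
#include <utility>
#include <vector>

namespace illustration {
  struct ArrayChunker {
    static const int INCREMENT = 16;          // slots scanned per visit
    std::vector<std::pair<int, int> > stack;  // (array id, start index)

    void push(int array_id, int start = 0) {
      stack.push_back(std::make_pair(array_id, start));
    }

    // visit(array_id, j) stands in for do_oop on one slot of one array.
    template <class Visit>
    void drain(const std::vector<int>& array_lengths, Visit visit) {
      while (!stack.empty()) {
        std::pair<int, int> top = stack.back();
        stack.pop_back();
        int len  = array_lengths[top.first];
        int next = top.second + INCREMENT;
        if (next < len) {
          push(top.first, next);              // the rest of the array, later
        }
        int lim = std::min(next, len);
        for (int j = top.second; j < lim; j++) {
          visit(top.first, j);                // scan just this portion now
        }
      }
    }
  };
}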
ysr@777 3119
johnc@3296 3120 class CSetMarkBitMapClosure: public BitMapClosure {
johnc@3296 3121 G1CollectedHeap* _g1h;
johnc@3296 3122 CMBitMap* _bitMap;
johnc@3296 3123 ConcurrentMark* _cm;
johnc@3296 3124 CSetMarkOopClosure _oop_cl;
jmasa@3357 3125 uint _worker_id;
johnc@3296 3126
ysr@777 3127 public:
jmasa@3357 3128   CSetMarkBitMapClosure(ConcurrentMark* cm, int ms_size, uint worker_id) :
ysr@777 3129 _g1h(G1CollectedHeap::heap()),
ysr@777 3130 _bitMap(cm->nextMarkBitMap()),
jmasa@3357 3131 _oop_cl(cm, ms_size, worker_id),
jmasa@3357 3132 _worker_id(worker_id) { }
ysr@777 3133
ysr@777 3134 bool do_bit(size_t offset) {
ysr@777 3135 // convert offset into a HeapWord*
ysr@777 3136 HeapWord* addr = _bitMap->offsetToHeapWord(offset);
ysr@777 3137     assert(addr >= _bitMap->startWord() && addr < _bitMap->endWord(),
ysr@777 3138 "address out of range");
ysr@777 3139 assert(_bitMap->isMarked(addr), "tautology");
ysr@777 3140 oop obj = oop(addr);
ysr@777 3141 if (!obj->is_forwarded()) {
ysr@777 3142 if (!_oop_cl.push(obj)) return false;
ysr@1280 3143 if (UseCompressedOops) {
ysr@1280 3144 if (!_oop_cl.drain<narrowOop>()) return false;
ysr@1280 3145 } else {
ysr@1280 3146 if (!_oop_cl.drain<oop>()) return false;
ysr@1280 3147 }
ysr@777 3148 }
ysr@777 3149 // Otherwise...
ysr@777 3150 return true;
ysr@777 3151 }
ysr@777 3152 };
ysr@777 3153
johnc@3296 3154 class CompleteMarkingInCSetHRClosure: public HeapRegionClosure {
johnc@3296 3155 CMBitMap* _bm;
johnc@3296 3156 CSetMarkBitMapClosure _bit_cl;
jmasa@3357 3157 uint _worker_id;
johnc@3296 3158
ysr@777 3159 enum SomePrivateConstants {
ysr@777 3160 MSSize = 1000
ysr@777 3161 };
johnc@3296 3162
ysr@777 3163 public:
jmasa@3357 3164   CompleteMarkingInCSetHRClosure(ConcurrentMark* cm, uint worker_id) :
ysr@777 3165 _bm(cm->nextMarkBitMap()),
jmasa@3357 3166 _bit_cl(cm, MSSize, worker_id),
jmasa@3357 3167 _worker_id(worker_id) { }
johnc@3296 3168
johnc@3296 3169 bool doHeapRegion(HeapRegion* hr) {
johnc@3296 3170 if (hr->claimHeapRegion(HeapRegion::CompleteMarkCSetClaimValue)) {
johnc@3296 3171 // The current worker has successfully claimed the region.
johnc@3296 3172 if (!hr->evacuation_failed()) {
johnc@3296 3173 MemRegion mr = MemRegion(hr->bottom(), hr->next_top_at_mark_start());
johnc@3296 3174 if (!mr.is_empty()) {
johnc@3296 3175 bool done = false;
johnc@3296 3176 while (!done) {
johnc@3296 3177 done = _bm->iterate(&_bit_cl, mr);
johnc@3296 3178 }
ysr@777 3179 }
ysr@777 3180 }
ysr@777 3181 }
ysr@777 3182 return false;
ysr@777 3183 }
ysr@777 3184 };
ysr@777 3185
johnc@3296 3186 class G1ParCompleteMarkInCSetTask: public AbstractGangTask {
johnc@3296 3187 protected:
johnc@3296 3188 G1CollectedHeap* _g1h;
johnc@3296 3189 ConcurrentMark* _cm;
johnc@3296 3190
johnc@3296 3191 public:
johnc@3296 3192 G1ParCompleteMarkInCSetTask(G1CollectedHeap* g1h,
johnc@3296 3193 ConcurrentMark* cm) :
johnc@3296 3194 AbstractGangTask("Complete Mark in CSet"),
johnc@3296 3195 _g1h(g1h), _cm(cm) { }
johnc@3296 3196
jmasa@3357 3197 void work(uint worker_id) {
jmasa@3357 3198 CompleteMarkingInCSetHRClosure cmplt(_cm, worker_id);
jmasa@3357 3199 HeapRegion* hr = _g1h->start_cset_region_for_worker(worker_id);
johnc@3296 3200 _g1h->collection_set_iterate_from(hr, &cmplt);
johnc@3296 3201 }
johnc@3296 3202 };
johnc@3296 3203
ysr@777 3204 void ConcurrentMark::complete_marking_in_collection_set() {
tonyp@3416 3205 guarantee(false, "complete_marking_in_collection_set(): "
tonyp@3416 3206 "don't call this any more");
tonyp@3416 3207
ysr@777 3208 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@777 3209
ysr@777 3210 if (!g1h->mark_in_progress()) {
ysr@777 3211 g1h->g1_policy()->record_mark_closure_time(0.0);
ysr@777 3212 return;
ysr@777 3213 }
ysr@777 3214
ysr@777 3215 double start = os::elapsedTime();
johnc@3296 3216 G1ParCompleteMarkInCSetTask complete_mark_task(g1h, this);
johnc@3296 3217
johnc@3296 3218 assert(g1h->check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
johnc@3296 3219
johnc@3296 3220 if (G1CollectedHeap::use_parallel_gc_threads()) {
johnc@3338 3221 int n_workers = g1h->workers()->active_workers();
johnc@3296 3222 g1h->set_par_threads(n_workers);
johnc@3296 3223 g1h->workers()->run_task(&complete_mark_task);
johnc@3296 3224 g1h->set_par_threads(0);
johnc@3296 3225 } else {
johnc@3296 3226 complete_mark_task.work(0);
ysr@777 3227 }
johnc@3292 3228
johnc@3296 3229 assert(g1h->check_cset_heap_region_claim_values(HeapRegion::CompleteMarkCSetClaimValue), "sanity");
johnc@3296 3230
johnc@3412 3231 // Reset the claim values in the regions in the collection set.
johnc@3412 3232 g1h->reset_cset_heap_region_claim_values();
johnc@3296 3233
johnc@3296 3234 assert(g1h->check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
johnc@3292 3235
ysr@777 3236 double end_time = os::elapsedTime();
ysr@777 3237 double elapsed_time_ms = (end_time - start) * 1000.0;
ysr@777 3238 g1h->g1_policy()->record_mark_closure_time(elapsed_time_ms);
ysr@777 3239 }
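// --- Editor's sketch (illustration only, not part of the original file) ---
// The task above parallelizes over collection-set regions by having each
// worker CAS a per-region "claim value": whoever flips a region from the
// initial value to the phase's value owns it, and everyone else skips it;
// the table is reset between phases. In the real code the claim value
// lives in the HeapRegion itself; this standalone model uses a side table
// and C++11 atomics, with invented names:

#include <atomic>
#include <cstddef>

namespace illustration {
  enum { InitialClaim = 0, CompleteMarkClaim = 1 };

  const size_t NUM_REGIONS = 1024;             // fixed for the sketch
  static std::atomic<int> region_claims[NUM_REGIONS];

  // Returns true iff the calling worker is the unique claimant of region i.
  static bool claim_region_for_phase(size_t i, int phase_value) {
    int expected = InitialClaim;
    return region_claims[i].compare_exchange_strong(expected, phase_value);
  }

  // Between phases the table must be reset, analogously to
  // reset_cset_heap_region_claim_values() above.
  static void reset_claims() {
    for (size_t i = 0; i < NUM_REGIONS; i++) {
      region_claims[i].store(InitialClaim);
    }
  }
}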
ysr@777 3240
ysr@777 3241 // The next two methods deal with the following optimisation. Some
ysr@777 3242 // objects are gray by being marked and located above the finger. If
ysr@777 3243 // they are copied, during an evacuation pause, below the finger then
ysr@777 3244 // they need to be pushed on the stack. The observation is that, if
ysr@777 3245 // there are no regions in the collection set located above the
ysr@777 3246 // finger, then the above cannot happen, hence we do not need to
ysr@777 3247 // explicitly gray any objects when copying them to below the
ysr@777 3248 // finger. The global stack will be scanned to ensure that, if it
ysr@777 3249 // points to objects being copied, it will update their
ysr@777 3250 // location. There is a tricky situation with the gray objects in
ysr@777 3251 // the region stack that are being copied, however. See the comment in
ysr@777 3252 // newCSet().
ysr@777 3253
ysr@777 3254 void ConcurrentMark::newCSet() {
tonyp@3416 3255 guarantee(false, "newCSet(): don't call this any more");
tonyp@3416 3256
tonyp@2973 3257 if (!concurrent_marking_in_progress()) {
ysr@777 3258 // nothing to do if marking is not in progress
ysr@777 3259 return;
tonyp@2973 3260 }
ysr@777 3261
ysr@777 3262 // find what the lowest finger is among the global and local fingers
ysr@777 3263 _min_finger = _finger;
ysr@777 3264 for (int i = 0; i < (int)_max_task_num; ++i) {
ysr@777 3265 CMTask* task = _tasks[i];
ysr@777 3266 HeapWord* task_finger = task->finger();
tonyp@2973 3267 if (task_finger != NULL && task_finger < _min_finger) {
ysr@777 3268 _min_finger = task_finger;
tonyp@2973 3269 }
ysr@777 3270 }
ysr@777 3271
ysr@777 3272 _should_gray_objects = false;
ysr@777 3273
ysr@777 3274   // This fixes a very subtle and frustrating bug. It might be the case
ysr@777 3275   // that, during an evacuation pause, heap regions that contain
ysr@777 3276 // objects that are gray (by being in regions contained in the
ysr@777 3277 // region stack) are included in the collection set. Since such gray
ysr@777 3278 // objects will be moved, and because it's not easy to redirect
ysr@777 3279 // region stack entries to point to a new location (because objects
ysr@777 3280 // in one region might be scattered to multiple regions after they
ysr@777 3281 // are copied), one option is to ensure that all marked objects
ysr@777 3282 // copied during a pause are pushed on the stack. Notice, however,
ysr@777 3283 // that this problem can only happen when the region stack is not
ysr@777 3284 // empty during an evacuation pause. So, we make the fix a bit less
ysr@777 3285 // conservative and ensure that regions are pushed on the stack,
ysr@777 3286   // irrespective of whether all collection set regions are below the
ysr@777 3287 // finger, if the region stack is not empty. This is expected to be
ysr@777 3288   // a rare case, so I don't think it's necessary to be smarter about it.
tonyp@2973 3289 if (!region_stack_empty() || has_aborted_regions()) {
ysr@777 3290 _should_gray_objects = true;
tonyp@2973 3291 }
ysr@777 3292 }
ysr@777 3293
ysr@777 3294 void ConcurrentMark::registerCSetRegion(HeapRegion* hr) {
tonyp@3416 3295 guarantee(false, "registerCSetRegion(): don't call this any more");
tonyp@3416 3296
tonyp@2973 3297 if (!concurrent_marking_in_progress()) return;
ysr@777 3298
ysr@777 3299 HeapWord* region_end = hr->end();
tonyp@2973 3300 if (region_end > _min_finger) {
ysr@777 3301 _should_gray_objects = true;
tonyp@2973 3302 }
ysr@777 3303 }
ysr@777 3304
johnc@2910 3305 // Resets the region fields of active CMTasks whose values point
johnc@2910 3306 // into the collection set.
johnc@2910 3307 void ConcurrentMark::reset_active_task_region_fields_in_cset() {
tonyp@3416 3308 guarantee(false, "reset_active_task_region_fields_in_cset(): "
tonyp@3416 3309 "don't call this any more");
tonyp@3416 3310
johnc@2910 3311 assert(SafepointSynchronize::is_at_safepoint(), "should be in STW");
johnc@2910 3312 assert(parallel_marking_threads() <= _max_task_num, "sanity");
johnc@2910 3313
johnc@2910 3314 for (int i = 0; i < (int)parallel_marking_threads(); i += 1) {
johnc@2910 3315 CMTask* task = _tasks[i];
johnc@2910 3316 HeapWord* task_finger = task->finger();
johnc@2910 3317 if (task_finger != NULL) {
johnc@2910 3318 assert(_g1h->is_in_g1_reserved(task_finger), "not in heap");
johnc@2910 3319 HeapRegion* finger_region = _g1h->heap_region_containing(task_finger);
johnc@2910 3320 if (finger_region->in_collection_set()) {
johnc@2910 3321 // The task's current region is in the collection set.
johnc@2910 3322 // This region will be evacuated in the current GC and
johnc@2910 3323 // the region fields in the task will be stale.
johnc@2910 3324 task->giveup_current_region();
johnc@2910 3325 }
johnc@2910 3326 }
johnc@2910 3327 }
johnc@2910 3328 }
johnc@2910 3329
ysr@777 3330 // abandon current marking iteration due to a Full GC
ysr@777 3331 void ConcurrentMark::abort() {
ysr@777 3332 // Clear all marks to force marking thread to do nothing
ysr@777 3333 _nextMarkBitMap->clearAll();
ysr@777 3334 // Empty mark stack
ysr@777 3335 clear_marking_state();
johnc@2190 3336 for (int i = 0; i < (int)_max_task_num; ++i) {
ysr@777 3337 _tasks[i]->clear_region_fields();
johnc@2190 3338 }
ysr@777 3339 _has_aborted = true;
ysr@777 3340
ysr@777 3341 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
ysr@777 3342 satb_mq_set.abandon_partial_marking();
tonyp@1752 3343 // This can be called either during or outside marking, we'll read
tonyp@1752 3344 // the expected_active value from the SATB queue set.
tonyp@1752 3345 satb_mq_set.set_active_all_threads(
tonyp@1752 3346 false, /* new active value */
tonyp@1752 3347 satb_mq_set.is_active() /* expected_active */);
ysr@777 3348 }
ysr@777 3349
ysr@777 3350 static void print_ms_time_info(const char* prefix, const char* name,
ysr@777 3351 NumberSeq& ns) {
ysr@777 3352 gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
ysr@777 3353 prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
ysr@777 3354 if (ns.num() > 0) {
ysr@777 3355 gclog_or_tty->print_cr("%s [std. dev = %8.2f ms, max = %8.2f ms]",
ysr@777 3356 prefix, ns.sd(), ns.maximum());
ysr@777 3357 }
ysr@777 3358 }
ysr@777 3359
ysr@777 3360 void ConcurrentMark::print_summary_info() {
ysr@777 3361 gclog_or_tty->print_cr(" Concurrent marking:");
ysr@777 3362 print_ms_time_info(" ", "init marks", _init_times);
ysr@777 3363 print_ms_time_info(" ", "remarks", _remark_times);
ysr@777 3364 {
ysr@777 3365 print_ms_time_info(" ", "final marks", _remark_mark_times);
ysr@777 3366 print_ms_time_info(" ", "weak refs", _remark_weak_ref_times);
ysr@777 3367
ysr@777 3368 }
ysr@777 3369 print_ms_time_info(" ", "cleanups", _cleanup_times);
ysr@777 3370 gclog_or_tty->print_cr(" Final counting total time = %8.2f s (avg = %8.2f ms).",
ysr@777 3371 _total_counting_time,
ysr@777 3372 (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 /
ysr@777 3373 (double)_cleanup_times.num()
ysr@777 3374 : 0.0));
ysr@777 3375 if (G1ScrubRemSets) {
ysr@777 3376 gclog_or_tty->print_cr(" RS scrub total time = %8.2f s (avg = %8.2f ms).",
ysr@777 3377 _total_rs_scrub_time,
ysr@777 3378 (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 /
ysr@777 3379 (double)_cleanup_times.num()
ysr@777 3380 : 0.0));
ysr@777 3381 }
ysr@777 3382 gclog_or_tty->print_cr(" Total stop_world time = %8.2f s.",
ysr@777 3383 (_init_times.sum() + _remark_times.sum() +
ysr@777 3384 _cleanup_times.sum())/1000.0);
ysr@777 3385 gclog_or_tty->print_cr(" Total concurrent time = %8.2f s "
ysr@777 3386 "(%8.2f s marking, %8.2f s counting).",
ysr@777 3387 cmThread()->vtime_accum(),
ysr@777 3388 cmThread()->vtime_mark_accum(),
ysr@777 3389 cmThread()->vtime_count_accum());
ysr@777 3390 }
ysr@777 3391
tonyp@1454 3392 void ConcurrentMark::print_worker_threads_on(outputStream* st) const {
tonyp@1454 3393 _parallel_workers->print_worker_threads_on(st);
tonyp@1454 3394 }
tonyp@1454 3395
ysr@777 3396 // We take a break if someone is trying to stop the world.
jmasa@3357 3397 bool ConcurrentMark::do_yield_check(uint worker_id) {
ysr@777 3398 if (should_yield()) {
jmasa@3357 3399 if (worker_id == 0) {
ysr@777 3400 _g1h->g1_policy()->record_concurrent_pause();
tonyp@2973 3401 }
ysr@777 3402 cmThread()->yield();
jmasa@3357 3403 if (worker_id == 0) {
ysr@777 3404 _g1h->g1_policy()->record_concurrent_pause_end();
tonyp@2973 3405 }
ysr@777 3406 return true;
ysr@777 3407 } else {
ysr@777 3408 return false;
ysr@777 3409 }
ysr@777 3410 }
ysr@777 3411
ysr@777 3412 bool ConcurrentMark::should_yield() {
ysr@777 3413 return cmThread()->should_yield();
ysr@777 3414 }
ysr@777 3415
ysr@777 3416 bool ConcurrentMark::containing_card_is_marked(void* p) {
ysr@777 3417 size_t offset = pointer_delta(p, _g1h->reserved_region().start(), 1);
ysr@777 3418 return _card_bm.at(offset >> CardTableModRefBS::card_shift);
ysr@777 3419 }
ysr@777 3420
ysr@777 3421 bool ConcurrentMark::containing_cards_are_marked(void* start,
ysr@777 3422 void* last) {
tonyp@2973 3423 return containing_card_is_marked(start) &&
tonyp@2973 3424 containing_card_is_marked(last);
ysr@777 3425 }
ysr@777 3426
ysr@777 3427 #ifndef PRODUCT
ysr@777 3428 // for debugging purposes
ysr@777 3429 void ConcurrentMark::print_finger() {
ysr@777 3430 gclog_or_tty->print_cr("heap ["PTR_FORMAT", "PTR_FORMAT"), global finger = "PTR_FORMAT,
ysr@777 3431 _heap_start, _heap_end, _finger);
ysr@777 3432 for (int i = 0; i < (int) _max_task_num; ++i) {
ysr@777 3433 gclog_or_tty->print(" %d: "PTR_FORMAT, i, _tasks[i]->finger());
ysr@777 3434 }
ysr@777 3435 gclog_or_tty->print_cr("");
ysr@777 3436 }
ysr@777 3437 #endif
ysr@777 3438
tonyp@2968 3439 void CMTask::scan_object(oop obj) {
tonyp@2968 3440 assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant");
tonyp@2968 3441
tonyp@2968 3442 if (_cm->verbose_high()) {
tonyp@2968 3443 gclog_or_tty->print_cr("[%d] we're scanning object "PTR_FORMAT,
tonyp@2968 3444 _task_id, (void*) obj);
tonyp@2968 3445 }
tonyp@2968 3446
tonyp@2968 3447 size_t obj_size = obj->size();
tonyp@2968 3448 _words_scanned += obj_size;
tonyp@2968 3449
tonyp@2968 3450 obj->oop_iterate(_cm_oop_closure);
tonyp@2968 3451 statsOnly( ++_objs_scanned );
tonyp@2968 3452 check_limits();
tonyp@2968 3453 }
tonyp@2968 3454
ysr@777 3455 // Closure for iteration over bitmaps
ysr@777 3456 class CMBitMapClosure : public BitMapClosure {
ysr@777 3457 private:
ysr@777 3458 // the bitmap that is being iterated over
ysr@777 3459 CMBitMap* _nextMarkBitMap;
ysr@777 3460 ConcurrentMark* _cm;
ysr@777 3461 CMTask* _task;
ysr@777 3462 // true if we're scanning a heap region claimed by the task (so that
ysr@777 3463 // we move the finger along), false if we're not, i.e. currently when
ysr@777 3464 // scanning a heap region popped from the region stack (so that we
ysr@777 3465 // do not move the task finger along; it'd be a mistake if we did so).
ysr@777 3466 bool _scanning_heap_region;
ysr@777 3467
ysr@777 3468 public:
ysr@777 3469 CMBitMapClosure(CMTask *task,
ysr@777 3470 ConcurrentMark* cm,
ysr@777 3471 CMBitMap* nextMarkBitMap)
ysr@777 3472 : _task(task), _cm(cm), _nextMarkBitMap(nextMarkBitMap) { }
ysr@777 3473
ysr@777 3474 void set_scanning_heap_region(bool scanning_heap_region) {
ysr@777 3475 _scanning_heap_region = scanning_heap_region;
ysr@777 3476 }
ysr@777 3477
ysr@777 3478 bool do_bit(size_t offset) {
ysr@777 3479 HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset);
tonyp@1458 3480 assert(_nextMarkBitMap->isMarked(addr), "invariant");
tonyp@1458 3481 assert( addr < _cm->finger(), "invariant");
ysr@777 3482
ysr@777 3483 if (_scanning_heap_region) {
ysr@777 3484 statsOnly( _task->increase_objs_found_on_bitmap() );
tonyp@1458 3485 assert(addr >= _task->finger(), "invariant");
ysr@777 3486       // We move the task's local finger along.
ysr@777 3487 _task->move_finger_to(addr);
ysr@777 3488 } else {
ysr@777 3489 // We move the task's region finger along.
ysr@777 3490 _task->move_region_finger_to(addr);
ysr@777 3491 }
ysr@777 3492
ysr@777 3493 _task->scan_object(oop(addr));
ysr@777 3494 // we only partially drain the local queue and global stack
ysr@777 3495 _task->drain_local_queue(true);
ysr@777 3496 _task->drain_global_stack(true);
ysr@777 3497
ysr@777 3498 // if the has_aborted flag has been raised, we need to bail out of
ysr@777 3499 // the iteration
ysr@777 3500 return !_task->has_aborted();
ysr@777 3501 }
ysr@777 3502 };
ysr@777 3503
ysr@777 3504 // Closure for iterating over objects, currently only used for
ysr@777 3505 // processing SATB buffers.
ysr@777 3506 class CMObjectClosure : public ObjectClosure {
ysr@777 3507 private:
ysr@777 3508 CMTask* _task;
ysr@777 3509
ysr@777 3510 public:
ysr@777 3511 void do_object(oop obj) {
ysr@777 3512 _task->deal_with_reference(obj);
ysr@777 3513 }
ysr@777 3514
ysr@777 3515 CMObjectClosure(CMTask* task) : _task(task) { }
ysr@777 3516 };
ysr@777 3517
tonyp@2968 3518 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
tonyp@2968 3519 ConcurrentMark* cm,
tonyp@2968 3520 CMTask* task)
tonyp@2968 3521 : _g1h(g1h), _cm(cm), _task(task) {
tonyp@2968 3522 assert(_ref_processor == NULL, "should be initialized to NULL");
tonyp@2968 3523
tonyp@2968 3524 if (G1UseConcMarkReferenceProcessing) {
johnc@3175 3525 _ref_processor = g1h->ref_processor_cm();
tonyp@2968 3526 assert(_ref_processor != NULL, "should not be NULL");
ysr@777 3527 }
tonyp@2968 3528 }
ysr@777 3529
ysr@777 3530 void CMTask::setup_for_region(HeapRegion* hr) {
tonyp@1458 3531 // Separated the asserts so that we know which one fires.
tonyp@1458 3532 assert(hr != NULL,
tonyp@1458 3533 "claim_region() should have filtered out continues humongous regions");
tonyp@1458 3534 assert(!hr->continuesHumongous(),
tonyp@1458 3535 "claim_region() should have filtered out continues humongous regions");
ysr@777 3536
tonyp@2973 3537 if (_cm->verbose_low()) {
ysr@777 3538 gclog_or_tty->print_cr("[%d] setting up for region "PTR_FORMAT,
ysr@777 3539 _task_id, hr);
tonyp@2973 3540 }
ysr@777 3541
ysr@777 3542 _curr_region = hr;
ysr@777 3543 _finger = hr->bottom();
ysr@777 3544 update_region_limit();
ysr@777 3545 }
ysr@777 3546
ysr@777 3547 void CMTask::update_region_limit() {
ysr@777 3548 HeapRegion* hr = _curr_region;
ysr@777 3549 HeapWord* bottom = hr->bottom();
ysr@777 3550 HeapWord* limit = hr->next_top_at_mark_start();
ysr@777 3551
ysr@777 3552 if (limit == bottom) {
tonyp@2973 3553 if (_cm->verbose_low()) {
ysr@777 3554 gclog_or_tty->print_cr("[%d] found an empty region "
ysr@777 3555 "["PTR_FORMAT", "PTR_FORMAT")",
ysr@777 3556 _task_id, bottom, limit);
tonyp@2973 3557 }
ysr@777 3558 // The region was collected underneath our feet.
ysr@777 3559 // We set the finger to bottom to ensure that the bitmap
ysr@777 3560 // iteration that will follow this will not do anything.
ysr@777 3561 // (this is not a condition that holds when we set the region up,
ysr@777 3562 // as the region is not supposed to be empty in the first place)
ysr@777 3563 _finger = bottom;
ysr@777 3564 } else if (limit >= _region_limit) {
tonyp@1458 3565 assert(limit >= _finger, "peace of mind");
ysr@777 3566 } else {
tonyp@1458 3567 assert(limit < _region_limit, "only way to get here");
ysr@777 3568 // This can happen under some pretty unusual circumstances. An
ysr@777 3569 // evacuation pause empties the region underneath our feet (NTAMS
ysr@777 3570 // at bottom). We then do some allocation in the region (NTAMS
ysr@777 3571 // stays at bottom), followed by the region being used as a GC
ysr@777 3572 // alloc region (NTAMS will move to top() and the objects
ysr@777 3573 // originally below it will be grayed). All objects now marked in
ysr@777 3574 // the region are explicitly grayed, if below the global finger,
ysr@777 3575     // and in fact we do not need to scan anything else. So, we simply
ysr@777 3576 // set _finger to be limit to ensure that the bitmap iteration
ysr@777 3577 // doesn't do anything.
ysr@777 3578 _finger = limit;
ysr@777 3579 }
ysr@777 3580
ysr@777 3581 _region_limit = limit;
ysr@777 3582 }
ysr@777 3583
ysr@777 3584 void CMTask::giveup_current_region() {
tonyp@1458 3585 assert(_curr_region != NULL, "invariant");
tonyp@2973 3586 if (_cm->verbose_low()) {
ysr@777 3587 gclog_or_tty->print_cr("[%d] giving up region "PTR_FORMAT,
ysr@777 3588 _task_id, _curr_region);
tonyp@2973 3589 }
ysr@777 3590 clear_region_fields();
ysr@777 3591 }
ysr@777 3592
ysr@777 3593 void CMTask::clear_region_fields() {
ysr@777 3594   // Reset these three fields to values indicating that we're not
ysr@777 3595 // holding on to a region.
ysr@777 3596 _curr_region = NULL;
ysr@777 3597 _finger = NULL;
ysr@777 3598 _region_limit = NULL;
ysr@777 3599
ysr@777 3600 _region_finger = NULL;
ysr@777 3601 }
ysr@777 3602
tonyp@2968 3603 void CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
tonyp@2968 3604 if (cm_oop_closure == NULL) {
tonyp@2968 3605 assert(_cm_oop_closure != NULL, "invariant");
tonyp@2968 3606 } else {
tonyp@2968 3607 assert(_cm_oop_closure == NULL, "invariant");
tonyp@2968 3608 }
tonyp@2968 3609 _cm_oop_closure = cm_oop_closure;
tonyp@2968 3610 }
tonyp@2968 3611
ysr@777 3612 void CMTask::reset(CMBitMap* nextMarkBitMap) {
tonyp@1458 3613 guarantee(nextMarkBitMap != NULL, "invariant");
ysr@777 3614
tonyp@2973 3615 if (_cm->verbose_low()) {
ysr@777 3616 gclog_or_tty->print_cr("[%d] resetting", _task_id);
tonyp@2973 3617 }
ysr@777 3618
ysr@777 3619 _nextMarkBitMap = nextMarkBitMap;
ysr@777 3620 clear_region_fields();
johnc@2240 3621 assert(_aborted_region.is_empty(), "should have been cleared");
ysr@777 3622
ysr@777 3623 _calls = 0;
ysr@777 3624 _elapsed_time_ms = 0.0;
ysr@777 3625 _termination_time_ms = 0.0;
ysr@777 3626 _termination_start_time_ms = 0.0;
ysr@777 3627
ysr@777 3628 #if _MARKING_STATS_
ysr@777 3629 _local_pushes = 0;
ysr@777 3630 _local_pops = 0;
ysr@777 3631 _local_max_size = 0;
ysr@777 3632 _objs_scanned = 0;
ysr@777 3633 _global_pushes = 0;
ysr@777 3634 _global_pops = 0;
ysr@777 3635 _global_max_size = 0;
ysr@777 3636 _global_transfers_to = 0;
ysr@777 3637 _global_transfers_from = 0;
ysr@777 3638 _region_stack_pops = 0;
ysr@777 3639 _regions_claimed = 0;
ysr@777 3640 _objs_found_on_bitmap = 0;
ysr@777 3641 _satb_buffers_processed = 0;
ysr@777 3642 _steal_attempts = 0;
ysr@777 3643 _steals = 0;
ysr@777 3644 _aborted = 0;
ysr@777 3645 _aborted_overflow = 0;
ysr@777 3646 _aborted_cm_aborted = 0;
ysr@777 3647 _aborted_yield = 0;
ysr@777 3648 _aborted_timed_out = 0;
ysr@777 3649 _aborted_satb = 0;
ysr@777 3650 _aborted_termination = 0;
ysr@777 3651 #endif // _MARKING_STATS_
ysr@777 3652 }
ysr@777 3653
ysr@777 3654 bool CMTask::should_exit_termination() {
ysr@777 3655 regular_clock_call();
ysr@777 3656 // This is called when we are in the termination protocol. We should
ysr@777 3657 // quit if, for some reason, this task wants to abort or the global
ysr@777 3658 // stack is not empty (this means that we can get work from it).
ysr@777 3659 return !_cm->mark_stack_empty() || has_aborted();
ysr@777 3660 }
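// --- Editor's sketch (illustration only, not part of the original file) ---
// should_exit_termination() is the "peek" a task performs while parked in
// the termination protocol: leave termination if new global work appeared
// or if this task was asked to abort. A standalone model of a simple
// counter-based terminator using such a peek, assuming C++11 atomics; it
// omits the spin/yield/sleep backoff a real terminator needs, and all
// names are invented:

#include <atomic>

namespace illustration {
  struct Terminator {
    std::atomic<int>  offered;      // tasks currently offering termination
    int               n_tasks;      // total number of tasks
    std::atomic<bool> global_work;  // "the global stack is not empty"

    // Returns true when all tasks agreed to terminate; false when the
    // caller should resume working because its peek found work or an
    // abort request.
    bool offer_termination(const std::atomic<bool>& task_aborted) {
      offered.fetch_add(1);
      for (;;) {
        if (offered.load() == n_tasks) {
          return true;                        // everyone is idle: done
        }
        if (global_work.load() || task_aborted.load()) {
          offered.fetch_sub(1);               // retract the offer
          return false;                       // back to work / abort path
        }
        // A real implementation would spin, yield, or sleep here.
      }
    }
  };
}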
ysr@777 3661
ysr@777 3662 void CMTask::reached_limit() {
tonyp@1458 3663 assert(_words_scanned >= _words_scanned_limit ||
tonyp@1458 3664 _refs_reached >= _refs_reached_limit ,
tonyp@1458 3665 "shouldn't have been called otherwise");
ysr@777 3666 regular_clock_call();
ysr@777 3667 }
ysr@777 3668
ysr@777 3669 void CMTask::regular_clock_call() {
tonyp@2973 3670 if (has_aborted()) return;
ysr@777 3671
ysr@777 3672 // First, we need to recalculate the words scanned and refs reached
ysr@777 3673 // limits for the next clock call.
ysr@777 3674 recalculate_limits();
ysr@777 3675
ysr@777 3676 // During the regular clock call we do the following
ysr@777 3677
ysr@777 3678 // (1) If an overflow has been flagged, then we abort.
ysr@777 3679 if (_cm->has_overflown()) {
ysr@777 3680 set_has_aborted();
ysr@777 3681 return;
ysr@777 3682 }
ysr@777 3683
ysr@777 3684 // If we are not concurrent (i.e. we're doing remark) we don't need
ysr@777 3685 // to check anything else. The other steps are only needed during
ysr@777 3686 // the concurrent marking phase.
tonyp@2973 3687 if (!concurrent()) return;
ysr@777 3688
ysr@777 3689 // (2) If marking has been aborted for Full GC, then we also abort.
ysr@777 3690 if (_cm->has_aborted()) {
ysr@777 3691 set_has_aborted();
ysr@777 3692 statsOnly( ++_aborted_cm_aborted );
ysr@777 3693 return;
ysr@777 3694 }
ysr@777 3695
ysr@777 3696 double curr_time_ms = os::elapsedVTime() * 1000.0;
ysr@777 3697
ysr@777 3698 // (3) If marking stats are enabled, then we update the step history.
ysr@777 3699 #if _MARKING_STATS_
tonyp@2973 3700 if (_words_scanned >= _words_scanned_limit) {
ysr@777 3701 ++_clock_due_to_scanning;
tonyp@2973 3702 }
tonyp@2973 3703 if (_refs_reached >= _refs_reached_limit) {
ysr@777 3704 ++_clock_due_to_marking;
tonyp@2973 3705 }
ysr@777 3706
ysr@777 3707 double last_interval_ms = curr_time_ms - _interval_start_time_ms;
ysr@777 3708 _interval_start_time_ms = curr_time_ms;
ysr@777 3709 _all_clock_intervals_ms.add(last_interval_ms);
ysr@777 3710
ysr@777 3711 if (_cm->verbose_medium()) {
tonyp@2973 3712 gclog_or_tty->print_cr("[%d] regular clock, interval = %1.2lfms, "
tonyp@2973 3713 "scanned = %d%s, refs reached = %d%s",
tonyp@2973 3714 _task_id, last_interval_ms,
tonyp@2973 3715 _words_scanned,
tonyp@2973 3716 (_words_scanned >= _words_scanned_limit) ? " (*)" : "",
tonyp@2973 3717 _refs_reached,
tonyp@2973 3718 (_refs_reached >= _refs_reached_limit) ? " (*)" : "");
ysr@777 3719 }
ysr@777 3720 #endif // _MARKING_STATS_
ysr@777 3721
ysr@777 3722 // (4) We check whether we should yield. If we have to, then we abort.
ysr@777 3723 if (_cm->should_yield()) {
ysr@777 3724 // We should yield. To do this we abort the task. The caller is
ysr@777 3725 // responsible for yielding.
ysr@777 3726 set_has_aborted();
ysr@777 3727 statsOnly( ++_aborted_yield );
ysr@777 3728 return;
ysr@777 3729 }
ysr@777 3730
ysr@777 3731 // (5) We check whether we've reached our time quota. If we have,
ysr@777 3732 // then we abort.
ysr@777 3733 double elapsed_time_ms = curr_time_ms - _start_time_ms;
ysr@777 3734 if (elapsed_time_ms > _time_target_ms) {
ysr@777 3735 set_has_aborted();
johnc@2494 3736 _has_timed_out = true;
ysr@777 3737 statsOnly( ++_aborted_timed_out );
ysr@777 3738 return;
ysr@777 3739 }
ysr@777 3740
ysr@777 3741   // (6) Finally, we check whether there are enough completed SATB
ysr@777 3742 // buffers available for processing. If there are, we abort.
ysr@777 3743 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
ysr@777 3744 if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
tonyp@2973 3745 if (_cm->verbose_low()) {
ysr@777 3746 gclog_or_tty->print_cr("[%d] aborting to deal with pending SATB buffers",
ysr@777 3747 _task_id);
tonyp@2973 3748 }
ysr@777 3749     // we do need to process SATB buffers, so we'll abort and restart
ysr@777 3750 // the marking task to do so
ysr@777 3751 set_has_aborted();
ysr@777 3752 statsOnly( ++_aborted_satb );
ysr@777 3753 return;
ysr@777 3754 }
ysr@777 3755 }
ysr@777 3756
ysr@777 3757 void CMTask::recalculate_limits() {
ysr@777 3758 _real_words_scanned_limit = _words_scanned + words_scanned_period;
ysr@777 3759 _words_scanned_limit = _real_words_scanned_limit;
ysr@777 3760
ysr@777 3761 _real_refs_reached_limit = _refs_reached + refs_reached_period;
ysr@777 3762 _refs_reached_limit = _real_refs_reached_limit;
ysr@777 3763 }
ysr@777 3764
ysr@777 3765 void CMTask::decrease_limits() {
ysr@777 3766 // This is called when we believe that we're going to do an infrequent
ysr@777 3767 // operation which will increase the per byte scanned cost (i.e. move
ysr@777 3768 // entries to/from the global stack). It basically tries to decrease the
ysr@777 3769 // scanning limit so that the clock is called earlier.
ysr@777 3770
tonyp@2973 3771 if (_cm->verbose_medium()) {
ysr@777 3772 gclog_or_tty->print_cr("[%d] decreasing limits", _task_id);
tonyp@2973 3773 }
ysr@777 3774
ysr@777 3775 _words_scanned_limit = _real_words_scanned_limit -
ysr@777 3776 3 * words_scanned_period / 4;
ysr@777 3777 _refs_reached_limit = _real_refs_reached_limit -
ysr@777 3778 3 * refs_reached_period / 4;
ysr@777 3779 }
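// --- Editor's sketch (illustration only, not part of the original file) ---
// The limit machinery above implements a self-clocking loop: the task
// counts words scanned (and refs reached), and calls the "clock" only when
// a budget is exhausted, so the comparatively expensive yield/overflow/SATB
// checks run once per period instead of once per object. decrease_limits()
// pulls the limit back to a quarter period past the period's start, so the
// clock fires sooner after an expensive operation. A standalone model of
// the words-scanned half, with invented names:

#include <cstddef>

namespace illustration {
  const size_t WORDS_PERIOD = 12 * 1024;  // budget per clock period

  struct SelfClock {
    size_t words_scanned;
    size_t real_limit;                    // full-period limit
    size_t limit;                         // possibly shortened limit

    // Mirrors CMTask::recalculate_limits().
    void recalculate() {
      real_limit = words_scanned + WORDS_PERIOD;
      limit      = real_limit;
    }

    // After an expensive operation, bring the next clock call closer,
    // mirroring CMTask::decrease_limits().
    void decrease() {
      limit = real_limit - 3 * WORDS_PERIOD / 4;
    }

    // Called per object; returns true when the periodic checks should run.
    bool tick(size_t obj_words) {
      words_scanned += obj_words;
      return words_scanned >= limit;
    }
  };
}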
ysr@777 3780
ysr@777 3781 void CMTask::move_entries_to_global_stack() {
ysr@777 3782 // local array where we'll store the entries that will be popped
ysr@777 3783 // from the local queue
ysr@777 3784 oop buffer[global_stack_transfer_size];
ysr@777 3785
ysr@777 3786 int n = 0;
ysr@777 3787 oop obj;
ysr@777 3788 while (n < global_stack_transfer_size && _task_queue->pop_local(obj)) {
ysr@777 3789 buffer[n] = obj;
ysr@777 3790 ++n;
ysr@777 3791 }
ysr@777 3792
ysr@777 3793 if (n > 0) {
ysr@777 3794 // we popped at least one entry from the local queue
ysr@777 3795
ysr@777 3796 statsOnly( ++_global_transfers_to; _local_pops += n );
ysr@777 3797
ysr@777 3798 if (!_cm->mark_stack_push(buffer, n)) {
tonyp@2973 3799 if (_cm->verbose_low()) {
tonyp@2973 3800 gclog_or_tty->print_cr("[%d] aborting due to global stack overflow",
tonyp@2973 3801 _task_id);
tonyp@2973 3802 }
ysr@777 3803 set_has_aborted();
ysr@777 3804 } else {
ysr@777 3805 // the transfer was successful
ysr@777 3806
tonyp@2973 3807 if (_cm->verbose_medium()) {
ysr@777 3808 gclog_or_tty->print_cr("[%d] pushed %d entries to the global stack",
ysr@777 3809 _task_id, n);
tonyp@2973 3810 }
ysr@777 3811 statsOnly( int tmp_size = _cm->mark_stack_size();
tonyp@2973 3812 if (tmp_size > _global_max_size) {
ysr@777 3813 _global_max_size = tmp_size;
tonyp@2973 3814 }
ysr@777 3815 _global_pushes += n );
ysr@777 3816 }
ysr@777 3817 }
ysr@777 3818
ysr@777 3819 // this operation was quite expensive, so decrease the limits
ysr@777 3820 decrease_limits();
ysr@777 3821 }
ysr@777 3822
ysr@777 3823 void CMTask::get_entries_from_global_stack() {
ysr@777 3824 // local array where we'll store the entries that will be popped
ysr@777 3825 // from the global stack.
ysr@777 3826 oop buffer[global_stack_transfer_size];
ysr@777 3827 int n;
ysr@777 3828 _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n);
tonyp@1458 3829 assert(n <= global_stack_transfer_size,
tonyp@1458 3830 "we should not pop more than the given limit");
ysr@777 3831 if (n > 0) {
ysr@777 3832 // yes, we did actually pop at least one entry
ysr@777 3833
ysr@777 3834 statsOnly( ++_global_transfers_from; _global_pops += n );
tonyp@2973 3835 if (_cm->verbose_medium()) {
ysr@777 3836 gclog_or_tty->print_cr("[%d] popped %d entries from the global stack",
ysr@777 3837 _task_id, n);
tonyp@2973 3838 }
ysr@777 3839 for (int i = 0; i < n; ++i) {
ysr@777 3840 bool success = _task_queue->push(buffer[i]);
ysr@777 3841 // We only call this when the local queue is empty or under a
ysr@777 3842 // given target limit. So, we do not expect this push to fail.
tonyp@1458 3843 assert(success, "invariant");
ysr@777 3844 }
ysr@777 3845
ysr@777 3846 statsOnly( int tmp_size = _task_queue->size();
tonyp@2973 3847 if (tmp_size > _local_max_size) {
ysr@777 3848 _local_max_size = tmp_size;
tonyp@2973 3849 }
ysr@777 3850 _local_pushes += n );
ysr@777 3851 }
ysr@777 3852
ysr@777 3853 // this operation was quite expensive, so decrease the limits
ysr@777 3854 decrease_limits();
ysr@777 3855 }
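// --- Editor's sketch (illustration only, not part of the original file) ---
// The two transfer routines above move work between a task-private queue
// and the shared global stack in fixed-size chunks, amortizing the
// synchronization on the shared stack over global_stack_transfer_size
// entries at a time. A standalone model of the chunked hand-off; a single
// mutex stands in for the mark stack's synchronization, and all names are
// invented:

#include <deque>
#include <mutex>
#include <vector>

namespace illustration {
  const int TRANSFER = 16;            // entries moved per hand-off

  struct TwoLevelStacks {
    std::deque<int>  local;           // task-private queue, no locking
    std::vector<int> global;          // shared stack
    std::mutex       global_lock;     // one sync point per *chunk*

    void move_to_global() {
      int buffer[TRANSFER];
      int n = 0;
      while (n < TRANSFER && !local.empty()) {
        buffer[n++] = local.back();   // pop locally, lock-free
        local.pop_back();
      }
      if (n > 0) {
        std::lock_guard<std::mutex> g(global_lock);
        global.insert(global.end(), buffer, buffer + n);
      }
    }

    void get_from_global() {
      std::lock_guard<std::mutex> g(global_lock);
      for (int i = 0; i < TRANSFER && !global.empty(); i++) {
        local.push_back(global.back());
        global.pop_back();
      }
    }
  };
}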
ysr@777 3856
ysr@777 3857 void CMTask::drain_local_queue(bool partially) {
tonyp@2973 3858 if (has_aborted()) return;
ysr@777 3859
ysr@777 3860   // Decide what the target size is, depending on whether we're going to
ysr@777 3861 // drain it partially (so that other tasks can steal if they run out
ysr@777 3862 // of things to do) or totally (at the very end).
ysr@777 3863 size_t target_size;
tonyp@2973 3864 if (partially) {
ysr@777 3865 target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
tonyp@2973 3866 } else {
ysr@777 3867 target_size = 0;
tonyp@2973 3868 }
ysr@777 3869
ysr@777 3870 if (_task_queue->size() > target_size) {
tonyp@2973 3871 if (_cm->verbose_high()) {
ysr@777 3872 gclog_or_tty->print_cr("[%d] draining local queue, target size = %d",
ysr@777 3873 _task_id, target_size);
tonyp@2973 3874 }
ysr@777 3875
ysr@777 3876 oop obj;
ysr@777 3877 bool ret = _task_queue->pop_local(obj);
ysr@777 3878 while (ret) {
ysr@777 3879 statsOnly( ++_local_pops );
ysr@777 3880
tonyp@2973 3881 if (_cm->verbose_high()) {
ysr@777 3882 gclog_or_tty->print_cr("[%d] popped "PTR_FORMAT, _task_id,
ysr@777 3883 (void*) obj);
tonyp@2973 3884 }
ysr@777 3885
tonyp@1458 3886 assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" );
tonyp@2643 3887 assert(!_g1h->is_on_master_free_list(
tonyp@2472 3888 _g1h->heap_region_containing((HeapWord*) obj)), "invariant");
ysr@777 3889
ysr@777 3890 scan_object(obj);
ysr@777 3891
tonyp@2973 3892 if (_task_queue->size() <= target_size || has_aborted()) {
ysr@777 3893 ret = false;
tonyp@2973 3894 } else {
ysr@777 3895 ret = _task_queue->pop_local(obj);
tonyp@2973 3896 }
ysr@777 3897 }
ysr@777 3898
tonyp@2973 3899 if (_cm->verbose_high()) {
ysr@777 3900 gclog_or_tty->print_cr("[%d] drained local queue, size = %d",
ysr@777 3901 _task_id, _task_queue->size());
tonyp@2973 3902 }
ysr@777 3903 }
ysr@777 3904 }
ysr@777 3905
ysr@777 3906 void CMTask::drain_global_stack(bool partially) {
tonyp@2973 3907 if (has_aborted()) return;
ysr@777 3908
ysr@777 3909 // We have a policy to drain the local queue before we attempt to
ysr@777 3910 // drain the global stack.
tonyp@1458 3911 assert(partially || _task_queue->size() == 0, "invariant");
ysr@777 3912
ysr@777 3913   // Decide what the target size is, depending on whether we're going
ysr@777 3914   // to drain it partially (so that other tasks can steal if they run
ysr@777 3915   // out of things to do) or totally (at the very end). Notice that,
ysr@777 3916   // because we move entries from the global stack in chunks or
ysr@777 3917   // because another task might be doing the same, we might in fact
ysr@777 3918   // drop below the target. But this is not a problem.
ysr@777 3919 size_t target_size;
tonyp@2973 3920 if (partially) {
ysr@777 3921 target_size = _cm->partial_mark_stack_size_target();
tonyp@2973 3922 } else {
ysr@777 3923 target_size = 0;
tonyp@2973 3924 }
ysr@777 3925
ysr@777 3926 if (_cm->mark_stack_size() > target_size) {
tonyp@2973 3927 if (_cm->verbose_low()) {
ysr@777 3928       gclog_or_tty->print_cr("[%d] draining global stack, target size "SIZE_FORMAT,
ysr@777 3929 _task_id, target_size);
tonyp@2973 3930 }
ysr@777 3931
ysr@777 3932 while (!has_aborted() && _cm->mark_stack_size() > target_size) {
ysr@777 3933 get_entries_from_global_stack();
ysr@777 3934 drain_local_queue(partially);
ysr@777 3935 }
ysr@777 3936
tonyp@2973 3937 if (_cm->verbose_low()) {
ysr@777 3938 gclog_or_tty->print_cr("[%d] drained global stack, size = %d",
ysr@777 3939 _task_id, _cm->mark_stack_size());
tonyp@2973 3940 }
ysr@777 3941 }
ysr@777 3942 }
ysr@777 3943
ysr@777 3944 // The SATB queue set makes several assumptions about whether to call
ysr@777 3945 // the par or non-par versions of its methods. This is why some of the
ysr@777 3946 // code is replicated. We should really get rid of the single-threaded
ysr@777 3947 // version of the code to simplify things.
ysr@777 3948 void CMTask::drain_satb_buffers() {
tonyp@2973 3949 if (has_aborted()) return;
ysr@777 3950
ysr@777 3951 // We set this so that the regular clock knows that we're in the
ysr@777 3952 // middle of draining buffers and doesn't set the abort flag when it
ysr@777 3953 // notices that SATB buffers are available for draining. It'd be
ysr@777 3954   // very counterproductive if it did that. :-)
ysr@777 3955 _draining_satb_buffers = true;
ysr@777 3956
ysr@777 3957 CMObjectClosure oc(this);
ysr@777 3958 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
tonyp@2973 3959 if (G1CollectedHeap::use_parallel_gc_threads()) {
ysr@777 3960 satb_mq_set.set_par_closure(_task_id, &oc);
tonyp@2973 3961 } else {
ysr@777 3962 satb_mq_set.set_closure(&oc);
tonyp@2973 3963 }
ysr@777 3964
ysr@777 3965 // This keeps claiming and applying the closure to completed buffers
ysr@777 3966 // until we run out of buffers or we need to abort.
jmasa@2188 3967 if (G1CollectedHeap::use_parallel_gc_threads()) {
ysr@777 3968 while (!has_aborted() &&
ysr@777 3969 satb_mq_set.par_apply_closure_to_completed_buffer(_task_id)) {
tonyp@2973 3970 if (_cm->verbose_medium()) {
ysr@777 3971 gclog_or_tty->print_cr("[%d] processed an SATB buffer", _task_id);
tonyp@2973 3972 }
ysr@777 3973 statsOnly( ++_satb_buffers_processed );
ysr@777 3974 regular_clock_call();
ysr@777 3975 }
ysr@777 3976 } else {
ysr@777 3977 while (!has_aborted() &&
ysr@777 3978 satb_mq_set.apply_closure_to_completed_buffer()) {
tonyp@2973 3979 if (_cm->verbose_medium()) {
ysr@777 3980 gclog_or_tty->print_cr("[%d] processed an SATB buffer", _task_id);
tonyp@2973 3981 }
ysr@777 3982 statsOnly( ++_satb_buffers_processed );
ysr@777 3983 regular_clock_call();
ysr@777 3984 }
ysr@777 3985 }
ysr@777 3986
ysr@777 3987 if (!concurrent() && !has_aborted()) {
ysr@777 3988 // We should only do this during remark.
tonyp@2973 3989 if (G1CollectedHeap::use_parallel_gc_threads()) {
ysr@777 3990 satb_mq_set.par_iterate_closure_all_threads(_task_id);
tonyp@2973 3991 } else {
ysr@777 3992 satb_mq_set.iterate_closure_all_threads();
tonyp@2973 3993 }
ysr@777 3994 }
ysr@777 3995
ysr@777 3996 _draining_satb_buffers = false;
ysr@777 3997
tonyp@1458 3998 assert(has_aborted() ||
tonyp@1458 3999 concurrent() ||
tonyp@1458 4000 satb_mq_set.completed_buffers_num() == 0, "invariant");
ysr@777 4001
tonyp@2973 4002 if (G1CollectedHeap::use_parallel_gc_threads()) {
ysr@777 4003 satb_mq_set.set_par_closure(_task_id, NULL);
tonyp@2973 4004 } else {
ysr@777 4005 satb_mq_set.set_closure(NULL);
tonyp@2973 4006 }
ysr@777 4007
ysr@777 4008   // again, this was a potentially expensive operation, so decrease
ysr@777 4009   // the limits to get the regular clock call earlier
ysr@777 4010 decrease_limits();
ysr@777 4011 }
ysr@777 4012
ysr@777 4013 void CMTask::drain_region_stack(BitMapClosure* bc) {
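  // The region stack and the aborted region are expected to always be
  // empty at this point (the asserts below check exactly that), so
  // there is nothing to drain. Note that the unconditional return
  // deliberately leaves the remainder of this method unreachable.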
tonyp@3416 4014 assert(_cm->region_stack_empty(), "region stack should be empty");
tonyp@3416 4015 assert(_aborted_region.is_empty(), "aborted region should be empty");
tonyp@3416 4016 return;
tonyp@3416 4017
tonyp@2973 4018 if (has_aborted()) return;
ysr@777 4019
tonyp@1458 4020 assert(_region_finger == NULL,
tonyp@1458 4021 "it should be NULL when we're not scanning a region");
ysr@777 4022
johnc@2190 4023 if (!_cm->region_stack_empty() || !_aborted_region.is_empty()) {
tonyp@2973 4024 if (_cm->verbose_low()) {
ysr@777 4025 gclog_or_tty->print_cr("[%d] draining region stack, size = %d",
ysr@777 4026 _task_id, _cm->region_stack_size());
tonyp@2973 4027 }
ysr@777 4028
johnc@2190 4029 MemRegion mr;
johnc@2190 4030
johnc@2190 4031 if (!_aborted_region.is_empty()) {
johnc@2190 4032 mr = _aborted_region;
johnc@2190 4033 _aborted_region = MemRegion();
johnc@2190 4034
tonyp@2973 4035 if (_cm->verbose_low()) {
tonyp@2973 4036 gclog_or_tty->print_cr("[%d] scanning aborted region "
tonyp@2973 4037 "[ " PTR_FORMAT ", " PTR_FORMAT " )",
tonyp@2973 4038 _task_id, mr.start(), mr.end());
tonyp@2973 4039 }
johnc@2190 4040 } else {
johnc@2190 4041 mr = _cm->region_stack_pop_lock_free();
johnc@2190 4042 // it returns MemRegion() if the pop fails
johnc@2190 4043 statsOnly(if (mr.start() != NULL) ++_region_stack_pops );
johnc@2190 4044 }
ysr@777 4045
ysr@777 4046 while (mr.start() != NULL) {
tonyp@2973 4047 if (_cm->verbose_medium()) {
ysr@777 4048 gclog_or_tty->print_cr("[%d] we are scanning region "
ysr@777 4049 "["PTR_FORMAT", "PTR_FORMAT")",
ysr@777 4050 _task_id, mr.start(), mr.end());
tonyp@2973 4051 }
johnc@2190 4052
tonyp@1458 4053 assert(mr.end() <= _cm->finger(),
tonyp@1458 4054 "otherwise the region shouldn't be on the stack");
ysr@777 4055 assert(!mr.is_empty(), "Only non-empty regions live on the region stack");
ysr@777 4056 if (_nextMarkBitMap->iterate(bc, mr)) {
tonyp@1458 4057 assert(!has_aborted(),
tonyp@1458 4058 "cannot abort the task without aborting the bitmap iteration");
ysr@777 4059
ysr@777 4060 // We finished iterating over the region without aborting.
ysr@777 4061 regular_clock_call();
tonyp@2973 4062 if (has_aborted()) {
ysr@777 4063 mr = MemRegion();
tonyp@2973 4064 } else {
johnc@2190 4065 mr = _cm->region_stack_pop_lock_free();
ysr@777 4066 // it returns MemRegion() if the pop fails
ysr@777 4067 statsOnly(if (mr.start() != NULL) ++_region_stack_pops );
ysr@777 4068 }
ysr@777 4069 } else {
tonyp@1458 4070 assert(has_aborted(), "currently the only way to do so");
ysr@777 4071
ysr@777 4072 // The only way to abort the bitmap iteration is to return
ysr@777 4073 // false from the do_bit() method. However, inside the
ysr@777 4074 // do_bit() method we move the _region_finger to point to the
ysr@777 4075 // object currently being looked at. So, if we bail out, we
ysr@777 4076 // have definitely set _region_finger to something non-null.
tonyp@1458 4077 assert(_region_finger != NULL, "invariant");
ysr@777 4078
johnc@2190 4079 // Make sure that any previously aborted region has been
johnc@2190 4080 // cleared.
johnc@2190 4081 assert(_aborted_region.is_empty(), "aborted region not cleared");
johnc@2190 4082
ysr@777 4083 // The iteration was actually aborted. So now _region_finger
ysr@777 4084 // points to the address of the object we last scanned. If we
ysr@777 4085 // leave it there, when we restart this task, we will rescan
ysr@777 4086 // the object. It is easy to avoid this. We move the finger by
ysr@777 4087 // enough to point to the next possible object header (the
ysr@777 4088 // bitmap knows by how much we need to move it as it knows its
ysr@777 4089 // granularity).
ysr@777 4090 MemRegion newRegion =
ysr@777 4091 MemRegion(_nextMarkBitMap->nextWord(_region_finger), mr.end());
ysr@777 4092
ysr@777 4093 if (!newRegion.is_empty()) {
ysr@777 4094 if (_cm->verbose_low()) {
johnc@2190 4095           gclog_or_tty->print_cr("[%d] recording unscanned region "
johnc@2190 4096                                  "[" PTR_FORMAT "," PTR_FORMAT ") in CMTask",
ysr@777 4097 _task_id,
ysr@777 4098 newRegion.start(), newRegion.end());
ysr@777 4099 }
johnc@2190 4100 // Now record the part of the region we didn't scan to
johnc@2190 4101 // make sure this task scans it later.
johnc@2190 4102 _aborted_region = newRegion;
ysr@777 4103 }
ysr@777 4104 // break from while
ysr@777 4105 mr = MemRegion();
ysr@777 4106 }
ysr@777 4107 _region_finger = NULL;
ysr@777 4108 }
ysr@777 4109
tonyp@2973 4110 if (_cm->verbose_low()) {
ysr@777 4111 gclog_or_tty->print_cr("[%d] drained region stack, size = %d",
ysr@777 4112 _task_id, _cm->region_stack_size());
tonyp@2973 4113 }
ysr@777 4114 }
ysr@777 4115 }
ysr@777 4116
ysr@777 4117 void CMTask::print_stats() {
ysr@777 4118 gclog_or_tty->print_cr("Marking Stats, task = %d, calls = %d",
ysr@777 4119 _task_id, _calls);
ysr@777 4120 gclog_or_tty->print_cr(" Elapsed time = %1.2lfms, Termination time = %1.2lfms",
ysr@777 4121 _elapsed_time_ms, _termination_time_ms);
ysr@777 4122 gclog_or_tty->print_cr(" Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
ysr@777 4123 _step_times_ms.num(), _step_times_ms.avg(),
ysr@777 4124 _step_times_ms.sd());
ysr@777 4125 gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms",
ysr@777 4126 _step_times_ms.maximum(), _step_times_ms.sum());
ysr@777 4127
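  // The detailed statistics below are only gathered (via the
  // statsOnly() macro) and printed when _MARKING_STATS_ is defined.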
ysr@777 4128 #if _MARKING_STATS_
ysr@777 4129 gclog_or_tty->print_cr(" Clock Intervals (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
ysr@777 4130 _all_clock_intervals_ms.num(), _all_clock_intervals_ms.avg(),
ysr@777 4131 _all_clock_intervals_ms.sd());
ysr@777 4132 gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms",
ysr@777 4133 _all_clock_intervals_ms.maximum(),
ysr@777 4134 _all_clock_intervals_ms.sum());
ysr@777 4135 gclog_or_tty->print_cr(" Clock Causes (cum): scanning = %d, marking = %d",
ysr@777 4136 _clock_due_to_scanning, _clock_due_to_marking);
ysr@777 4137 gclog_or_tty->print_cr(" Objects: scanned = %d, found on the bitmap = %d",
ysr@777 4138 _objs_scanned, _objs_found_on_bitmap);
ysr@777 4139 gclog_or_tty->print_cr(" Local Queue: pushes = %d, pops = %d, max size = %d",
ysr@777 4140 _local_pushes, _local_pops, _local_max_size);
ysr@777 4141 gclog_or_tty->print_cr(" Global Stack: pushes = %d, pops = %d, max size = %d",
ysr@777 4142 _global_pushes, _global_pops, _global_max_size);
ysr@777 4143 gclog_or_tty->print_cr(" transfers to = %d, transfers from = %d",
ysr@777 4144 _global_transfers_to,_global_transfers_from);
ysr@777 4145 gclog_or_tty->print_cr(" Regions: claimed = %d, Region Stack: pops = %d",
ysr@777 4146 _regions_claimed, _region_stack_pops);
ysr@777 4147 gclog_or_tty->print_cr(" SATB buffers: processed = %d", _satb_buffers_processed);
ysr@777 4148 gclog_or_tty->print_cr(" Steals: attempts = %d, successes = %d",
ysr@777 4149 _steal_attempts, _steals);
ysr@777 4150 gclog_or_tty->print_cr(" Aborted: %d, due to", _aborted);
ysr@777 4151 gclog_or_tty->print_cr(" overflow: %d, global abort: %d, yield: %d",
ysr@777 4152 _aborted_overflow, _aborted_cm_aborted, _aborted_yield);
ysr@777 4153 gclog_or_tty->print_cr(" time out: %d, SATB: %d, termination: %d",
ysr@777 4154 _aborted_timed_out, _aborted_satb, _aborted_termination);
ysr@777 4155 #endif // _MARKING_STATS_
ysr@777 4156 }
ysr@777 4157
ysr@777 4158 /*****************************************************************************
ysr@777 4159
ysr@777 4160 The do_marking_step(time_target_ms) method is the building block
ysr@777 4161 of the parallel marking framework. It can be called in parallel
ysr@777 4162 with other invocations of do_marking_step() on different tasks
ysr@777 4163 (but only one per task, obviously) and concurrently with the
ysr@777 4164 mutator threads, or during remark, hence it eliminates the need
ysr@777 4165 for two versions of the code. When called during remark, it will
ysr@777 4166 pick up from where the task left off during the concurrent marking
ysr@777 4167    phase. Interestingly, tasks are also claimable during evacuation
ysr@777 4168    pauses, since do_marking_step() ensures that it aborts before
ysr@777 4169 it needs to yield.
ysr@777 4170
ysr@777 4171    The data structures that it uses to do marking work are the
ysr@777 4172 following:
ysr@777 4173
ysr@777 4174 (1) Marking Bitmap. If there are gray objects that appear only
ysr@777 4175 on the bitmap (this happens either when dealing with an overflow
ysr@777 4176 or when the initial marking phase has simply marked the roots
ysr@777 4177 and didn't push them on the stack), then tasks claim heap
ysr@777 4178 regions whose bitmap they then scan to find gray objects. A
ysr@777 4179 global finger indicates where the end of the last claimed region
ysr@777 4180 is. A local finger indicates how far into the region a task has
ysr@777 4181 scanned. The two fingers are used to determine how to gray an
ysr@777 4182 object (i.e. whether simply marking it is OK, as it will be
ysr@777 4183    visited by a task in the future, or whether it also needs to be
ysr@777 4184 pushed on a stack).
ysr@777 4185
ysr@777 4186    (2) Local Queue. The local queue of the task, which the task can
ysr@777 4187    access reasonably efficiently. Other tasks can steal from
ysr@777 4188 it when they run out of work. Throughout the marking phase, a
ysr@777 4189 task attempts to keep its local queue short but not totally
ysr@777 4190 empty, so that entries are available for stealing by other
ysr@777 4191    tasks. Only when there is no more work will a task totally
ysr@777 4192    drain its local queue.
ysr@777 4193
ysr@777 4194    (3) Global Mark Stack. This handles local queue overflow. During
ysr@777 4195    marking, only sets of entries are moved between it and the local
ysr@777 4196    queues, as access to it requires a mutex and more fine-grained
ysr@777 4197    interaction with it might cause contention. If it
ysr@777 4198    overflows, then the marking phase should restart and iterate
ysr@777 4199    over the bitmap to identify gray objects. Throughout the marking
ysr@777 4200    phase, tasks attempt to keep the global mark stack at a small
ysr@777 4201    length but not totally empty, so that entries are available for
ysr@777 4202    popping by other tasks. Only when there is no more work will
ysr@777 4203    tasks totally drain the global mark stack.
ysr@777 4204
ysr@777 4205 (4) Global Region Stack. Entries on it correspond to areas of
ysr@777 4206 the bitmap that need to be scanned since they contain gray
ysr@777 4207 objects. Pushes on the region stack only happen during
ysr@777 4208 evacuation pauses and typically correspond to areas covered by
ysr@777 4209    GC LABs. If it overflows, then the marking phase should restart
ysr@777 4210 and iterate over the bitmap to identify gray objects. Tasks will
ysr@777 4211 try to totally drain the region stack as soon as possible.
ysr@777 4212
ysr@777 4213 (5) SATB Buffer Queue. This is where completed SATB buffers are
ysr@777 4214 made available. Buffers are regularly removed from this queue
ysr@777 4215 and scanned for roots, so that the queue doesn't get too
ysr@777 4216 long. During remark, all completed buffers are processed, as
ysr@777 4217    well as the filled-in parts of any uncompleted buffers.
ysr@777 4218
ysr@777 4219 The do_marking_step() method tries to abort when the time target
ysr@777 4220 has been reached. There are a few other cases when the
ysr@777 4221 do_marking_step() method also aborts:
ysr@777 4222
ysr@777 4223 (1) When the marking phase has been aborted (after a Full GC).
ysr@777 4224
ysr@777 4225 (2) When a global overflow (either on the global stack or the
ysr@777 4226 region stack) has been triggered. Before the task aborts, it
ysr@777 4227 will actually sync up with the other tasks to ensure that all
ysr@777 4228 the marking data structures (local queues, stacks, fingers etc.)
ysr@777 4229 are re-initialised so that when do_marking_step() completes,
ysr@777 4230 the marking phase can immediately restart.
ysr@777 4231
ysr@777 4232 (3) When enough completed SATB buffers are available. The
ysr@777 4233 do_marking_step() method only tries to drain SATB buffers right
ysr@777 4234 at the beginning. So, if enough buffers are available, the
ysr@777 4235 marking step aborts and the SATB buffers are processed at
ysr@777 4236 the beginning of the next invocation.
ysr@777 4237
ysr@777 4238    (4) To yield. When we have to yield, we abort and do the yield
ysr@777 4239    right at the end of do_marking_step(). This saves us from a lot
ysr@777 4240    of hassle as, by yielding, we might allow a Full GC. If this
ysr@777 4241    happens, then objects will be compacted underneath our feet, the
ysr@777 4242    heap might shrink, etc. We avoid checking for all this by just
ysr@777 4243    aborting and doing the yield right at the end.
ysr@777 4244
ysr@777 4245    From the above it follows that the do_marking_step() method should
ysr@777 4246    be called in a loop (or, otherwise, regularly) until it completes;
ysr@777 4247    a sketch of such a loop follows this comment block.
ysr@777 4247
ysr@777 4248 If a marking step completes without its has_aborted() flag being
ysr@777 4249 true, it means it has completed the current marking phase (and
ysr@777 4250 also all other marking tasks have done so and have all synced up).
ysr@777 4251
ysr@777 4252    A method called regular_clock_call() is invoked "regularly" (in
ysr@777 4253    sub-ms intervals) throughout marking. It is this clock method that
ysr@777 4254    checks all the abort conditions which were mentioned above and
ysr@777 4255    decides when the task should abort. A work-based scheme is used to
ysr@777 4256    trigger this clock method: it is called when the number of object
ysr@777 4257    words the marking phase has scanned or the number of references it
ysr@777 4258    has visited reaches a given limit. Additional invocations of the
ysr@777 4259    clock method have been planted in a few other strategic places
ysr@777 4260    too. The initial reason for the clock method was to avoid calling
ysr@777 4261    vtime too regularly, as it is quite expensive. So, once it was in
ysr@777 4262    place, it was natural to piggyback all the other conditions on it
ysr@777 4263    too and not constantly check them throughout the code.
ysr@777 4264
ysr@777 4265 *****************************************************************************/
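// A minimal sketch of the calling loop described above. This is a
// hypothetical driver shown for illustration only: the actual callers
// are the concurrent marking and remark worker tasks, and the use of
// G1ConcMarkStepDurationMillis here is an assumption, not taken from
// this file.
//
//   do {
//     double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
//     task->do_marking_step(mark_step_duration_ms,
//                           true /* do_stealing */,
//                           true /* do_termination */);
//     // Yielding, SATB draining and the overflow protocol are all
//     // handled inside do_marking_step(); we simply call it again if
//     // it aborted without the marking phase itself being aborted.
//   } while (task->has_aborted() && !cm->has_aborted());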
ysr@777 4266
johnc@2494 4267 void CMTask::do_marking_step(double time_target_ms,
johnc@2494 4268 bool do_stealing,
johnc@2494 4269 bool do_termination) {
tonyp@1458 4270 assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
tonyp@1458 4271 assert(concurrent() == _cm->concurrent(), "they should be the same");
tonyp@1458 4272
tonyp@1458 4273 assert(concurrent() || _cm->region_stack_empty(),
tonyp@1458 4274 "the region stack should have been cleared before remark");
johnc@2190 4275 assert(concurrent() || !_cm->has_aborted_regions(),
johnc@2190 4276 "aborted regions should have been cleared before remark");
tonyp@1458 4277 assert(_region_finger == NULL,
tonyp@1458 4278 "this should be non-null only when a region is being scanned");
ysr@777 4279
ysr@777 4280 G1CollectorPolicy* g1_policy = _g1h->g1_policy();
tonyp@1458 4281 assert(_task_queues != NULL, "invariant");
tonyp@1458 4282 assert(_task_queue != NULL, "invariant");
tonyp@1458 4283 assert(_task_queues->queue(_task_id) == _task_queue, "invariant");
tonyp@1458 4284
tonyp@1458 4285 assert(!_claimed,
tonyp@1458 4286 "only one thread should claim this task at any one time");
ysr@777 4287
ysr@777 4288   // OK, this doesn't safeguard against all possible scenarios, as it is
ysr@777 4289 // possible for two threads to set the _claimed flag at the same
ysr@777 4290 // time. But it is only for debugging purposes anyway and it will
ysr@777 4291 // catch most problems.
ysr@777 4292 _claimed = true;
ysr@777 4293
ysr@777 4294 _start_time_ms = os::elapsedVTime() * 1000.0;
ysr@777 4295 statsOnly( _interval_start_time_ms = _start_time_ms );
ysr@777 4296
ysr@777 4297 double diff_prediction_ms =
ysr@777 4298 g1_policy->get_new_prediction(&_marking_step_diffs_ms);
ysr@777 4299 _time_target_ms = time_target_ms - diff_prediction_ms;
ysr@777 4300
ysr@777 4301 // set up the variables that are used in the work-based scheme to
ysr@777 4302 // call the regular clock method
ysr@777 4303 _words_scanned = 0;
ysr@777 4304 _refs_reached = 0;
ysr@777 4305 recalculate_limits();
ysr@777 4306
ysr@777 4307 // clear all flags
ysr@777 4308 clear_has_aborted();
johnc@2494 4309 _has_timed_out = false;
ysr@777 4310 _draining_satb_buffers = false;
ysr@777 4311
ysr@777 4312 ++_calls;
ysr@777 4313
tonyp@2973 4314 if (_cm->verbose_low()) {
ysr@777 4315 gclog_or_tty->print_cr("[%d] >>>>>>>>>> START, call = %d, "
ysr@777 4316 "target = %1.2lfms >>>>>>>>>>",
ysr@777 4317 _task_id, _calls, _time_target_ms);
tonyp@2973 4318 }
ysr@777 4319
ysr@777 4320 // Set up the bitmap and oop closures. Anything that uses them is
ysr@777 4321 // eventually called from this method, so it is OK to allocate these
ysr@777 4322   // locally, on this frame's stack.
ysr@777 4323 CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap);
tonyp@2968 4324 G1CMOopClosure cm_oop_closure(_g1h, _cm, this);
tonyp@2968 4325 set_cm_oop_closure(&cm_oop_closure);
ysr@777 4326
ysr@777 4327 if (_cm->has_overflown()) {
ysr@777 4328 // This can happen if the region stack or the mark stack overflows
ysr@777 4329 // during a GC pause and this task, after a yield point,
ysr@777 4330 // restarts. We have to abort as we need to get into the overflow
ysr@777 4331 // protocol which happens right at the end of this task.
ysr@777 4332 set_has_aborted();
ysr@777 4333 }
ysr@777 4334
ysr@777 4335 // First drain any available SATB buffers. After this, we will not
ysr@777 4336 // look at SATB buffers before the next invocation of this method.
ysr@777 4337 // If enough completed SATB buffers are queued up, the regular clock
ysr@777 4338 // will abort this task so that it restarts.
ysr@777 4339 drain_satb_buffers();
ysr@777 4340 // ...then partially drain the local queue and the global stack
ysr@777 4341 drain_local_queue(true);
ysr@777 4342 drain_global_stack(true);
ysr@777 4343
ysr@777 4344 // Then totally drain the region stack. We will not look at
ysr@777 4345 // it again before the next invocation of this method. Entries on
ysr@777 4346 // the region stack are only added during evacuation pauses, for
ysr@777 4347 // which we have to yield. When we do, we abort the task anyway so
ysr@777 4348 // it will look at the region stack again when it restarts.
ysr@777 4349 bitmap_closure.set_scanning_heap_region(false);
ysr@777 4350 drain_region_stack(&bitmap_closure);
ysr@777 4351 // ...then partially drain the local queue and the global stack
ysr@777 4352 drain_local_queue(true);
ysr@777 4353 drain_global_stack(true);
ysr@777 4354
ysr@777 4355 do {
ysr@777 4356 if (!has_aborted() && _curr_region != NULL) {
ysr@777 4357 // This means that we're already holding on to a region.
tonyp@1458 4358 assert(_finger != NULL, "if region is not NULL, then the finger "
tonyp@1458 4359 "should not be NULL either");
ysr@777 4360
ysr@777 4361 // We might have restarted this task after an evacuation pause
ysr@777 4362 // which might have evacuated the region we're holding on to
ysr@777 4363 // underneath our feet. Let's read its limit again to make sure
ysr@777 4364 // that we do not iterate over a region of the heap that
ysr@777 4365 // contains garbage (update_region_limit() will also move
ysr@777 4366 // _finger to the start of the region if it is found empty).
ysr@777 4367 update_region_limit();
ysr@777 4368 // We will start from _finger not from the start of the region,
ysr@777 4369 // as we might be restarting this task after aborting half-way
ysr@777 4370 // through scanning this region. In this case, _finger points to
ysr@777 4371 // the address where we last found a marked object. If this is a
ysr@777 4372 // fresh region, _finger points to start().
ysr@777 4373 MemRegion mr = MemRegion(_finger, _region_limit);
ysr@777 4374
tonyp@2973 4375 if (_cm->verbose_low()) {
ysr@777 4376 gclog_or_tty->print_cr("[%d] we're scanning part "
ysr@777 4377 "["PTR_FORMAT", "PTR_FORMAT") "
ysr@777 4378 "of region "PTR_FORMAT,
ysr@777 4379 _task_id, _finger, _region_limit, _curr_region);
tonyp@2973 4380 }
ysr@777 4381
ysr@777 4382 // Let's iterate over the bitmap of the part of the
ysr@777 4383 // region that is left.
ysr@777 4384 bitmap_closure.set_scanning_heap_region(true);
ysr@777 4385 if (mr.is_empty() ||
ysr@777 4386 _nextMarkBitMap->iterate(&bitmap_closure, mr)) {
ysr@777 4387 // We successfully completed iterating over the region. Now,
ysr@777 4388 // let's give up the region.
ysr@777 4389 giveup_current_region();
ysr@777 4390 regular_clock_call();
ysr@777 4391 } else {
tonyp@1458 4392 assert(has_aborted(), "currently the only way to do so");
ysr@777 4393 // The only way to abort the bitmap iteration is to return
ysr@777 4394 // false from the do_bit() method. However, inside the
ysr@777 4395 // do_bit() method we move the _finger to point to the
ysr@777 4396 // object currently being looked at. So, if we bail out, we
ysr@777 4397 // have definitely set _finger to something non-null.
tonyp@1458 4398 assert(_finger != NULL, "invariant");
ysr@777 4399
ysr@777 4400 // Region iteration was actually aborted. So now _finger
ysr@777 4401 // points to the address of the object we last scanned. If we
ysr@777 4402 // leave it there, when we restart this task, we will rescan
ysr@777 4403 // the object. It is easy to avoid this. We move the finger by
ysr@777 4404 // enough to point to the next possible object header (the
ysr@777 4405 // bitmap knows by how much we need to move it as it knows its
ysr@777 4406 // granularity).
apetrusenko@1749 4407 assert(_finger < _region_limit, "invariant");
apetrusenko@1749 4408 HeapWord* new_finger = _nextMarkBitMap->nextWord(_finger);
apetrusenko@1749 4409 // Check if bitmap iteration was aborted while scanning the last object
apetrusenko@1749 4410 if (new_finger >= _region_limit) {
apetrusenko@1749 4411 giveup_current_region();
apetrusenko@1749 4412 } else {
apetrusenko@1749 4413 move_finger_to(new_finger);
apetrusenko@1749 4414 }
ysr@777 4415 }
ysr@777 4416 }
ysr@777 4417 // At this point we have either completed iterating over the
ysr@777 4418 // region we were holding on to, or we have aborted.
ysr@777 4419
ysr@777 4420 // We then partially drain the local queue and the global stack.
ysr@777 4421 // (Do we really need this?)
ysr@777 4422 drain_local_queue(true);
ysr@777 4423 drain_global_stack(true);
ysr@777 4424
ysr@777 4425 // Read the note on the claim_region() method on why it might
ysr@777 4426 // return NULL with potentially more regions available for
ysr@777 4427 // claiming and why we have to check out_of_regions() to determine
ysr@777 4428 // whether we're done or not.
ysr@777 4429 while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
ysr@777 4430 // We are going to try to claim a new region. We should have
ysr@777 4431 // given up on the previous one.
tonyp@1458 4432 // Separated the asserts so that we know which one fires.
tonyp@1458 4433 assert(_curr_region == NULL, "invariant");
tonyp@1458 4434 assert(_finger == NULL, "invariant");
tonyp@1458 4435 assert(_region_limit == NULL, "invariant");
tonyp@2973 4436 if (_cm->verbose_low()) {
ysr@777 4437 gclog_or_tty->print_cr("[%d] trying to claim a new region", _task_id);
tonyp@2973 4438 }
ysr@777 4439 HeapRegion* claimed_region = _cm->claim_region(_task_id);
ysr@777 4440 if (claimed_region != NULL) {
ysr@777 4441 // Yes, we managed to claim one
ysr@777 4442 statsOnly( ++_regions_claimed );
ysr@777 4443
tonyp@2973 4444 if (_cm->verbose_low()) {
ysr@777 4445 gclog_or_tty->print_cr("[%d] we successfully claimed "
ysr@777 4446 "region "PTR_FORMAT,
ysr@777 4447 _task_id, claimed_region);
tonyp@2973 4448 }
ysr@777 4449
ysr@777 4450 setup_for_region(claimed_region);
tonyp@1458 4451 assert(_curr_region == claimed_region, "invariant");
ysr@777 4452 }
ysr@777 4453 // It is important to call the regular clock here. It might take
ysr@777 4454 // a while to claim a region if, for example, we hit a large
ysr@777 4455 // block of empty regions. So we need to call the regular clock
ysr@777 4456 // method once round the loop to make sure it's called
ysr@777 4457 // frequently enough.
ysr@777 4458 regular_clock_call();
ysr@777 4459 }
ysr@777 4460
ysr@777 4461 if (!has_aborted() && _curr_region == NULL) {
tonyp@1458 4462 assert(_cm->out_of_regions(),
tonyp@1458 4463 "at this point we should be out of regions");
ysr@777 4464 }
ysr@777 4465 } while ( _curr_region != NULL && !has_aborted());
ysr@777 4466
ysr@777 4467 if (!has_aborted()) {
ysr@777 4468 // We cannot check whether the global stack is empty, since other
iveresov@778 4469 // tasks might be pushing objects to it concurrently. We also cannot
iveresov@778 4470 // check if the region stack is empty because if a thread is aborting
iveresov@778 4471 // it can push a partially done region back.
tonyp@1458 4472 assert(_cm->out_of_regions(),
tonyp@1458 4473 "at this point we should be out of regions");
ysr@777 4474
tonyp@2973 4475 if (_cm->verbose_low()) {
ysr@777 4476 gclog_or_tty->print_cr("[%d] all regions claimed", _task_id);
tonyp@2973 4477 }
ysr@777 4478
ysr@777 4479 // Try to reduce the number of available SATB buffers so that
ysr@777 4480 // remark has less work to do.
ysr@777 4481 drain_satb_buffers();
ysr@777 4482 }
ysr@777 4483
ysr@777 4484 // Since we've done everything else, we can now totally drain the
ysr@777 4485 // local queue and global stack.
ysr@777 4486 drain_local_queue(false);
ysr@777 4487 drain_global_stack(false);
ysr@777 4488
ysr@777 4489   // Attempt at work stealing from other tasks' queues.
johnc@2494 4490 if (do_stealing && !has_aborted()) {
ysr@777 4491 // We have not aborted. This means that we have finished all that
ysr@777 4492 // we could. Let's try to do some stealing...
ysr@777 4493
ysr@777 4494 // We cannot check whether the global stack is empty, since other
iveresov@778 4495 // tasks might be pushing objects to it concurrently. We also cannot
iveresov@778 4496 // check if the region stack is empty because if a thread is aborting
iveresov@778 4497 // it can push a partially done region back.
tonyp@1458 4498 assert(_cm->out_of_regions() && _task_queue->size() == 0,
tonyp@1458 4499 "only way to reach here");
ysr@777 4500
tonyp@2973 4501 if (_cm->verbose_low()) {
ysr@777 4502 gclog_or_tty->print_cr("[%d] starting to steal", _task_id);
tonyp@2973 4503 }
ysr@777 4504
ysr@777 4505 while (!has_aborted()) {
ysr@777 4506 oop obj;
ysr@777 4507 statsOnly( ++_steal_attempts );
ysr@777 4508
ysr@777 4509 if (_cm->try_stealing(_task_id, &_hash_seed, obj)) {
tonyp@2973 4510 if (_cm->verbose_medium()) {
ysr@777 4511 gclog_or_tty->print_cr("[%d] stolen "PTR_FORMAT" successfully",
ysr@777 4512 _task_id, (void*) obj);
tonyp@2973 4513 }
ysr@777 4514
ysr@777 4515 statsOnly( ++_steals );
ysr@777 4516
tonyp@1458 4517 assert(_nextMarkBitMap->isMarked((HeapWord*) obj),
tonyp@1458 4518 "any stolen object should be marked");
ysr@777 4519 scan_object(obj);
ysr@777 4520
ysr@777 4521 // And since we're towards the end, let's totally drain the
ysr@777 4522 // local queue and global stack.
ysr@777 4523 drain_local_queue(false);
ysr@777 4524 drain_global_stack(false);
ysr@777 4525 } else {
ysr@777 4526 break;
ysr@777 4527 }
ysr@777 4528 }
ysr@777 4529 }
ysr@777 4530
tonyp@2848 4531 // If we are about to wrap up and go into termination, check if we
tonyp@2848 4532 // should raise the overflow flag.
tonyp@2848 4533 if (do_termination && !has_aborted()) {
tonyp@2848 4534 if (_cm->force_overflow()->should_force()) {
tonyp@2848 4535 _cm->set_has_overflown();
tonyp@2848 4536 regular_clock_call();
tonyp@2848 4537 }
tonyp@2848 4538 }
tonyp@2848 4539
ysr@777 4540 // We still haven't aborted. Now, let's try to get into the
ysr@777 4541 // termination protocol.
johnc@2494 4542 if (do_termination && !has_aborted()) {
ysr@777 4543 // We cannot check whether the global stack is empty, since other
iveresov@778 4544 // tasks might be concurrently pushing objects on it. We also cannot
iveresov@778 4545 // check if the region stack is empty because if a thread is aborting
iveresov@778 4546 // it can push a partially done region back.
tonyp@1458 4547 // Separated the asserts so that we know which one fires.
tonyp@1458 4548 assert(_cm->out_of_regions(), "only way to reach here");
tonyp@1458 4549 assert(_task_queue->size() == 0, "only way to reach here");
ysr@777 4550
tonyp@2973 4551 if (_cm->verbose_low()) {
ysr@777 4552 gclog_or_tty->print_cr("[%d] starting termination protocol", _task_id);
tonyp@2973 4553 }
ysr@777 4554
ysr@777 4555 _termination_start_time_ms = os::elapsedVTime() * 1000.0;
ysr@777 4556 // The CMTask class also extends the TerminatorTerminator class,
ysr@777 4557 // hence its should_exit_termination() method will also decide
ysr@777 4558 // whether to exit the termination protocol or not.
ysr@777 4559 bool finished = _cm->terminator()->offer_termination(this);
ysr@777 4560 double termination_end_time_ms = os::elapsedVTime() * 1000.0;
ysr@777 4561 _termination_time_ms +=
ysr@777 4562 termination_end_time_ms - _termination_start_time_ms;
ysr@777 4563
ysr@777 4564 if (finished) {
ysr@777 4565 // We're all done.
ysr@777 4566
ysr@777 4567 if (_task_id == 0) {
ysr@777 4568 // let's allow task 0 to do this
ysr@777 4569 if (concurrent()) {
tonyp@1458 4570 assert(_cm->concurrent_marking_in_progress(), "invariant");
ysr@777 4571 // we need to set this to false before the next
ysr@777 4572 // safepoint. This way we ensure that the marking phase
ysr@777 4573 // doesn't observe any more heap expansions.
ysr@777 4574 _cm->clear_concurrent_marking_in_progress();
ysr@777 4575 }
ysr@777 4576 }
ysr@777 4577
ysr@777 4578 // We can now guarantee that the global stack is empty, since
tonyp@1458 4579 // all other tasks have finished. We separated the guarantees so
tonyp@1458 4580 // that, if a condition is false, we can immediately find out
tonyp@1458 4581 // which one.
tonyp@1458 4582 guarantee(_cm->out_of_regions(), "only way to reach here");
johnc@2190 4583 guarantee(_aborted_region.is_empty(), "only way to reach here");
tonyp@1458 4584 guarantee(_cm->region_stack_empty(), "only way to reach here");
tonyp@1458 4585 guarantee(_cm->mark_stack_empty(), "only way to reach here");
tonyp@1458 4586 guarantee(_task_queue->size() == 0, "only way to reach here");
tonyp@1458 4587 guarantee(!_cm->has_overflown(), "only way to reach here");
tonyp@1458 4588 guarantee(!_cm->mark_stack_overflow(), "only way to reach here");
tonyp@1458 4589 guarantee(!_cm->region_stack_overflow(), "only way to reach here");
ysr@777 4590
tonyp@2973 4591 if (_cm->verbose_low()) {
ysr@777 4592 gclog_or_tty->print_cr("[%d] all tasks terminated", _task_id);
tonyp@2973 4593 }
ysr@777 4594 } else {
ysr@777 4595       // Apparently there's more work to do. Let's abort this task. Its
ysr@777 4596       // caller will restart it and we can hopefully find more things to do.
ysr@777 4597
tonyp@2973 4598 if (_cm->verbose_low()) {
tonyp@2973 4599 gclog_or_tty->print_cr("[%d] apparently there is more work to do",
tonyp@2973 4600 _task_id);
tonyp@2973 4601 }
ysr@777 4602
ysr@777 4603 set_has_aborted();
ysr@777 4604 statsOnly( ++_aborted_termination );
ysr@777 4605 }
ysr@777 4606 }
ysr@777 4607
ysr@777 4608 // Mainly for debugging purposes to make sure that a pointer to the
ysr@777 4609   // closure which was allocated in this frame doesn't
ysr@777 4610 // escape it by accident.
tonyp@2968 4611 set_cm_oop_closure(NULL);
ysr@777 4612 double end_time_ms = os::elapsedVTime() * 1000.0;
ysr@777 4613 double elapsed_time_ms = end_time_ms - _start_time_ms;
ysr@777 4614 // Update the step history.
ysr@777 4615 _step_times_ms.add(elapsed_time_ms);
ysr@777 4616
ysr@777 4617 if (has_aborted()) {
ysr@777 4618 // The task was aborted for some reason.
ysr@777 4619
ysr@777 4620 statsOnly( ++_aborted );
ysr@777 4621
johnc@2494 4622 if (_has_timed_out) {
ysr@777 4623 double diff_ms = elapsed_time_ms - _time_target_ms;
ysr@777 4624 // Keep statistics of how well we did with respect to hitting
ysr@777 4625 // our target only if we actually timed out (if we aborted for
ysr@777 4626 // other reasons, then the results might get skewed).
ysr@777 4627 _marking_step_diffs_ms.add(diff_ms);
ysr@777 4628 }
ysr@777 4629
ysr@777 4630 if (_cm->has_overflown()) {
ysr@777 4631 // This is the interesting one. We aborted because a global
ysr@777 4632 // overflow was raised. This means we have to restart the
ysr@777 4633 // marking phase and start iterating over regions. However, in
ysr@777 4634 // order to do this we have to make sure that all tasks stop
ysr@777 4635 // what they are doing and re-initialise in a safe manner. We
ysr@777 4636 // will achieve this with the use of two barrier sync points.
ysr@777 4637
tonyp@2973 4638 if (_cm->verbose_low()) {
ysr@777 4639 gclog_or_tty->print_cr("[%d] detected overflow", _task_id);
tonyp@2973 4640 }
ysr@777 4641
ysr@777 4642 _cm->enter_first_sync_barrier(_task_id);
ysr@777 4643 // When we exit this sync barrier we know that all tasks have
ysr@777 4644 // stopped doing marking work. So, it's now safe to
ysr@777 4645 // re-initialise our data structures. At the end of this method,
ysr@777 4646 // task 0 will clear the global data structures.
ysr@777 4647
ysr@777 4648 statsOnly( ++_aborted_overflow );
ysr@777 4649
ysr@777 4650 // We clear the local state of this task...
ysr@777 4651 clear_region_fields();
ysr@777 4652
ysr@777 4653 // ...and enter the second barrier.
ysr@777 4654 _cm->enter_second_sync_barrier(_task_id);
ysr@777 4655     // At this point everything has been re-initialised and we're
ysr@777 4656 // ready to restart.
ysr@777 4657 }
ysr@777 4658
ysr@777 4659 if (_cm->verbose_low()) {
ysr@777 4660 gclog_or_tty->print_cr("[%d] <<<<<<<<<< ABORTING, target = %1.2lfms, "
ysr@777 4661 "elapsed = %1.2lfms <<<<<<<<<<",
ysr@777 4662 _task_id, _time_target_ms, elapsed_time_ms);
tonyp@2973 4663 if (_cm->has_aborted()) {
ysr@777 4664 gclog_or_tty->print_cr("[%d] ========== MARKING ABORTED ==========",
ysr@777 4665 _task_id);
tonyp@2973 4666 }
ysr@777 4667 }
ysr@777 4668 } else {
tonyp@2973 4669 if (_cm->verbose_low()) {
ysr@777 4670 gclog_or_tty->print_cr("[%d] <<<<<<<<<< FINISHED, target = %1.2lfms, "
ysr@777 4671 "elapsed = %1.2lfms <<<<<<<<<<",
ysr@777 4672 _task_id, _time_target_ms, elapsed_time_ms);
tonyp@2973 4673 }
ysr@777 4674 }
ysr@777 4675
ysr@777 4676 _claimed = false;
ysr@777 4677 }
ysr@777 4678
ysr@777 4679 CMTask::CMTask(int task_id,
ysr@777 4680 ConcurrentMark* cm,
ysr@777 4681 CMTaskQueue* task_queue,
ysr@777 4682 CMTaskQueueSet* task_queues)
ysr@777 4683 : _g1h(G1CollectedHeap::heap()),
ysr@777 4684 _task_id(task_id), _cm(cm),
ysr@777 4685 _claimed(false),
ysr@777 4686 _nextMarkBitMap(NULL), _hash_seed(17),
ysr@777 4687 _task_queue(task_queue),
ysr@777 4688 _task_queues(task_queues),
tonyp@2968 4689 _cm_oop_closure(NULL),
johnc@2190 4690 _aborted_region(MemRegion()) {
tonyp@1458 4691 guarantee(task_queue != NULL, "invariant");
tonyp@1458 4692 guarantee(task_queues != NULL, "invariant");
ysr@777 4693
ysr@777 4694 statsOnly( _clock_due_to_scanning = 0;
ysr@777 4695 _clock_due_to_marking = 0 );
ysr@777 4696
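  // Seed the step-time diff predictor with a small value so that the
  // very first prediction is not derived from an empty sample set.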
ysr@777 4697 _marking_step_diffs_ms.add(0.5);
ysr@777 4698 }
tonyp@2717 4699
tonyp@2717 4700 // These are formatting macros that are used below to ensure
tonyp@2717 4701 // consistent formatting. The *_H_* versions are used to format the
tonyp@2717 4702 // header for a particular value and they should be kept consistent
tonyp@2717 4703 // with the corresponding macro. Also note that most of the macros add
tonyp@2717 4704 // the necessary white space (as a prefix) which makes them a bit
tonyp@2717 4705 // easier to compose.
tonyp@2717 4706
tonyp@2717 4707 // All the output lines are prefixed with this string to be able to
tonyp@2717 4708 // identify them easily in a large log file.
tonyp@2717 4709 #define G1PPRL_LINE_PREFIX "###"
tonyp@2717 4710
tonyp@2717 4711 #define G1PPRL_ADDR_BASE_FORMAT " "PTR_FORMAT"-"PTR_FORMAT
tonyp@2717 4712 #ifdef _LP64
tonyp@2717 4713 #define G1PPRL_ADDR_BASE_H_FORMAT " %37s"
tonyp@2717 4714 #else // _LP64
tonyp@2717 4715 #define G1PPRL_ADDR_BASE_H_FORMAT " %21s"
tonyp@2717 4716 #endif // _LP64
tonyp@2717 4717
tonyp@2717 4718 // For per-region info
tonyp@2717 4719 #define G1PPRL_TYPE_FORMAT " %-4s"
tonyp@2717 4720 #define G1PPRL_TYPE_H_FORMAT " %4s"
tonyp@2717 4721 #define G1PPRL_BYTE_FORMAT " "SIZE_FORMAT_W(9)
tonyp@2717 4722 #define G1PPRL_BYTE_H_FORMAT " %9s"
tonyp@2717 4723 #define G1PPRL_DOUBLE_FORMAT " %14.1f"
tonyp@2717 4724 #define G1PPRL_DOUBLE_H_FORMAT " %14s"
tonyp@2717 4725
tonyp@2717 4726 // For summary info
tonyp@2717 4727 #define G1PPRL_SUM_ADDR_FORMAT(tag) " "tag":"G1PPRL_ADDR_BASE_FORMAT
tonyp@2717 4728 #define G1PPRL_SUM_BYTE_FORMAT(tag) " "tag": "SIZE_FORMAT
tonyp@2717 4729 #define G1PPRL_SUM_MB_FORMAT(tag) " "tag": %1.2f MB"
tonyp@2717 4730 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag)" / %1.2f %%"
tonyp@2717 4731
tonyp@2717 4732 G1PrintRegionLivenessInfoClosure::
tonyp@2717 4733 G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name)
tonyp@2717 4734 : _out(out),
tonyp@2717 4735 _total_used_bytes(0), _total_capacity_bytes(0),
tonyp@2717 4736 _total_prev_live_bytes(0), _total_next_live_bytes(0),
tonyp@2717 4737 _hum_used_bytes(0), _hum_capacity_bytes(0),
tonyp@2717 4738 _hum_prev_live_bytes(0), _hum_next_live_bytes(0) {
tonyp@2717 4739 G1CollectedHeap* g1h = G1CollectedHeap::heap();
tonyp@2717 4740 MemRegion g1_committed = g1h->g1_committed();
tonyp@2717 4741 MemRegion g1_reserved = g1h->g1_reserved();
tonyp@2717 4742 double now = os::elapsedTime();
tonyp@2717 4743
tonyp@2717 4744 // Print the header of the output.
tonyp@2717 4745 _out->cr();
tonyp@2717 4746 _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
tonyp@2717 4747 _out->print_cr(G1PPRL_LINE_PREFIX" HEAP"
tonyp@2717 4748 G1PPRL_SUM_ADDR_FORMAT("committed")
tonyp@2717 4749 G1PPRL_SUM_ADDR_FORMAT("reserved")
tonyp@2717 4750 G1PPRL_SUM_BYTE_FORMAT("region-size"),
tonyp@2717 4751 g1_committed.start(), g1_committed.end(),
tonyp@2717 4752 g1_reserved.start(), g1_reserved.end(),
johnc@3182 4753 HeapRegion::GrainBytes);
tonyp@2717 4754 _out->print_cr(G1PPRL_LINE_PREFIX);
tonyp@2717 4755 _out->print_cr(G1PPRL_LINE_PREFIX
tonyp@2717 4756 G1PPRL_TYPE_H_FORMAT
tonyp@2717 4757 G1PPRL_ADDR_BASE_H_FORMAT
tonyp@2717 4758 G1PPRL_BYTE_H_FORMAT
tonyp@2717 4759 G1PPRL_BYTE_H_FORMAT
tonyp@2717 4760 G1PPRL_BYTE_H_FORMAT
tonyp@2717 4761 G1PPRL_DOUBLE_H_FORMAT,
tonyp@2717 4762 "type", "address-range",
tonyp@2717 4763 "used", "prev-live", "next-live", "gc-eff");
johnc@3173 4764 _out->print_cr(G1PPRL_LINE_PREFIX
johnc@3173 4765 G1PPRL_TYPE_H_FORMAT
johnc@3173 4766 G1PPRL_ADDR_BASE_H_FORMAT
johnc@3173 4767 G1PPRL_BYTE_H_FORMAT
johnc@3173 4768 G1PPRL_BYTE_H_FORMAT
johnc@3173 4769 G1PPRL_BYTE_H_FORMAT
johnc@3173 4770 G1PPRL_DOUBLE_H_FORMAT,
johnc@3173 4771 "", "",
johnc@3173 4772 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)");
tonyp@2717 4773 }
tonyp@2717 4774
tonyp@2717 4775 // It takes as a parameter a pointer to one of the _hum_* fields; it
tonyp@2717 4776 // deduces the corresponding value for a region in a humongous region
tonyp@2717 4777 // series (either the region size, or what's left if the _hum_* field
tonyp@2717 4778 // is < the region size) and updates the _hum_* field accordingly.
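// For example, if *hum_bytes holds 2.5 times HeapRegion::GrainBytes,
// three successive calls return GrainBytes, GrainBytes, and half of
// GrainBytes, after which *hum_bytes is zero (illustrative numbers).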
tonyp@2717 4779 size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) {
tonyp@2717 4780 size_t bytes = 0;
tonyp@2717 4781 // The > 0 check is to deal with the prev and next live bytes which
tonyp@2717 4782 // could be 0.
tonyp@2717 4783 if (*hum_bytes > 0) {
johnc@3182 4784 bytes = MIN2(HeapRegion::GrainBytes, *hum_bytes);
tonyp@2717 4785 *hum_bytes -= bytes;
tonyp@2717 4786 }
tonyp@2717 4787 return bytes;
tonyp@2717 4788 }
tonyp@2717 4789
tonyp@2717 4790 // It deduces the values for a region in a humongous region series
tonyp@2717 4791 // from the _hum_* fields and updates those accordingly. It assumes
tonyp@2717 4792 // that the _hum_* fields have already been set up from the "starts
tonyp@2717 4793 // humongous" region and we visit the regions in address order.
tonyp@2717 4794 void G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* used_bytes,
tonyp@2717 4795 size_t* capacity_bytes,
tonyp@2717 4796 size_t* prev_live_bytes,
tonyp@2717 4797 size_t* next_live_bytes) {
tonyp@2717 4798 assert(_hum_used_bytes > 0 && _hum_capacity_bytes > 0, "pre-condition");
tonyp@2717 4799 *used_bytes = get_hum_bytes(&_hum_used_bytes);
tonyp@2717 4800 *capacity_bytes = get_hum_bytes(&_hum_capacity_bytes);
tonyp@2717 4801 *prev_live_bytes = get_hum_bytes(&_hum_prev_live_bytes);
tonyp@2717 4802 *next_live_bytes = get_hum_bytes(&_hum_next_live_bytes);
tonyp@2717 4803 }
tonyp@2717 4804
tonyp@2717 4805 bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
tonyp@2717 4806 const char* type = "";
tonyp@2717 4807 HeapWord* bottom = r->bottom();
tonyp@2717 4808 HeapWord* end = r->end();
tonyp@2717 4809 size_t capacity_bytes = r->capacity();
tonyp@2717 4810 size_t used_bytes = r->used();
tonyp@2717 4811 size_t prev_live_bytes = r->live_bytes();
tonyp@2717 4812 size_t next_live_bytes = r->next_live_bytes();
tonyp@2717 4813 double gc_eff = r->gc_efficiency();
tonyp@2717 4814 if (r->used() == 0) {
tonyp@2717 4815 type = "FREE";
tonyp@2717 4816 } else if (r->is_survivor()) {
tonyp@2717 4817 type = "SURV";
tonyp@2717 4818 } else if (r->is_young()) {
tonyp@2717 4819 type = "EDEN";
tonyp@2717 4820 } else if (r->startsHumongous()) {
tonyp@2717 4821 type = "HUMS";
tonyp@2717 4822
tonyp@2717 4823 assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 &&
tonyp@2717 4824 _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0,
tonyp@2717 4825 "they should have been zeroed after the last time we used them");
tonyp@2717 4826 // Set up the _hum_* fields.
tonyp@2717 4827 _hum_capacity_bytes = capacity_bytes;
tonyp@2717 4828 _hum_used_bytes = used_bytes;
tonyp@2717 4829 _hum_prev_live_bytes = prev_live_bytes;
tonyp@2717 4830 _hum_next_live_bytes = next_live_bytes;
tonyp@2717 4831 get_hum_bytes(&used_bytes, &capacity_bytes,
tonyp@2717 4832 &prev_live_bytes, &next_live_bytes);
tonyp@2717 4833 end = bottom + HeapRegion::GrainWords;
tonyp@2717 4834 } else if (r->continuesHumongous()) {
tonyp@2717 4835 type = "HUMC";
tonyp@2717 4836 get_hum_bytes(&used_bytes, &capacity_bytes,
tonyp@2717 4837 &prev_live_bytes, &next_live_bytes);
tonyp@2717 4838 assert(end == bottom + HeapRegion::GrainWords, "invariant");
tonyp@2717 4839 } else {
tonyp@2717 4840 type = "OLD";
tonyp@2717 4841 }
tonyp@2717 4842
tonyp@2717 4843 _total_used_bytes += used_bytes;
tonyp@2717 4844 _total_capacity_bytes += capacity_bytes;
tonyp@2717 4845 _total_prev_live_bytes += prev_live_bytes;
tonyp@2717 4846 _total_next_live_bytes += next_live_bytes;
tonyp@2717 4847
tonyp@2717 4848 // Print a line for this particular region.
tonyp@2717 4849 _out->print_cr(G1PPRL_LINE_PREFIX
tonyp@2717 4850 G1PPRL_TYPE_FORMAT
tonyp@2717 4851 G1PPRL_ADDR_BASE_FORMAT
tonyp@2717 4852 G1PPRL_BYTE_FORMAT
tonyp@2717 4853 G1PPRL_BYTE_FORMAT
tonyp@2717 4854 G1PPRL_BYTE_FORMAT
tonyp@2717 4855 G1PPRL_DOUBLE_FORMAT,
tonyp@2717 4856 type, bottom, end,
tonyp@2717 4857 used_bytes, prev_live_bytes, next_live_bytes, gc_eff);
tonyp@2717 4858
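  // Returning false keeps the iteration going so that we visit every
  // region in the heap.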
tonyp@2717 4859 return false;
tonyp@2717 4860 }
tonyp@2717 4861
tonyp@2717 4862 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
tonyp@2717 4863 // Print the footer of the output.
tonyp@2717 4864 _out->print_cr(G1PPRL_LINE_PREFIX);
tonyp@2717 4865 _out->print_cr(G1PPRL_LINE_PREFIX
tonyp@2717 4866 " SUMMARY"
tonyp@2717 4867 G1PPRL_SUM_MB_FORMAT("capacity")
tonyp@2717 4868 G1PPRL_SUM_MB_PERC_FORMAT("used")
tonyp@2717 4869 G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
tonyp@2717 4870 G1PPRL_SUM_MB_PERC_FORMAT("next-live"),
tonyp@2717 4871 bytes_to_mb(_total_capacity_bytes),
tonyp@2717 4872 bytes_to_mb(_total_used_bytes),
tonyp@2717 4873 perc(_total_used_bytes, _total_capacity_bytes),
tonyp@2717 4874 bytes_to_mb(_total_prev_live_bytes),
tonyp@2717 4875 perc(_total_prev_live_bytes, _total_capacity_bytes),
tonyp@2717 4876 bytes_to_mb(_total_next_live_bytes),
tonyp@2717 4877 perc(_total_next_live_bytes, _total_capacity_bytes));
tonyp@2717 4878 _out->cr();
tonyp@2717 4879 }
