src/share/vm/gc_implementation/g1/concurrentMark.cpp

author      tonyp
date        Fri, 29 Apr 2011 12:40:49 -0400
changeset   2848:cd8e33b2a8ad
parent      2718:8f1042ff784d
child       2849:063382f9b575
permissions -rw-r--r--

7034139: G1: assert(Thread::current()->is_ConcurrentGC_thread()) failed: only a conc GC thread can call this.
Summary: We were calling STS join and leave during a STW pause and we are not supposed to. I now only call those during the concurrent phase. I also added stress code in the non-product builds to force an overflow (the condition that was uncovering the bug) to make sure it does not happen again.
Reviewed-by: johnc, brutisso

ysr@777 1 /*
tonyp@2469 2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "classfile/symbolTable.hpp"
stefank@2314 27 #include "gc_implementation/g1/concurrentMark.hpp"
stefank@2314 28 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
stefank@2314 29 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
stefank@2314 30 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
stefank@2314 31 #include "gc_implementation/g1/g1RemSet.hpp"
stefank@2314 32 #include "gc_implementation/g1/heapRegionRemSet.hpp"
stefank@2314 33 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
kamg@2445 34 #include "gc_implementation/shared/vmGCOperations.hpp"
stefank@2314 35 #include "memory/genOopClosures.inline.hpp"
stefank@2314 36 #include "memory/referencePolicy.hpp"
stefank@2314 37 #include "memory/resourceArea.hpp"
stefank@2314 38 #include "oops/oop.inline.hpp"
stefank@2314 39 #include "runtime/handles.inline.hpp"
stefank@2314 40 #include "runtime/java.hpp"
ysr@777 41
ysr@777 42 //
ysr@777 43 // CMS Bit Map Wrapper
ysr@777 44
ysr@777 45 CMBitMapRO::CMBitMapRO(ReservedSpace rs, int shifter):
ysr@777 46 _bm((uintptr_t*)NULL,0),
ysr@777 47 _shifter(shifter) {
ysr@777 48 _bmStartWord = (HeapWord*)(rs.base());
ysr@777 49 _bmWordSize = rs.size()/HeapWordSize; // rs.size() is in bytes
ysr@777 50 ReservedSpace brs(ReservedSpace::allocation_align_size_up(
ysr@777 51 (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
ysr@777 52
ysr@777 53 guarantee(brs.is_reserved(), "couldn't allocate CMS bit map");
ysr@777 54 // For now we'll just commit all of the bit map up front.
ysr@777 55 // Later on we'll try to be more parsimonious with swap.
ysr@777 56 guarantee(_virtual_space.initialize(brs, brs.size()),
ysr@777 57 "couldn't reseve backing store for CMS bit map");
ysr@777 58 assert(_virtual_space.committed_size() == brs.size(),
ysr@777 59 "didn't reserve backing store for all of CMS bit map?");
ysr@777 60 _bm.set_map((uintptr_t*)_virtual_space.low());
ysr@777 61 assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
ysr@777 62 _bmWordSize, "inconsistency in bit map sizing");
ysr@777 63 _bm.set_size(_bmWordSize >> _shifter);
ysr@777 64 }
ysr@777 65
ysr@777 66 HeapWord* CMBitMapRO::getNextMarkedWordAddress(HeapWord* addr,
ysr@777 67 HeapWord* limit) const {
ysr@777 68 // First we must round addr *up* to a possible object boundary.
ysr@777 69 addr = (HeapWord*)align_size_up((intptr_t)addr,
ysr@777 70 HeapWordSize << _shifter);
ysr@777 71 size_t addrOffset = heapWordToOffset(addr);
ysr@777 72 if (limit == NULL) limit = _bmStartWord + _bmWordSize;
ysr@777 73 size_t limitOffset = heapWordToOffset(limit);
ysr@777 74 size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
ysr@777 75 HeapWord* nextAddr = offsetToHeapWord(nextOffset);
ysr@777 76 assert(nextAddr >= addr, "get_next_one postcondition");
ysr@777 77 assert(nextAddr == limit || isMarked(nextAddr),
ysr@777 78 "get_next_one postcondition");
ysr@777 79 return nextAddr;
ysr@777 80 }
ysr@777 81
ysr@777 82 HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(HeapWord* addr,
ysr@777 83 HeapWord* limit) const {
ysr@777 84 size_t addrOffset = heapWordToOffset(addr);
ysr@777 85 if (limit == NULL) limit = _bmStartWord + _bmWordSize;
ysr@777 86 size_t limitOffset = heapWordToOffset(limit);
ysr@777 87 size_t nextOffset = _bm.get_next_zero_offset(addrOffset, limitOffset);
ysr@777 88 HeapWord* nextAddr = offsetToHeapWord(nextOffset);
ysr@777 89 assert(nextAddr >= addr, "get_next_zero postcondition");
ysr@777 90 assert(nextAddr == limit || !isMarked(nextAddr),
ysr@777 91 "get_next_zero postcondition");
ysr@777 92 return nextAddr;
ysr@777 93 }
ysr@777 94
ysr@777 95 int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const {
ysr@777 96 assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
ysr@777 97 return (int) (diff >> _shifter);
ysr@777 98 }
ysr@777 99
ysr@777 100 bool CMBitMapRO::iterate(BitMapClosure* cl, MemRegion mr) {
ysr@777 101 HeapWord* left = MAX2(_bmStartWord, mr.start());
ysr@777 102 HeapWord* right = MIN2(_bmStartWord + _bmWordSize, mr.end());
ysr@777 103 if (right > left) {
ysr@777 104 // Right-open interval [leftOffset, rightOffset).
ysr@777 105 return _bm.iterate(cl, heapWordToOffset(left), heapWordToOffset(right));
ysr@777 106 } else {
ysr@777 107 return true;
ysr@777 108 }
ysr@777 109 }
ysr@777 110
ysr@777 111 void CMBitMapRO::mostly_disjoint_range_union(BitMap* from_bitmap,
ysr@777 112 size_t from_start_index,
ysr@777 113 HeapWord* to_start_word,
ysr@777 114 size_t word_num) {
ysr@777 115 _bm.mostly_disjoint_range_union(from_bitmap,
ysr@777 116 from_start_index,
ysr@777 117 heapWordToOffset(to_start_word),
ysr@777 118 word_num);
ysr@777 119 }
ysr@777 120
ysr@777 121 #ifndef PRODUCT
ysr@777 122 bool CMBitMapRO::covers(ReservedSpace rs) const {
ysr@777 123 // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
kvn@1080 124 assert(((size_t)_bm.size() * (size_t)(1 << _shifter)) == _bmWordSize,
ysr@777 125 "size inconsistency");
ysr@777 126 return _bmStartWord == (HeapWord*)(rs.base()) &&
ysr@777 127 _bmWordSize == rs.size()>>LogHeapWordSize;
ysr@777 128 }
ysr@777 129 #endif
ysr@777 130
ysr@777 131 void CMBitMap::clearAll() {
ysr@777 132 _bm.clear();
ysr@777 133 return;
ysr@777 134 }
ysr@777 135
ysr@777 136 void CMBitMap::markRange(MemRegion mr) {
ysr@777 137 mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
ysr@777 138 assert(!mr.is_empty(), "unexpected empty region");
ysr@777 139 assert((offsetToHeapWord(heapWordToOffset(mr.end())) ==
ysr@777 140 ((HeapWord *) mr.end())),
ysr@777 141 "markRange memory region end is not card aligned");
ysr@777 142 // convert address range into offset range
ysr@777 143 _bm.at_put_range(heapWordToOffset(mr.start()),
ysr@777 144 heapWordToOffset(mr.end()), true);
ysr@777 145 }
ysr@777 146
ysr@777 147 void CMBitMap::clearRange(MemRegion mr) {
ysr@777 148 mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
ysr@777 149 assert(!mr.is_empty(), "unexpected empty region");
ysr@777 150 // convert address range into offset range
ysr@777 151 _bm.at_put_range(heapWordToOffset(mr.start()),
ysr@777 152 heapWordToOffset(mr.end()), false);
ysr@777 153 }
ysr@777 154
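// Returns the first maximal run of marked words in [addr, end_addr) as
// a MemRegion and clears the corresponding bits in the bitmap.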
ysr@777 155 MemRegion CMBitMap::getAndClearMarkedRegion(HeapWord* addr,
ysr@777 156 HeapWord* end_addr) {
ysr@777 157 HeapWord* start = getNextMarkedWordAddress(addr);
ysr@777 158 start = MIN2(start, end_addr);
ysr@777 159 HeapWord* end = getNextUnmarkedWordAddress(start);
ysr@777 160 end = MIN2(end, end_addr);
ysr@777 161 assert(start <= end, "Consistency check");
ysr@777 162 MemRegion mr(start, end);
ysr@777 163 if (!mr.is_empty()) {
ysr@777 164 clearRange(mr);
ysr@777 165 }
ysr@777 166 return mr;
ysr@777 167 }
ysr@777 168
ysr@777 169 CMMarkStack::CMMarkStack(ConcurrentMark* cm) :
ysr@777 170 _base(NULL), _cm(cm)
ysr@777 171 #ifdef ASSERT
ysr@777 172 , _drain_in_progress(false)
ysr@777 173 , _drain_in_progress_yields(false)
ysr@777 174 #endif
ysr@777 175 {}
ysr@777 176
ysr@777 177 void CMMarkStack::allocate(size_t size) {
ysr@777 178 _base = NEW_C_HEAP_ARRAY(oop, size);
ysr@777 179 if (_base == NULL)
ysr@777 180 vm_exit_during_initialization("Failed to allocate "
ysr@777 181 "CM region mark stack");
ysr@777 182 _index = 0;
ysr@777 183 // QQQQ cast ...
ysr@777 184 _capacity = (jint) size;
ysr@777 185 _oops_do_bound = -1;
ysr@777 186 NOT_PRODUCT(_max_depth = 0);
ysr@777 187 }
ysr@777 188
ysr@777 189 CMMarkStack::~CMMarkStack() {
ysr@777 190 if (_base != NULL) FREE_C_HEAP_ARRAY(oop, _base);
ysr@777 191 }
ysr@777 192
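// Multiple-writer push: claim a slot by CAS-ing _index forward, then
// store the oop into the claimed slot. Sets _overflow instead of
// blocking when the stack is full.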
ysr@777 193 void CMMarkStack::par_push(oop ptr) {
ysr@777 194 while (true) {
ysr@777 195 if (isFull()) {
ysr@777 196 _overflow = true;
ysr@777 197 return;
ysr@777 198 }
ysr@777 199 // Otherwise...
ysr@777 200 jint index = _index;
ysr@777 201 jint next_index = index+1;
ysr@777 202 jint res = Atomic::cmpxchg(next_index, &_index, index);
ysr@777 203 if (res == index) {
ysr@777 204 _base[index] = ptr;
ysr@777 205 // Note that we don't maintain this atomically. We could, but it
ysr@777 206 // doesn't seem necessary.
ysr@777 207 NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
ysr@777 208 return;
ysr@777 209 }
ysr@777 210 // Otherwise, we need to try again.
ysr@777 211 }
ysr@777 212 }
ysr@777 213
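// As par_push(), but claims room for all n entries of ptr_arr with a
// single CAS on _index.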
ysr@777 214 void CMMarkStack::par_adjoin_arr(oop* ptr_arr, int n) {
ysr@777 215 while (true) {
ysr@777 216 if (isFull()) {
ysr@777 217 _overflow = true;
ysr@777 218 return;
ysr@777 219 }
ysr@777 220 // Otherwise...
ysr@777 221 jint index = _index;
ysr@777 222 jint next_index = index + n;
ysr@777 223 if (next_index > _capacity) {
ysr@777 224 _overflow = true;
ysr@777 225 return;
ysr@777 226 }
ysr@777 227 jint res = Atomic::cmpxchg(next_index, &_index, index);
ysr@777 228 if (res == index) {
ysr@777 229 for (int i = 0; i < n; i++) {
ysr@777 230 int ind = index + i;
ysr@777 231 assert(ind < _capacity, "By overflow test above.");
ysr@777 232 _base[ind] = ptr_arr[i];
ysr@777 233 }
ysr@777 234 NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
ysr@777 235 return;
ysr@777 236 }
ysr@777 237 // Otherwise, we need to try again.
ysr@777 238 }
ysr@777 239 }
ysr@777 240
ysr@777 241
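// Pushes the n entries of ptr_arr while holding ParGCRareEvent_lock,
// setting _overflow if there is not enough room left.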
ysr@777 242 void CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
ysr@777 243 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
ysr@777 244 jint start = _index;
ysr@777 245 jint next_index = start + n;
ysr@777 246 if (next_index > _capacity) {
ysr@777 247 _overflow = true;
ysr@777 248 return;
ysr@777 249 }
ysr@777 250 // Otherwise.
ysr@777 251 _index = next_index;
ysr@777 252 for (int i = 0; i < n; i++) {
ysr@777 253 int ind = start + i;
tonyp@1458 254 assert(ind < _capacity, "By overflow test above.");
ysr@777 255 _base[ind] = ptr_arr[i];
ysr@777 256 }
ysr@777 257 }
ysr@777 258
ysr@777 259
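// Pops up to max entries into ptr_arr while holding
// ParGCRareEvent_lock; stores the number actually popped in *n and
// returns false only if the stack was empty.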
ysr@777 260 bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
ysr@777 261 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
ysr@777 262 jint index = _index;
ysr@777 263 if (index == 0) {
ysr@777 264 *n = 0;
ysr@777 265 return false;
ysr@777 266 } else {
ysr@777 267 int k = MIN2(max, index);
ysr@777 268 jint new_ind = index - k;
ysr@777 269 for (int j = 0; j < k; j++) {
ysr@777 270 ptr_arr[j] = _base[new_ind + j];
ysr@777 271 }
ysr@777 272 _index = new_ind;
ysr@777 273 *n = k;
ysr@777 274 return true;
ysr@777 275 }
ysr@777 276 }
ysr@777 277
ysr@777 278
ysr@777 279 CMRegionStack::CMRegionStack() : _base(NULL) {}
ysr@777 280
ysr@777 281 void CMRegionStack::allocate(size_t size) {
ysr@777 282 _base = NEW_C_HEAP_ARRAY(MemRegion, size);
ysr@777 283 if (_base == NULL)
ysr@777 284 vm_exit_during_initialization("Failed to allocate "
ysr@777 285 "CM region mark stack");
ysr@777 286 _index = 0;
ysr@777 287 // QQQQ cast ...
ysr@777 288 _capacity = (jint) size;
ysr@777 289 }
ysr@777 290
ysr@777 291 CMRegionStack::~CMRegionStack() {
ysr@777 292 if (_base != NULL) FREE_C_HEAP_ARRAY(MemRegion, _base);
ysr@777 293 }
ysr@777 294
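// Lock-free push onto the region stack. Should only be called in
// tandem with the other lock-free operations on the stack.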
johnc@2190 295 void CMRegionStack::push_lock_free(MemRegion mr) {
ysr@777 296 assert(mr.word_size() > 0, "Precondition");
ysr@777 297 while (true) {
johnc@2190 298 jint index = _index;
johnc@2190 299
johnc@2190 300 if (index >= _capacity) {
ysr@777 301 _overflow = true;
ysr@777 302 return;
ysr@777 303 }
ysr@777 304 // Otherwise...
ysr@777 305 jint next_index = index+1;
ysr@777 306 jint res = Atomic::cmpxchg(next_index, &_index, index);
ysr@777 307 if (res == index) {
ysr@777 308 _base[index] = mr;
ysr@777 309 return;
ysr@777 310 }
ysr@777 311 // Otherwise, we need to try again.
ysr@777 312 }
ysr@777 313 }
ysr@777 314
johnc@2190 315 // Lock-free pop of the region stack. Called during the concurrent
johnc@2190 316 // marking / remark phases. Should only be called in tandem with
johnc@2190 317 // other lock-free pops.
johnc@2190 318 MemRegion CMRegionStack::pop_lock_free() {
ysr@777 319 while (true) {
ysr@777 320 jint index = _index;
ysr@777 321
ysr@777 322 if (index == 0) {
ysr@777 323 return MemRegion();
ysr@777 324 }
johnc@2190 325 // Otherwise...
ysr@777 326 jint next_index = index-1;
ysr@777 327 jint res = Atomic::cmpxchg(next_index, &_index, index);
ysr@777 328 if (res == index) {
ysr@777 329 MemRegion mr = _base[next_index];
ysr@777 330 if (mr.start() != NULL) {
tonyp@1458 331 assert(mr.end() != NULL, "invariant");
tonyp@1458 332 assert(mr.word_size() > 0, "invariant");
ysr@777 333 return mr;
ysr@777 334 } else {
ysr@777 335 // that entry was invalidated... let's skip it
tonyp@1458 336 assert(mr.end() == NULL, "invariant");
ysr@777 337 }
ysr@777 338 }
ysr@777 339 // Otherwise, we need to try again.
ysr@777 340 }
ysr@777 341 }
johnc@2190 342
johnc@2190 343 #if 0
johnc@2190 344 // The routines that manipulate the region stack with a lock are
johnc@2190 345 // not currently used. They should be retained, however, as a
johnc@2190 346 // diagnostic aid.
tonyp@1793 347
tonyp@1793 348 void CMRegionStack::push_with_lock(MemRegion mr) {
tonyp@1793 349 assert(mr.word_size() > 0, "Precondition");
tonyp@1793 350 MutexLockerEx x(CMRegionStack_lock, Mutex::_no_safepoint_check_flag);
tonyp@1793 351
tonyp@1793 352 if (isFull()) {
tonyp@1793 353 _overflow = true;
tonyp@1793 354 return;
tonyp@1793 355 }
tonyp@1793 356
tonyp@1793 357 _base[_index] = mr;
tonyp@1793 358 _index += 1;
tonyp@1793 359 }
tonyp@1793 360
tonyp@1793 361 MemRegion CMRegionStack::pop_with_lock() {
tonyp@1793 362 MutexLockerEx x(CMRegionStack_lock, Mutex::_no_safepoint_check_flag);
tonyp@1793 363
tonyp@1793 364 while (true) {
tonyp@1793 365 if (_index == 0) {
tonyp@1793 366 return MemRegion();
tonyp@1793 367 }
tonyp@1793 368 _index -= 1;
tonyp@1793 369
tonyp@1793 370 MemRegion mr = _base[_index];
tonyp@1793 371 if (mr.start() != NULL) {
tonyp@1793 372 assert(mr.end() != NULL, "invariant");
tonyp@1793 373 assert(mr.word_size() > 0, "invariant");
tonyp@1793 374 return mr;
tonyp@1793 375 } else {
tonyp@1793 376 // that entry was invalidated... let's skip it
tonyp@1793 377 assert(mr.end() == NULL, "invariant");
tonyp@1793 378 }
tonyp@1793 379 }
tonyp@1793 380 }
johnc@2190 381 #endif
ysr@777 382
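// Walks the region stack entries up to _oops_do_bound and replaces any
// entry that points into the collection set with an empty MemRegion,
// returning true if at least one entry was invalidated.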
ysr@777 383 bool CMRegionStack::invalidate_entries_into_cset() {
ysr@777 384 bool result = false;
ysr@777 385 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@777 386 for (int i = 0; i < _oops_do_bound; ++i) {
ysr@777 387 MemRegion mr = _base[i];
ysr@777 388 if (mr.start() != NULL) {
tonyp@1458 389 assert(mr.end() != NULL, "invariant");
tonyp@1458 390 assert(mr.word_size() > 0, "invariant");
ysr@777 391 HeapRegion* hr = g1h->heap_region_containing(mr.start());
tonyp@1458 392 assert(hr != NULL, "invariant");
ysr@777 393 if (hr->in_collection_set()) {
ysr@777 394 // The region points into the collection set
ysr@777 395 _base[i] = MemRegion();
ysr@777 396 result = true;
ysr@777 397 }
ysr@777 398 } else {
ysr@777 399 // that entry was invalidated... let's skip it
tonyp@1458 400 assert(mr.end() == NULL, "invariant");
ysr@777 401 }
ysr@777 402 }
ysr@777 403 return result;
ysr@777 404 }
ysr@777 405
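// Drains the mark stack, applying cl to each entry popped; if
// yield_after is true, offers to yield after each entry and returns
// false if the drain was interrupted by a yield.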
ysr@777 406 template<class OopClosureClass>
ysr@777 407 bool CMMarkStack::drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after) {
ysr@777 408 assert(!_drain_in_progress || !_drain_in_progress_yields || yield_after
ysr@777 409 || SafepointSynchronize::is_at_safepoint(),
ysr@777 410 "Drain recursion must be yield-safe.");
ysr@777 411 bool res = true;
ysr@777 412 debug_only(_drain_in_progress = true);
ysr@777 413 debug_only(_drain_in_progress_yields = yield_after);
ysr@777 414 while (!isEmpty()) {
ysr@777 415 oop newOop = pop();
ysr@777 416 assert(G1CollectedHeap::heap()->is_in_reserved(newOop), "Bad pop");
ysr@777 417 assert(newOop->is_oop(), "Expected an oop");
ysr@777 418 assert(bm == NULL || bm->isMarked((HeapWord*)newOop),
ysr@777 419 "only grey objects on this stack");
ysr@777 420 // iterate over the oops in this oop, marking and pushing
ysr@777 421 // the ones in CMS generation.
ysr@777 422 newOop->oop_iterate(cl);
ysr@777 423 if (yield_after && _cm->do_yield_check()) {
ysr@777 424 res = false; break;
ysr@777 425 }
ysr@777 426 }
ysr@777 427 debug_only(_drain_in_progress = false);
ysr@777 428 return res;
ysr@777 429 }
ysr@777 430
ysr@777 431 void CMMarkStack::oops_do(OopClosure* f) {
ysr@777 432 if (_index == 0) return;
ysr@777 433 assert(_oops_do_bound != -1 && _oops_do_bound <= _index,
ysr@777 434 "Bound must be set.");
ysr@777 435 for (int i = 0; i < _oops_do_bound; i++) {
ysr@777 436 f->do_oop(&_base[i]);
ysr@777 437 }
ysr@777 438 _oops_do_bound = -1;
ysr@777 439 }
ysr@777 440
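// An object is not yet marked if is_obj_ill() reports it, or if it is
// in the perm gen and unmarked in the next mark bitmap.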
ysr@777 441 bool ConcurrentMark::not_yet_marked(oop obj) const {
ysr@777 442 return (_g1h->is_obj_ill(obj)
ysr@777 443 || (_g1h->is_in_permanent(obj)
ysr@777 444 && !nextMarkBitMap()->isMarked((HeapWord*)obj)));
ysr@777 445 }
ysr@777 446
ysr@777 447 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
ysr@777 448 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
ysr@777 449 #endif // _MSC_VER
ysr@777 450
ysr@777 451 ConcurrentMark::ConcurrentMark(ReservedSpace rs,
ysr@777 452 int max_regions) :
ysr@777 453 _markBitMap1(rs, MinObjAlignment - 1),
ysr@777 454 _markBitMap2(rs, MinObjAlignment - 1),
ysr@777 455
ysr@777 456 _parallel_marking_threads(0),
ysr@777 457 _sleep_factor(0.0),
ysr@777 458 _marking_task_overhead(1.0),
ysr@777 459 _cleanup_sleep_factor(0.0),
ysr@777 460 _cleanup_task_overhead(1.0),
tonyp@2472 461 _cleanup_list("Cleanup List"),
ysr@777 462 _region_bm(max_regions, false /* in_resource_area*/),
ysr@777 463 _card_bm((rs.size() + CardTableModRefBS::card_size - 1) >>
ysr@777 464 CardTableModRefBS::card_shift,
ysr@777 465 false /* in_resource_area*/),
ysr@777 466 _prevMarkBitMap(&_markBitMap1),
ysr@777 467 _nextMarkBitMap(&_markBitMap2),
ysr@777 468 _at_least_one_mark_complete(false),
ysr@777 469
ysr@777 470 _markStack(this),
ysr@777 471 _regionStack(),
ysr@777 472 // _finger set in set_non_marking_state
ysr@777 473
ysr@777 474 _max_task_num(MAX2(ParallelGCThreads, (size_t)1)),
ysr@777 475 // _active_tasks set in set_non_marking_state
ysr@777 476 // _tasks set inside the constructor
ysr@777 477 _task_queues(new CMTaskQueueSet((int) _max_task_num)),
ysr@777 478 _terminator(ParallelTaskTerminator((int) _max_task_num, _task_queues)),
ysr@777 479
ysr@777 480 _has_overflown(false),
ysr@777 481 _concurrent(false),
tonyp@1054 482 _has_aborted(false),
tonyp@1054 483 _restart_for_overflow(false),
tonyp@1054 484 _concurrent_marking_in_progress(false),
tonyp@1054 485 _should_gray_objects(false),
ysr@777 486
ysr@777 487 // _verbose_level set below
ysr@777 488
ysr@777 489 _init_times(),
ysr@777 490 _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
ysr@777 491 _cleanup_times(),
ysr@777 492 _total_counting_time(0.0),
ysr@777 493 _total_rs_scrub_time(0.0),
ysr@777 494
tonyp@1371 495 _parallel_workers(NULL)
ysr@777 496 {
ysr@777 497 CMVerboseLevel verbose_level =
ysr@777 498 (CMVerboseLevel) G1MarkingVerboseLevel;
ysr@777 499 if (verbose_level < no_verbose)
ysr@777 500 verbose_level = no_verbose;
ysr@777 501 if (verbose_level > high_verbose)
ysr@777 502 verbose_level = high_verbose;
ysr@777 503 _verbose_level = verbose_level;
ysr@777 504
ysr@777 505 if (verbose_low())
ysr@777 506 gclog_or_tty->print_cr("[global] init, heap start = "PTR_FORMAT", "
ysr@777 507 "heap end = "PTR_FORMAT, _heap_start, _heap_end);
ysr@777 508
jmasa@1719 509 _markStack.allocate(MarkStackSize);
johnc@1186 510 _regionStack.allocate(G1MarkRegionStackSize);
ysr@777 511
ysr@777 512 // Create & start a ConcurrentMark thread.
ysr@1280 513 _cmThread = new ConcurrentMarkThread(this);
ysr@1280 514 assert(cmThread() != NULL, "CM Thread should have been created");
ysr@1280 515 assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
ysr@1280 516
ysr@777 517 _g1h = G1CollectedHeap::heap();
ysr@777 518 assert(CGC_lock != NULL, "Where's the CGC_lock?");
ysr@777 519 assert(_markBitMap1.covers(rs), "_markBitMap1 inconsistency");
ysr@777 520 assert(_markBitMap2.covers(rs), "_markBitMap2 inconsistency");
ysr@777 521
ysr@777 522 SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
tonyp@1717 523 satb_qs.set_buffer_size(G1SATBBufferSize);
ysr@777 524
ysr@777 525 _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_task_num);
ysr@777 526 _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_task_num);
ysr@777 527
ysr@777 528 // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
ysr@777 529 _active_tasks = _max_task_num;
ysr@777 530 for (int i = 0; i < (int) _max_task_num; ++i) {
ysr@777 531 CMTaskQueue* task_queue = new CMTaskQueue();
ysr@777 532 task_queue->initialize();
ysr@777 533 _task_queues->register_queue(i, task_queue);
ysr@777 534
ysr@777 535 _tasks[i] = new CMTask(i, this, task_queue, _task_queues);
ysr@777 536 _accum_task_vtime[i] = 0.0;
ysr@777 537 }
ysr@777 538
jmasa@1719 539 if (ConcGCThreads > ParallelGCThreads) {
jmasa@1719 540 vm_exit_during_initialization("Can't have more ConcGCThreads "
ysr@777 541 "than ParallelGCThreads.");
ysr@777 542 }
ysr@777 543 if (ParallelGCThreads == 0) {
ysr@777 544 // if we are not running with any parallel GC threads we will not
ysr@777 545 // spawn any marking threads either
ysr@777 546 _parallel_marking_threads = 0;
ysr@777 547 _sleep_factor = 0.0;
ysr@777 548 _marking_task_overhead = 1.0;
ysr@777 549 } else {
jmasa@1719 550 if (ConcGCThreads > 0) {
jmasa@1719 551 // notice that ConcGCThreads overrides G1MarkingOverheadPercent
ysr@777 552 // if both are set
ysr@777 553
jmasa@1719 554 _parallel_marking_threads = ConcGCThreads;
ysr@777 555 _sleep_factor = 0.0;
ysr@777 556 _marking_task_overhead = 1.0;
johnc@1186 557 } else if (G1MarkingOverheadPercent > 0) {
ysr@777 558 // we will calculate the number of parallel marking threads
ysr@777 559 // based on a target overhead with respect to the soft real-time
ysr@777 560 // goal
ysr@777 561
johnc@1186 562 double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
ysr@777 563 double overall_cm_overhead =
johnc@1186 564 (double) MaxGCPauseMillis * marking_overhead /
johnc@1186 565 (double) GCPauseIntervalMillis;
ysr@777 566 double cpu_ratio = 1.0 / (double) os::processor_count();
ysr@777 567 double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
ysr@777 568 double marking_task_overhead =
ysr@777 569 overall_cm_overhead / marking_thread_num *
ysr@777 570 (double) os::processor_count();
ysr@777 571 double sleep_factor =
ysr@777 572 (1.0 - marking_task_overhead) / marking_task_overhead;
ysr@777 573
ysr@777 574 _parallel_marking_threads = (size_t) marking_thread_num;
ysr@777 575 _sleep_factor = sleep_factor;
ysr@777 576 _marking_task_overhead = marking_task_overhead;
ysr@777 577 } else {
ysr@777 578 _parallel_marking_threads = MAX2((ParallelGCThreads + 2) / 4, (size_t)1);
ysr@777 579 _sleep_factor = 0.0;
ysr@777 580 _marking_task_overhead = 1.0;
ysr@777 581 }
ysr@777 582
ysr@777 583 if (parallel_marking_threads() > 1)
ysr@777 584 _cleanup_task_overhead = 1.0;
ysr@777 585 else
ysr@777 586 _cleanup_task_overhead = marking_task_overhead();
ysr@777 587 _cleanup_sleep_factor =
ysr@777 588 (1.0 - cleanup_task_overhead()) / cleanup_task_overhead();
ysr@777 589
ysr@777 590 #if 0
ysr@777 591 gclog_or_tty->print_cr("Marking Threads %d", parallel_marking_threads());
ysr@777 592 gclog_or_tty->print_cr("CM Marking Task Overhead %1.4lf", marking_task_overhead());
ysr@777 593 gclog_or_tty->print_cr("CM Sleep Factor %1.4lf", sleep_factor());
ysr@777 594 gclog_or_tty->print_cr("CL Marking Task Overhead %1.4lf", cleanup_task_overhead());
ysr@777 595 gclog_or_tty->print_cr("CL Sleep Factor %1.4lf", cleanup_sleep_factor());
ysr@777 596 #endif
ysr@777 597
tonyp@1458 598 guarantee(parallel_marking_threads() > 0, "peace of mind");
jmasa@2188 599 _parallel_workers = new FlexibleWorkGang("G1 Parallel Marking Threads",
jmasa@2188 600 (int) _parallel_marking_threads, false, true);
jmasa@2188 601 if (_parallel_workers == NULL) {
ysr@777 602 vm_exit_during_initialization("Failed necessary allocation.");
jmasa@2188 603 } else {
jmasa@2188 604 _parallel_workers->initialize_workers();
jmasa@2188 605 }
ysr@777 606 }
ysr@777 607
ysr@777 608 // so that the call below can read a sensible value
ysr@777 609 _heap_start = (HeapWord*) rs.base();
ysr@777 610 set_non_marking_state();
ysr@777 611 }
ysr@777 612
ysr@777 613 void ConcurrentMark::update_g1_committed(bool force) {
ysr@777 614 // If concurrent marking is not in progress, then we do not need to
ysr@777 615 // update _heap_end. This has a subtle and important
ysr@777 616 // side-effect. Imagine that two evacuation pauses happen between
ysr@777 617 // marking completion and remark. The first one can grow the
ysr@777 618 // heap (hence now the finger is below the heap end). Then, the
ysr@777 619 // second one could unnecessarily push regions on the region
ysr@777 620 // stack. This causes the invariant that the region stack is empty
ysr@777 621 // at the beginning of remark to be false. By ensuring that we do
ysr@777 622 // not observe heap expansions after marking is complete, we avoid
ysr@777 623 // this problem.
ysr@777 624 if (!concurrent_marking_in_progress() && !force)
ysr@777 625 return;
ysr@777 626
ysr@777 627 MemRegion committed = _g1h->g1_committed();
tonyp@1458 628 assert(committed.start() == _heap_start, "start shouldn't change");
ysr@777 629 HeapWord* new_end = committed.end();
ysr@777 630 if (new_end > _heap_end) {
ysr@777 631 // The heap has been expanded.
ysr@777 632
ysr@777 633 _heap_end = new_end;
ysr@777 634 }
ysr@777 635 // Notice that the heap can also shrink. However, this only happens
ysr@777 636 // during a Full GC (at least currently) and the entire marking
ysr@777 637 // phase will bail out and the task will not be restarted. So, let's
ysr@777 638 // do nothing.
ysr@777 639 }
ysr@777 640
ysr@777 641 void ConcurrentMark::reset() {
ysr@777 642 // Starting values for these two. This should be called in a STW
ysr@777 643 // phase. CM will be notified of any future g1_committed expansions;
ysr@777 644 // these will happen at the end of evacuation pauses, when tasks are
ysr@777 645 // inactive.
ysr@777 646 MemRegion committed = _g1h->g1_committed();
ysr@777 647 _heap_start = committed.start();
ysr@777 648 _heap_end = committed.end();
ysr@777 649
tonyp@1458 650 // Separated the asserts so that we know which one fires.
tonyp@1458 651 assert(_heap_start != NULL, "heap bounds should look ok");
tonyp@1458 652 assert(_heap_end != NULL, "heap bounds should look ok");
tonyp@1458 653 assert(_heap_start < _heap_end, "heap bounds should look ok");
ysr@777 654
ysr@777 655 // reset all the marking data structures and any necessary flags
ysr@777 656 clear_marking_state();
ysr@777 657
ysr@777 658 if (verbose_low())
ysr@777 659 gclog_or_tty->print_cr("[global] resetting");
ysr@777 660
ysr@777 661 // We do reset all of them, since different phases will use
ysr@777 662 // different number of active threads. So, it's easiest to have all
ysr@777 663 // of them ready.
johnc@2190 664 for (int i = 0; i < (int) _max_task_num; ++i) {
ysr@777 665 _tasks[i]->reset(_nextMarkBitMap);
johnc@2190 666 }
ysr@777 667
ysr@777 668 // we need this to make sure that the flag is on during the evac
ysr@777 669 // pause with initial mark piggy-backed
ysr@777 670 set_concurrent_marking_in_progress();
ysr@777 671 }
ysr@777 672
ysr@777 673 void ConcurrentMark::set_phase(size_t active_tasks, bool concurrent) {
tonyp@1458 674 assert(active_tasks <= _max_task_num, "we should not have more");
ysr@777 675
ysr@777 676 _active_tasks = active_tasks;
ysr@777 677 // Need to update the three data structures below according to the
ysr@777 678 // number of active threads for this phase.
ysr@777 679 _terminator = ParallelTaskTerminator((int) active_tasks, _task_queues);
ysr@777 680 _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
ysr@777 681 _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
ysr@777 682
ysr@777 683 _concurrent = concurrent;
ysr@777 684 // We propagate this to all tasks, not just the active ones.
ysr@777 685 for (int i = 0; i < (int) _max_task_num; ++i)
ysr@777 686 _tasks[i]->set_concurrent(concurrent);
ysr@777 687
ysr@777 688 if (concurrent) {
ysr@777 689 set_concurrent_marking_in_progress();
ysr@777 690 } else {
ysr@777 691 // We currently assume that the concurrent flag has been set to
ysr@777 692 // false before we start remark. At this point we should also be
ysr@777 693 // in a STW phase.
tonyp@1458 694 assert(!concurrent_marking_in_progress(), "invariant");
tonyp@1458 695 assert(_finger == _heap_end, "only way to get here");
ysr@777 696 update_g1_committed(true);
ysr@777 697 }
ysr@777 698 }
ysr@777 699
ysr@777 700 void ConcurrentMark::set_non_marking_state() {
ysr@777 701 // We set the global marking state to some default values when we're
ysr@777 702 // not doing marking.
ysr@777 703 clear_marking_state();
ysr@777 704 _active_tasks = 0;
ysr@777 705 clear_concurrent_marking_in_progress();
ysr@777 706 }
ysr@777 707
ysr@777 708 ConcurrentMark::~ConcurrentMark() {
ysr@777 709 for (int i = 0; i < (int) _max_task_num; ++i) {
ysr@777 710 delete _task_queues->queue(i);
ysr@777 711 delete _tasks[i];
ysr@777 712 }
ysr@777 713 delete _task_queues;
ysr@777 714 FREE_C_HEAP_ARRAY(CMTask*, _tasks);
ysr@777 715 }
ysr@777 716
ysr@777 717 // This closure is used to mark refs into the g1 generation
ysr@777 718 // from external roots in the CMS bit map.
ysr@777 719 // Called at the first checkpoint.
ysr@777 720 //
ysr@777 721
ysr@777 722 void ConcurrentMark::clearNextBitmap() {
tonyp@1794 723 G1CollectedHeap* g1h = G1CollectedHeap::heap();
tonyp@1794 724 G1CollectorPolicy* g1p = g1h->g1_policy();
tonyp@1794 725
tonyp@1794 726 // Make sure that the concurrent mark thread still appears to be in
tonyp@1794 727 // the current cycle.
tonyp@1794 728 guarantee(cmThread()->during_cycle(), "invariant");
tonyp@1794 729
tonyp@1794 730 // We are finishing up the current cycle by clearing the next
tonyp@1794 731 // marking bitmap and getting it ready for the next cycle. During
tonyp@1794 732 // this time no other cycle can start. So, let's make sure that this
tonyp@1794 733 // is the case.
tonyp@1794 734 guarantee(!g1h->mark_in_progress(), "invariant");
tonyp@1794 735
tonyp@1794 736 // clear the mark bitmap (no grey objects to start with).
tonyp@1794 737 // We need to do this in chunks and offer to yield in between
tonyp@1794 738 // each chunk.
tonyp@1794 739 HeapWord* start = _nextMarkBitMap->startWord();
tonyp@1794 740 HeapWord* end = _nextMarkBitMap->endWord();
tonyp@1794 741 HeapWord* cur = start;
tonyp@1794 742 size_t chunkSize = M;
tonyp@1794 743 while (cur < end) {
tonyp@1794 744 HeapWord* next = cur + chunkSize;
tonyp@1794 745 if (next > end)
tonyp@1794 746 next = end;
tonyp@1794 747 MemRegion mr(cur,next);
tonyp@1794 748 _nextMarkBitMap->clearRange(mr);
tonyp@1794 749 cur = next;
tonyp@1794 750 do_yield_check();
tonyp@1794 751
tonyp@1794 752 // Repeat the asserts from above. We'll do them as asserts here to
tonyp@1794 753 // minimize their overhead on the product. However, we'll have
tonyp@1794 754 // them as guarantees at the beginning / end of the bitmap
tonyp@1794 755 // clearing to get some checking in the product.
tonyp@1794 756 assert(cmThread()->during_cycle(), "invariant");
tonyp@1794 757 assert(!g1h->mark_in_progress(), "invariant");
tonyp@1794 758 }
tonyp@1794 759
tonyp@1794 760 // Repeat the asserts from above.
tonyp@1794 761 guarantee(cmThread()->during_cycle(), "invariant");
tonyp@1794 762 guarantee(!g1h->mark_in_progress(), "invariant");
ysr@777 763 }
ysr@777 764
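// Notes the start of marking in every region, skipping "continues
// humongous" regions.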
ysr@777 765 class NoteStartOfMarkHRClosure: public HeapRegionClosure {
ysr@777 766 public:
ysr@777 767 bool doHeapRegion(HeapRegion* r) {
ysr@777 768 if (!r->continuesHumongous()) {
ysr@777 769 r->note_start_of_marking(true);
ysr@777 770 }
ysr@777 771 return false;
ysr@777 772 }
ysr@777 773 };
ysr@777 774
ysr@777 775 void ConcurrentMark::checkpointRootsInitialPre() {
ysr@777 776 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@777 777 G1CollectorPolicy* g1p = g1h->g1_policy();
ysr@777 778
ysr@777 779 _has_aborted = false;
ysr@777 780
jcoomes@1902 781 #ifndef PRODUCT
tonyp@1479 782 if (G1PrintReachableAtInitialMark) {
tonyp@1823 783 print_reachable("at-cycle-start",
tonyp@1823 784 true /* use_prev_marking */, true /* all */);
tonyp@1479 785 }
jcoomes@1902 786 #endif
ysr@777 787
ysr@777 788 // Initialise marking structures. This has to be done in a STW phase.
ysr@777 789 reset();
ysr@777 790 }
ysr@777 791
ysr@777 792 class CMMarkRootsClosure: public OopsInGenClosure {
ysr@777 793 private:
ysr@777 794 ConcurrentMark* _cm;
ysr@777 795 G1CollectedHeap* _g1h;
ysr@777 796 bool _do_barrier;
ysr@777 797
ysr@777 798 public:
ysr@777 799 CMMarkRootsClosure(ConcurrentMark* cm,
ysr@777 800 G1CollectedHeap* g1h,
ysr@777 801 bool do_barrier) : _cm(cm), _g1h(g1h),
ysr@777 802 _do_barrier(do_barrier) { }
ysr@777 803
ysr@1280 804 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
ysr@1280 805 virtual void do_oop( oop* p) { do_oop_work(p); }
ysr@1280 806
ysr@1280 807 template <class T> void do_oop_work(T* p) {
ysr@1280 808 T heap_oop = oopDesc::load_heap_oop(p);
ysr@1280 809 if (!oopDesc::is_null(heap_oop)) {
ysr@1280 810 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
ysr@1280 811 assert(obj->is_oop() || obj->mark() == NULL,
ysr@777 812 "expected an oop, possibly with mark word displaced");
ysr@1280 813 HeapWord* addr = (HeapWord*)obj;
ysr@777 814 if (_g1h->is_in_g1_reserved(addr)) {
ysr@1280 815 _cm->grayRoot(obj);
ysr@777 816 }
ysr@777 817 }
ysr@777 818 if (_do_barrier) {
ysr@777 819 assert(!_g1h->is_in_g1_reserved(p),
ysr@777 820 "Should be called on external roots");
ysr@777 821 do_barrier(p);
ysr@777 822 }
ysr@777 823 }
ysr@777 824 };
ysr@777 825
ysr@777 826 void ConcurrentMark::checkpointRootsInitialPost() {
ysr@777 827 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@777 828
tonyp@2848 829 // If we force an overflow during remark, the remark operation will
tonyp@2848 830 // actually abort and we'll restart concurrent marking. If we always
tonyp@2848 831 // force an overflow during remark we'll never actually complete the
tonyp@2848 832 // marking phase. So, we initialize this here, at the start of the
tonyp@2848 833 // cycle, so that the remaining overflow count will decrease at
tonyp@2848 834 // every remark and we'll eventually not need to cause one.
tonyp@2848 835 force_overflow_stw()->init();
tonyp@2848 836
ysr@777 837 // For each region note start of marking.
ysr@777 838 NoteStartOfMarkHRClosure startcl;
ysr@777 839 g1h->heap_region_iterate(&startcl);
ysr@777 840
ysr@777 841 // Start weak-reference discovery.
ysr@777 842 ReferenceProcessor* rp = g1h->ref_processor();
ysr@777 843 rp->verify_no_references_recorded();
ysr@777 844 rp->enable_discovery(); // enable ("weak") refs discovery
ysr@892 845 rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
ysr@777 846
ysr@777 847 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
tonyp@1752 848 // This is the start of the marking cycle; we expect all
tonyp@1752 849 // threads to have SATB queues with active set to false.
tonyp@1752 850 satb_mq_set.set_active_all_threads(true, /* new active value */
tonyp@1752 851 false /* expected_active */);
ysr@777 852
ysr@777 853 // update_g1_committed() will be called at the end of an evac pause
ysr@777 854 // when marking is on. So, it's also called at the end of the
ysr@777 855 // initial-mark pause to update the heap end, if the heap expands
ysr@777 856 // during it. No need to call it here.
ysr@777 857 }
ysr@777 858
ysr@777 859 // Checkpoint the roots into this generation from outside
ysr@777 860 // this generation. [Note this initial checkpoint need only
ysr@777 861 // be approximate -- we'll do a catch up phase subsequently.]
ysr@777 862 void ConcurrentMark::checkpointRootsInitial() {
ysr@777 863 assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
ysr@777 864 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@777 865
ysr@777 866 double start = os::elapsedTime();
ysr@777 867
ysr@777 868 G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
ysr@777 869 g1p->record_concurrent_mark_init_start();
ysr@777 870 checkpointRootsInitialPre();
ysr@777 871
ysr@777 872 // YSR: when concurrent precleaning is in place, we'll
ysr@777 873 // need to clear the cached card table here
ysr@777 874
ysr@777 875 ResourceMark rm;
ysr@777 876 HandleMark hm;
ysr@777 877
ysr@777 878 g1h->ensure_parsability(false);
ysr@777 879 g1h->perm_gen()->save_marks();
ysr@777 880
ysr@777 881 CMMarkRootsClosure notOlder(this, g1h, false);
ysr@777 882 CMMarkRootsClosure older(this, g1h, true);
ysr@777 883
ysr@777 884 g1h->set_marking_started();
ysr@777 885 g1h->rem_set()->prepare_for_younger_refs_iterate(false);
ysr@777 886
jrose@1424 887 g1h->process_strong_roots(true, // activate StrongRootsScope
jrose@1424 888 false, // fake perm gen collection
ysr@777 889 SharedHeap::SO_AllClasses,
ysr@777 890 &notOlder, // Regular roots
jrose@1424 891 NULL, // do not visit active blobs
ysr@777 892 &older // Perm Gen Roots
ysr@777 893 );
ysr@777 894 checkpointRootsInitialPost();
ysr@777 895
ysr@777 896 // Statistics.
ysr@777 897 double end = os::elapsedTime();
ysr@777 898 _init_times.add((end - start) * 1000.0);
ysr@777 899
ysr@777 900 g1p->record_concurrent_mark_init_end();
ysr@777 901 }
ysr@777 902
ysr@777 903 /*
tonyp@2848 904 * Notice that in the next two methods, we actually leave the STS
tonyp@2848 905 * during the barrier sync and join it immediately afterwards. If we
tonyp@2848 906 * do not do this, the following deadlock can occur: one thread could
tonyp@2848 907 * be in the barrier sync code, waiting for the other thread to also
tonyp@2848 908 * sync up, whereas another one could be trying to yield, while also
tonyp@2848 909 * waiting for the other threads to sync up too.
tonyp@2848 910 *
tonyp@2848 911 * Note, however, that this code is also used during remark and in
tonyp@2848 912 * this case we should not attempt to leave / enter the STS, otherwise
tonyp@2848 913 * we'll either hit an assert (debug / fastdebug) or deadlock
tonyp@2848 914 * (product). So we should only leave / enter the STS if we are
tonyp@2848 915 * operating concurrently.
tonyp@2848 916 *
tonyp@2848 917 * Because the thread that does the sync barrier has left the STS, it
tonyp@2848 918 * is possible that a Full GC or an evacuation pause could occur while
tonyp@2848 919 * it is suspended. This is actually safe, since entering the sync
tonyp@2848 920 * barrier is one of the last things do_marking_step() does, and it
tonyp@2848 921 * doesn't manipulate any data structures afterwards.
tonyp@2848 922 */
ysr@777 923
ysr@777 924 void ConcurrentMark::enter_first_sync_barrier(int task_num) {
ysr@777 925 if (verbose_low())
ysr@777 926 gclog_or_tty->print_cr("[%d] entering first barrier", task_num);
ysr@777 927
tonyp@2848 928 if (concurrent()) {
tonyp@2848 929 ConcurrentGCThread::stsLeave();
tonyp@2848 930 }
ysr@777 931 _first_overflow_barrier_sync.enter();
tonyp@2848 932 if (concurrent()) {
tonyp@2848 933 ConcurrentGCThread::stsJoin();
tonyp@2848 934 }
ysr@777 935 // at this point everyone should have synced up and not be doing any
ysr@777 936 // more work
ysr@777 937
ysr@777 938 if (verbose_low())
ysr@777 939 gclog_or_tty->print_cr("[%d] leaving first barrier", task_num);
ysr@777 940
ysr@777 941 // let task 0 do this
ysr@777 942 if (task_num == 0) {
ysr@777 943 // task 0 is responsible for clearing the global data structures
tonyp@2848 944 // We should be here because of an overflow. During STW we should
tonyp@2848 945 // not clear the overflow flag since we rely on it being true when
tonyp@2848 946 // we exit this method to abort the pause and restart concurrent
tonyp@2848 947 // marking.
tonyp@2848 948 clear_marking_state(concurrent() /* clear_overflow */);
tonyp@2848 949 force_overflow()->update();
ysr@777 950
ysr@777 951 if (PrintGC) {
ysr@777 952 gclog_or_tty->date_stamp(PrintGCDateStamps);
ysr@777 953 gclog_or_tty->stamp(PrintGCTimeStamps);
ysr@777 954 gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
ysr@777 955 }
ysr@777 956 }
ysr@777 957
ysr@777 958 // after this, each task should reset its own data structures and
ysr@777 959 // then go into the second barrier
ysr@777 960 }
ysr@777 961
ysr@777 962 void ConcurrentMark::enter_second_sync_barrier(int task_num) {
ysr@777 963 if (verbose_low())
ysr@777 964 gclog_or_tty->print_cr("[%d] entering second barrier", task_num);
ysr@777 965
tonyp@2848 966 if (concurrent()) {
tonyp@2848 967 ConcurrentGCThread::stsLeave();
tonyp@2848 968 }
ysr@777 969 _second_overflow_barrier_sync.enter();
tonyp@2848 970 if (concurrent()) {
tonyp@2848 971 ConcurrentGCThread::stsJoin();
tonyp@2848 972 }
ysr@777 973 // at this point everything should be re-initialised and ready to go
ysr@777 974
ysr@777 975 if (verbose_low())
ysr@777 976 gclog_or_tty->print_cr("[%d] leaving second barrier", task_num);
ysr@777 977 }
ysr@777 978
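// Non-product stress code: while _num_remaining is positive,
// should_force() periodically reports true so that a marking overflow,
// and the restart-for-overflow path it triggers, are exercised even
// when they would not occur naturally.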
tonyp@2848 979 #ifndef PRODUCT
tonyp@2848 980 void ForceOverflowSettings::init() {
tonyp@2848 981 _num_remaining = G1ConcMarkForceOverflow;
tonyp@2848 982 _force = false;
tonyp@2848 983 update();
tonyp@2848 984 }
tonyp@2848 985
tonyp@2848 986 void ForceOverflowSettings::update() {
tonyp@2848 987 if (_num_remaining > 0) {
tonyp@2848 988 _num_remaining -= 1;
tonyp@2848 989 _force = true;
tonyp@2848 990 } else {
tonyp@2848 991 _force = false;
tonyp@2848 992 }
tonyp@2848 993 }
tonyp@2848 994
tonyp@2848 995 bool ForceOverflowSettings::should_force() {
tonyp@2848 996 if (_force) {
tonyp@2848 997 _force = false;
tonyp@2848 998 return true;
tonyp@2848 999 } else {
tonyp@2848 1000 return false;
tonyp@2848 1001 }
tonyp@2848 1002 }
tonyp@2848 1003 #endif // !PRODUCT
tonyp@2848 1004
ysr@777 1005 void ConcurrentMark::grayRoot(oop p) {
ysr@777 1006 HeapWord* addr = (HeapWord*) p;
ysr@777 1007 // We can't really check against _heap_start and _heap_end, since it
ysr@777 1008 // is possible during an evacuation pause with piggy-backed
ysr@777 1009 // initial-mark that the committed space is expanded during the
ysr@777 1010 // pause without CM observing this change. So the assertion below
ysr@777 1011 // is a bit conservative; but better than nothing.
tonyp@1458 1012 assert(_g1h->g1_committed().contains(addr),
tonyp@1458 1013 "address should be within the heap bounds");
ysr@777 1014
ysr@777 1015 if (!_nextMarkBitMap->isMarked(addr))
ysr@777 1016 _nextMarkBitMap->parMark(addr);
ysr@777 1017 }
ysr@777 1018
ysr@777 1019 void ConcurrentMark::grayRegionIfNecessary(MemRegion mr) {
ysr@777 1020 // The objects on the region have already been marked "in bulk" by
ysr@777 1021 // the caller. We only need to decide whether to push the region on
ysr@777 1022 // the region stack or not.
ysr@777 1023
ysr@777 1024 if (!concurrent_marking_in_progress() || !_should_gray_objects)
ysr@777 1025 // We're done with marking and waiting for remark. We do not need to
ysr@777 1026 // push anything else on the region stack.
ysr@777 1027 return;
ysr@777 1028
ysr@777 1029 HeapWord* finger = _finger;
ysr@777 1030
ysr@777 1031 if (verbose_low())
ysr@777 1032 gclog_or_tty->print_cr("[global] attempting to push "
ysr@777 1033 "region ["PTR_FORMAT", "PTR_FORMAT"), finger is at "
ysr@777 1034 PTR_FORMAT, mr.start(), mr.end(), finger);
ysr@777 1035
ysr@777 1036 if (mr.start() < finger) {
ysr@777 1037 // The finger is always heap region aligned and it is not possible
ysr@777 1038 // for mr to span heap regions.
tonyp@1458 1039 assert(mr.end() <= finger, "invariant");
tonyp@1458 1040
tonyp@1458 1041 // Separated the asserts so that we know which one fires.
tonyp@1458 1042 assert(mr.start() <= mr.end(),
tonyp@1458 1043 "region boundaries should fall within the committed space");
tonyp@1458 1044 assert(_heap_start <= mr.start(),
tonyp@1458 1045 "region boundaries should fall within the committed space");
tonyp@1458 1046 assert(mr.end() <= _heap_end,
tonyp@1458 1047 "region boundaries should fall within the committed space");
ysr@777 1048 if (verbose_low())
ysr@777 1049 gclog_or_tty->print_cr("[global] region ["PTR_FORMAT", "PTR_FORMAT") "
ysr@777 1050 "below the finger, pushing it",
ysr@777 1051 mr.start(), mr.end());
ysr@777 1052
johnc@2190 1053 if (!region_stack_push_lock_free(mr)) {
ysr@777 1054 if (verbose_low())
ysr@777 1055 gclog_or_tty->print_cr("[global] region stack has overflown.");
ysr@777 1056 }
ysr@777 1057 }
ysr@777 1058 }
ysr@777 1059
ysr@777 1060 void ConcurrentMark::markAndGrayObjectIfNecessary(oop p) {
ysr@777 1061 // The object is not marked by the caller. We need to at least mark
ysr@777 1062 // it and maybe push it on the stack.
ysr@777 1063
ysr@777 1064 HeapWord* addr = (HeapWord*)p;
ysr@777 1065 if (!_nextMarkBitMap->isMarked(addr)) {
ysr@777 1066 // We definitely need to mark it, irrespective whether we bail out
ysr@777 1067 // because we're done with marking.
ysr@777 1068 if (_nextMarkBitMap->parMark(addr)) {
ysr@777 1069 if (!concurrent_marking_in_progress() || !_should_gray_objects)
ysr@777 1070 // If we're done with concurrent marking and we're waiting for
ysr@777 1071 // remark, then we're not pushing anything on the stack.
ysr@777 1072 return;
ysr@777 1073
ysr@777 1074 // No OrderAccess::store_load() is needed. It is implicit in the
ysr@777 1075 // CAS done in parMark(addr) above
ysr@777 1076 HeapWord* finger = _finger;
ysr@777 1077
ysr@777 1078 if (addr < finger) {
ysr@777 1079 if (!mark_stack_push(oop(addr))) {
ysr@777 1080 if (verbose_low())
ysr@777 1081 gclog_or_tty->print_cr("[global] global stack overflow "
ysr@777 1082 "during parMark");
ysr@777 1083 }
ysr@777 1084 }
ysr@777 1085 }
ysr@777 1086 }
ysr@777 1087 }
ysr@777 1088
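// The gang task run by the parallel marking threads during the
// concurrent phase: each worker joins the STS and repeatedly calls
// do_marking_step(), sleeping between steps as dictated by the sleep
// factor, until marking completes or is aborted.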
ysr@777 1089 class CMConcurrentMarkingTask: public AbstractGangTask {
ysr@777 1090 private:
ysr@777 1091 ConcurrentMark* _cm;
ysr@777 1092 ConcurrentMarkThread* _cmt;
ysr@777 1093
ysr@777 1094 public:
ysr@777 1095 void work(int worker_i) {
tonyp@1458 1096 assert(Thread::current()->is_ConcurrentGC_thread(),
tonyp@1458 1097 "this should only be done by a conc GC thread");
johnc@2316 1098 ResourceMark rm;
ysr@777 1099
ysr@777 1100 double start_vtime = os::elapsedVTime();
ysr@777 1101
ysr@777 1102 ConcurrentGCThread::stsJoin();
ysr@777 1103
tonyp@1458 1104 assert((size_t) worker_i < _cm->active_tasks(), "invariant");
ysr@777 1105 CMTask* the_task = _cm->task(worker_i);
ysr@777 1106 the_task->record_start_time();
ysr@777 1107 if (!_cm->has_aborted()) {
ysr@777 1108 do {
ysr@777 1109 double start_vtime_sec = os::elapsedVTime();
ysr@777 1110 double start_time_sec = os::elapsedTime();
johnc@2494 1111 double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
johnc@2494 1112
johnc@2494 1113 the_task->do_marking_step(mark_step_duration_ms,
johnc@2494 1114 true /* do_stealing */,
johnc@2494 1115 true /* do_termination */);
johnc@2494 1116
ysr@777 1117 double end_time_sec = os::elapsedTime();
ysr@777 1118 double end_vtime_sec = os::elapsedVTime();
ysr@777 1119 double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
ysr@777 1120 double elapsed_time_sec = end_time_sec - start_time_sec;
ysr@777 1121 _cm->clear_has_overflown();
ysr@777 1122
ysr@777 1123 bool ret = _cm->do_yield_check(worker_i);
ysr@777 1124
ysr@777 1125 jlong sleep_time_ms;
ysr@777 1126 if (!_cm->has_aborted() && the_task->has_aborted()) {
ysr@777 1127 sleep_time_ms =
ysr@777 1128 (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
ysr@777 1129 ConcurrentGCThread::stsLeave();
ysr@777 1130 os::sleep(Thread::current(), sleep_time_ms, false);
ysr@777 1131 ConcurrentGCThread::stsJoin();
ysr@777 1132 }
ysr@777 1133 double end_time2_sec = os::elapsedTime();
ysr@777 1134 double elapsed_time2_sec = end_time2_sec - start_time_sec;
ysr@777 1135
ysr@777 1136 #if 0
ysr@777 1137 gclog_or_tty->print_cr("CM: elapsed %1.4lf ms, sleep %1.4lf ms, "
ysr@777 1138 "overhead %1.4lf",
ysr@777 1139 elapsed_vtime_sec * 1000.0, (double) sleep_time_ms,
ysr@777 1140 the_task->conc_overhead(os::elapsedTime()) * 8.0);
ysr@777 1141 gclog_or_tty->print_cr("elapsed time %1.4lf ms, time 2: %1.4lf ms",
ysr@777 1142 elapsed_time_sec * 1000.0, elapsed_time2_sec * 1000.0);
ysr@777 1143 #endif
ysr@777 1144 } while (!_cm->has_aborted() && the_task->has_aborted());
ysr@777 1145 }
ysr@777 1146 the_task->record_end_time();
tonyp@1458 1147 guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");
ysr@777 1148
ysr@777 1149 ConcurrentGCThread::stsLeave();
ysr@777 1150
ysr@777 1151 double end_vtime = os::elapsedVTime();
ysr@777 1152 _cm->update_accum_task_vtime(worker_i, end_vtime - start_vtime);
ysr@777 1153 }
ysr@777 1154
ysr@777 1155 CMConcurrentMarkingTask(ConcurrentMark* cm,
ysr@777 1156 ConcurrentMarkThread* cmt) :
ysr@777 1157 AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }
ysr@777 1158
ysr@777 1159 ~CMConcurrentMarkingTask() { }
ysr@777 1160 };
ysr@777 1161
ysr@777 1162 void ConcurrentMark::markFromRoots() {
ysr@777 1163 // we might be tempted to assert that:
ysr@777 1164 // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
ysr@777 1165 // "inconsistent argument?");
ysr@777 1166 // However that wouldn't be right, because it's possible that
ysr@777 1167 // a safepoint is indeed in progress as a younger generation
ysr@777 1168 // stop-the-world GC happens even as we mark in this generation.
ysr@777 1169
ysr@777 1170 _restart_for_overflow = false;
ysr@777 1171
johnc@2494 1172 size_t active_workers = MAX2((size_t) 1, parallel_marking_threads());
tonyp@2848 1173 force_overflow_conc()->init();
johnc@2494 1174 set_phase(active_workers, true /* concurrent */);
ysr@777 1175
ysr@777 1176 CMConcurrentMarkingTask markingTask(this, cmThread());
ysr@777 1177 if (parallel_marking_threads() > 0)
ysr@777 1178 _parallel_workers->run_task(&markingTask);
ysr@777 1179 else
ysr@777 1180 markingTask.work(0);
ysr@777 1181 print_stats();
ysr@777 1182 }
ysr@777 1183
ysr@777 1184 void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
ysr@777 1185 // world is stopped at this checkpoint
ysr@777 1186 assert(SafepointSynchronize::is_at_safepoint(),
ysr@777 1187 "world should be stopped");
ysr@777 1188 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@777 1189
ysr@777 1190 // If a full collection has happened, we shouldn't do this.
ysr@777 1191 if (has_aborted()) {
ysr@777 1192 g1h->set_marking_complete(); // So bitmap clearing isn't confused
ysr@777 1193 return;
ysr@777 1194 }
ysr@777 1195
kamg@2445 1196 SvcGCMarker sgcm(SvcGCMarker::OTHER);
kamg@2445 1197
ysr@1280 1198 if (VerifyDuringGC) {
ysr@1280 1199 HandleMark hm; // handle scope
ysr@1280 1200 gclog_or_tty->print(" VerifyDuringGC:(before)");
ysr@1280 1201 Universe::heap()->prepare_for_verify();
ysr@1280 1202 Universe::verify(true, false, true);
ysr@1280 1203 }
ysr@1280 1204
ysr@777 1205 G1CollectorPolicy* g1p = g1h->g1_policy();
ysr@777 1206 g1p->record_concurrent_mark_remark_start();
ysr@777 1207
ysr@777 1208 double start = os::elapsedTime();
ysr@777 1209
ysr@777 1210 checkpointRootsFinalWork();
ysr@777 1211
ysr@777 1212 double mark_work_end = os::elapsedTime();
ysr@777 1213
ysr@777 1214 weakRefsWork(clear_all_soft_refs);
ysr@777 1215
ysr@777 1216 if (has_overflown()) {
ysr@777 1217 // Oops. We overflowed. Restart concurrent marking.
ysr@777 1218 _restart_for_overflow = true;
ysr@777 1219 // Clear the flag. We do not need it any more.
ysr@777 1220 clear_has_overflown();
ysr@777 1221 if (G1TraceMarkStackOverflow)
ysr@777 1222 gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
ysr@777 1223 } else {
tonyp@2469 1224 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
ysr@777 1225 // We're done with marking.
tonyp@1752 1226 // This is the end of the marking cycle; we expect all
tonyp@1752 1227 // threads to have SATB queues with active set to true.
tonyp@2469 1228 satb_mq_set.set_active_all_threads(false, /* new active value */
tonyp@2469 1229 true /* expected_active */);
tonyp@1246 1230
tonyp@1246 1231 if (VerifyDuringGC) {
ysr@1280 1232 HandleMark hm; // handle scope
ysr@1280 1233 gclog_or_tty->print(" VerifyDuringGC:(after)");
ysr@1280 1234 Universe::heap()->prepare_for_verify();
ysr@1280 1235 Universe::heap()->verify(/* allow_dirty */ true,
ysr@1280 1236 /* silent */ false,
ysr@1280 1237 /* use_prev_marking */ false);
tonyp@1246 1238 }
johnc@2494 1239 assert(!restart_for_overflow(), "sanity");
johnc@2494 1240 }
johnc@2494 1241
johnc@2494 1242 // Reset the marking state if marking completed
johnc@2494 1243 if (!restart_for_overflow()) {
johnc@2494 1244 set_non_marking_state();
ysr@777 1245 }
ysr@777 1246
ysr@777 1247 #if VERIFY_OBJS_PROCESSED
ysr@777 1248 _scan_obj_cl.objs_processed = 0;
ysr@777 1249 ThreadLocalObjQueue::objs_enqueued = 0;
ysr@777 1250 #endif
ysr@777 1251
ysr@777 1252 // Statistics
ysr@777 1253 double now = os::elapsedTime();
ysr@777 1254 _remark_mark_times.add((mark_work_end - start) * 1000.0);
ysr@777 1255 _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
ysr@777 1256 _remark_times.add((now - start) * 1000.0);
ysr@777 1257
ysr@777 1258 g1p->record_concurrent_mark_remark_end();
ysr@777 1259 }
ysr@777 1260
ysr@777 1261 #define CARD_BM_TEST_MODE 0
ysr@777 1262
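// Heap region closure used to count live data and to set, for each
// region (and card) that contains live objects, the corresponding bit
// in the region (and card) bitmap.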
ysr@777 1263 class CalcLiveObjectsClosure: public HeapRegionClosure {
ysr@777 1264
ysr@777 1265 CMBitMapRO* _bm;
ysr@777 1266 ConcurrentMark* _cm;
ysr@777 1267 bool _changed;
ysr@777 1268 bool _yield;
ysr@777 1269 size_t _words_done;
ysr@777 1270 size_t _tot_live;
ysr@777 1271 size_t _tot_used;
ysr@777 1272 size_t _regions_done;
ysr@777 1273 double _start_vtime_sec;
ysr@777 1274
ysr@777 1275 BitMap* _region_bm;
ysr@777 1276 BitMap* _card_bm;
ysr@777 1277 intptr_t _bottom_card_num;
ysr@777 1278 bool _final;
ysr@777 1279
ysr@777 1280 void mark_card_num_range(intptr_t start_card_num, intptr_t last_card_num) {
ysr@777 1281 for (intptr_t i = start_card_num; i <= last_card_num; i++) {
ysr@777 1282 #if CARD_BM_TEST_MODE
tonyp@1458 1283 guarantee(_card_bm->at(i - _bottom_card_num), "Should already be set.");
ysr@777 1284 #else
ysr@777 1285 _card_bm->par_at_put(i - _bottom_card_num, 1);
ysr@777 1286 #endif
ysr@777 1287 }
ysr@777 1288 }
ysr@777 1289
ysr@777 1290 public:
ysr@777 1291 CalcLiveObjectsClosure(bool final,
ysr@777 1292 CMBitMapRO *bm, ConcurrentMark *cm,
tonyp@1371 1293 BitMap* region_bm, BitMap* card_bm) :
ysr@777 1294 _bm(bm), _cm(cm), _changed(false), _yield(true),
ysr@777 1295 _words_done(0), _tot_live(0), _tot_used(0),
tonyp@1371 1296 _region_bm(region_bm), _card_bm(card_bm),_final(final),
ysr@777 1297 _regions_done(0), _start_vtime_sec(0.0)
ysr@777 1298 {
ysr@777 1299 _bottom_card_num =
ysr@777 1300 intptr_t(uintptr_t(G1CollectedHeap::heap()->reserved_region().start()) >>
ysr@777 1301 CardTableModRefBS::card_shift);
ysr@777 1302 }
ysr@777 1303
tonyp@1264 1304 // It takes a region that's not empty (i.e., it has at least one
tonyp@1264 1305 // live object in it) and sets its corresponding bit on the region
tonyp@1264 1306 // bitmap to 1. If the region is "starts humongous" it will also set
tonyp@1264 1307 // to 1 the bits on the region bitmap that correspond to its
tonyp@1264 1308 // associated "continues humongous" regions.
tonyp@1264 1309 void set_bit_for_region(HeapRegion* hr) {
tonyp@1264 1310 assert(!hr->continuesHumongous(), "should have filtered those out");
tonyp@1264 1311
tonyp@1264 1312 size_t index = hr->hrs_index();
tonyp@1264 1313 if (!hr->startsHumongous()) {
tonyp@1264 1314 // Normal (non-humongous) case: just set the bit.
tonyp@1264 1315 _region_bm->par_at_put((BitMap::idx_t) index, true);
tonyp@1264 1316 } else {
tonyp@1264 1317 // Starts humongous case: calculate how many regions are part of
tonyp@1264 1318 // this humongous region and then set the bit range. It might
tonyp@1264 1319 // have been a bit more efficient to look at the object that
tonyp@1264 1320 // spans these humongous regions to calculate their number from
tonyp@1264 1321 // the object's size. However, it's a good idea to calculate
tonyp@1264 1322 // this based on the metadata itself, and not the region
tonyp@1264 1323 // contents, so that this code is not aware of what goes into
tonyp@1264 1324 // the humongous regions (in case this changes in the future).
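tonyp@1264      // Hypothetical example: a humongous object spanning regions 10,
tonyp@1264      // 11 and 12 has its "starts humongous" region at index 10; the
tonyp@1264      // loop below finds end_index == 13 and sets bits [10, 13).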
tonyp@1264 1325 G1CollectedHeap* g1h = G1CollectedHeap::heap();
tonyp@1264 1326 size_t end_index = index + 1;
tonyp@1266 1327 while (end_index < g1h->n_regions()) {
tonyp@1266 1328 HeapRegion* chr = g1h->region_at(end_index);
tonyp@1264 1329 if (!chr->continuesHumongous()) {
tonyp@1264 1330 break;
tonyp@1264 1331 }
tonyp@1264 1332 end_index += 1;
tonyp@1264 1333 }
tonyp@1264 1334 _region_bm->par_at_put_range((BitMap::idx_t) index,
tonyp@1264 1335 (BitMap::idx_t) end_index, true);
tonyp@1264 1336 }
tonyp@1264 1337 }
tonyp@1264 1338
ysr@777 1339 bool doHeapRegion(HeapRegion* hr) {
ysr@777 1340 if (!_final && _regions_done == 0)
ysr@777 1341 _start_vtime_sec = os::elapsedVTime();
ysr@777 1342
iveresov@1074 1343 if (hr->continuesHumongous()) {
tonyp@1264 1344 // We will ignore these here and process them when their
tonyp@1264 1345 // associated "starts humongous" region is processed (see
tonyp@1264 1346 // set_bit_for_heap_region()). Note that we cannot rely on their
tonyp@1264 1347 // set_bit_for_region()). Note that we cannot rely on their
tonyp@1264 1348 // associated "starts humongous" region to have its bit set to
tonyp@1264 1349 // iteration, a "continues humongous" region might be visited
tonyp@1264 1350 // before its associated "starts humongous".
iveresov@1074 1351 return false;
iveresov@1074 1352 }
ysr@777 1353
ysr@777 1354 HeapWord* nextTop = hr->next_top_at_mark_start();
ysr@777 1355 HeapWord* start = hr->top_at_conc_mark_count();
ysr@777 1356 assert(hr->bottom() <= start && start <= hr->end() &&
ysr@777 1357 hr->bottom() <= nextTop && nextTop <= hr->end() &&
ysr@777 1358 start <= nextTop,
ysr@777 1359 "Preconditions.");
ysr@777 1360 // Record the number of words we'll examine.
ysr@777 1361 size_t words_done = (nextTop - start);
ysr@777 1362 // Find the first marked object at or after "start".
ysr@777 1363 start = _bm->getNextMarkedWordAddress(start, nextTop);
ysr@777 1364 size_t marked_bytes = 0;
ysr@777 1365
ysr@777 1366 // Below, the term "card num" means the result of shifting an address
ysr@777 1367 // by the card shift -- address 0 corresponds to card number 0. One
ysr@777 1368 // must subtract the card num of the bottom of the heap to obtain a
ysr@777 1369 // card table index.
ysr@777 1370 // The first card num of the sequence of live cards currently being
ysr@777 1371 // constructed. -1 ==> no sequence.
ysr@777 1372 intptr_t start_card_num = -1;
ysr@777 1373 // The last card num of the sequence of live cards currently being
ysr@777 1374 // constructed. -1 ==> no sequence.
ysr@777 1375 intptr_t last_card_num = -1;
ysr@777 1376
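ysr@777      // Illustrative (hypothetical) trace of the run coalescing below:
ysr@777      // marked objects covering cards [5,6], [6,7] and [9,9] first grow
ysr@777      // a single run [5,7]; the gap before card 9 then flushes that run
ysr@777      // via mark_card_num_range(5, 7) and starts a new run at 9, which
ysr@777      // the "last range" call after the loop flushes.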
ysr@777 1377 while (start < nextTop) {
ysr@777 1378 if (_yield && _cm->do_yield_check()) {
ysr@777 1379 // We yielded. It might be for a full collection, in which case
ysr@777 1380 // all bets are off; terminate the traversal.
ysr@777 1381 if (_cm->has_aborted()) {
ysr@777 1382 _changed = false;
ysr@777 1383 return true;
ysr@777 1384 } else {
ysr@777 1385 // Otherwise, it might be a collection pause, and the region
ysr@777 1386 // we're looking at might be in the collection set. We'll
ysr@777 1387 // abandon this region.
ysr@777 1388 return false;
ysr@777 1389 }
ysr@777 1390 }
ysr@777 1391 oop obj = oop(start);
ysr@777 1392 int obj_sz = obj->size();
ysr@777 1393 // The card num of the start of the current object.
ysr@777 1394 intptr_t obj_card_num =
ysr@777 1395 intptr_t(uintptr_t(start) >> CardTableModRefBS::card_shift);
ysr@777 1396
ysr@777 1397 HeapWord* obj_last = start + obj_sz - 1;
ysr@777 1398 intptr_t obj_last_card_num =
ysr@777 1399 intptr_t(uintptr_t(obj_last) >> CardTableModRefBS::card_shift);
ysr@777 1400
ysr@777 1401 if (obj_card_num != last_card_num) {
ysr@777 1402 if (start_card_num == -1) {
ysr@777 1403 assert(last_card_num == -1, "Both or neither.");
ysr@777 1404 start_card_num = obj_card_num;
ysr@777 1405 } else {
ysr@777 1406 assert(last_card_num != -1, "Both or neither.");
ysr@777 1407 assert(obj_card_num >= last_card_num, "Inv");
ysr@777 1408 if ((obj_card_num - last_card_num) > 1) {
ysr@777 1409 // Mark the last run, and start a new one.
ysr@777 1410 mark_card_num_range(start_card_num, last_card_num);
ysr@777 1411 start_card_num = obj_card_num;
ysr@777 1412 }
ysr@777 1413 }
ysr@777 1414 #if CARD_BM_TEST_MODE
ysr@777 1415 /*
ysr@777 1416 gclog_or_tty->print_cr("Setting bits from %d/%d.",
ysr@777 1417 obj_card_num - _bottom_card_num,
ysr@777 1418 obj_last_card_num - _bottom_card_num);
ysr@777 1419 */
ysr@777 1420 for (intptr_t j = obj_card_num; j <= obj_last_card_num; j++) {
ysr@777 1421 _card_bm->par_at_put(j - _bottom_card_num, 1);
ysr@777 1422 }
ysr@777 1423 #endif
ysr@777 1424 }
ysr@777 1425 // In any case, we set the last card num.
ysr@777 1426 last_card_num = obj_last_card_num;
ysr@777 1427
apetrusenko@1465 1428 marked_bytes += (size_t)obj_sz * HeapWordSize;
ysr@777 1429 // Find the next marked object after this one.
ysr@777 1430 start = _bm->getNextMarkedWordAddress(start + 1, nextTop);
ysr@777 1431 _changed = true;
ysr@777 1432 }
ysr@777 1433 // Handle the last range, if any.
ysr@777 1434 if (start_card_num != -1)
ysr@777 1435 mark_card_num_range(start_card_num, last_card_num);
ysr@777 1436 if (_final) {
ysr@777 1437 // Mark the allocated-since-marking portion...
ysr@777 1438 HeapWord* tp = hr->top();
ysr@777 1439 if (nextTop < tp) {
ysr@777 1440 start_card_num =
ysr@777 1441 intptr_t(uintptr_t(nextTop) >> CardTableModRefBS::card_shift);
ysr@777 1442 last_card_num =
ysr@777 1443 intptr_t(uintptr_t(tp) >> CardTableModRefBS::card_shift);
ysr@777 1444 mark_card_num_range(start_card_num, last_card_num);
ysr@777 1445 // This definitely means the region has live objects.
tonyp@1264 1446 set_bit_for_region(hr);
ysr@777 1447 }
ysr@777 1448 }
ysr@777 1449
ysr@777 1450 hr->add_to_marked_bytes(marked_bytes);
ysr@777 1451 // Update the live region bitmap.
ysr@777 1452 if (marked_bytes > 0) {
tonyp@1264 1453 set_bit_for_region(hr);
ysr@777 1454 }
ysr@777 1455 hr->set_top_at_conc_mark_count(nextTop);
ysr@777 1456 _tot_live += hr->next_live_bytes();
ysr@777 1457 _tot_used += hr->used();
ysr@777 1458 _words_done = words_done;
ysr@777 1459
ysr@777 1460 if (!_final) {
ysr@777 1461 ++_regions_done;
ysr@777 1462 if (_regions_done % 10 == 0) {
ysr@777 1463 double end_vtime_sec = os::elapsedVTime();
ysr@777 1464 double elapsed_vtime_sec = end_vtime_sec - _start_vtime_sec;
ysr@777 1465 if (elapsed_vtime_sec > (10.0 / 1000.0)) {
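ysr@777      // Sleep in proportion to the vtime consumed, so the counting
ysr@777      // work runs at roughly a 1/(1 + sleep_factor) duty cycle. For
ysr@777      // example (hypothetical factor): with cleanup_sleep_factor() ==
ysr@777      // 0.10, 15 ms of vtime leads to a 1.5 ms sleep.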
ysr@777 1466 jlong sleep_time_ms =
ysr@777 1467 (jlong) (elapsed_vtime_sec * _cm->cleanup_sleep_factor() * 1000.0);
ysr@777 1468 os::sleep(Thread::current(), sleep_time_ms, false);
ysr@777 1469 _start_vtime_sec = end_vtime_sec;
ysr@777 1470 }
ysr@777 1471 }
ysr@777 1472 }
ysr@777 1473
ysr@777 1474 return false;
ysr@777 1475 }
ysr@777 1476
ysr@777 1477 bool changed() { return _changed; }
ysr@777 1478 void reset() { _changed = false; _words_done = 0; }
ysr@777 1479 void no_yield() { _yield = false; }
ysr@777 1480 size_t words_done() { return _words_done; }
ysr@777 1481 size_t tot_live() { return _tot_live; }
ysr@777 1482 size_t tot_used() { return _tot_used; }
ysr@777 1483 };
ysr@777 1484
ysr@777 1485
ysr@777 1486 void ConcurrentMark::calcDesiredRegions() {
ysr@777 1487 _region_bm.clear();
ysr@777 1488 _card_bm.clear();
ysr@777 1489 CalcLiveObjectsClosure calccl(false /*final*/,
ysr@777 1490 nextMarkBitMap(), this,
tonyp@1371 1491 &_region_bm, &_card_bm);
ysr@777 1492 G1CollectedHeap *g1h = G1CollectedHeap::heap();
ysr@777 1493 g1h->heap_region_iterate(&calccl);
ysr@777 1494
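ysr@777      // The closure may yield and abandon regions mid-pass (see
ysr@777      // doHeapRegion above), so keep re-iterating until a complete
ysr@777      // pass over the heap makes no further changes.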
ysr@777 1495 do {
ysr@777 1496 calccl.reset();
ysr@777 1497 g1h->heap_region_iterate(&calccl);
ysr@777 1498 } while (calccl.changed());
ysr@777 1499 }
ysr@777 1500
ysr@777 1501 class G1ParFinalCountTask: public AbstractGangTask {
ysr@777 1502 protected:
ysr@777 1503 G1CollectedHeap* _g1h;
ysr@777 1504 CMBitMap* _bm;
ysr@777 1505 size_t _n_workers;
ysr@777 1506 size_t *_live_bytes;
ysr@777 1507 size_t *_used_bytes;
ysr@777 1508 BitMap* _region_bm;
ysr@777 1509 BitMap* _card_bm;
ysr@777 1510 public:
ysr@777 1511 G1ParFinalCountTask(G1CollectedHeap* g1h, CMBitMap* bm,
ysr@777 1512 BitMap* region_bm, BitMap* card_bm) :
ysr@777 1513 AbstractGangTask("G1 final counting"), _g1h(g1h),
ysr@777 1514 _bm(bm), _region_bm(region_bm), _card_bm(card_bm)
ysr@777 1515 {
ysr@777 1516 if (ParallelGCThreads > 0)
ysr@777 1517 _n_workers = _g1h->workers()->total_workers();
ysr@777 1518 else
ysr@777 1519 _n_workers = 1;
ysr@777 1520 _live_bytes = NEW_C_HEAP_ARRAY(size_t, _n_workers);
ysr@777 1521 _used_bytes = NEW_C_HEAP_ARRAY(size_t, _n_workers);
ysr@777 1522 }
ysr@777 1523
ysr@777 1524 ~G1ParFinalCountTask() {
ysr@777 1525 FREE_C_HEAP_ARRAY(size_t, _live_bytes);
ysr@777 1526 FREE_C_HEAP_ARRAY(size_t, _used_bytes);
ysr@777 1527 }
ysr@777 1528
ysr@777 1529 void work(int i) {
ysr@777 1530 CalcLiveObjectsClosure calccl(true /*final*/,
ysr@777 1531 _bm, _g1h->concurrent_mark(),
tonyp@1371 1532 _region_bm, _card_bm);
ysr@777 1533 calccl.no_yield();
jmasa@2188 1534 if (G1CollectedHeap::use_parallel_gc_threads()) {
tonyp@790 1535 _g1h->heap_region_par_iterate_chunked(&calccl, i,
tonyp@790 1536 HeapRegion::FinalCountClaimValue);
ysr@777 1537 } else {
ysr@777 1538 _g1h->heap_region_iterate(&calccl);
ysr@777 1539 }
ysr@777 1540 assert(calccl.complete(), "Shouldn't have yielded!");
ysr@777 1541
tonyp@1458 1542 assert((size_t) i < _n_workers, "invariant");
ysr@777 1543 _live_bytes[i] = calccl.tot_live();
ysr@777 1544 _used_bytes[i] = calccl.tot_used();
ysr@777 1545 }
ysr@777 1546 size_t live_bytes() {
ysr@777 1547 size_t live_bytes = 0;
ysr@777 1548 for (size_t i = 0; i < _n_workers; ++i)
ysr@777 1549 live_bytes += _live_bytes[i];
ysr@777 1550 return live_bytes;
ysr@777 1551 }
ysr@777 1552 size_t used_bytes() {
ysr@777 1553 size_t used_bytes = 0;
ysr@777 1554 for (size_t i = 0; i < _n_workers; ++i)
ysr@777 1555 used_bytes += _used_bytes[i];
ysr@777 1556 return used_bytes;
ysr@777 1557 }
ysr@777 1558 };
ysr@777 1559
ysr@777 1560 class G1ParNoteEndTask;
ysr@777 1561
ysr@777 1562 class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
ysr@777 1563 G1CollectedHeap* _g1;
ysr@777 1564 int _worker_num;
ysr@777 1565 size_t _max_live_bytes;
ysr@777 1566 size_t _regions_claimed;
ysr@777 1567 size_t _freed_bytes;
tonyp@2493 1568 FreeRegionList* _local_cleanup_list;
tonyp@2493 1569 HumongousRegionSet* _humongous_proxy_set;
tonyp@2493 1570 HRRSCleanupTask* _hrrs_cleanup_task;
ysr@777 1571 double _claimed_region_time;
ysr@777 1572 double _max_region_time;
ysr@777 1573
ysr@777 1574 public:
ysr@777 1575 G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
tonyp@2493 1576 int worker_num,
tonyp@2493 1577 FreeRegionList* local_cleanup_list,
tonyp@2493 1578 HumongousRegionSet* humongous_proxy_set,
tonyp@2493 1579 HRRSCleanupTask* hrrs_cleanup_task);
ysr@777 1580 size_t freed_bytes() { return _freed_bytes; }
ysr@777 1581
ysr@777 1582 bool doHeapRegion(HeapRegion *r);
ysr@777 1583
ysr@777 1584 size_t max_live_bytes() { return _max_live_bytes; }
ysr@777 1585 size_t regions_claimed() { return _regions_claimed; }
ysr@777 1586 double claimed_region_time_sec() { return _claimed_region_time; }
ysr@777 1587 double max_region_time_sec() { return _max_region_time; }
ysr@777 1588 };
ysr@777 1589
ysr@777 1590 class G1ParNoteEndTask: public AbstractGangTask {
ysr@777 1591 friend class G1NoteEndOfConcMarkClosure;
tonyp@2472 1592
ysr@777 1593 protected:
ysr@777 1594 G1CollectedHeap* _g1h;
ysr@777 1595 size_t _max_live_bytes;
ysr@777 1596 size_t _freed_bytes;
tonyp@2472 1597 FreeRegionList* _cleanup_list;
tonyp@2472 1598
ysr@777 1599 public:
ysr@777 1600 G1ParNoteEndTask(G1CollectedHeap* g1h,
tonyp@2472 1601 FreeRegionList* cleanup_list) :
ysr@777 1602 AbstractGangTask("G1 note end"), _g1h(g1h),
tonyp@2472 1603 _max_live_bytes(0), _freed_bytes(0), _cleanup_list(cleanup_list) { }
ysr@777 1604
ysr@777 1605 void work(int i) {
ysr@777 1606 double start = os::elapsedTime();
tonyp@2493 1607 FreeRegionList local_cleanup_list("Local Cleanup List");
tonyp@2493 1608 HumongousRegionSet humongous_proxy_set("Local Cleanup Humongous Proxy Set");
tonyp@2493 1609 HRRSCleanupTask hrrs_cleanup_task;
tonyp@2493 1610 G1NoteEndOfConcMarkClosure g1_note_end(_g1h, i, &local_cleanup_list,
tonyp@2493 1611 &humongous_proxy_set,
tonyp@2493 1612 &hrrs_cleanup_task);
jmasa@2188 1613 if (G1CollectedHeap::use_parallel_gc_threads()) {
tonyp@790 1614 _g1h->heap_region_par_iterate_chunked(&g1_note_end, i,
tonyp@790 1615 HeapRegion::NoteEndClaimValue);
ysr@777 1616 } else {
ysr@777 1617 _g1h->heap_region_iterate(&g1_note_end);
ysr@777 1618 }
ysr@777 1619 assert(g1_note_end.complete(), "Shouldn't have yielded!");
ysr@777 1620
tonyp@2472 1621 // Now update the lists
tonyp@2472 1622 _g1h->update_sets_after_freeing_regions(g1_note_end.freed_bytes(),
tonyp@2472 1623 NULL /* free_list */,
tonyp@2493 1624 &humongous_proxy_set,
tonyp@2472 1625 true /* par */);
ysr@777 1626 {
ysr@777 1627 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
ysr@777 1628 _max_live_bytes += g1_note_end.max_live_bytes();
ysr@777 1629 _freed_bytes += g1_note_end.freed_bytes();
tonyp@2472 1630
tonyp@2493 1631 _cleanup_list->add_as_tail(&local_cleanup_list);
tonyp@2493 1632 assert(local_cleanup_list.is_empty(), "post-condition");
tonyp@2493 1633
tonyp@2493 1634 HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
ysr@777 1635 }
ysr@777 1636 double end = os::elapsedTime();
ysr@777 1637 if (G1PrintParCleanupStats) {
ysr@777 1638 gclog_or_tty->print(" Worker thread %d [%8.3f..%8.3f = %8.3f ms] "
ysr@777 1639 "claimed %d regions (tot = %8.3f ms, max = %8.3f ms).\n",
ysr@777 1640 i, start, end, (end-start)*1000.0,
ysr@777 1641 g1_note_end.regions_claimed(),
ysr@777 1642 g1_note_end.claimed_region_time_sec()*1000.0,
ysr@777 1643 g1_note_end.max_region_time_sec()*1000.0);
ysr@777 1644 }
ysr@777 1645 }
ysr@777 1646 size_t max_live_bytes() { return _max_live_bytes; }
ysr@777 1647 size_t freed_bytes() { return _freed_bytes; }
ysr@777 1648 };
ysr@777 1649
ysr@777 1650 class G1ParScrubRemSetTask: public AbstractGangTask {
ysr@777 1651 protected:
ysr@777 1652 G1RemSet* _g1rs;
ysr@777 1653 BitMap* _region_bm;
ysr@777 1654 BitMap* _card_bm;
ysr@777 1655 public:
ysr@777 1656 G1ParScrubRemSetTask(G1CollectedHeap* g1h,
ysr@777 1657 BitMap* region_bm, BitMap* card_bm) :
ysr@777 1658 AbstractGangTask("G1 ScrubRS"), _g1rs(g1h->g1_rem_set()),
ysr@777 1659 _region_bm(region_bm), _card_bm(card_bm)
ysr@777 1660 {}
ysr@777 1661
ysr@777 1662 void work(int i) {
jmasa@2188 1663 if (G1CollectedHeap::use_parallel_gc_threads()) {
tonyp@790 1664 _g1rs->scrub_par(_region_bm, _card_bm, i,
tonyp@790 1665 HeapRegion::ScrubRemSetClaimValue);
ysr@777 1666 } else {
ysr@777 1667 _g1rs->scrub(_region_bm, _card_bm);
ysr@777 1668 }
ysr@777 1669 }
ysr@777 1670
ysr@777 1671 };
ysr@777 1672
ysr@777 1673 G1NoteEndOfConcMarkClosure::
ysr@777 1674 G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
tonyp@2493 1675 int worker_num,
tonyp@2493 1676 FreeRegionList* local_cleanup_list,
tonyp@2493 1677 HumongousRegionSet* humongous_proxy_set,
tonyp@2493 1678 HRRSCleanupTask* hrrs_cleanup_task)
ysr@777 1679 : _g1(g1), _worker_num(worker_num),
ysr@777 1680 _max_live_bytes(0), _regions_claimed(0),
tonyp@2472 1681 _freed_bytes(0),
ysr@777 1682 _claimed_region_time(0.0), _max_region_time(0.0),
tonyp@2493 1683 _local_cleanup_list(local_cleanup_list),
tonyp@2493 1684 _humongous_proxy_set(humongous_proxy_set),
tonyp@2493 1685 _hrrs_cleanup_task(hrrs_cleanup_task) { }
tonyp@2472 1686
tonyp@2472 1687 bool G1NoteEndOfConcMarkClosure::doHeapRegion(HeapRegion *hr) {
ysr@777 1688 // We use a claim value of zero here because all regions
ysr@777 1689 // were claimed with value 1 in the FinalCount task.
tonyp@2472 1690 hr->reset_gc_time_stamp();
tonyp@2472 1691 if (!hr->continuesHumongous()) {
ysr@777 1692 double start = os::elapsedTime();
ysr@777 1693 _regions_claimed++;
tonyp@2472 1694 hr->note_end_of_marking();
tonyp@2472 1695 _max_live_bytes += hr->max_live_bytes();
tonyp@2493 1696 _g1->free_region_if_empty(hr,
tonyp@2493 1697 &_freed_bytes,
tonyp@2493 1698 _local_cleanup_list,
tonyp@2493 1699 _humongous_proxy_set,
tonyp@2493 1700 _hrrs_cleanup_task,
tonyp@2493 1701 true /* par */);
ysr@777 1702 double region_time = (os::elapsedTime() - start);
ysr@777 1703 _claimed_region_time += region_time;
ysr@777 1704 if (region_time > _max_region_time) _max_region_time = region_time;
ysr@777 1705 }
ysr@777 1706 return false;
ysr@777 1707 }
ysr@777 1708
ysr@777 1709 void ConcurrentMark::cleanup() {
ysr@777 1710 // world is stopped at this checkpoint
ysr@777 1711 assert(SafepointSynchronize::is_at_safepoint(),
ysr@777 1712 "world should be stopped");
ysr@777 1713 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@777 1714
ysr@777 1715 // If a full collection has happened, we shouldn't do this.
ysr@777 1716 if (has_aborted()) {
ysr@777 1717 g1h->set_marking_complete(); // So bitmap clearing isn't confused
ysr@777 1718 return;
ysr@777 1719 }
ysr@777 1720
tonyp@2472 1721 g1h->verify_region_sets_optional();
tonyp@2472 1722
ysr@1280 1723 if (VerifyDuringGC) {
ysr@1280 1724 HandleMark hm; // handle scope
ysr@1280 1725 gclog_or_tty->print(" VerifyDuringGC:(before)");
ysr@1280 1726 Universe::heap()->prepare_for_verify();
ysr@1280 1727 Universe::verify(/* allow dirty */ true,
ysr@1280 1728 /* silent */ false,
ysr@1280 1729 /* prev marking */ true);
ysr@1280 1730 }
ysr@1280 1731
ysr@777 1732 G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
ysr@777 1733 g1p->record_concurrent_mark_cleanup_start();
ysr@777 1734
ysr@777 1735 double start = os::elapsedTime();
ysr@777 1736
tonyp@2493 1737 HeapRegionRemSet::reset_for_cleanup_tasks();
tonyp@2493 1738
ysr@777 1739 // Do counting once more with the world stopped for good measure.
ysr@777 1740 G1ParFinalCountTask g1_par_count_task(g1h, nextMarkBitMap(),
ysr@777 1741 &_region_bm, &_card_bm);
jmasa@2188 1742 if (G1CollectedHeap::use_parallel_gc_threads()) {
tonyp@790 1743 assert(g1h->check_heap_region_claim_values(
tonyp@790 1744 HeapRegion::InitialClaimValue),
tonyp@790 1745 "sanity check");
tonyp@790 1746
ysr@777 1747 int n_workers = g1h->workers()->total_workers();
ysr@777 1748 g1h->set_par_threads(n_workers);
ysr@777 1749 g1h->workers()->run_task(&g1_par_count_task);
ysr@777 1750 g1h->set_par_threads(0);
tonyp@790 1751
tonyp@790 1752 assert(g1h->check_heap_region_claim_values(
tonyp@790 1753 HeapRegion::FinalCountClaimValue),
tonyp@790 1754 "sanity check");
ysr@777 1755 } else {
ysr@777 1756 g1_par_count_task.work(0);
ysr@777 1757 }
ysr@777 1758
ysr@777 1759 size_t known_garbage_bytes =
ysr@777 1760 g1_par_count_task.used_bytes() - g1_par_count_task.live_bytes();
ysr@777 1761 #if 0
ysr@777 1762 gclog_or_tty->print_cr("used %1.2lf, live %1.2lf, garbage %1.2lf",
ysr@777 1763 (double) g1_par_count_task.used_bytes() / (double) (1024 * 1024),
ysr@777 1764 (double) g1_par_count_task.live_bytes() / (double) (1024 * 1024),
ysr@777 1765 (double) known_garbage_bytes / (double) (1024 * 1024));
ysr@777 1766 #endif // 0
ysr@777 1767 g1p->set_known_garbage_bytes(known_garbage_bytes);
ysr@777 1768
ysr@777 1769 size_t start_used_bytes = g1h->used();
ysr@777 1770 _at_least_one_mark_complete = true;
ysr@777 1771 g1h->set_marking_complete();
ysr@777 1772
ysr@777 1773 double count_end = os::elapsedTime();
ysr@777 1774 double this_final_counting_time = (count_end - start);
ysr@777 1775 if (G1PrintParCleanupStats) {
ysr@777 1776 gclog_or_tty->print_cr("Cleanup:");
ysr@777 1777 gclog_or_tty->print_cr(" Finalize counting: %8.3f ms",
ysr@777 1778 this_final_counting_time*1000.0);
ysr@777 1779 }
ysr@777 1780 _total_counting_time += this_final_counting_time;
ysr@777 1781
tonyp@2717 1782 if (G1PrintRegionLivenessInfo) {
tonyp@2717 1783 G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking");
tonyp@2717 1784 _g1h->heap_region_iterate(&cl);
tonyp@2717 1785 }
tonyp@2717 1786
ysr@777 1787 // Install newly created mark bitMap as "prev".
ysr@777 1788 swapMarkBitMaps();
ysr@777 1789
ysr@777 1790 g1h->reset_gc_time_stamp();
ysr@777 1791
ysr@777 1792 // Note end of marking in all heap regions.
ysr@777 1793 double note_end_start = os::elapsedTime();
tonyp@2472 1794 G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list);
jmasa@2188 1795 if (G1CollectedHeap::use_parallel_gc_threads()) {
ysr@777 1796 int n_workers = g1h->workers()->total_workers();
ysr@777 1797 g1h->set_par_threads(n_workers);
ysr@777 1798 g1h->workers()->run_task(&g1_par_note_end_task);
ysr@777 1799 g1h->set_par_threads(0);
tonyp@790 1800
tonyp@790 1801 assert(g1h->check_heap_region_claim_values(HeapRegion::NoteEndClaimValue),
tonyp@790 1802 "sanity check");
ysr@777 1803 } else {
ysr@777 1804 g1_par_note_end_task.work(0);
ysr@777 1805 }
tonyp@2472 1806
tonyp@2472 1807 if (!cleanup_list_is_empty()) {
tonyp@2472 1808 // The cleanup list is not empty, so we'll have to process it
tonyp@2472 1809 // concurrently. Notify anyone else that might be wanting free
tonyp@2472 1810 // regions that there will be more free regions coming soon.
tonyp@2472 1811 g1h->set_free_regions_coming();
tonyp@2472 1812 }
ysr@777 1813 double note_end_end = os::elapsedTime();
ysr@777 1814 if (G1PrintParCleanupStats) {
ysr@777 1815 gclog_or_tty->print_cr(" note end of marking: %8.3f ms.",
ysr@777 1816 (note_end_end - note_end_start)*1000.0);
ysr@777 1817 }
ysr@777 1818
tonyp@790 1819
ysr@777 1820 // Note: this must happen before the record_concurrent_mark_cleanup_end()
ysr@777 1821 // call below, since it affects the metric by which we sort the heap regions.
ysr@777 1822 if (G1ScrubRemSets) {
ysr@777 1823 double rs_scrub_start = os::elapsedTime();
ysr@777 1824 G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm);
jmasa@2188 1825 if (G1CollectedHeap::use_parallel_gc_threads()) {
ysr@777 1826 int n_workers = g1h->workers()->total_workers();
ysr@777 1827 g1h->set_par_threads(n_workers);
ysr@777 1828 g1h->workers()->run_task(&g1_par_scrub_rs_task);
ysr@777 1829 g1h->set_par_threads(0);
tonyp@790 1830
tonyp@790 1831 assert(g1h->check_heap_region_claim_values(
tonyp@790 1832 HeapRegion::ScrubRemSetClaimValue),
tonyp@790 1833 "sanity check");
ysr@777 1834 } else {
ysr@777 1835 g1_par_scrub_rs_task.work(0);
ysr@777 1836 }
ysr@777 1837
ysr@777 1838 double rs_scrub_end = os::elapsedTime();
ysr@777 1839 double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start);
ysr@777 1840 _total_rs_scrub_time += this_rs_scrub_time;
ysr@777 1841 }
ysr@777 1842
ysr@777 1843 // this will also free any regions totally full of garbage objects,
ysr@777 1844 // and sort the regions.
ysr@777 1845 g1h->g1_policy()->record_concurrent_mark_cleanup_end(
ysr@777 1846 g1_par_note_end_task.freed_bytes(),
ysr@777 1847 g1_par_note_end_task.max_live_bytes());
ysr@777 1848
ysr@777 1849 // Statistics.
ysr@777 1850 double end = os::elapsedTime();
ysr@777 1851 _cleanup_times.add((end - start) * 1000.0);
ysr@777 1852
ysr@777 1853 // G1CollectedHeap::heap()->print();
ysr@777 1854 // gclog_or_tty->print_cr("HEAP GC TIME STAMP : %d",
ysr@777 1855 // G1CollectedHeap::heap()->get_gc_time_stamp());
ysr@777 1856
ysr@777 1857 if (PrintGC || PrintGCDetails) {
ysr@777 1858 g1h->print_size_transition(gclog_or_tty,
ysr@777 1859 start_used_bytes,
ysr@777 1860 g1h->used(),
ysr@777 1861 g1h->capacity());
ysr@777 1862 }
ysr@777 1863
ysr@777 1864 size_t cleaned_up_bytes = start_used_bytes - g1h->used();
ysr@777 1865 g1p->decrease_known_garbage_bytes(cleaned_up_bytes);
ysr@777 1866
ysr@777 1867 // We need to make this be a "collection" so any collection pause that
ysr@777 1868 // races with it goes around and waits for completeCleanup to finish.
ysr@777 1869 g1h->increment_total_collections();
ysr@777 1870
johnc@1186 1871 if (VerifyDuringGC) {
ysr@1280 1872 HandleMark hm; // handle scope
ysr@1280 1873 gclog_or_tty->print(" VerifyDuringGC:(after)");
ysr@1280 1874 Universe::heap()->prepare_for_verify();
ysr@1280 1875 Universe::verify(/* allow dirty */ true,
ysr@1280 1876 /* silent */ false,
ysr@1280 1877 /* prev marking */ true);
ysr@777 1878 }
tonyp@2472 1879
tonyp@2472 1880 g1h->verify_region_sets_optional();
ysr@777 1881 }
ysr@777 1882
ysr@777 1883 void ConcurrentMark::completeCleanup() {
ysr@777 1884 if (has_aborted()) return;
ysr@777 1885
tonyp@2472 1886 G1CollectedHeap* g1h = G1CollectedHeap::heap();
tonyp@2472 1887
tonyp@2472 1888 _cleanup_list.verify_optional();
tonyp@2643 1889 FreeRegionList tmp_free_list("Tmp Free List");
tonyp@2472 1890
tonyp@2472 1891 if (G1ConcRegionFreeingVerbose) {
tonyp@2472 1892 gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
tonyp@2472 1893 "cleanup list has "SIZE_FORMAT" entries",
tonyp@2472 1894 _cleanup_list.length());
tonyp@2472 1895 }
tonyp@2472 1896
tonyp@2472 1897 // No one else should be accessing the _cleanup_list at this point,
tonyp@2472 1898 // so it's not necessary to take any locks.
tonyp@2472 1899 while (!_cleanup_list.is_empty()) {
tonyp@2472 1900 HeapRegion* hr = _cleanup_list.remove_head();
tonyp@2472 1901 assert(hr != NULL, "the list was not empty");
tonyp@2472 1902 hr->rem_set()->clear();
tonyp@2643 1903 tmp_free_list.add_as_tail(hr);
tonyp@2472 1904
tonyp@2472 1905 // Instead of adding one region at a time to the secondary_free_list,
tonyp@2472 1906 // we accumulate them in the local list and move them a few at a
tonyp@2472 1907 // time. This also cuts down on the number of notify_all() calls
tonyp@2472 1908 // we do during this process. We'll also append the local list when
tonyp@2472 1909 // _cleanup_list is empty (which means we just removed the last
tonyp@2472 1910 // region from the _cleanup_list).
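tonyp@2472      // For example (hypothetical value): with
tonyp@2472      // G1SecondaryFreeListAppendLength == 5 the regions are handed
tonyp@2472      // over in batches of five, so a 100-region cleanup list costs
tonyp@2472      // 20 lock acquisitions instead of 100.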
tonyp@2643 1911 if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
tonyp@2472 1912 _cleanup_list.is_empty()) {
tonyp@2472 1913 if (G1ConcRegionFreeingVerbose) {
tonyp@2472 1914 gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
tonyp@2472 1915 "appending "SIZE_FORMAT" entries to the "
tonyp@2472 1916 "secondary_free_list, clean list still has "
tonyp@2472 1917 SIZE_FORMAT" entries",
tonyp@2643 1918 tmp_free_list.length(),
tonyp@2472 1919 _cleanup_list.length());
ysr@777 1920 }
tonyp@2472 1921
tonyp@2472 1922 {
tonyp@2472 1923 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
tonyp@2643 1924 g1h->secondary_free_list_add_as_tail(&tmp_free_list);
tonyp@2472 1925 SecondaryFreeList_lock->notify_all();
tonyp@2472 1926 }
tonyp@2472 1927
tonyp@2472 1928 if (G1StressConcRegionFreeing) {
tonyp@2472 1929 for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
tonyp@2472 1930 os::sleep(Thread::current(), (jlong) 1, false);
tonyp@2472 1931 }
tonyp@2472 1932 }
ysr@777 1933 }
ysr@777 1934 }
tonyp@2643 1935 assert(tmp_free_list.is_empty(), "post-condition");
ysr@777 1936 }
ysr@777 1937
johnc@2494 1938 // Support closures for reference processing in G1
johnc@2494 1939
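johnc@2494      // A non-NULL object is judged alive here if it lies outside the
johnc@2494      // G1-reserved heap, or if it is not "ill", i.e. not dead with
johnc@2494      // respect to the marking information being used.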
johnc@2379 1940 bool G1CMIsAliveClosure::do_object_b(oop obj) {
johnc@2379 1941 HeapWord* addr = (HeapWord*)obj;
johnc@2379 1942 return addr != NULL &&
johnc@2379 1943 (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
johnc@2379 1944 }
ysr@777 1945
ysr@777 1946 class G1CMKeepAliveClosure: public OopClosure {
ysr@777 1947 G1CollectedHeap* _g1;
ysr@777 1948 ConcurrentMark* _cm;
ysr@777 1949 CMBitMap* _bitMap;
ysr@777 1950 public:
ysr@777 1951 G1CMKeepAliveClosure(G1CollectedHeap* g1, ConcurrentMark* cm,
ysr@777 1952 CMBitMap* bitMap) :
ysr@777 1953 _g1(g1), _cm(cm),
ysr@777 1954 _bitMap(bitMap) {}
ysr@777 1955
ysr@1280 1956 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
ysr@1280 1957 virtual void do_oop( oop* p) { do_oop_work(p); }
ysr@1280 1958
ysr@1280 1959 template <class T> void do_oop_work(T* p) {
johnc@2494 1960 oop obj = oopDesc::load_decode_heap_oop(p);
johnc@2494 1961 HeapWord* addr = (HeapWord*)obj;
johnc@2494 1962
johnc@2494 1963 if (_cm->verbose_high())
johnc@2494 1964 gclog_or_tty->print_cr("\t[0] we're looking at location "
johnc@2494 1965 "*"PTR_FORMAT" = "PTR_FORMAT,
johnc@2494 1966 p, (void*) obj);
johnc@2494 1967
johnc@2494 1968 if (_g1->is_in_g1_reserved(addr) && _g1->is_obj_ill(obj)) {
ysr@777 1969 _bitMap->mark(addr);
johnc@2494 1970 _cm->mark_stack_push(obj);
ysr@777 1971 }
ysr@777 1972 }
ysr@777 1973 };
ysr@777 1974
ysr@777 1975 class G1CMDrainMarkingStackClosure: public VoidClosure {
ysr@777 1976 CMMarkStack* _markStack;
ysr@777 1977 CMBitMap* _bitMap;
ysr@777 1978 G1CMKeepAliveClosure* _oopClosure;
ysr@777 1979 public:
ysr@777 1980 G1CMDrainMarkingStackClosure(CMBitMap* bitMap, CMMarkStack* markStack,
ysr@777 1981 G1CMKeepAliveClosure* oopClosure) :
ysr@777 1982 _bitMap(bitMap),
ysr@777 1983 _markStack(markStack),
ysr@777 1984 _oopClosure(oopClosure)
ysr@777 1985 {}
ysr@777 1986
ysr@777 1987 void do_void() {
ysr@777 1988 _markStack->drain((OopClosure*)_oopClosure, _bitMap, false);
ysr@777 1989 }
ysr@777 1990 };
ysr@777 1991
johnc@2494 1992 // 'Keep Alive' closure used by parallel reference processing.
johnc@2494 1993 // An instance of this closure is used in the parallel reference processing
johnc@2494 1994 // code rather than an instance of G1CMKeepAliveClosure. We could have used
johnc@2494 1995 // the G1CMKeepAliveClosure as it is MT-safe. Also, reference objects are
johnc@2494 1996 // placed onto discovered ref lists only once, so we can mark and push with
johnc@2494 1997 // no need to check whether the object has already been marked. Using the
johnc@2494 1998 // G1CMKeepAliveClosure would mean, however, having all the worker threads
johnc@2494 1999 // operating on the global mark stack. This means that an individual
johnc@2494 2000 // worker would be doing lock-free pushes while it processes its own
johnc@2494 2001 // discovered ref list followed by a drain call. If the discovered ref lists
johnc@2494 2002 // are unbalanced then this could cause interference with the other
johnc@2494 2003 // workers. Using a CMTask (and its embedded local data structures)
johnc@2494 2004 // avoids that potential interference.
johnc@2494 2005 class G1CMParKeepAliveAndDrainClosure: public OopClosure {
johnc@2494 2006 ConcurrentMark* _cm;
johnc@2494 2007 CMTask* _task;
johnc@2494 2008 CMBitMap* _bitMap;
johnc@2494 2009 int _ref_counter_limit;
johnc@2494 2010 int _ref_counter;
johnc@2494 2011 public:
johnc@2494 2012 G1CMParKeepAliveAndDrainClosure(ConcurrentMark* cm,
johnc@2494 2013 CMTask* task,
johnc@2494 2014 CMBitMap* bitMap) :
johnc@2494 2015 _cm(cm), _task(task), _bitMap(bitMap),
johnc@2494 2016 _ref_counter_limit(G1RefProcDrainInterval)
johnc@2494 2017 {
johnc@2494 2018 assert(_ref_counter_limit > 0, "sanity");
johnc@2494 2019 _ref_counter = _ref_counter_limit;
johnc@2494 2020 }
johnc@2494 2021
johnc@2494 2022 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
johnc@2494 2023 virtual void do_oop( oop* p) { do_oop_work(p); }
johnc@2494 2024
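johnc@2494      // Hypothetical example: with G1RefProcDrainInterval == 1000,
johnc@2494      // each worker pauses after every 1000 keep-alive calls to drain
johnc@2494      // what it has pushed, bounding the growth of its local queues.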
johnc@2494 2025 template <class T> void do_oop_work(T* p) {
johnc@2494 2026 if (!_cm->has_overflown()) {
johnc@2494 2027 oop obj = oopDesc::load_decode_heap_oop(p);
johnc@2494 2028 if (_cm->verbose_high())
johnc@2494 2029 gclog_or_tty->print_cr("\t[%d] we're looking at location "
johnc@2494 2030 "*"PTR_FORMAT" = "PTR_FORMAT,
johnc@2494 2031 _task->task_id(), p, (void*) obj);
johnc@2494 2032
johnc@2494 2033 _task->deal_with_reference(obj);
johnc@2494 2034 _ref_counter--;
johnc@2494 2035
johnc@2494 2036 if (_ref_counter == 0) {
johnc@2494 2037 // We have dealt with _ref_counter_limit references, pushing them and objects
johnc@2494 2038 // reachable from them onto the local stack (and possibly the global stack).
johnc@2494 2039 // Call do_marking_step() to process these entries. We call the routine in a
johnc@2494 2040 // loop, which we'll exit if there's nothing more to do (i.e. we're done
johnc@2494 2041 // with the entries that we've pushed as a result of the deal_with_reference
johnc@2494 2042 // calls above) or we overflow.
johnc@2494 2043 // Note: CMTask::do_marking_step() can set the CMTask::has_aborted() flag
johnc@2494 2044 // while there may still be some work to do. (See the comment at the
johnc@2494 2045 // beginning of CMTask::do_marking_step() for those conditions - one of which
johnc@2494 2046 // is reaching the specified time target.) It is only when
johnc@2494 2047 // CMTask::do_marking_step() returns without setting the has_aborted() flag
johnc@2494 2048 // that the marking has completed.
johnc@2494 2049 do {
johnc@2494 2050 double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
johnc@2494 2051 _task->do_marking_step(mark_step_duration_ms,
johnc@2494 2052 false /* do_stealing */,
johnc@2494 2053 false /* do_termination */);
johnc@2494 2054 } while (_task->has_aborted() && !_cm->has_overflown());
johnc@2494 2055 _ref_counter = _ref_counter_limit;
johnc@2494 2056 }
johnc@2494 2057 } else {
johnc@2494 2058 if (_cm->verbose_high())
johnc@2494 2059 gclog_or_tty->print_cr("\t[%d] CM Overflow", _task->task_id());
johnc@2494 2060 }
johnc@2494 2061 }
johnc@2494 2062 };
johnc@2494 2063
johnc@2494 2064 class G1CMParDrainMarkingStackClosure: public VoidClosure {
johnc@2494 2065 ConcurrentMark* _cm;
johnc@2494 2066 CMTask* _task;
johnc@2494 2067 public:
johnc@2494 2068 G1CMParDrainMarkingStackClosure(ConcurrentMark* cm, CMTask* task) :
johnc@2494 2069 _cm(cm), _task(task)
johnc@2494 2070 {}
johnc@2494 2071
johnc@2494 2072 void do_void() {
johnc@2494 2073 do {
johnc@2494 2074 if (_cm->verbose_high())
johnc@2494 2075 gclog_or_tty->print_cr("\t[%d] Drain: Calling do marking_step", _task->task_id());
johnc@2494 2076
johnc@2494 2077 // We call CMTask::do_marking_step() to completely drain the local and
johnc@2494 2078 // global marking stacks. The routine is called in a loop, which we'll
johnc@2494 2079 // exit if there's nothing more to do (i.e. we've completely drained the
johnc@2494 2080 // entries that were pushed as a result of applying the
johnc@2494 2081 // G1CMParKeepAliveAndDrainClosure to the entries on the discovered ref
johnc@2494 2082 // lists above) or we overflow the global marking stack.
johnc@2494 2083 // Note: CMTask::do_marking_step() can set the CMTask::has_aborted() flag
johnc@2494 2084 // while there may still be some work to do. (See the comment at the
johnc@2494 2085 // beginning of CMTask::do_marking_step() for those conditions - one of which
johnc@2494 2086 // is reaching the specified time target.) It is only when
johnc@2494 2087 // CMTask::do_marking_step() returns without setting the has_aborted() flag
johnc@2494 2088 // that the marking has completed.
johnc@2494 2089
johnc@2494 2090 _task->do_marking_step(1000000000.0 /* something very large */,
johnc@2494 2091 true /* do_stealing */,
johnc@2494 2092 true /* do_termination */);
johnc@2494 2093 } while (_task->has_aborted() && !_cm->has_overflown());
johnc@2494 2094 }
johnc@2494 2095 };
johnc@2494 2096
johnc@2494 2097 // Implementation of AbstractRefProcTaskExecutor for G1
johnc@2494 2098 class G1RefProcTaskExecutor: public AbstractRefProcTaskExecutor {
johnc@2494 2099 private:
johnc@2494 2100 G1CollectedHeap* _g1h;
johnc@2494 2101 ConcurrentMark* _cm;
johnc@2494 2102 CMBitMap* _bitmap;
johnc@2494 2103 WorkGang* _workers;
johnc@2494 2104 int _active_workers;
johnc@2494 2105
johnc@2494 2106 public:
johnc@2494 2107 G1RefProcTaskExecutor(G1CollectedHeap* g1h,
johnc@2494 2108 ConcurrentMark* cm,
johnc@2494 2109 CMBitMap* bitmap,
johnc@2494 2110 WorkGang* workers,
johnc@2494 2111 int n_workers) :
johnc@2494 2112 _g1h(g1h), _cm(cm), _bitmap(bitmap),
johnc@2494 2113 _workers(workers), _active_workers(n_workers)
johnc@2494 2114 { }
johnc@2494 2115
johnc@2494 2116 // Executes the given task using concurrent marking worker threads.
johnc@2494 2117 virtual void execute(ProcessTask& task);
johnc@2494 2118 virtual void execute(EnqueueTask& task);
johnc@2494 2119 };
johnc@2494 2120
johnc@2494 2121 class G1RefProcTaskProxy: public AbstractGangTask {
johnc@2494 2122 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
johnc@2494 2123 ProcessTask& _proc_task;
johnc@2494 2124 G1CollectedHeap* _g1h;
johnc@2494 2125 ConcurrentMark* _cm;
johnc@2494 2126 CMBitMap* _bitmap;
johnc@2494 2127
johnc@2494 2128 public:
johnc@2494 2129 G1RefProcTaskProxy(ProcessTask& proc_task,
johnc@2494 2130 G1CollectedHeap* g1h,
johnc@2494 2131 ConcurrentMark* cm,
johnc@2494 2132 CMBitMap* bitmap) :
johnc@2494 2133 AbstractGangTask("Process reference objects in parallel"),
johnc@2494 2134 _proc_task(proc_task), _g1h(g1h), _cm(cm), _bitmap(bitmap)
johnc@2494 2135 {}
johnc@2494 2136
johnc@2494 2137 virtual void work(int i) {
johnc@2494 2138 CMTask* marking_task = _cm->task(i);
johnc@2494 2139 G1CMIsAliveClosure g1_is_alive(_g1h);
johnc@2494 2140 G1CMParKeepAliveAndDrainClosure g1_par_keep_alive(_cm, marking_task, _bitmap);
johnc@2494 2141 G1CMParDrainMarkingStackClosure g1_par_drain(_cm, marking_task);
johnc@2494 2142
johnc@2494 2143 _proc_task.work(i, g1_is_alive, g1_par_keep_alive, g1_par_drain);
johnc@2494 2144 }
johnc@2494 2145 };
johnc@2494 2146
johnc@2494 2147 void G1RefProcTaskExecutor::execute(ProcessTask& proc_task) {
johnc@2494 2148 assert(_workers != NULL, "Need parallel worker threads.");
johnc@2494 2149
johnc@2494 2150 G1RefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm, _bitmap);
johnc@2494 2151
johnc@2494 2152 // We need to reset the phase for each task execution so that
johnc@2494 2153 // the termination protocol of CMTask::do_marking_step works.
johnc@2494 2154 _cm->set_phase(_active_workers, false /* concurrent */);
johnc@2494 2155 _g1h->set_par_threads(_active_workers);
johnc@2494 2156 _workers->run_task(&proc_task_proxy);
johnc@2494 2157 _g1h->set_par_threads(0);
johnc@2494 2158 }
johnc@2494 2159
johnc@2494 2160 class G1RefEnqueueTaskProxy: public AbstractGangTask {
johnc@2494 2161 typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
johnc@2494 2162 EnqueueTask& _enq_task;
johnc@2494 2163
johnc@2494 2164 public:
johnc@2494 2165 G1RefEnqueueTaskProxy(EnqueueTask& enq_task) :
johnc@2494 2166 AbstractGangTask("Enqueue reference objects in parallel"),
johnc@2494 2167 _enq_task(enq_task)
johnc@2494 2168 { }
johnc@2494 2169
johnc@2494 2170 virtual void work(int i) {
johnc@2494 2171 _enq_task.work(i);
johnc@2494 2172 }
johnc@2494 2173 };
johnc@2494 2174
johnc@2494 2175 void G1RefProcTaskExecutor::execute(EnqueueTask& enq_task) {
johnc@2494 2176 assert(_workers != NULL, "Need parallel worker threads.");
johnc@2494 2177
johnc@2494 2178 G1RefEnqueueTaskProxy enq_task_proxy(enq_task);
johnc@2494 2179
johnc@2494 2180 _g1h->set_par_threads(_active_workers);
johnc@2494 2181 _workers->run_task(&enq_task_proxy);
johnc@2494 2182 _g1h->set_par_threads(0);
johnc@2494 2183 }
johnc@2494 2184
ysr@777 2185 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
ysr@777 2186 ResourceMark rm;
ysr@777 2187 HandleMark hm;
ysr@888 2188 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@888 2189 ReferenceProcessor* rp = g1h->ref_processor();
ysr@777 2190
johnc@2316 2191 // See the comment in G1CollectedHeap::ref_processing_init()
johnc@2316 2192 // about how reference processing currently works in G1.
johnc@2316 2193
ysr@777 2194 // Process weak references.
ysr@892 2195 rp->setup_policy(clear_all_soft_refs);
ysr@777 2196 assert(_markStack.isEmpty(), "mark stack should be empty");
ysr@777 2197
johnc@2379 2198 G1CMIsAliveClosure g1_is_alive(g1h);
johnc@2379 2199 G1CMKeepAliveClosure g1_keep_alive(g1h, this, nextMarkBitMap());
ysr@777 2200 G1CMDrainMarkingStackClosure
johnc@2379 2201 g1_drain_mark_stack(nextMarkBitMap(), &_markStack, &g1_keep_alive);
johnc@2494 2202 // We use the work gang from the G1CollectedHeap and we utilize all
johnc@2494 2203 // the worker threads.
ysr@2651 2204 int active_workers = g1h->workers() ? g1h->workers()->total_workers() : 1;
ysr@2651 2205 active_workers = MAX2(MIN2(active_workers, (int)_max_task_num), 1);
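ysr@2651      // i.e. clamp active_workers to [1, _max_task_num], since each
ysr@2651      // parallel reference processing worker drives its own CMTask
ysr@2651      // (see G1RefProcTaskProxy::work()).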
johnc@2494 2206
johnc@2494 2207 G1RefProcTaskExecutor par_task_executor(g1h, this, nextMarkBitMap(),
johnc@2494 2208 g1h->workers(), active_workers);
johnc@2494 2209
ysr@2651 2210
johnc@2494 2211 if (rp->processing_is_mt()) {
johnc@2494 2212 // Set the degree of MT here. If the discovery is done MT, there
johnc@2494 2213 // may have been a different number of threads doing the discovery
johnc@2494 2214 // and a different number of discovered lists may have Ref objects.
johnc@2494 2215 // That is OK as long as the Reference lists are balanced (see
johnc@2494 2216 // balance_all_queues() and balance_queues()).
ysr@2651 2217 rp->set_active_mt_degree(active_workers);
johnc@2494 2218
johnc@2494 2219 rp->process_discovered_references(&g1_is_alive,
johnc@2494 2220 &g1_keep_alive,
johnc@2494 2221 &g1_drain_mark_stack,
johnc@2494 2222 &par_task_executor);
johnc@2494 2223
johnc@2494 2224 // The work routines of the parallel keep_alive and drain_marking_stack
johnc@2494 2225 // will set the has_overflown flag if we overflow the global marking
johnc@2494 2226 // stack.
johnc@2494 2227 } else {
johnc@2494 2228 rp->process_discovered_references(&g1_is_alive,
johnc@2494 2229 &g1_keep_alive,
johnc@2494 2230 &g1_drain_mark_stack,
johnc@2494 2231 NULL);
johnc@2494 2232
johnc@2494 2233 }
johnc@2494 2234
ysr@777 2235 assert(_markStack.overflow() || _markStack.isEmpty(),
johnc@2494 2236 "mark stack should be empty (unless it overflowed)");
ysr@777 2237 if (_markStack.overflow()) {
johnc@2494 2238 // Should have been done already when we tried to push an
johnc@2494 2239 // entry on to the global mark stack. But let's do it again.
ysr@777 2240 set_has_overflown();
ysr@777 2241 }
ysr@777 2242
johnc@2494 2243 if (rp->processing_is_mt()) {
johnc@2494 2244 assert(rp->num_q() == active_workers, "why not");
johnc@2494 2245 rp->enqueue_discovered_references(&par_task_executor);
johnc@2494 2246 } else {
johnc@2494 2247 rp->enqueue_discovered_references();
johnc@2494 2248 }
johnc@2494 2249
ysr@777 2250 rp->verify_no_references_recorded();
ysr@777 2251 assert(!rp->discovery_enabled(), "should have been disabled");
ysr@777 2252
coleenp@2497 2253 // Now clean up stale oops in StringTable
johnc@2379 2254 StringTable::unlink(&g1_is_alive);
coleenp@2497 2255 // Clean up unreferenced symbols in symbol table.
coleenp@2497 2256 SymbolTable::unlink();
ysr@777 2257 }
ysr@777 2258
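ysr@777      // Swap the roles of the two marking bitmaps: the bitmap just
ysr@777      // completed by this cycle becomes the "prev" (read-only) bitmap,
ysr@777      // and the old prev bitmap will be cleared and reused as "next"
ysr@777      // by the following cycle.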
ysr@777 2259 void ConcurrentMark::swapMarkBitMaps() {
ysr@777 2260 CMBitMapRO* temp = _prevMarkBitMap;
ysr@777 2261 _prevMarkBitMap = (CMBitMapRO*)_nextMarkBitMap;
ysr@777 2262 _nextMarkBitMap = (CMBitMap*) temp;
ysr@777 2263 }
ysr@777 2264
ysr@777 2265 class CMRemarkTask: public AbstractGangTask {
ysr@777 2266 private:
ysr@777 2267 ConcurrentMark *_cm;
ysr@777 2268
ysr@777 2269 public:
ysr@777 2270 void work(int worker_i) {
ysr@777 2271 // Since all available tasks are actually started, we should
ysr@777 2272 // only proceed if we're supposed to be active.
ysr@777 2273 if ((size_t)worker_i < _cm->active_tasks()) {
ysr@777 2274 CMTask* task = _cm->task(worker_i);
ysr@777 2275 task->record_start_time();
ysr@777 2276 do {
johnc@2494 2277 task->do_marking_step(1000000000.0 /* something very large */,
johnc@2494 2278 true /* do_stealing */,
johnc@2494 2279 true /* do_termination */);
ysr@777 2280 } while (task->has_aborted() && !_cm->has_overflown());
ysr@777 2281 // If we overflow, then we do not want to restart. We instead
ysr@777 2282 // want to abort remark and do concurrent marking again.
ysr@777 2283 task->record_end_time();
ysr@777 2284 }
ysr@777 2285 }
ysr@777 2286
ysr@777 2287 CMRemarkTask(ConcurrentMark* cm) :
ysr@777 2288 AbstractGangTask("Par Remark"), _cm(cm) { }
ysr@777 2289 };
ysr@777 2290
ysr@777 2291 void ConcurrentMark::checkpointRootsFinalWork() {
ysr@777 2292 ResourceMark rm;
ysr@777 2293 HandleMark hm;
ysr@777 2294 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@777 2295
ysr@777 2296 g1h->ensure_parsability(false);
ysr@777 2297
jmasa@2188 2298 if (G1CollectedHeap::use_parallel_gc_threads()) {
jrose@1424 2299 G1CollectedHeap::StrongRootsScope srs(g1h);
ysr@777 2300 // this is remark, so we'll use up all available threads
ysr@777 2301 int active_workers = ParallelGCThreads;
johnc@2494 2302 set_phase(active_workers, false /* concurrent */);
ysr@777 2303
ysr@777 2304 CMRemarkTask remarkTask(this);
ysr@777 2305 // We will start all available threads, even if we decide that the
ysr@777 2306 // active_workers will be fewer. The extra ones will just bail out
ysr@777 2307 // immediately.
ysr@777 2308 int n_workers = g1h->workers()->total_workers();
ysr@777 2309 g1h->set_par_threads(n_workers);
ysr@777 2310 g1h->workers()->run_task(&remarkTask);
ysr@777 2311 g1h->set_par_threads(0);
ysr@777 2312 } else {
jrose@1424 2313 G1CollectedHeap::StrongRootsScope srs(g1h);
ysr@777 2314 // this is remark running serially, so we use a single task
ysr@777 2315 int active_workers = 1;
johnc@2494 2316 set_phase(active_workers, false /* concurrent */);
ysr@777 2317
ysr@777 2318 CMRemarkTask remarkTask(this);
ysr@777 2319 // In the serial case we run the remark task directly on this thread.
ysr@777 2322 remarkTask.work(0);
ysr@777 2323 }
tonyp@1458 2324 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
tonyp@1458 2325 guarantee(satb_mq_set.completed_buffers_num() == 0, "invariant");
ysr@777 2326
ysr@777 2327 print_stats();
ysr@777 2328
ysr@777 2329 #if VERIFY_OBJS_PROCESSED
ysr@777 2330 if (_scan_obj_cl.objs_processed != ThreadLocalObjQueue::objs_enqueued) {
ysr@777 2331 gclog_or_tty->print_cr("Processed = %d, enqueued = %d.",
ysr@777 2332 _scan_obj_cl.objs_processed,
ysr@777 2333 ThreadLocalObjQueue::objs_enqueued);
ysr@777 2334 guarantee(_scan_obj_cl.objs_processed ==
ysr@777 2335 ThreadLocalObjQueue::objs_enqueued,
ysr@777 2336 "Different number of objs processed and enqueued.");
ysr@777 2337 }
ysr@777 2338 #endif
ysr@777 2339 }
ysr@777 2340
tonyp@1479 2341 #ifndef PRODUCT
tonyp@1479 2342
tonyp@1823 2343 class PrintReachableOopClosure: public OopClosure {
ysr@777 2344 private:
ysr@777 2345 G1CollectedHeap* _g1h;
ysr@777 2346 CMBitMapRO* _bitmap;
ysr@777 2347 outputStream* _out;
tonyp@1479 2348 bool _use_prev_marking;
tonyp@1823 2349 bool _all;
ysr@777 2350
ysr@777 2351 public:
tonyp@1823 2352 PrintReachableOopClosure(CMBitMapRO* bitmap,
tonyp@1823 2353 outputStream* out,
tonyp@1823 2354 bool use_prev_marking,
tonyp@1823 2355 bool all) :
tonyp@1479 2356 _g1h(G1CollectedHeap::heap()),
tonyp@1823 2357 _bitmap(bitmap), _out(out), _use_prev_marking(use_prev_marking), _all(all) { }
ysr@777 2358
ysr@1280 2359 void do_oop(narrowOop* p) { do_oop_work(p); }
ysr@1280 2360 void do_oop( oop* p) { do_oop_work(p); }
ysr@1280 2361
ysr@1280 2362 template <class T> void do_oop_work(T* p) {
ysr@1280 2363 oop obj = oopDesc::load_decode_heap_oop(p);
ysr@777 2364 const char* str = NULL;
ysr@777 2365 const char* str2 = "";
ysr@777 2366
tonyp@1823 2367 if (obj == NULL) {
tonyp@1823 2368 str = "";
tonyp@1823 2369 } else if (!_g1h->is_in_g1_reserved(obj)) {
tonyp@1823 2370 str = " O";
tonyp@1823 2371 } else {
ysr@777 2372 HeapRegion* hr = _g1h->heap_region_containing(obj);
tonyp@1458 2373 guarantee(hr != NULL, "invariant");
tonyp@1479 2374 bool over_tams = false;
tonyp@1479 2375 if (_use_prev_marking) {
tonyp@1479 2376 over_tams = hr->obj_allocated_since_prev_marking(obj);
tonyp@1479 2377 } else {
tonyp@1479 2378 over_tams = hr->obj_allocated_since_next_marking(obj);
tonyp@1479 2379 }
tonyp@1823 2380 bool marked = _bitmap->isMarked((HeapWord*) obj);
tonyp@1479 2381
tonyp@1479 2382 if (over_tams) {
tonyp@1823 2383 str = " >";
tonyp@1823 2384 if (marked) {
ysr@777 2385 str2 = " AND MARKED";
tonyp@1479 2386 }
tonyp@1823 2387 } else if (marked) {
tonyp@1823 2388 str = " M";
tonyp@1479 2389 } else {
tonyp@1823 2390 str = " NOT";
tonyp@1479 2391 }
ysr@777 2392 }
ysr@777 2393
tonyp@1823 2394 _out->print_cr(" "PTR_FORMAT": "PTR_FORMAT"%s%s",
ysr@777 2395 p, (void*) obj, str, str2);
ysr@777 2396 }
ysr@777 2397 };
ysr@777 2398
tonyp@1823 2399 class PrintReachableObjectClosure : public ObjectClosure {
ysr@777 2400 private:
tonyp@1479 2401 CMBitMapRO* _bitmap;
ysr@777 2402 outputStream* _out;
tonyp@1479 2403 bool _use_prev_marking;
tonyp@1823 2404 bool _all;
tonyp@1823 2405 HeapRegion* _hr;
ysr@777 2406
ysr@777 2407 public:
tonyp@1823 2408 PrintReachableObjectClosure(CMBitMapRO* bitmap,
tonyp@1823 2409 outputStream* out,
tonyp@1823 2410 bool use_prev_marking,
tonyp@1823 2411 bool all,
tonyp@1823 2412 HeapRegion* hr) :
tonyp@1823 2413 _bitmap(bitmap), _out(out),
tonyp@1823 2414 _use_prev_marking(use_prev_marking), _all(all), _hr(hr) { }
tonyp@1823 2415
tonyp@1823 2416 void do_object(oop o) {
tonyp@1823 2417 bool over_tams;
tonyp@1823 2418 if (_use_prev_marking) {
tonyp@1823 2419 over_tams = _hr->obj_allocated_since_prev_marking(o);
tonyp@1823 2420 } else {
tonyp@1823 2421 over_tams = _hr->obj_allocated_since_next_marking(o);
tonyp@1823 2422 }
tonyp@1823 2423 bool marked = _bitmap->isMarked((HeapWord*) o);
tonyp@1823 2424 bool print_it = _all || over_tams || marked;
tonyp@1823 2425
tonyp@1823 2426 if (print_it) {
tonyp@1823 2427 _out->print_cr(" "PTR_FORMAT"%s",
tonyp@1823 2428 o, (over_tams) ? " >" : (marked) ? " M" : "");
tonyp@1823 2429 PrintReachableOopClosure oopCl(_bitmap, _out, _use_prev_marking, _all);
tonyp@1823 2430 o->oop_iterate(&oopCl);
tonyp@1823 2431 }
ysr@777 2432 }
ysr@777 2433 };
ysr@777 2434
tonyp@1823 2435 class PrintReachableRegionClosure : public HeapRegionClosure {
ysr@777 2436 private:
tonyp@1479 2437 CMBitMapRO* _bitmap;
ysr@777 2438 outputStream* _out;
tonyp@1479 2439 bool _use_prev_marking;
tonyp@1823 2440 bool _all;
ysr@777 2441
ysr@777 2442 public:
ysr@777 2443 bool doHeapRegion(HeapRegion* hr) {
ysr@777 2444 HeapWord* b = hr->bottom();
ysr@777 2445 HeapWord* e = hr->end();
ysr@777 2446 HeapWord* t = hr->top();
tonyp@1479 2447 HeapWord* p = NULL;
tonyp@1479 2448 if (_use_prev_marking) {
tonyp@1479 2449 p = hr->prev_top_at_mark_start();
tonyp@1479 2450 } else {
tonyp@1479 2451 p = hr->next_top_at_mark_start();
tonyp@1479 2452 }
ysr@777 2453 _out->print_cr("** ["PTR_FORMAT", "PTR_FORMAT"] top: "PTR_FORMAT" "
tonyp@1479 2454 "TAMS: "PTR_FORMAT, b, e, t, p);
tonyp@1823 2455 _out->cr();
tonyp@1823 2456
tonyp@1823 2457 HeapWord* from = b;
tonyp@1823 2458 HeapWord* to = t;
tonyp@1823 2459
tonyp@1823 2460 if (to > from) {
tonyp@1823 2461 _out->print_cr("Objects in ["PTR_FORMAT", "PTR_FORMAT"]", from, to);
tonyp@1823 2462 _out->cr();
tonyp@1823 2463 PrintReachableObjectClosure ocl(_bitmap, _out,
tonyp@1823 2464 _use_prev_marking, _all, hr);
tonyp@1823 2465 hr->object_iterate_mem_careful(MemRegion(from, to), &ocl);
tonyp@1823 2466 _out->cr();
tonyp@1823 2467 }
ysr@777 2468
ysr@777 2469 return false;
ysr@777 2470 }
ysr@777 2471
tonyp@1823 2472 PrintReachableRegionClosure(CMBitMapRO* bitmap,
tonyp@1823 2473 outputStream* out,
tonyp@1823 2474 bool use_prev_marking,
tonyp@1823 2475 bool all) :
tonyp@1823 2476 _bitmap(bitmap), _out(out), _use_prev_marking(use_prev_marking), _all(all) { }
ysr@777 2477 };
ysr@777 2478
tonyp@1823 2479 void ConcurrentMark::print_reachable(const char* str,
tonyp@1823 2480 bool use_prev_marking,
tonyp@1823 2481 bool all) {
tonyp@1823 2482 gclog_or_tty->cr();
tonyp@1823 2483 gclog_or_tty->print_cr("== Doing heap dump... ");
tonyp@1479 2484
tonyp@1479 2485 if (G1PrintReachableBaseFile == NULL) {
tonyp@1479 2486 gclog_or_tty->print_cr(" #### error: no base file defined");
tonyp@1479 2487 return;
tonyp@1479 2488 }
tonyp@1479 2489
tonyp@1479 2490 if (strlen(G1PrintReachableBaseFile) + 1 + strlen(str) >
tonyp@1479 2491 (JVM_MAXPATHLEN - 1)) {
tonyp@1479 2492 gclog_or_tty->print_cr(" #### error: file name too long");
tonyp@1479 2493 return;
tonyp@1479 2494 }
tonyp@1479 2495
tonyp@1479 2496 char file_name[JVM_MAXPATHLEN];
tonyp@1479 2497 sprintf(file_name, "%s.%s", G1PrintReachableBaseFile, str);
tonyp@1479 2498 gclog_or_tty->print_cr(" dumping to file %s", file_name);
tonyp@1479 2499
tonyp@1479 2500 fileStream fout(file_name);
tonyp@1479 2501 if (!fout.is_open()) {
tonyp@1479 2502 gclog_or_tty->print_cr(" #### error: could not open file");
tonyp@1479 2503 return;
tonyp@1479 2504 }
tonyp@1479 2505
tonyp@1479 2506 outputStream* out = &fout;
tonyp@1479 2507
tonyp@1479 2508 CMBitMapRO* bitmap = NULL;
tonyp@1479 2509 if (use_prev_marking) {
tonyp@1479 2510 bitmap = _prevMarkBitMap;
tonyp@1479 2511 } else {
tonyp@1479 2512 bitmap = _nextMarkBitMap;
tonyp@1479 2513 }
tonyp@1479 2514
tonyp@1479 2515 out->print_cr("-- USING %s", (use_prev_marking) ? "PTAMS" : "NTAMS");
tonyp@1479 2516 out->cr();
tonyp@1479 2517
tonyp@1823 2518 out->print_cr("--- ITERATING OVER REGIONS");
tonyp@1479 2519 out->cr();
tonyp@1823 2520 PrintReachableRegionClosure rcl(bitmap, out, use_prev_marking, all);
ysr@777 2521 _g1h->heap_region_iterate(&rcl);
tonyp@1479 2522 out->cr();
tonyp@1479 2523
tonyp@1479 2524 gclog_or_tty->print_cr(" done");
tonyp@1823 2525 gclog_or_tty->flush();
ysr@777 2526 }
ysr@777 2527
tonyp@1479 2528 #endif // PRODUCT
tonyp@1479 2529
ysr@777 2530 // This note is for drainAllSATBBuffers and the code in between.
ysr@777 2531 // In the future we could reuse a task to do this work during an
ysr@777 2532 // evacuation pause (since now tasks are not active and can be claimed
ysr@777 2533 // during an evacuation pause). This was a late change to the code and
ysr@777 2534 // is currently not being taken advantage of.
ysr@777 2535
ysr@777 2536 class CMGlobalObjectClosure : public ObjectClosure {
ysr@777 2537 private:
ysr@777 2538 ConcurrentMark* _cm;
ysr@777 2539
ysr@777 2540 public:
ysr@777 2541 void do_object(oop obj) {
ysr@777 2542 _cm->deal_with_reference(obj);
ysr@777 2543 }
ysr@777 2544
ysr@777 2545 CMGlobalObjectClosure(ConcurrentMark* cm) : _cm(cm) { }
ysr@777 2546 };
ysr@777 2547
ysr@777 2548 void ConcurrentMark::deal_with_reference(oop obj) {
ysr@777 2549 if (verbose_high())
ysr@777 2550 gclog_or_tty->print_cr("[global] we're dealing with reference "PTR_FORMAT,
ysr@777 2551 (void*) obj);
ysr@777 2552
ysr@777 2553
ysr@777 2554 HeapWord* objAddr = (HeapWord*) obj;
ysr@1280 2555 assert(obj->is_oop_or_null(true /* ignore mark word */), "Error");
ysr@777 2556 if (_g1h->is_in_g1_reserved(objAddr)) {
tonyp@1458 2557 assert(obj != NULL, "is_in_g1_reserved should ensure this");
ysr@777 2558 HeapRegion* hr = _g1h->heap_region_containing(obj);
ysr@777 2559 if (_g1h->is_obj_ill(obj, hr)) {
ysr@777 2560 if (verbose_high())
ysr@777 2561 gclog_or_tty->print_cr("[global] "PTR_FORMAT" is not considered "
ysr@777 2562 "marked", (void*) obj);
ysr@777 2563
ysr@777 2564 // we need to mark it first
ysr@777 2565 if (_nextMarkBitMap->parMark(objAddr)) {
ysr@777 2566 // No OrderAccess:store_load() is needed. It is implicit in the
ysr@777 2567 // CAS done in parMark(objAddr) above
ysr@777 2568 HeapWord* finger = _finger;
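ysr@777      // Objects at or beyond the global finger will be visited when
ysr@777      // their region is eventually claimed; an object below the finger
ysr@777      // may sit in a region that has already been scanned, so it must
ysr@777      // be pushed on the global mark stack explicitly.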
ysr@777 2569 if (objAddr < finger) {
ysr@777 2570 if (verbose_high())
ysr@777 2571 gclog_or_tty->print_cr("[global] below the global finger "
ysr@777 2572 "("PTR_FORMAT"), pushing it", finger);
ysr@777 2573 if (!mark_stack_push(obj)) {
ysr@777 2574 if (verbose_low())
ysr@777 2575 gclog_or_tty->print_cr("[global] global stack overflow during "
ysr@777 2576 "deal_with_reference");
ysr@777 2577 }
ysr@777 2578 }
ysr@777 2579 }
ysr@777 2580 }
ysr@777 2581 }
ysr@777 2582 }
ysr@777 2583
ysr@777 2584 void ConcurrentMark::drainAllSATBBuffers() {
ysr@777 2585 CMGlobalObjectClosure oc(this);
ysr@777 2586 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
ysr@777 2587 satb_mq_set.set_closure(&oc);
ysr@777 2588
ysr@777 2589 while (satb_mq_set.apply_closure_to_completed_buffer()) {
ysr@777 2590 if (verbose_medium())
ysr@777 2591 gclog_or_tty->print_cr("[global] processed an SATB buffer");
ysr@777 2592 }
ysr@777 2593
ysr@777 2594 // no need to check whether we should do this, as this is only
ysr@777 2595 // called during an evacuation pause
ysr@777 2596 satb_mq_set.iterate_closure_all_threads();
ysr@777 2597
ysr@777 2598 satb_mq_set.set_closure(NULL);
tonyp@1458 2599 assert(satb_mq_set.completed_buffers_num() == 0, "invariant");
ysr@777 2600 }
ysr@777 2601
ysr@777 2602 void ConcurrentMark::markPrev(oop p) {
ysr@777 2603 // Note we are overriding the read-only view of the prev map here, via
ysr@777 2604 // the cast.
ysr@777 2605 ((CMBitMap*)_prevMarkBitMap)->mark((HeapWord*)p);
ysr@777 2606 }
ysr@777 2607
ysr@777 2608 void ConcurrentMark::clear(oop p) {
ysr@777 2609 assert(p != NULL && p->is_oop(), "expected an oop");
ysr@777 2610 HeapWord* addr = (HeapWord*)p;
ysr@777 2611   assert(addr >= _nextMarkBitMap->startWord() &&
ysr@777 2612          addr < _nextMarkBitMap->endWord(), "in a region");
ysr@777 2613
ysr@777 2614 _nextMarkBitMap->clear(addr);
ysr@777 2615 }
ysr@777 2616
ysr@777 2617 void ConcurrentMark::clearRangeBothMaps(MemRegion mr) {
ysr@777 2618 // Note we are overriding the read-only view of the prev map here, via
ysr@777 2619 // the cast.
ysr@777 2620 ((CMBitMap*)_prevMarkBitMap)->clearRange(mr);
ysr@777 2621 _nextMarkBitMap->clearRange(mr);
ysr@777 2622 }
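
// A note on the two bitmaps, as a sketch of the overall scheme: the
// "prev" bitmap (with PTAMS) holds the results of the last completed
// marking cycle and is what liveness queries during pauses rely on,
// while the "next" bitmap (with NTAMS) is the one being built by the
// in-progress cycle. The two are swapped when a cycle completes,
// which is why writes to "prev", as above, have to cast away its
// read-only view.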
ysr@777 2623
ysr@777 2624 HeapRegion*
ysr@777 2625 ConcurrentMark::claim_region(int task_num) {
ysr@777 2626 // "checkpoint" the finger
ysr@777 2627 HeapWord* finger = _finger;
ysr@777 2628
ysr@777 2629 // _heap_end will not change underneath our feet; it only changes at
ysr@777 2630 // yield points.
ysr@777 2631 while (finger < _heap_end) {
tonyp@1458 2632 assert(_g1h->is_in_g1_reserved(finger), "invariant");
ysr@777 2633
ysr@777 2634 // is the gap between reading the finger and doing the CAS too long?
ysr@777 2635
ysr@777 2636 HeapRegion* curr_region = _g1h->heap_region_containing(finger);
ysr@777 2637 HeapWord* bottom = curr_region->bottom();
ysr@777 2638 HeapWord* end = curr_region->end();
ysr@777 2639 HeapWord* limit = curr_region->next_top_at_mark_start();
ysr@777 2640
ysr@777 2641 if (verbose_low())
ysr@777 2642 gclog_or_tty->print_cr("[%d] curr_region = "PTR_FORMAT" "
ysr@777 2643 "["PTR_FORMAT", "PTR_FORMAT"), "
ysr@777 2644 "limit = "PTR_FORMAT,
ysr@777 2645 task_num, curr_region, bottom, end, limit);
ysr@777 2646
ysr@777 2647 HeapWord* res =
ysr@777 2648 (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
ysr@777 2649 if (res == finger) {
ysr@777 2650 // we succeeded
ysr@777 2651
ysr@777 2652       // notice that _finger == end cannot be guaranteed here since
ysr@777 2653       // someone else might have moved the finger even further
tonyp@1458 2654 assert(_finger >= end, "the finger should have moved forward");
ysr@777 2655
ysr@777 2656 if (verbose_low())
ysr@777 2657 gclog_or_tty->print_cr("[%d] we were successful with region = "
ysr@777 2658 PTR_FORMAT, task_num, curr_region);
ysr@777 2659
ysr@777 2660 if (limit > bottom) {
ysr@777 2661 if (verbose_low())
ysr@777 2662 gclog_or_tty->print_cr("[%d] region "PTR_FORMAT" is not empty, "
ysr@777 2663 "returning it ", task_num, curr_region);
ysr@777 2664 return curr_region;
ysr@777 2665 } else {
tonyp@1458 2666 assert(limit == bottom,
tonyp@1458 2667 "the region limit should be at bottom");
ysr@777 2668 if (verbose_low())
ysr@777 2669 gclog_or_tty->print_cr("[%d] region "PTR_FORMAT" is empty, "
ysr@777 2670 "returning NULL", task_num, curr_region);
ysr@777 2671 // we return NULL and the caller should try calling
ysr@777 2672 // claim_region() again.
ysr@777 2673 return NULL;
ysr@777 2674 }
ysr@777 2675 } else {
tonyp@1458 2676 assert(_finger > finger, "the finger should have moved forward");
ysr@777 2677 if (verbose_low())
ysr@777 2678 gclog_or_tty->print_cr("[%d] somebody else moved the finger, "
ysr@777 2679 "global finger = "PTR_FORMAT", "
ysr@777 2680 "our finger = "PTR_FORMAT,
ysr@777 2681 task_num, _finger, finger);
ysr@777 2682
ysr@777 2683 // read it again
ysr@777 2684 finger = _finger;
ysr@777 2685 }
ysr@777 2686 }
ysr@777 2687
ysr@777 2688 return NULL;
ysr@777 2689 }
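
// Schematically, the claim protocol above is a CAS loop over the
// global finger (illustrative pseudo-code; an empty region makes the
// caller retry by returning NULL):
//
//   HeapWord* finger = _finger;                      // snapshot
//   while (finger < _heap_end) {
//     HeapRegion* hr  = heap_region_containing(finger);
//     HeapWord*   end = hr->end();
//     if (Atomic::cmpxchg_ptr(end, &_finger, finger) == finger) {
//       return hr;     // we atomically advanced the finger past hr
//     }
//     finger = _finger; // lost the race; retry with the new finger
//   }
//
// Each task therefore claims a whole region at a time, and the
// global finger only ever moves forward.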
ysr@777 2690
johnc@2190 2691 bool ConcurrentMark::invalidate_aborted_regions_in_cset() {
johnc@2190 2692 bool result = false;
johnc@2190 2693 for (int i = 0; i < (int)_max_task_num; ++i) {
johnc@2190 2694 CMTask* the_task = _tasks[i];
johnc@2190 2695 MemRegion mr = the_task->aborted_region();
johnc@2190 2696 if (mr.start() != NULL) {
johnc@2190 2697 assert(mr.end() != NULL, "invariant");
johnc@2190 2698 assert(mr.word_size() > 0, "invariant");
johnc@2190 2699 HeapRegion* hr = _g1h->heap_region_containing(mr.start());
johnc@2190 2700 assert(hr != NULL, "invariant");
johnc@2190 2701 if (hr->in_collection_set()) {
johnc@2190 2702 // The region points into the collection set
johnc@2190 2703 the_task->set_aborted_region(MemRegion());
johnc@2190 2704 result = true;
johnc@2190 2705 }
johnc@2190 2706 }
johnc@2190 2707 }
johnc@2190 2708 return result;
johnc@2190 2709 }
johnc@2190 2710
johnc@2190 2711 bool ConcurrentMark::has_aborted_regions() {
johnc@2190 2712 for (int i = 0; i < (int)_max_task_num; ++i) {
johnc@2190 2713 CMTask* the_task = _tasks[i];
johnc@2190 2714 MemRegion mr = the_task->aborted_region();
johnc@2190 2715 if (mr.start() != NULL) {
johnc@2190 2716 assert(mr.end() != NULL, "invariant");
johnc@2190 2717 assert(mr.word_size() > 0, "invariant");
johnc@2190 2718 return true;
johnc@2190 2719 }
johnc@2190 2720 }
johnc@2190 2721 return false;
johnc@2190 2722 }
johnc@2190 2723
ysr@777 2724 void ConcurrentMark::oops_do(OopClosure* cl) {
ysr@777 2725 if (_markStack.size() > 0 && verbose_low())
ysr@777 2726 gclog_or_tty->print_cr("[global] scanning the global marking stack, "
ysr@777 2727 "size = %d", _markStack.size());
ysr@777 2728 // we first iterate over the contents of the mark stack...
ysr@777 2729 _markStack.oops_do(cl);
ysr@777 2730
ysr@777 2731 for (int i = 0; i < (int)_max_task_num; ++i) {
ysr@777 2732 OopTaskQueue* queue = _task_queues->queue((int)i);
ysr@777 2733
ysr@777 2734 if (queue->size() > 0 && verbose_low())
ysr@777 2735 gclog_or_tty->print_cr("[global] scanning task queue of task %d, "
ysr@777 2736 "size = %d", i, queue->size());
ysr@777 2737
ysr@777 2738     // ...then over the contents of all the task queues.
ysr@777 2739 queue->oops_do(cl);
ysr@777 2740 }
ysr@777 2741
johnc@2190 2742   // Invalidate any entries in the region stack that
ysr@777 2743 // point into the collection set
ysr@777 2744 if (_regionStack.invalidate_entries_into_cset()) {
ysr@777 2745 // otherwise, any gray objects copied during the evacuation pause
ysr@777 2746 // might not be visited.
tonyp@1458 2747 assert(_should_gray_objects, "invariant");
ysr@777 2748 }
johnc@2190 2749
johnc@2190 2750 // Invalidate any aborted regions, recorded in the individual CM
johnc@2190 2751 // tasks, that point into the collection set.
johnc@2190 2752 if (invalidate_aborted_regions_in_cset()) {
johnc@2190 2753 // otherwise, any gray objects copied during the evacuation pause
johnc@2190 2754 // might not be visited.
johnc@2190 2755 assert(_should_gray_objects, "invariant");
johnc@2190 2756 }
johnc@2190 2757
ysr@777 2758 }
ysr@777 2759
tonyp@2848 2760 void ConcurrentMark::clear_marking_state(bool clear_overflow) {
ysr@777 2761 _markStack.setEmpty();
ysr@777 2762 _markStack.clear_overflow();
ysr@777 2763 _regionStack.setEmpty();
ysr@777 2764 _regionStack.clear_overflow();
tonyp@2848 2765 if (clear_overflow) {
tonyp@2848 2766 clear_has_overflown();
tonyp@2848 2767 } else {
tonyp@2848 2768 assert(has_overflown(), "pre-condition");
tonyp@2848 2769 }
ysr@777 2770 _finger = _heap_start;
ysr@777 2771
ysr@777 2772 for (int i = 0; i < (int)_max_task_num; ++i) {
ysr@777 2773 OopTaskQueue* queue = _task_queues->queue(i);
ysr@777 2774 queue->set_empty();
johnc@2240 2775 // Clear any partial regions from the CMTasks
johnc@2240 2776 _tasks[i]->clear_aborted_region();
ysr@777 2777 }
ysr@777 2778 }
ysr@777 2779
ysr@777 2780 void ConcurrentMark::print_stats() {
ysr@777 2781 if (verbose_stats()) {
ysr@777 2782 gclog_or_tty->print_cr("---------------------------------------------------------------------");
ysr@777 2783 for (size_t i = 0; i < _active_tasks; ++i) {
ysr@777 2784 _tasks[i]->print_stats();
ysr@777 2785 gclog_or_tty->print_cr("---------------------------------------------------------------------");
ysr@777 2786 }
ysr@777 2787 }
ysr@777 2788 }
ysr@777 2789
ysr@777 2790 class CSMarkOopClosure: public OopClosure {
ysr@777 2791 friend class CSMarkBitMapClosure;
ysr@777 2792
ysr@777 2793 G1CollectedHeap* _g1h;
ysr@777 2794 CMBitMap* _bm;
ysr@777 2795 ConcurrentMark* _cm;
ysr@777 2796 oop* _ms;
ysr@777 2797 jint* _array_ind_stack;
ysr@777 2798 int _ms_size;
ysr@777 2799 int _ms_ind;
ysr@777 2800 int _array_increment;
ysr@777 2801
ysr@777 2802 bool push(oop obj, int arr_ind = 0) {
ysr@777 2803 if (_ms_ind == _ms_size) {
ysr@777 2804 gclog_or_tty->print_cr("Mark stack is full.");
ysr@777 2805 return false;
ysr@777 2806 }
ysr@777 2807 _ms[_ms_ind] = obj;
ysr@777 2808 if (obj->is_objArray()) _array_ind_stack[_ms_ind] = arr_ind;
ysr@777 2809 _ms_ind++;
ysr@777 2810 return true;
ysr@777 2811 }
ysr@777 2812
ysr@777 2813 oop pop() {
ysr@777 2814 if (_ms_ind == 0) return NULL;
ysr@777 2815 else {
ysr@777 2816 _ms_ind--;
ysr@777 2817 return _ms[_ms_ind];
ysr@777 2818 }
ysr@777 2819 }
ysr@777 2820
ysr@1280 2821 template <class T> bool drain() {
ysr@777 2822 while (_ms_ind > 0) {
ysr@777 2823 oop obj = pop();
ysr@777 2824 assert(obj != NULL, "Since index was non-zero.");
ysr@777 2825 if (obj->is_objArray()) {
ysr@777 2826 jint arr_ind = _array_ind_stack[_ms_ind];
ysr@777 2827 objArrayOop aobj = objArrayOop(obj);
ysr@777 2828 jint len = aobj->length();
ysr@777 2829 jint next_arr_ind = arr_ind + _array_increment;
ysr@777 2830 if (next_arr_ind < len) {
ysr@777 2831 push(obj, next_arr_ind);
ysr@777 2832 }
ysr@777 2833         // Now process this chunk of the array.
ysr@777 2834 int lim = MIN2(next_arr_ind, len);
ysr@777 2835 for (int j = arr_ind; j < lim; j++) {
apetrusenko@1347 2836 do_oop(aobj->objArrayOopDesc::obj_at_addr<T>(j));
ysr@777 2837 }
ysr@777 2838
ysr@777 2839 } else {
ysr@777 2840 obj->oop_iterate(this);
ysr@777 2841 }
ysr@777 2842 if (abort()) return false;
ysr@777 2843 }
ysr@777 2844 return true;
ysr@777 2845 }
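
  // A worked example of the chunking above (numbers illustrative):
  // with _array_increment == 16, an objArray of length 40 is scanned
  // as [0,16) -- with the continuation (obj, 16) re-pushed first --
  // then [16,32) with (obj, 32) re-pushed, and finally [32,40).
  // Re-pushing the continuation before scanning the chunk bounds the
  // stack usage to one slot per array, however long the array is.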
ysr@777 2846
ysr@777 2847 public:
ysr@777 2848 CSMarkOopClosure(ConcurrentMark* cm, int ms_size) :
ysr@777 2849     _g1h(G1CollectedHeap::heap()),
ysr@777 2850     _bm(cm->nextMarkBitMap()),
ysr@777 2851     _cm(cm),
ysr@777 2852     _ms(NEW_C_HEAP_ARRAY(oop, ms_size)),
ysr@777 2853     _array_ind_stack(NEW_C_HEAP_ARRAY(jint, ms_size)),
ysr@777 2854     _ms_size(ms_size), _ms_ind(0),
ysr@777 2855     _array_increment(MAX2(ms_size/8, 16))
ysr@777 2856 {}
ysr@777 2857
ysr@777 2858 ~CSMarkOopClosure() {
ysr@777 2859 FREE_C_HEAP_ARRAY(oop, _ms);
ysr@777 2860 FREE_C_HEAP_ARRAY(jint, _array_ind_stack);
ysr@777 2861 }
ysr@777 2862
ysr@1280 2863 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
ysr@1280 2864 virtual void do_oop( oop* p) { do_oop_work(p); }
ysr@1280 2865
ysr@1280 2866 template <class T> void do_oop_work(T* p) {
ysr@1280 2867 T heap_oop = oopDesc::load_heap_oop(p);
ysr@1280 2868 if (oopDesc::is_null(heap_oop)) return;
ysr@1280 2869 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
ysr@777 2870 if (obj->is_forwarded()) {
ysr@777 2871 // If the object has already been forwarded, we have to make sure
ysr@777 2872 // that it's marked. So follow the forwarding pointer. Note that
ysr@777 2873 // this does the right thing for self-forwarding pointers in the
ysr@777 2874 // evacuation failure case.
ysr@777 2875 obj = obj->forwardee();
ysr@777 2876 }
ysr@777 2877 HeapRegion* hr = _g1h->heap_region_containing(obj);
ysr@777 2878 if (hr != NULL) {
ysr@777 2879 if (hr->in_collection_set()) {
ysr@777 2880 if (_g1h->is_obj_ill(obj)) {
ysr@777 2881 _bm->mark((HeapWord*)obj);
ysr@777 2882 if (!push(obj)) {
ysr@777 2883 gclog_or_tty->print_cr("Setting abort in CSMarkOopClosure because push failed.");
ysr@777 2884 set_abort();
ysr@777 2885 }
ysr@777 2886 }
ysr@777 2887 } else {
ysr@777 2888 // Outside the collection set; we need to gray it
ysr@777 2889 _cm->deal_with_reference(obj);
ysr@777 2890 }
ysr@777 2891 }
ysr@777 2892 }
ysr@777 2893 };
ysr@777 2894
ysr@777 2895 class CSMarkBitMapClosure: public BitMapClosure {
ysr@777 2896 G1CollectedHeap* _g1h;
ysr@777 2897 CMBitMap* _bitMap;
ysr@777 2898 ConcurrentMark* _cm;
ysr@777 2899 CSMarkOopClosure _oop_cl;
ysr@777 2900 public:
ysr@777 2901 CSMarkBitMapClosure(ConcurrentMark* cm, int ms_size) :
ysr@777 2902 _g1h(G1CollectedHeap::heap()),
ysr@777 2903 _bitMap(cm->nextMarkBitMap()),
ysr@777 2904 _oop_cl(cm, ms_size)
ysr@777 2905 {}
ysr@777 2906
ysr@777 2907 ~CSMarkBitMapClosure() {}
ysr@777 2908
ysr@777 2909 bool do_bit(size_t offset) {
ysr@777 2910 // convert offset into a HeapWord*
ysr@777 2911 HeapWord* addr = _bitMap->offsetToHeapWord(offset);
ysr@777 2912     assert(_bitMap->startWord() <= addr && addr < _bitMap->endWord(),
ysr@777 2913            "address out of range");
ysr@777 2914 assert(_bitMap->isMarked(addr), "tautology");
ysr@777 2915 oop obj = oop(addr);
ysr@777 2916 if (!obj->is_forwarded()) {
ysr@777 2917 if (!_oop_cl.push(obj)) return false;
ysr@1280 2918 if (UseCompressedOops) {
ysr@1280 2919 if (!_oop_cl.drain<narrowOop>()) return false;
ysr@1280 2920 } else {
ysr@1280 2921 if (!_oop_cl.drain<oop>()) return false;
ysr@1280 2922 }
ysr@777 2923 }
ysr@777 2924 // Otherwise...
ysr@777 2925 return true;
ysr@777 2926 }
ysr@777 2927 };
ysr@777 2928
ysr@777 2929
ysr@777 2930 class CompleteMarkingInCSHRClosure: public HeapRegionClosure {
ysr@777 2931 CMBitMap* _bm;
ysr@777 2932 CSMarkBitMapClosure _bit_cl;
ysr@777 2933 enum SomePrivateConstants {
ysr@777 2934 MSSize = 1000
ysr@777 2935 };
ysr@777 2936 bool _completed;
ysr@777 2937 public:
ysr@777 2938 CompleteMarkingInCSHRClosure(ConcurrentMark* cm) :
ysr@777 2939 _bm(cm->nextMarkBitMap()),
ysr@777 2940 _bit_cl(cm, MSSize),
ysr@777 2941 _completed(true)
ysr@777 2942 {}
ysr@777 2943
ysr@777 2944 ~CompleteMarkingInCSHRClosure() {}
ysr@777 2945
ysr@777 2946 bool doHeapRegion(HeapRegion* r) {
ysr@777 2947 if (!r->evacuation_failed()) {
ysr@777 2948 MemRegion mr = MemRegion(r->bottom(), r->next_top_at_mark_start());
ysr@777 2949 if (!mr.is_empty()) {
ysr@777 2950 if (!_bm->iterate(&_bit_cl, mr)) {
ysr@777 2951 _completed = false;
ysr@777 2952 return true;
ysr@777 2953 }
ysr@777 2954 }
ysr@777 2955 }
ysr@777 2956 return false;
ysr@777 2957 }
ysr@777 2958
ysr@777 2959 bool completed() { return _completed; }
ysr@777 2960 };
ysr@777 2961
ysr@777 2962 class ClearMarksInHRClosure: public HeapRegionClosure {
ysr@777 2963 CMBitMap* _bm;
ysr@777 2964 public:
ysr@777 2965 ClearMarksInHRClosure(CMBitMap* bm): _bm(bm) { }
ysr@777 2966
ysr@777 2967 bool doHeapRegion(HeapRegion* r) {
ysr@777 2968 if (!r->used_region().is_empty() && !r->evacuation_failed()) {
ysr@777 2969       _bm->clearRange(r->used_region());
ysr@777 2971 }
ysr@777 2972 return false;
ysr@777 2973 }
ysr@777 2974 };
ysr@777 2975
ysr@777 2976 void ConcurrentMark::complete_marking_in_collection_set() {
ysr@777 2977 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@777 2978
ysr@777 2979 if (!g1h->mark_in_progress()) {
ysr@777 2980 g1h->g1_policy()->record_mark_closure_time(0.0);
ysr@777 2981 return;
ysr@777 2982 }
ysr@777 2983
ysr@777 2985   double start = os::elapsedTime();
ysr@777 2986   while (true) {
ysr@777 2988 CompleteMarkingInCSHRClosure cmplt(this);
ysr@777 2989 g1h->collection_set_iterate(&cmplt);
ysr@777 2990 if (cmplt.completed()) break;
ysr@777 2991 }
ysr@777 2992 double end_time = os::elapsedTime();
ysr@777 2993 double elapsed_time_ms = (end_time - start) * 1000.0;
ysr@777 2994 g1h->g1_policy()->record_mark_closure_time(elapsed_time_ms);
ysr@777 2995
ysr@777 2996 ClearMarksInHRClosure clr(nextMarkBitMap());
ysr@777 2997 g1h->collection_set_iterate(&clr);
ysr@777 2998 }
ysr@777 2999
ysr@777 3000 // The next two methods deal with the following optimisation. Some
ysr@777 3001 // objects are gray by being marked and located above the finger. If
ysr@777 3002 // they are copied, during an evacuation pause, below the finger then
ysr@777 3003 // they need to be pushed on the stack. The observation is that, if
ysr@777 3004 // there are no regions in the collection set located above the
ysr@777 3005 // finger, then the above cannot happen, hence we do not need to
ysr@777 3006 // explicitly gray any objects when copying them to below the
ysr@777 3007 // finger. The global stack will be scanned to ensure that, if it
ysr@777 3008 // points to objects being copied, it will update their
ysr@777 3009 // location. There is a tricky situation with the gray objects in
ysr@777 3010 // the region stack that are being copied, however. See the comment
ysr@777 3011 // in newCSet().
ysr@777 3012
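// A concrete scenario for the tricky case (illustrative): suppose the
// global finger is at region R5 and the collection set is R1..R3,
// entirely below it. Marked objects there are either already scanned
// or sit on a stack that will be updated, so nothing extra needs
// graying. If instead R7 -- above the finger -- were in the
// collection set, a marked-but-unscanned object in R7 could be copied
// below the finger and never be visited; this is what
// _should_gray_objects guards against (see registerCSetRegion()).
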
ysr@777 3013 void ConcurrentMark::newCSet() {
ysr@777 3014 if (!concurrent_marking_in_progress())
ysr@777 3015 // nothing to do if marking is not in progress
ysr@777 3016 return;
ysr@777 3017
ysr@777 3018 // find what the lowest finger is among the global and local fingers
ysr@777 3019 _min_finger = _finger;
ysr@777 3020 for (int i = 0; i < (int)_max_task_num; ++i) {
ysr@777 3021 CMTask* task = _tasks[i];
ysr@777 3022 HeapWord* task_finger = task->finger();
ysr@777 3023 if (task_finger != NULL && task_finger < _min_finger)
ysr@777 3024 _min_finger = task_finger;
ysr@777 3025 }
ysr@777 3026
ysr@777 3027 _should_gray_objects = false;
ysr@777 3028
ysr@777 3029   // This fixes a very subtle and frustrating bug. It might be the case
ysr@777 3030   // that, during an evacuation pause, heap regions that contain
ysr@777 3031   // objects that are gray (by being in regions contained in the
ysr@777 3032   // region stack) are included in the collection set. Since such gray
ysr@777 3033   // objects will be moved, and because it's not easy to redirect
ysr@777 3034   // region stack entries to point to a new location (because objects
ysr@777 3035   // in one region might be scattered to multiple regions after they
ysr@777 3036   // are copied), one option is to ensure that all marked objects
ysr@777 3037   // copied during a pause are pushed on the stack. Notice, however,
ysr@777 3038   // that this problem can only happen when the region stack is not
ysr@777 3039   // empty during an evacuation pause. So, we make the fix a bit less
ysr@777 3040   // conservative and ensure that objects are pushed on the stack,
ysr@777 3041   // irrespective of whether all collection set regions are below the
ysr@777 3042   // finger, if the region stack is not empty. This is expected to be
ysr@777 3043   // a rare case, so I don't think it's necessary to be smarter about it.
johnc@2190 3044 if (!region_stack_empty() || has_aborted_regions())
ysr@777 3045 _should_gray_objects = true;
ysr@777 3046 }
ysr@777 3047
ysr@777 3048 void ConcurrentMark::registerCSetRegion(HeapRegion* hr) {
ysr@777 3049 if (!concurrent_marking_in_progress())
ysr@777 3050 return;
ysr@777 3051
ysr@777 3052 HeapWord* region_end = hr->end();
ysr@777 3053 if (region_end > _min_finger)
ysr@777 3054 _should_gray_objects = true;
ysr@777 3055 }
ysr@777 3056
ysr@777 3057 // abandon current marking iteration due to a Full GC
ysr@777 3058 void ConcurrentMark::abort() {
ysr@777 3059 // Clear all marks to force marking thread to do nothing
ysr@777 3060 _nextMarkBitMap->clearAll();
ysr@777 3061 // Empty mark stack
ysr@777 3062 clear_marking_state();
johnc@2190 3063 for (int i = 0; i < (int)_max_task_num; ++i) {
ysr@777 3064 _tasks[i]->clear_region_fields();
johnc@2190 3065 }
ysr@777 3066 _has_aborted = true;
ysr@777 3067
ysr@777 3068 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
ysr@777 3069 satb_mq_set.abandon_partial_marking();
tonyp@1752 3070 // This can be called either during or outside marking, we'll read
tonyp@1752 3071 // the expected_active value from the SATB queue set.
tonyp@1752 3072 satb_mq_set.set_active_all_threads(
tonyp@1752 3073 false, /* new active value */
tonyp@1752 3074 satb_mq_set.is_active() /* expected_active */);
ysr@777 3075 }
ysr@777 3076
ysr@777 3077 static void print_ms_time_info(const char* prefix, const char* name,
ysr@777 3078 NumberSeq& ns) {
ysr@777 3079 gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
ysr@777 3080 prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
ysr@777 3081 if (ns.num() > 0) {
ysr@777 3082 gclog_or_tty->print_cr("%s [std. dev = %8.2f ms, max = %8.2f ms]",
ysr@777 3083 prefix, ns.sd(), ns.maximum());
ysr@777 3084 }
ysr@777 3085 }
ysr@777 3086
ysr@777 3087 void ConcurrentMark::print_summary_info() {
ysr@777 3088 gclog_or_tty->print_cr(" Concurrent marking:");
ysr@777 3089 print_ms_time_info(" ", "init marks", _init_times);
ysr@777 3090 print_ms_time_info(" ", "remarks", _remark_times);
ysr@777 3091 {
ysr@777 3092 print_ms_time_info(" ", "final marks", _remark_mark_times);
ysr@777 3093 print_ms_time_info(" ", "weak refs", _remark_weak_ref_times);
ysr@777 3094
ysr@777 3095 }
ysr@777 3096 print_ms_time_info(" ", "cleanups", _cleanup_times);
ysr@777 3097 gclog_or_tty->print_cr(" Final counting total time = %8.2f s (avg = %8.2f ms).",
ysr@777 3098 _total_counting_time,
ysr@777 3099 (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 /
ysr@777 3100 (double)_cleanup_times.num()
ysr@777 3101 : 0.0));
ysr@777 3102 if (G1ScrubRemSets) {
ysr@777 3103 gclog_or_tty->print_cr(" RS scrub total time = %8.2f s (avg = %8.2f ms).",
ysr@777 3104 _total_rs_scrub_time,
ysr@777 3105 (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 /
ysr@777 3106 (double)_cleanup_times.num()
ysr@777 3107 : 0.0));
ysr@777 3108 }
ysr@777 3109 gclog_or_tty->print_cr(" Total stop_world time = %8.2f s.",
ysr@777 3110 (_init_times.sum() + _remark_times.sum() +
ysr@777 3111 _cleanup_times.sum())/1000.0);
ysr@777 3112 gclog_or_tty->print_cr(" Total concurrent time = %8.2f s "
ysr@777 3113 "(%8.2f s marking, %8.2f s counting).",
ysr@777 3114 cmThread()->vtime_accum(),
ysr@777 3115 cmThread()->vtime_mark_accum(),
ysr@777 3116 cmThread()->vtime_count_accum());
ysr@777 3117 }
ysr@777 3118
tonyp@1454 3119 void ConcurrentMark::print_worker_threads_on(outputStream* st) const {
tonyp@1454 3120 _parallel_workers->print_worker_threads_on(st);
tonyp@1454 3121 }
tonyp@1454 3122
ysr@777 3123 // Closures
ysr@777 3124 // XXX: there seems to be a lot of code duplication here;
ysr@777 3125 // should refactor and consolidate the shared code.
ysr@777 3126
ysr@777 3130 // We take a break if someone is trying to stop the world.
ysr@777 3131 bool ConcurrentMark::do_yield_check(int worker_i) {
ysr@777 3132 if (should_yield()) {
ysr@777 3133 if (worker_i == 0)
ysr@777 3134 _g1h->g1_policy()->record_concurrent_pause();
ysr@777 3135 cmThread()->yield();
ysr@777 3136 if (worker_i == 0)
ysr@777 3137 _g1h->g1_policy()->record_concurrent_pause_end();
ysr@777 3138 return true;
ysr@777 3139 } else {
ysr@777 3140 return false;
ysr@777 3141 }
ysr@777 3142 }
ysr@777 3143
ysr@777 3144 bool ConcurrentMark::should_yield() {
ysr@777 3145 return cmThread()->should_yield();
ysr@777 3146 }
ysr@777 3147
ysr@777 3148 bool ConcurrentMark::containing_card_is_marked(void* p) {
ysr@777 3149 size_t offset = pointer_delta(p, _g1h->reserved_region().start(), 1);
ysr@777 3150 return _card_bm.at(offset >> CardTableModRefBS::card_shift);
ysr@777 3151 }
ysr@777 3152
ysr@777 3153 bool ConcurrentMark::containing_cards_are_marked(void* start,
ysr@777 3154 void* last) {
ysr@777 3155 return
ysr@777 3156 containing_card_is_marked(start) &&
ysr@777 3157 containing_card_is_marked(last);
ysr@777 3158 }
ysr@777 3159
ysr@777 3160 #ifndef PRODUCT
ysr@777 3161 // for debugging purposes
ysr@777 3162 void ConcurrentMark::print_finger() {
ysr@777 3163 gclog_or_tty->print_cr("heap ["PTR_FORMAT", "PTR_FORMAT"), global finger = "PTR_FORMAT,
ysr@777 3164 _heap_start, _heap_end, _finger);
ysr@777 3165 for (int i = 0; i < (int) _max_task_num; ++i) {
ysr@777 3166 gclog_or_tty->print(" %d: "PTR_FORMAT, i, _tasks[i]->finger());
ysr@777 3167 }
ysr@777 3168 gclog_or_tty->print_cr("");
ysr@777 3169 }
ysr@777 3170 #endif
ysr@777 3171
ysr@777 3172 // Closure for iteration over bitmaps
ysr@777 3173 class CMBitMapClosure : public BitMapClosure {
ysr@777 3174 private:
ysr@777 3175 // the bitmap that is being iterated over
ysr@777 3176 CMBitMap* _nextMarkBitMap;
ysr@777 3177 ConcurrentMark* _cm;
ysr@777 3178 CMTask* _task;
ysr@777 3179 // true if we're scanning a heap region claimed by the task (so that
ysr@777 3180 // we move the finger along), false if we're not, i.e. currently when
ysr@777 3181 // scanning a heap region popped from the region stack (so that we
ysr@777 3182 // do not move the task finger along; it'd be a mistake if we did so).
ysr@777 3183 bool _scanning_heap_region;
ysr@777 3184
ysr@777 3185 public:
ysr@777 3186 CMBitMapClosure(CMTask *task,
ysr@777 3187 ConcurrentMark* cm,
ysr@777 3188 CMBitMap* nextMarkBitMap)
ysr@777 3189 : _task(task), _cm(cm), _nextMarkBitMap(nextMarkBitMap) { }
ysr@777 3190
ysr@777 3191 void set_scanning_heap_region(bool scanning_heap_region) {
ysr@777 3192 _scanning_heap_region = scanning_heap_region;
ysr@777 3193 }
ysr@777 3194
ysr@777 3195 bool do_bit(size_t offset) {
ysr@777 3196 HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset);
tonyp@1458 3197 assert(_nextMarkBitMap->isMarked(addr), "invariant");
tonyp@1458 3198 assert( addr < _cm->finger(), "invariant");
ysr@777 3199
ysr@777 3200 if (_scanning_heap_region) {
ysr@777 3201 statsOnly( _task->increase_objs_found_on_bitmap() );
tonyp@1458 3202 assert(addr >= _task->finger(), "invariant");
ysr@777 3203       // We move the task's local finger along.
ysr@777 3204 _task->move_finger_to(addr);
ysr@777 3205 } else {
ysr@777 3206 // We move the task's region finger along.
ysr@777 3207 _task->move_region_finger_to(addr);
ysr@777 3208 }
ysr@777 3209
ysr@777 3210 _task->scan_object(oop(addr));
ysr@777 3211 // we only partially drain the local queue and global stack
ysr@777 3212 _task->drain_local_queue(true);
ysr@777 3213 _task->drain_global_stack(true);
ysr@777 3214
ysr@777 3215 // if the has_aborted flag has been raised, we need to bail out of
ysr@777 3216 // the iteration
ysr@777 3217 return !_task->has_aborted();
ysr@777 3218 }
ysr@777 3219 };
ysr@777 3220
ysr@777 3221 // Closure for iterating over objects, currently only used for
ysr@777 3222 // processing SATB buffers.
ysr@777 3223 class CMObjectClosure : public ObjectClosure {
ysr@777 3224 private:
ysr@777 3225 CMTask* _task;
ysr@777 3226
ysr@777 3227 public:
ysr@777 3228 void do_object(oop obj) {
ysr@777 3229 _task->deal_with_reference(obj);
ysr@777 3230 }
ysr@777 3231
ysr@777 3232 CMObjectClosure(CMTask* task) : _task(task) { }
ysr@777 3233 };
ysr@777 3234
ysr@777 3235 // Closure for iterating over object fields
ysr@777 3236 class CMOopClosure : public OopClosure {
ysr@777 3237 private:
ysr@777 3238 G1CollectedHeap* _g1h;
ysr@777 3239 ConcurrentMark* _cm;
ysr@777 3240 CMTask* _task;
ysr@777 3241
ysr@777 3242 public:
ysr@1280 3243 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
ysr@1280 3244 virtual void do_oop( oop* p) { do_oop_work(p); }
ysr@1280 3245
ysr@1280 3246 template <class T> void do_oop_work(T* p) {
tonyp@2472 3247 assert( _g1h->is_in_g1_reserved((HeapWord*) p), "invariant");
tonyp@2643 3248 assert(!_g1h->is_on_master_free_list(
tonyp@2472 3249 _g1h->heap_region_containing((HeapWord*) p)), "invariant");
ysr@1280 3250
ysr@1280 3251 oop obj = oopDesc::load_decode_heap_oop(p);
ysr@777 3252 if (_cm->verbose_high())
ysr@777 3253 gclog_or_tty->print_cr("[%d] we're looking at location "
ysr@777 3254 "*"PTR_FORMAT" = "PTR_FORMAT,
ysr@777 3255 _task->task_id(), p, (void*) obj);
ysr@777 3256 _task->deal_with_reference(obj);
ysr@777 3257 }
ysr@777 3258
ysr@777 3259 CMOopClosure(G1CollectedHeap* g1h,
ysr@777 3260 ConcurrentMark* cm,
ysr@777 3261 CMTask* task)
johnc@2316 3262 : _g1h(g1h), _cm(cm), _task(task)
johnc@2316 3263 {
johnc@2718 3264 assert(_ref_processor == NULL, "should be initialized to NULL");
johnc@2718 3265
johnc@2718 3266 if (G1UseConcMarkReferenceProcessing) {
johnc@2718 3267 _ref_processor = g1h->ref_processor();
johnc@2718 3268 assert(_ref_processor != NULL, "should not be NULL");
johnc@2718 3269 }
johnc@2316 3270 }
ysr@777 3271 };
ysr@777 3272
ysr@777 3273 void CMTask::setup_for_region(HeapRegion* hr) {
tonyp@1458 3274 // Separated the asserts so that we know which one fires.
tonyp@1458 3275 assert(hr != NULL,
tonyp@1458 3276          "claim_region() should have filtered out NULL regions");
tonyp@1458 3277 assert(!hr->continuesHumongous(),
tonyp@1458 3278 "claim_region() should have filtered out continues humongous regions");
ysr@777 3279
ysr@777 3280 if (_cm->verbose_low())
ysr@777 3281 gclog_or_tty->print_cr("[%d] setting up for region "PTR_FORMAT,
ysr@777 3282 _task_id, hr);
ysr@777 3283
ysr@777 3284 _curr_region = hr;
ysr@777 3285 _finger = hr->bottom();
ysr@777 3286 update_region_limit();
ysr@777 3287 }
ysr@777 3288
ysr@777 3289 void CMTask::update_region_limit() {
ysr@777 3290 HeapRegion* hr = _curr_region;
ysr@777 3291 HeapWord* bottom = hr->bottom();
ysr@777 3292 HeapWord* limit = hr->next_top_at_mark_start();
ysr@777 3293
ysr@777 3294 if (limit == bottom) {
ysr@777 3295 if (_cm->verbose_low())
ysr@777 3296 gclog_or_tty->print_cr("[%d] found an empty region "
ysr@777 3297 "["PTR_FORMAT", "PTR_FORMAT")",
ysr@777 3298 _task_id, bottom, limit);
ysr@777 3299 // The region was collected underneath our feet.
ysr@777 3300 // We set the finger to bottom to ensure that the bitmap
ysr@777 3301 // iteration that will follow this will not do anything.
ysr@777 3302 // (this is not a condition that holds when we set the region up,
ysr@777 3303 // as the region is not supposed to be empty in the first place)
ysr@777 3304 _finger = bottom;
ysr@777 3305 } else if (limit >= _region_limit) {
tonyp@1458 3306 assert(limit >= _finger, "peace of mind");
ysr@777 3307 } else {
tonyp@1458 3308 assert(limit < _region_limit, "only way to get here");
ysr@777 3309 // This can happen under some pretty unusual circumstances. An
ysr@777 3310 // evacuation pause empties the region underneath our feet (NTAMS
ysr@777 3311 // at bottom). We then do some allocation in the region (NTAMS
ysr@777 3312 // stays at bottom), followed by the region being used as a GC
ysr@777 3313 // alloc region (NTAMS will move to top() and the objects
ysr@777 3314 // originally below it will be grayed). All objects now marked in
ysr@777 3315 // the region are explicitly grayed, if below the global finger,
ysr@777 3316 // and we do not need in fact to scan anything else. So, we simply
ysr@777 3317 // set _finger to be limit to ensure that the bitmap iteration
ysr@777 3318 // doesn't do anything.
ysr@777 3319 _finger = limit;
ysr@777 3320 }
ysr@777 3321
ysr@777 3322 _region_limit = limit;
ysr@777 3323 }
ysr@777 3324
ysr@777 3325 void CMTask::giveup_current_region() {
tonyp@1458 3326 assert(_curr_region != NULL, "invariant");
ysr@777 3327 if (_cm->verbose_low())
ysr@777 3328 gclog_or_tty->print_cr("[%d] giving up region "PTR_FORMAT,
ysr@777 3329 _task_id, _curr_region);
ysr@777 3330 clear_region_fields();
ysr@777 3331 }
ysr@777 3332
ysr@777 3333 void CMTask::clear_region_fields() {
ysr@777 3334 // Values for these three fields that indicate that we're not
ysr@777 3335 // holding on to a region.
ysr@777 3336 _curr_region = NULL;
ysr@777 3337 _finger = NULL;
ysr@777 3338 _region_limit = NULL;
ysr@777 3339
ysr@777 3340 _region_finger = NULL;
ysr@777 3341 }
ysr@777 3342
ysr@777 3343 void CMTask::reset(CMBitMap* nextMarkBitMap) {
tonyp@1458 3344 guarantee(nextMarkBitMap != NULL, "invariant");
ysr@777 3345
ysr@777 3346 if (_cm->verbose_low())
ysr@777 3347 gclog_or_tty->print_cr("[%d] resetting", _task_id);
ysr@777 3348
ysr@777 3349 _nextMarkBitMap = nextMarkBitMap;
ysr@777 3350 clear_region_fields();
johnc@2240 3351 assert(_aborted_region.is_empty(), "should have been cleared");
ysr@777 3352
ysr@777 3353 _calls = 0;
ysr@777 3354 _elapsed_time_ms = 0.0;
ysr@777 3355 _termination_time_ms = 0.0;
ysr@777 3356 _termination_start_time_ms = 0.0;
ysr@777 3357
ysr@777 3358 #if _MARKING_STATS_
ysr@777 3359 _local_pushes = 0;
ysr@777 3360 _local_pops = 0;
ysr@777 3361 _local_max_size = 0;
ysr@777 3362 _objs_scanned = 0;
ysr@777 3363 _global_pushes = 0;
ysr@777 3364 _global_pops = 0;
ysr@777 3365 _global_max_size = 0;
ysr@777 3366 _global_transfers_to = 0;
ysr@777 3367 _global_transfers_from = 0;
ysr@777 3368 _region_stack_pops = 0;
ysr@777 3369 _regions_claimed = 0;
ysr@777 3370 _objs_found_on_bitmap = 0;
ysr@777 3371 _satb_buffers_processed = 0;
ysr@777 3372 _steal_attempts = 0;
ysr@777 3373 _steals = 0;
ysr@777 3374 _aborted = 0;
ysr@777 3375 _aborted_overflow = 0;
ysr@777 3376 _aborted_cm_aborted = 0;
ysr@777 3377 _aborted_yield = 0;
ysr@777 3378 _aborted_timed_out = 0;
ysr@777 3379 _aborted_satb = 0;
ysr@777 3380 _aborted_termination = 0;
ysr@777 3381 #endif // _MARKING_STATS_
ysr@777 3382 }
ysr@777 3383
ysr@777 3384 bool CMTask::should_exit_termination() {
ysr@777 3385 regular_clock_call();
ysr@777 3386 // This is called when we are in the termination protocol. We should
ysr@777 3387 // quit if, for some reason, this task wants to abort or the global
ysr@777 3388 // stack is not empty (this means that we can get work from it).
ysr@777 3389 return !_cm->mark_stack_empty() || has_aborted();
ysr@777 3390 }
ysr@777 3391
ysr@777 3392 // This determines whether the method below will check both the local
ysr@777 3393 // and global fingers when determining whether to push on the stack a
ysr@777 3394 // gray object (value 1) or whether it will only check the global one
ysr@777 3395 // (value 0). The tradeoffs are that the former will be a bit more
ysr@777 3396 // accurate and possibly push less on the stack, but it might also be
ysr@777 3397 // a little bit slower.
ysr@777 3398
ysr@777 3399 #define _CHECK_BOTH_FINGERS_ 1
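
// With the value 1, the decision made below for a newly marked object
// at objAddr is, schematically:
//
//   objAddr < _finger (local)         -> push (already swept locally)
//   inside [_finger, _region_limit)   -> no push (will be swept)
//   objAddr < global finger           -> push (global sweep passed it)
//   otherwise                         -> no push (a sweep will get it)
//
// With the value 0, only the third check is performed, which is
// cheaper but pushes more objects.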
ysr@777 3400
ysr@777 3401 void CMTask::deal_with_reference(oop obj) {
ysr@777 3402 if (_cm->verbose_high())
ysr@777 3403 gclog_or_tty->print_cr("[%d] we're dealing with reference = "PTR_FORMAT,
ysr@777 3404 _task_id, (void*) obj);
ysr@777 3405
ysr@777 3406 ++_refs_reached;
ysr@777 3407
ysr@777 3408 HeapWord* objAddr = (HeapWord*) obj;
ysr@1280 3409 assert(obj->is_oop_or_null(true /* ignore mark word */), "Error");
ysr@777 3410 if (_g1h->is_in_g1_reserved(objAddr)) {
tonyp@1458 3411 assert(obj != NULL, "is_in_g1_reserved should ensure this");
ysr@777 3412 HeapRegion* hr = _g1h->heap_region_containing(obj);
ysr@777 3413 if (_g1h->is_obj_ill(obj, hr)) {
ysr@777 3414 if (_cm->verbose_high())
ysr@777 3415 gclog_or_tty->print_cr("[%d] "PTR_FORMAT" is not considered marked",
ysr@777 3416 _task_id, (void*) obj);
ysr@777 3417
ysr@777 3418 // we need to mark it first
ysr@777 3419 if (_nextMarkBitMap->parMark(objAddr)) {
ysr@777 3420         // No OrderAccess::storeload() is needed. It is implicit in the
ysr@777 3421 // CAS done in parMark(objAddr) above
ysr@777 3422 HeapWord* global_finger = _cm->finger();
ysr@777 3423
ysr@777 3424 #if _CHECK_BOTH_FINGERS_
ysr@777 3425 // we will check both the local and global fingers
ysr@777 3426
ysr@777 3427 if (_finger != NULL && objAddr < _finger) {
ysr@777 3428 if (_cm->verbose_high())
ysr@777 3429 gclog_or_tty->print_cr("[%d] below the local finger ("PTR_FORMAT"), "
ysr@777 3430 "pushing it", _task_id, _finger);
ysr@777 3431 push(obj);
ysr@777 3432 } else if (_curr_region != NULL && objAddr < _region_limit) {
ysr@777 3433 // do nothing
ysr@777 3434 } else if (objAddr < global_finger) {
ysr@777 3435 // Notice that the global finger might be moving forward
ysr@777 3436 // concurrently. This is not a problem. In the worst case, we
ysr@777 3437 // mark the object while it is above the global finger and, by
ysr@777 3438 // the time we read the global finger, it has moved forward
ysr@777 3439         // past this object. In this case, the object will probably
ysr@777 3440 // be visited when a task is scanning the region and will also
ysr@777 3441 // be pushed on the stack. So, some duplicate work, but no
ysr@777 3442 // correctness problems.
ysr@777 3443
ysr@777 3444 if (_cm->verbose_high())
ysr@777 3445 gclog_or_tty->print_cr("[%d] below the global finger "
ysr@777 3446 "("PTR_FORMAT"), pushing it",
ysr@777 3447 _task_id, global_finger);
ysr@777 3448 push(obj);
ysr@777 3449 } else {
ysr@777 3450 // do nothing
ysr@777 3451 }
ysr@777 3452 #else // _CHECK_BOTH_FINGERS_
johnc@2494 3453 // we will only check the global finger
ysr@777 3454
ysr@777 3455 if (objAddr < global_finger) {
ysr@777 3456 // see long comment above
ysr@777 3457
ysr@777 3458 if (_cm->verbose_high())
ysr@777 3459 gclog_or_tty->print_cr("[%d] below the global finger "
ysr@777 3460 "("PTR_FORMAT"), pushing it",
ysr@777 3461 _task_id, global_finger);
ysr@777 3462 push(obj);
ysr@777 3463 }
ysr@777 3464 #endif // _CHECK_BOTH_FINGERS_
ysr@777 3465 }
ysr@777 3466 }
ysr@777 3467 }
ysr@777 3468 }
ysr@777 3469
ysr@777 3470 void CMTask::push(oop obj) {
ysr@777 3471 HeapWord* objAddr = (HeapWord*) obj;
tonyp@1458 3472 assert(_g1h->is_in_g1_reserved(objAddr), "invariant");
tonyp@2643 3473 assert(!_g1h->is_on_master_free_list(
tonyp@2472 3474 _g1h->heap_region_containing((HeapWord*) objAddr)), "invariant");
tonyp@1458 3475 assert(!_g1h->is_obj_ill(obj), "invariant");
tonyp@1458 3476 assert(_nextMarkBitMap->isMarked(objAddr), "invariant");
ysr@777 3477
ysr@777 3478 if (_cm->verbose_high())
ysr@777 3479 gclog_or_tty->print_cr("[%d] pushing "PTR_FORMAT, _task_id, (void*) obj);
ysr@777 3480
ysr@777 3481 if (!_task_queue->push(obj)) {
ysr@777 3482 // The local task queue looks full. We need to push some entries
ysr@777 3483 // to the global stack.
ysr@777 3484
ysr@777 3485 if (_cm->verbose_medium())
ysr@777 3486 gclog_or_tty->print_cr("[%d] task queue overflow, "
ysr@777 3487 "moving entries to the global stack",
ysr@777 3488 _task_id);
ysr@777 3489 move_entries_to_global_stack();
ysr@777 3490
ysr@777 3491 // this should succeed since, even if we overflow the global
ysr@777 3492 // stack, we should have definitely removed some entries from the
ysr@777 3493 // local queue. So, there must be space on it.
ysr@777 3494 bool success = _task_queue->push(obj);
tonyp@1458 3495 assert(success, "invariant");
ysr@777 3496 }
ysr@777 3497
ysr@777 3498 statsOnly( int tmp_size = _task_queue->size();
ysr@777 3499 if (tmp_size > _local_max_size)
ysr@777 3500 _local_max_size = tmp_size;
ysr@777 3501 ++_local_pushes );
ysr@777 3502 }
ysr@777 3503
ysr@777 3504 void CMTask::reached_limit() {
tonyp@1458 3505 assert(_words_scanned >= _words_scanned_limit ||
tonyp@1458 3506 _refs_reached >= _refs_reached_limit ,
tonyp@1458 3507 "shouldn't have been called otherwise");
ysr@777 3508 regular_clock_call();
ysr@777 3509 }
ysr@777 3510
ysr@777 3511 void CMTask::regular_clock_call() {
ysr@777 3512 if (has_aborted())
ysr@777 3513 return;
ysr@777 3514
ysr@777 3515 // First, we need to recalculate the words scanned and refs reached
ysr@777 3516 // limits for the next clock call.
ysr@777 3517 recalculate_limits();
ysr@777 3518
ysr@777 3519 // During the regular clock call we do the following
ysr@777 3520
ysr@777 3521 // (1) If an overflow has been flagged, then we abort.
ysr@777 3522 if (_cm->has_overflown()) {
ysr@777 3523 set_has_aborted();
ysr@777 3524 return;
ysr@777 3525 }
ysr@777 3526
ysr@777 3527 // If we are not concurrent (i.e. we're doing remark) we don't need
ysr@777 3528 // to check anything else. The other steps are only needed during
ysr@777 3529 // the concurrent marking phase.
ysr@777 3530 if (!concurrent())
ysr@777 3531 return;
ysr@777 3532
ysr@777 3533 // (2) If marking has been aborted for Full GC, then we also abort.
ysr@777 3534 if (_cm->has_aborted()) {
ysr@777 3535 set_has_aborted();
ysr@777 3536 statsOnly( ++_aborted_cm_aborted );
ysr@777 3537 return;
ysr@777 3538 }
ysr@777 3539
ysr@777 3540 double curr_time_ms = os::elapsedVTime() * 1000.0;
ysr@777 3541
ysr@777 3542 // (3) If marking stats are enabled, then we update the step history.
ysr@777 3543 #if _MARKING_STATS_
ysr@777 3544 if (_words_scanned >= _words_scanned_limit)
ysr@777 3545 ++_clock_due_to_scanning;
ysr@777 3546 if (_refs_reached >= _refs_reached_limit)
ysr@777 3547 ++_clock_due_to_marking;
ysr@777 3548
ysr@777 3549 double last_interval_ms = curr_time_ms - _interval_start_time_ms;
ysr@777 3550 _interval_start_time_ms = curr_time_ms;
ysr@777 3551 _all_clock_intervals_ms.add(last_interval_ms);
ysr@777 3552
ysr@777 3553 if (_cm->verbose_medium()) {
ysr@777 3554 gclog_or_tty->print_cr("[%d] regular clock, interval = %1.2lfms, "
ysr@777 3555 "scanned = %d%s, refs reached = %d%s",
ysr@777 3556 _task_id, last_interval_ms,
ysr@777 3557 _words_scanned,
ysr@777 3558 (_words_scanned >= _words_scanned_limit) ? " (*)" : "",
ysr@777 3559 _refs_reached,
ysr@777 3560 (_refs_reached >= _refs_reached_limit) ? " (*)" : "");
ysr@777 3561 }
ysr@777 3562 #endif // _MARKING_STATS_
ysr@777 3563
ysr@777 3564 // (4) We check whether we should yield. If we have to, then we abort.
ysr@777 3565 if (_cm->should_yield()) {
ysr@777 3566 // We should yield. To do this we abort the task. The caller is
ysr@777 3567 // responsible for yielding.
ysr@777 3568 set_has_aborted();
ysr@777 3569 statsOnly( ++_aborted_yield );
ysr@777 3570 return;
ysr@777 3571 }
ysr@777 3572
ysr@777 3573 // (5) We check whether we've reached our time quota. If we have,
ysr@777 3574 // then we abort.
ysr@777 3575 double elapsed_time_ms = curr_time_ms - _start_time_ms;
ysr@777 3576 if (elapsed_time_ms > _time_target_ms) {
ysr@777 3577 set_has_aborted();
johnc@2494 3578 _has_timed_out = true;
ysr@777 3579 statsOnly( ++_aborted_timed_out );
ysr@777 3580 return;
ysr@777 3581 }
ysr@777 3582
ysr@777 3583   // (6) Finally, we check whether there are enough completed SATB
ysr@777 3584 // buffers available for processing. If there are, we abort.
ysr@777 3585 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
ysr@777 3586 if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
ysr@777 3587 if (_cm->verbose_low())
ysr@777 3588 gclog_or_tty->print_cr("[%d] aborting to deal with pending SATB buffers",
ysr@777 3589 _task_id);
ysr@777 3590 // we do need to process SATB buffers, we'll abort and restart
ysr@777 3591 // the marking task to do so
ysr@777 3592 set_has_aborted();
ysr@777 3593 statsOnly( ++_aborted_satb );
ysr@777 3594 return;
ysr@777 3595 }
ysr@777 3596 }
ysr@777 3597
ysr@777 3598 void CMTask::recalculate_limits() {
ysr@777 3599 _real_words_scanned_limit = _words_scanned + words_scanned_period;
ysr@777 3600 _words_scanned_limit = _real_words_scanned_limit;
ysr@777 3601
ysr@777 3602 _real_refs_reached_limit = _refs_reached + refs_reached_period;
ysr@777 3603 _refs_reached_limit = _real_refs_reached_limit;
ysr@777 3604 }
ysr@777 3605
ysr@777 3606 void CMTask::decrease_limits() {
ysr@777 3607 // This is called when we believe that we're going to do an infrequent
ysr@777 3608 // operation which will increase the per byte scanned cost (i.e. move
ysr@777 3609 // entries to/from the global stack). It basically tries to decrease the
ysr@777 3610 // scanning limit so that the clock is called earlier.
ysr@777 3611
ysr@777 3612 if (_cm->verbose_medium())
ysr@777 3613 gclog_or_tty->print_cr("[%d] decreasing limits", _task_id);
ysr@777 3614
ysr@777 3615 _words_scanned_limit = _real_words_scanned_limit -
ysr@777 3616 3 * words_scanned_period / 4;
ysr@777 3617 _refs_reached_limit = _real_refs_reached_limit -
ysr@777 3618 3 * refs_reached_period / 4;
ysr@777 3619 }
ysr@777 3620
ysr@777 3621 void CMTask::move_entries_to_global_stack() {
ysr@777 3622 // local array where we'll store the entries that will be popped
ysr@777 3623 // from the local queue
ysr@777 3624 oop buffer[global_stack_transfer_size];
ysr@777 3625
ysr@777 3626 int n = 0;
ysr@777 3627 oop obj;
ysr@777 3628 while (n < global_stack_transfer_size && _task_queue->pop_local(obj)) {
ysr@777 3629 buffer[n] = obj;
ysr@777 3630 ++n;
ysr@777 3631 }
ysr@777 3632
ysr@777 3633 if (n > 0) {
ysr@777 3634 // we popped at least one entry from the local queue
ysr@777 3635
ysr@777 3636 statsOnly( ++_global_transfers_to; _local_pops += n );
ysr@777 3637
ysr@777 3638 if (!_cm->mark_stack_push(buffer, n)) {
ysr@777 3639 if (_cm->verbose_low())
ysr@777 3640 gclog_or_tty->print_cr("[%d] aborting due to global stack overflow", _task_id);
ysr@777 3641 set_has_aborted();
ysr@777 3642 } else {
ysr@777 3643 // the transfer was successful
ysr@777 3644
ysr@777 3645 if (_cm->verbose_medium())
ysr@777 3646 gclog_or_tty->print_cr("[%d] pushed %d entries to the global stack",
ysr@777 3647 _task_id, n);
ysr@777 3648 statsOnly( int tmp_size = _cm->mark_stack_size();
ysr@777 3649 if (tmp_size > _global_max_size)
ysr@777 3650 _global_max_size = tmp_size;
ysr@777 3651 _global_pushes += n );
ysr@777 3652 }
ysr@777 3653 }
ysr@777 3654
ysr@777 3655 // this operation was quite expensive, so decrease the limits
ysr@777 3656 decrease_limits();
ysr@777 3657 }
ysr@777 3658
ysr@777 3659 void CMTask::get_entries_from_global_stack() {
ysr@777 3660 // local array where we'll store the entries that will be popped
ysr@777 3661 // from the global stack.
ysr@777 3662 oop buffer[global_stack_transfer_size];
ysr@777 3663 int n;
ysr@777 3664 _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n);
tonyp@1458 3665 assert(n <= global_stack_transfer_size,
tonyp@1458 3666 "we should not pop more than the given limit");
ysr@777 3667 if (n > 0) {
ysr@777 3668 // yes, we did actually pop at least one entry
ysr@777 3669
ysr@777 3670 statsOnly( ++_global_transfers_from; _global_pops += n );
ysr@777 3671 if (_cm->verbose_medium())
ysr@777 3672 gclog_or_tty->print_cr("[%d] popped %d entries from the global stack",
ysr@777 3673 _task_id, n);
ysr@777 3674 for (int i = 0; i < n; ++i) {
ysr@777 3675 bool success = _task_queue->push(buffer[i]);
ysr@777 3676 // We only call this when the local queue is empty or under a
ysr@777 3677 // given target limit. So, we do not expect this push to fail.
tonyp@1458 3678 assert(success, "invariant");
ysr@777 3679 }
ysr@777 3680
ysr@777 3681 statsOnly( int tmp_size = _task_queue->size();
ysr@777 3682 if (tmp_size > _local_max_size)
ysr@777 3683 _local_max_size = tmp_size;
ysr@777 3684 _local_pushes += n );
ysr@777 3685 }
ysr@777 3686
ysr@777 3687 // this operation was quite expensive, so decrease the limits
ysr@777 3688 decrease_limits();
ysr@777 3689 }
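
// A usage sketch of the two transfer routines above: push() falls
// back to move_entries_to_global_stack() when the local queue is
// full, and drain_global_stack() calls
// get_entries_from_global_stack() when local work runs out. Both
// move up to global_stack_transfer_size entries per call, so the
// per-entry cost of a transfer stays roughly constant, and both end
// with decrease_limits() so that the next regular_clock_call() comes
// sooner after such an expensive step.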
ysr@777 3690
ysr@777 3691 void CMTask::drain_local_queue(bool partially) {
ysr@777 3692 if (has_aborted())
ysr@777 3693 return;
ysr@777 3694
ysr@777 3695   // Decide what the target size is, depending on whether we're going to
ysr@777 3696 // drain it partially (so that other tasks can steal if they run out
ysr@777 3697 // of things to do) or totally (at the very end).
ysr@777 3698 size_t target_size;
ysr@777 3699 if (partially)
ysr@777 3700 target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
ysr@777 3701 else
ysr@777 3702 target_size = 0;
ysr@777 3703
ysr@777 3704 if (_task_queue->size() > target_size) {
ysr@777 3705 if (_cm->verbose_high())
ysr@777 3706 gclog_or_tty->print_cr("[%d] draining local queue, target size = %d",
ysr@777 3707 _task_id, target_size);
ysr@777 3708
ysr@777 3709 oop obj;
ysr@777 3710 bool ret = _task_queue->pop_local(obj);
ysr@777 3711 while (ret) {
ysr@777 3712 statsOnly( ++_local_pops );
ysr@777 3713
ysr@777 3714 if (_cm->verbose_high())
ysr@777 3715 gclog_or_tty->print_cr("[%d] popped "PTR_FORMAT, _task_id,
ysr@777 3716 (void*) obj);
ysr@777 3717
tonyp@1458 3718 assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" );
tonyp@2643 3719 assert(!_g1h->is_on_master_free_list(
tonyp@2472 3720 _g1h->heap_region_containing((HeapWord*) obj)), "invariant");
ysr@777 3721
ysr@777 3722 scan_object(obj);
ysr@777 3723
ysr@777 3724 if (_task_queue->size() <= target_size || has_aborted())
ysr@777 3725 ret = false;
ysr@777 3726 else
ysr@777 3727 ret = _task_queue->pop_local(obj);
ysr@777 3728 }
ysr@777 3729
ysr@777 3730 if (_cm->verbose_high())
ysr@777 3731 gclog_or_tty->print_cr("[%d] drained local queue, size = %d",
ysr@777 3732 _task_id, _task_queue->size());
ysr@777 3733 }
ysr@777 3734 }
ysr@777 3735
ysr@777 3736 void CMTask::drain_global_stack(bool partially) {
ysr@777 3737 if (has_aborted())
ysr@777 3738 return;
ysr@777 3739
ysr@777 3740 // We have a policy to drain the local queue before we attempt to
ysr@777 3741 // drain the global stack.
tonyp@1458 3742 assert(partially || _task_queue->size() == 0, "invariant");
ysr@777 3743
ysr@777 3744   // Decide what the target size is, depending on whether we're going to
ysr@777 3745 // drain it partially (so that other tasks can steal if they run out
ysr@777 3746 // of things to do) or totally (at the very end). Notice that,
ysr@777 3747 // because we move entries from the global stack in chunks or
ysr@777 3748 // because another task might be doing the same, we might in fact
ysr@777 3749 // drop below the target. But, this is not a problem.
ysr@777 3750 size_t target_size;
ysr@777 3751 if (partially)
ysr@777 3752 target_size = _cm->partial_mark_stack_size_target();
ysr@777 3753 else
ysr@777 3754 target_size = 0;
ysr@777 3755
ysr@777 3756 if (_cm->mark_stack_size() > target_size) {
ysr@777 3757 if (_cm->verbose_low())
ysr@777 3758 gclog_or_tty->print_cr("[%d] draining global_stack, target size %d",
ysr@777 3759 _task_id, target_size);
ysr@777 3760
ysr@777 3761 while (!has_aborted() && _cm->mark_stack_size() > target_size) {
ysr@777 3762 get_entries_from_global_stack();
ysr@777 3763 drain_local_queue(partially);
ysr@777 3764 }
ysr@777 3765
ysr@777 3766 if (_cm->verbose_low())
ysr@777 3767 gclog_or_tty->print_cr("[%d] drained global stack, size = %d",
ysr@777 3768 _task_id, _cm->mark_stack_size());
ysr@777 3769 }
ysr@777 3770 }
ysr@777 3771
ysr@777 3772 // The SATB queue set makes several assumptions about whether to call
ysr@777 3773 // the par or non-par versions of its methods. This is why some of the
ysr@777 3774 // code is replicated. We should really get rid of the single-threaded
ysr@777 3775 // version of the code to simplify things.
ysr@777 3776 void CMTask::drain_satb_buffers() {
ysr@777 3777 if (has_aborted())
ysr@777 3778 return;
ysr@777 3779
ysr@777 3780 // We set this so that the regular clock knows that we're in the
ysr@777 3781 // middle of draining buffers and doesn't set the abort flag when it
ysr@777 3782 // notices that SATB buffers are available for draining. It'd be
ysr@777 3783   // very counterproductive if it did that. :-)
ysr@777 3784 _draining_satb_buffers = true;
ysr@777 3785
ysr@777 3786 CMObjectClosure oc(this);
ysr@777 3787 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
jmasa@2188 3788 if (G1CollectedHeap::use_parallel_gc_threads())
ysr@777 3789 satb_mq_set.set_par_closure(_task_id, &oc);
ysr@777 3790 else
ysr@777 3791 satb_mq_set.set_closure(&oc);
ysr@777 3792
ysr@777 3793 // This keeps claiming and applying the closure to completed buffers
ysr@777 3794 // until we run out of buffers or we need to abort.
jmasa@2188 3795 if (G1CollectedHeap::use_parallel_gc_threads()) {
ysr@777 3796 while (!has_aborted() &&
ysr@777 3797 satb_mq_set.par_apply_closure_to_completed_buffer(_task_id)) {
ysr@777 3798 if (_cm->verbose_medium())
ysr@777 3799 gclog_or_tty->print_cr("[%d] processed an SATB buffer", _task_id);
ysr@777 3800 statsOnly( ++_satb_buffers_processed );
ysr@777 3801 regular_clock_call();
ysr@777 3802 }
ysr@777 3803 } else {
ysr@777 3804 while (!has_aborted() &&
ysr@777 3805 satb_mq_set.apply_closure_to_completed_buffer()) {
ysr@777 3806 if (_cm->verbose_medium())
ysr@777 3807 gclog_or_tty->print_cr("[%d] processed an SATB buffer", _task_id);
ysr@777 3808 statsOnly( ++_satb_buffers_processed );
ysr@777 3809 regular_clock_call();
ysr@777 3810 }
ysr@777 3811 }
ysr@777 3812
ysr@777 3813 if (!concurrent() && !has_aborted()) {
ysr@777 3814 // We should only do this during remark.
jmasa@2188 3815 if (G1CollectedHeap::use_parallel_gc_threads())
ysr@777 3816 satb_mq_set.par_iterate_closure_all_threads(_task_id);
ysr@777 3817 else
ysr@777 3818 satb_mq_set.iterate_closure_all_threads();
ysr@777 3819 }
ysr@777 3820
ysr@777 3821 _draining_satb_buffers = false;
ysr@777 3822
tonyp@1458 3823 assert(has_aborted() ||
tonyp@1458 3824 concurrent() ||
tonyp@1458 3825 satb_mq_set.completed_buffers_num() == 0, "invariant");
ysr@777 3826
jmasa@2188 3827 if (G1CollectedHeap::use_parallel_gc_threads())
ysr@777 3828 satb_mq_set.set_par_closure(_task_id, NULL);
ysr@777 3829 else
ysr@777 3830 satb_mq_set.set_closure(NULL);
ysr@777 3831
ysr@777 3832 // again, this was a potentially expensive operation, decrease the
ysr@777 3833 // limits to get the regular clock call early
ysr@777 3834 decrease_limits();
ysr@777 3835 }
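
// For context, a simplified sketch of what fills these buffers (the
// real barrier lives in the G1 pre-write barrier code): on every
// reference store the mutator enqueues the *old* value, which is what
// keeps the snapshot-at-the-beginning invariant:
//
//   // pseudo-code for the pre-write barrier
//   if (marking_is_active) {
//     oop pre_val = *field;
//     if (pre_val != NULL) {
//       enqueue(pre_val);  // ends up in a SATB buffer drained here
//     }
//   }
//   *field = new_val;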
ysr@777 3836
ysr@777 3837 void CMTask::drain_region_stack(BitMapClosure* bc) {
ysr@777 3838 if (has_aborted())
ysr@777 3839 return;
ysr@777 3840
tonyp@1458 3841 assert(_region_finger == NULL,
tonyp@1458 3842 "it should be NULL when we're not scanning a region");
ysr@777 3843
johnc@2190 3844 if (!_cm->region_stack_empty() || !_aborted_region.is_empty()) {
ysr@777 3845 if (_cm->verbose_low())
ysr@777 3846 gclog_or_tty->print_cr("[%d] draining region stack, size = %d",
ysr@777 3847 _task_id, _cm->region_stack_size());
ysr@777 3848
johnc@2190 3849 MemRegion mr;
johnc@2190 3850
johnc@2190 3851 if (!_aborted_region.is_empty()) {
johnc@2190 3852 mr = _aborted_region;
johnc@2190 3853 _aborted_region = MemRegion();
johnc@2190 3854
johnc@2190 3855 if (_cm->verbose_low())
johnc@2190 3856 gclog_or_tty->print_cr("[%d] scanning aborted region [ " PTR_FORMAT ", " PTR_FORMAT " )",
johnc@2190 3857 _task_id, mr.start(), mr.end());
johnc@2190 3858 } else {
johnc@2190 3859 mr = _cm->region_stack_pop_lock_free();
johnc@2190 3860 // it returns MemRegion() if the pop fails
johnc@2190 3861 statsOnly(if (mr.start() != NULL) ++_region_stack_pops );
johnc@2190 3862 }
ysr@777 3863
ysr@777 3864 while (mr.start() != NULL) {
ysr@777 3865 if (_cm->verbose_medium())
ysr@777 3866 gclog_or_tty->print_cr("[%d] we are scanning region "
ysr@777 3867 "["PTR_FORMAT", "PTR_FORMAT")",
ysr@777 3868 _task_id, mr.start(), mr.end());
johnc@2190 3869
tonyp@1458 3870 assert(mr.end() <= _cm->finger(),
tonyp@1458 3871 "otherwise the region shouldn't be on the stack");
ysr@777 3872 assert(!mr.is_empty(), "Only non-empty regions live on the region stack");
ysr@777 3873 if (_nextMarkBitMap->iterate(bc, mr)) {
tonyp@1458 3874 assert(!has_aborted(),
tonyp@1458 3875 "cannot abort the task without aborting the bitmap iteration");
ysr@777 3876
ysr@777 3877 // We finished iterating over the region without aborting.
ysr@777 3878 regular_clock_call();
ysr@777 3879 if (has_aborted())
ysr@777 3880 mr = MemRegion();
ysr@777 3881 else {
johnc@2190 3882 mr = _cm->region_stack_pop_lock_free();
ysr@777 3883 // it returns MemRegion() if the pop fails
ysr@777 3884 statsOnly(if (mr.start() != NULL) ++_region_stack_pops );
ysr@777 3885 }
ysr@777 3886 } else {
tonyp@1458 3887 assert(has_aborted(), "currently the only way to do so");
ysr@777 3888
ysr@777 3889 // The only way to abort the bitmap iteration is to return
ysr@777 3890 // false from the do_bit() method. However, inside the
ysr@777 3891 // do_bit() method we move the _region_finger to point to the
ysr@777 3892 // object currently being looked at. So, if we bail out, we
ysr@777 3893 // have definitely set _region_finger to something non-null.
tonyp@1458 3894 assert(_region_finger != NULL, "invariant");
ysr@777 3895
johnc@2190 3896 // Make sure that any previously aborted region has been
johnc@2190 3897 // cleared.
johnc@2190 3898 assert(_aborted_region.is_empty(), "aborted region not cleared");
johnc@2190 3899
ysr@777 3900 // The iteration was actually aborted. So now _region_finger
ysr@777 3901 // points to the address of the object we last scanned. If we
ysr@777 3902 // leave it there, when we restart this task, we will rescan
ysr@777 3903 // the object. It is easy to avoid this. We move the finger by
ysr@777 3904 // enough to point to the next possible object header (the
ysr@777 3905 // bitmap knows by how much we need to move it as it knows its
ysr@777 3906 // granularity).
ysr@777 3907 MemRegion newRegion =
ysr@777 3908 MemRegion(_nextMarkBitMap->nextWord(_region_finger), mr.end());
ysr@777 3909
ysr@777 3910 if (!newRegion.is_empty()) {
ysr@777 3911 if (_cm->verbose_low()) {
johnc@2190 3912           gclog_or_tty->print_cr("[%d] recording unscanned region "
johnc@2190 3913 "[" PTR_FORMAT "," PTR_FORMAT ") in CMTask",
ysr@777 3914 _task_id,
ysr@777 3915 newRegion.start(), newRegion.end());
ysr@777 3916 }
johnc@2190 3917 // Now record the part of the region we didn't scan to
johnc@2190 3918 // make sure this task scans it later.
johnc@2190 3919 _aborted_region = newRegion;
ysr@777 3920 }
ysr@777 3921 // break from while
ysr@777 3922 mr = MemRegion();
ysr@777 3923 }
ysr@777 3924 _region_finger = NULL;
ysr@777 3925 }
ysr@777 3926
ysr@777 3927 if (_cm->verbose_low())
ysr@777 3928 gclog_or_tty->print_cr("[%d] drained region stack, size = %d",
ysr@777 3929 _task_id, _cm->region_stack_size());
ysr@777 3930 }
ysr@777 3931 }
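
// In outline, the protocol above is: resume the saved _aborted_region
// if there is one, otherwise pop a region from the region stack;
// iterate the bitmap over it; and, if the iteration aborts part-way,
// save the unscanned tail [nextWord(_region_finger), mr.end()) into
// _aborted_region so that this task finishes it on its next
// invocation.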
ysr@777 3932
ysr@777 3933 void CMTask::print_stats() {
ysr@777 3934 gclog_or_tty->print_cr("Marking Stats, task = %d, calls = %d",
ysr@777 3935 _task_id, _calls);
ysr@777 3936 gclog_or_tty->print_cr(" Elapsed time = %1.2lfms, Termination time = %1.2lfms",
ysr@777 3937 _elapsed_time_ms, _termination_time_ms);
ysr@777 3938 gclog_or_tty->print_cr(" Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
ysr@777 3939 _step_times_ms.num(), _step_times_ms.avg(),
ysr@777 3940 _step_times_ms.sd());
ysr@777 3941 gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms",
ysr@777 3942 _step_times_ms.maximum(), _step_times_ms.sum());
ysr@777 3943
ysr@777 3944 #if _MARKING_STATS_
ysr@777 3945 gclog_or_tty->print_cr(" Clock Intervals (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
ysr@777 3946 _all_clock_intervals_ms.num(), _all_clock_intervals_ms.avg(),
ysr@777 3947 _all_clock_intervals_ms.sd());
ysr@777 3948 gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms",
ysr@777 3949 _all_clock_intervals_ms.maximum(),
ysr@777 3950 _all_clock_intervals_ms.sum());
ysr@777 3951 gclog_or_tty->print_cr(" Clock Causes (cum): scanning = %d, marking = %d",
ysr@777 3952 _clock_due_to_scanning, _clock_due_to_marking);
ysr@777 3953 gclog_or_tty->print_cr(" Objects: scanned = %d, found on the bitmap = %d",
ysr@777 3954 _objs_scanned, _objs_found_on_bitmap);
ysr@777 3955 gclog_or_tty->print_cr(" Local Queue: pushes = %d, pops = %d, max size = %d",
ysr@777 3956 _local_pushes, _local_pops, _local_max_size);
ysr@777 3957 gclog_or_tty->print_cr(" Global Stack: pushes = %d, pops = %d, max size = %d",
ysr@777 3958 _global_pushes, _global_pops, _global_max_size);
ysr@777 3959 gclog_or_tty->print_cr(" transfers to = %d, transfers from = %d",
ysr@777 3960 _global_transfers_to,_global_transfers_from);
ysr@777 3961 gclog_or_tty->print_cr(" Regions: claimed = %d, Region Stack: pops = %d",
ysr@777 3962 _regions_claimed, _region_stack_pops);
ysr@777 3963 gclog_or_tty->print_cr(" SATB buffers: processed = %d", _satb_buffers_processed);
ysr@777 3964 gclog_or_tty->print_cr(" Steals: attempts = %d, successes = %d",
ysr@777 3965 _steal_attempts, _steals);
ysr@777 3966 gclog_or_tty->print_cr(" Aborted: %d, due to", _aborted);
ysr@777 3967 gclog_or_tty->print_cr(" overflow: %d, global abort: %d, yield: %d",
ysr@777 3968 _aborted_overflow, _aborted_cm_aborted, _aborted_yield);
ysr@777 3969 gclog_or_tty->print_cr(" time out: %d, SATB: %d, termination: %d",
ysr@777 3970 _aborted_timed_out, _aborted_satb, _aborted_termination);
ysr@777 3971 #endif // _MARKING_STATS_
ysr@777 3972 }
ysr@777 3973
ysr@777 3974 /*****************************************************************************
ysr@777 3975
ysr@777 3976 The do_marking_step(time_target_ms, ...) method is the building block
ysr@777 3977 of the parallel marking framework. It can be called in parallel
ysr@777 3978 with other invocations of do_marking_step() on different tasks
ysr@777 3979 (but only one per task, obviously) and concurrently with the
ysr@777 3980 mutator threads, or during remark, hence it eliminates the need
ysr@777 3981 for two versions of the code. When called during remark, it will
ysr@777 3982 pick up from where the task left off during the concurrent marking
ysr@777 3983 phase. Interestingly, tasks are also claimable during evacuation
ysr@777 3984 pauses, since do_marking_step() ensures that it aborts before
ysr@777 3985 it needs to yield.
ysr@777 3986
ysr@777 3987 The data structures that it uses to do marking work are the
ysr@777 3988 following:
ysr@777 3989
ysr@777 3990 (1) Marking Bitmap. If there are gray objects that appear only
ysr@777 3991 on the bitmap (this happens either when dealing with an overflow
ysr@777 3992 or when the initial marking phase has simply marked the roots
ysr@777 3993 and didn't push them on the stack), then tasks claim heap
ysr@777 3994 regions whose bitmap they then scan to find gray objects. A
ysr@777 3995 global finger indicates where the end of the last claimed region
ysr@777 3996 is. A local finger indicates how far into the region a task has
ysr@777 3997 scanned. The two fingers are used to determine how to gray an
ysr@777 3998 object (i.e. whether simply marking it is OK, as it will be
ysr@777 3999 visited by a task in the future, or whether it also needs to be
ysr@777 4000 pushed on a stack); see the sketch after this list.
ysr@777 4001
ysr@777 4002 (2) Local Queue. The task's local queue, which the task can
ysr@777 4003 access reasonably efficiently. Other tasks can steal from
ysr@777 4004 it when they run out of work. Throughout the marking phase, a
ysr@777 4005 task attempts to keep its local queue short but not totally
ysr@777 4006 empty, so that entries are available for stealing by other
ysr@777 4007 tasks. Only when there is no more work will a task totally
ysr@777 4008 drain its local queue.
ysr@777 4009
ysr@777 4010 (3) Global Mark Stack. This handles local queue overflow. During
ysr@777 4011 marking, entries are only moved between it and the local queues
ysr@777 4012 in sets, as access to it requires a mutex and finer-grained
ysr@777 4013 interaction with it might cause contention. If it
ysr@777 4014 overflows, then the marking phase should restart and iterate
ysr@777 4015 over the bitmap to identify gray objects. Throughout the marking
ysr@777 4016 phase, tasks attempt to keep the global mark stack at a small
ysr@777 4017 length but not totally empty, so that entries are available for
ysr@777 4018 popping by other tasks. Only when there is no more work will
ysr@777 4019 tasks totally drain the global mark stack.
ysr@777 4020
ysr@777 4021 (4) Global Region Stack. Entries on it correspond to areas of
ysr@777 4022 the bitmap that need to be scanned since they contain gray
ysr@777 4023 objects. Pushes on the region stack only happen during
ysr@777 4024 evacuation pauses and typically correspond to areas covered by
ysr@777 4025 GCLABs. If it overflows, then the marking phase should restart
ysr@777 4026 and iterate over the bitmap to identify gray objects. Tasks will
ysr@777 4027 try to totally drain the region stack as soon as possible.
ysr@777 4028
ysr@777 4029 (5) SATB Buffer Queue. This is where completed SATB buffers are
ysr@777 4030 made available. Buffers are regularly removed from this queue
ysr@777 4031 and scanned for roots, so that the queue doesn't get too
ysr@777 4032 long. During remark, all completed buffers are processed, as
ysr@777 4033 well as the filled-in parts of any uncompleted buffers.
ysr@777 4034
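   As a hedged illustration of how the two fingers in (1) drive the
   graying decision (the names below are illustrative, not the actual
   code; within the region currently being scanned, the local finger
   plays the analogous role):

     if (obj_addr < global_finger) {
       // the scan has already claimed/passed this address, so marking
       // the bit alone would lose the object; push it on a stack too
       mark(obj); push(obj);
     } else {
       // obj will be visited when the region containing it is
       // claimed, so marking it is enough
       mark(obj);
     }
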
ysr@777 4035 The do_marking_step() method tries to abort when the time target
ysr@777 4036 has been reached. There are a few other cases when the
ysr@777 4037 do_marking_step() method also aborts:
ysr@777 4038
ysr@777 4039 (1) When the marking phase has been aborted (after a Full GC).
ysr@777 4040
ysr@777 4041 (2) When a global overflow (either on the global stack or the
ysr@777 4042 region stack) has been triggered. Before the task aborts, it
ysr@777 4043 syncs up with the other tasks to ensure that all the marking
ysr@777 4044 data structures (local queues, stacks, fingers etc.) are
ysr@777 4045 re-initialised so that, when do_marking_step() completes, the
ysr@777 4046 marking phase can restart immediately (a sketch follows this list).
ysr@777 4047
ysr@777 4048 (3) When enough completed SATB buffers are available. The
ysr@777 4049 do_marking_step() method only tries to drain SATB buffers right
ysr@777 4050 at the beginning. So, if enough buffers are available, the
ysr@777 4051 marking step aborts and the SATB buffers are processed at
ysr@777 4052 the beginning of the next invocation.
ysr@777 4053
ysr@777 4054 (4) To yield. When we have to yield, we abort and do the yield
ysr@777 4055 right at the end of do_marking_step(). This saves us a lot of
ysr@777 4056 hassle because, by yielding, we might allow a Full GC. If that
ysr@777 4057 happens, objects will be compacted underneath our feet, the
ysr@777 4058 heap might shrink, etc. We avoid having to check for all this
ysr@777 4059 mid-step by aborting and doing the yield right at the end.
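
   A sketch of the sync-up for case (2), mirroring the calls that
   do_marking_step() makes when it detects that a global overflow was
   raised (simplified; see the overflow handling at the end of the
   method):

     _cm->enter_first_sync_barrier(_task_id);
     // all tasks have now stopped doing marking work, so it is safe
     // to re-initialise our local data structures
     clear_region_fields();
     _cm->enter_second_sync_barrier(_task_id);
     // everything has been re-initialised; ready to restart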
ysr@777 4060
ysr@777 4061 From the above it follows that the do_marking_step() method should
ysr@777 4062 be called in a loop (or, otherwise, regularly) until it completes.
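
   For example, a caller might drive a task along these lines (a
   minimal, hypothetical sketch; the real callers also handle yielding
   and choose the stealing/termination flags appropriately):

     do {
       task->do_marking_step(10.0 /* target ms, for example */,
                             true /* do_stealing */,
                             true /* do_termination */);
     } while (task->has_aborted() && !cm->has_aborted());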
ysr@777 4063
ysr@777 4064 If a marking step completes without its has_aborted() flag being
ysr@777 4065 true, it means it has completed the current marking phase (and
ysr@777 4066 also all other marking tasks have done so and have all synced up).
ysr@777 4067
ysr@777 4068 A method called regular_clock_call() is invoked "regularly" (at
ysr@777 4069 sub-millisecond intervals) throughout marking. It is this clock
ysr@777 4070 method that checks all the abort conditions mentioned above and
ysr@777 4071 decides when the task should abort. A work-based scheme is used
ysr@777 4072 to trigger this clock method: it is called when the number of
ysr@777 4073 object words the marking phase has scanned or the number of
ysr@777 4074 references it has visited reaches a given limit. Additional
ysr@777 4075 invocations of the clock method have been planted in a few other
ysr@777 4076 strategic places too. The initial reason for the clock method was
ysr@777 4077 to avoid calling vtime too often, as it is quite expensive. Once
ysr@777 4078 it was in place, it was natural to piggy-back all the other abort
ysr@777 4079 conditions on it rather than check them constantly throughout the code.
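
   In effect, the work-based scheme bumps a counter on each hot path
   and compares it against a precomputed limit, along these lines (a
   simplified sketch of the actual checks; the limit names are set up
   by recalculate_limits() below):

     _words_scanned += obj_size;   // after scanning an object
     _refs_reached++;              // after visiting a reference
     if (_words_scanned >= _words_scanned_limit ||
         _refs_reached >= _refs_reached_limit) {
       regular_clock_call();
     }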
ysr@777 4080
ysr@777 4081 *****************************************************************************/
ysr@777 4082
johnc@2494 4083 void CMTask::do_marking_step(double time_target_ms,
johnc@2494 4084 bool do_stealing,
johnc@2494 4085 bool do_termination) {
tonyp@1458 4086 assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
tonyp@1458 4087 assert(concurrent() == _cm->concurrent(), "they should be the same");
tonyp@1458 4088
tonyp@1458 4089 assert(concurrent() || _cm->region_stack_empty(),
tonyp@1458 4090 "the region stack should have been cleared before remark");
johnc@2190 4091 assert(concurrent() || !_cm->has_aborted_regions(),
johnc@2190 4092 "aborted regions should have been cleared before remark");
tonyp@1458 4093 assert(_region_finger == NULL,
tonyp@1458 4094 "this should be non-null only when a region is being scanned");
ysr@777 4095
ysr@777 4096 G1CollectorPolicy* g1_policy = _g1h->g1_policy();
tonyp@1458 4097 assert(_task_queues != NULL, "invariant");
tonyp@1458 4098 assert(_task_queue != NULL, "invariant");
tonyp@1458 4099 assert(_task_queues->queue(_task_id) == _task_queue, "invariant");
tonyp@1458 4100
tonyp@1458 4101 assert(!_claimed,
tonyp@1458 4102 "only one thread should claim this task at any one time");
ysr@777 4103
ysr@777 4104 // OK, this doesn't safeguard against all possible scenarios, as it is
ysr@777 4105 // possible for two threads to set the _claimed flag at the same
ysr@777 4106 // time. But it is only for debugging purposes anyway and it will
ysr@777 4107 // catch most problems.
ysr@777 4108 _claimed = true;
ysr@777 4109
ysr@777 4110 _start_time_ms = os::elapsedVTime() * 1000.0;
ysr@777 4111 statsOnly( _interval_start_time_ms = _start_time_ms );
ysr@777 4112
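  // Estimate, from past steps, how much we tend to overshoot the time
  // target (see the _marking_step_diffs_ms updates further down) and
  // shave that predicted overshoot off the target up front.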
ysr@777 4113 double diff_prediction_ms =
ysr@777 4114 g1_policy->get_new_prediction(&_marking_step_diffs_ms);
ysr@777 4115 _time_target_ms = time_target_ms - diff_prediction_ms;
ysr@777 4116
ysr@777 4117 // set up the variables that are used in the work-based scheme to
ysr@777 4118 // call the regular clock method
ysr@777 4119 _words_scanned = 0;
ysr@777 4120 _refs_reached = 0;
ysr@777 4121 recalculate_limits();
ysr@777 4122
ysr@777 4123 // clear all flags
ysr@777 4124 clear_has_aborted();
johnc@2494 4125 _has_timed_out = false;
ysr@777 4126 _draining_satb_buffers = false;
ysr@777 4127
ysr@777 4128 ++_calls;
ysr@777 4129
ysr@777 4130 if (_cm->verbose_low())
ysr@777 4131 gclog_or_tty->print_cr("[%d] >>>>>>>>>> START, call = %d, "
ysr@777 4132 "target = %1.2lfms >>>>>>>>>>",
ysr@777 4133 _task_id, _calls, _time_target_ms);
ysr@777 4134
ysr@777 4135 // Set up the bitmap and oop closures. Anything that uses them is
ysr@777 4136 // eventually called from this method, so it is OK to allocate these
ysr@777 4137 // statically.
ysr@777 4138 CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap);
ysr@777 4139 CMOopClosure oop_closure(_g1h, _cm, this);
ysr@777 4140 set_oop_closure(&oop_closure);
ysr@777 4141
ysr@777 4142 if (_cm->has_overflown()) {
ysr@777 4143 // This can happen if the region stack or the mark stack overflows
ysr@777 4144 // during a GC pause and this task, after a yield point,
ysr@777 4145 // restarts. We have to abort as we need to get into the overflow
ysr@777 4146 // protocol which happens right at the end of this task.
ysr@777 4147 set_has_aborted();
ysr@777 4148 }
ysr@777 4149
ysr@777 4150 // First drain any available SATB buffers. After this, we will not
ysr@777 4151 // look at SATB buffers before the next invocation of this method.
ysr@777 4152 // If enough completed SATB buffers are queued up, the regular clock
ysr@777 4153 // will abort this task so that it restarts.
ysr@777 4154 drain_satb_buffers();
ysr@777 4155 // ...then partially drain the local queue and the global stack
ysr@777 4156 drain_local_queue(true);
ysr@777 4157 drain_global_stack(true);
ysr@777 4158
ysr@777 4159 // Then totally drain the region stack. We will not look at
ysr@777 4160 // it again before the next invocation of this method. Entries on
ysr@777 4161 // the region stack are only added during evacuation pauses, for
ysr@777 4162 // which we have to yield. When we do, we abort the task anyway so
ysr@777 4163 // it will look at the region stack again when it restarts.
ysr@777 4164 bitmap_closure.set_scanning_heap_region(false);
ysr@777 4165 drain_region_stack(&bitmap_closure);
ysr@777 4166 // ...then partially drain the local queue and the global stack
ysr@777 4167 drain_local_queue(true);
ysr@777 4168 drain_global_stack(true);
ysr@777 4169
ysr@777 4170 do {
ysr@777 4171 if (!has_aborted() && _curr_region != NULL) {
ysr@777 4172 // This means that we're already holding on to a region.
tonyp@1458 4173 assert(_finger != NULL, "if region is not NULL, then the finger "
tonyp@1458 4174 "should not be NULL either");
ysr@777 4175
ysr@777 4176 // We might have restarted this task after an evacuation pause
ysr@777 4177 // which might have evacuated the region we're holding on to
ysr@777 4178 // underneath our feet. Let's read its limit again to make sure
ysr@777 4179 // that we do not iterate over a region of the heap that
ysr@777 4180 // contains garbage (update_region_limit() will also move
ysr@777 4181 // _finger to the start of the region if it is found empty).
ysr@777 4182 update_region_limit();
ysr@777 4183 // We will start from _finger not from the start of the region,
ysr@777 4184 // as we might be restarting this task after aborting half-way
ysr@777 4185 // through scanning this region. In this case, _finger points to
ysr@777 4186 // the address where we last found a marked object. If this is a
ysr@777 4187 // fresh region, _finger points to start().
ysr@777 4188 MemRegion mr = MemRegion(_finger, _region_limit);
ysr@777 4189
ysr@777 4190 if (_cm->verbose_low())
ysr@777 4191 gclog_or_tty->print_cr("[%d] we're scanning part "
ysr@777 4192 "["PTR_FORMAT", "PTR_FORMAT") "
ysr@777 4193 "of region "PTR_FORMAT,
ysr@777 4194 _task_id, _finger, _region_limit, _curr_region);
ysr@777 4195
ysr@777 4196 // Let's iterate over the bitmap of the part of the
ysr@777 4197 // region that is left.
ysr@777 4198 bitmap_closure.set_scanning_heap_region(true);
ysr@777 4199 if (mr.is_empty() ||
ysr@777 4200 _nextMarkBitMap->iterate(&bitmap_closure, mr)) {
ysr@777 4201 // We successfully completed iterating over the region. Now,
ysr@777 4202 // let's give up the region.
ysr@777 4203 giveup_current_region();
ysr@777 4204 regular_clock_call();
ysr@777 4205 } else {
tonyp@1458 4206 assert(has_aborted(), "currently the only way to do so");
ysr@777 4207 // The only way to abort the bitmap iteration is to return
ysr@777 4208 // false from the do_bit() method. However, inside the
ysr@777 4209 // do_bit() method we move the _finger to point to the
ysr@777 4210 // object currently being looked at. So, if we bail out, we
ysr@777 4211 // have definitely set _finger to something non-null.
tonyp@1458 4212 assert(_finger != NULL, "invariant");
ysr@777 4213
ysr@777 4214 // Region iteration was actually aborted. So now _finger
ysr@777 4215 // points to the address of the object we last scanned. If we
ysr@777 4216 // leave it there, when we restart this task, we will rescan
ysr@777 4217 // the object. It is easy to avoid this. We move the finger by
ysr@777 4218 // enough to point to the next possible object header (the
ysr@777 4219 // bitmap knows by how much we need to move it as it knows its
ysr@777 4220 // granularity).
apetrusenko@1749 4221 assert(_finger < _region_limit, "invariant");
apetrusenko@1749 4222 HeapWord* new_finger = _nextMarkBitMap->nextWord(_finger);
apetrusenko@1749 4223 // Check if bitmap iteration was aborted while scanning the last object
apetrusenko@1749 4224 if (new_finger >= _region_limit) {
apetrusenko@1749 4225 giveup_current_region();
apetrusenko@1749 4226 } else {
apetrusenko@1749 4227 move_finger_to(new_finger);
apetrusenko@1749 4228 }
ysr@777 4229 }
ysr@777 4230 }
ysr@777 4231 // At this point we have either completed iterating over the
ysr@777 4232 // region we were holding on to, or we have aborted.
ysr@777 4233
ysr@777 4234 // We then partially drain the local queue and the global stack.
ysr@777 4235 // (Do we really need this?)
ysr@777 4236 drain_local_queue(true);
ysr@777 4237 drain_global_stack(true);
ysr@777 4238
ysr@777 4239 // Read the note on the claim_region() method on why it might
ysr@777 4240 // return NULL with potentially more regions available for
ysr@777 4241 // claiming and why we have to check out_of_regions() to determine
ysr@777 4242 // whether we're done or not.
ysr@777 4243 while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
ysr@777 4244 // We are going to try to claim a new region. We should have
ysr@777 4245 // given up on the previous one.
tonyp@1458 4246 // Separated the asserts so that we know which one fires.
tonyp@1458 4247 assert(_curr_region == NULL, "invariant");
tonyp@1458 4248 assert(_finger == NULL, "invariant");
tonyp@1458 4249 assert(_region_limit == NULL, "invariant");
ysr@777 4250 if (_cm->verbose_low())
ysr@777 4251 gclog_or_tty->print_cr("[%d] trying to claim a new region", _task_id);
ysr@777 4252 HeapRegion* claimed_region = _cm->claim_region(_task_id);
ysr@777 4253 if (claimed_region != NULL) {
ysr@777 4254 // Yes, we managed to claim one
ysr@777 4255 statsOnly( ++_regions_claimed );
ysr@777 4256
ysr@777 4257 if (_cm->verbose_low())
ysr@777 4258 gclog_or_tty->print_cr("[%d] we successfully claimed "
ysr@777 4259 "region "PTR_FORMAT,
ysr@777 4260 _task_id, claimed_region);
ysr@777 4261
ysr@777 4262 setup_for_region(claimed_region);
tonyp@1458 4263 assert(_curr_region == claimed_region, "invariant");
ysr@777 4264 }
ysr@777 4265 // It is important to call the regular clock here. It might take
ysr@777 4266 // a while to claim a region if, for example, we hit a large
ysr@777 4267 // block of empty regions. So we need to call the regular clock
ysr@777 4268 // method once round the loop to make sure it's called
ysr@777 4269 // frequently enough.
ysr@777 4270 regular_clock_call();
ysr@777 4271 }
ysr@777 4272
ysr@777 4273 if (!has_aborted() && _curr_region == NULL) {
tonyp@1458 4274 assert(_cm->out_of_regions(),
tonyp@1458 4275 "at this point we should be out of regions");
ysr@777 4276 }
ysr@777 4277 } while ( _curr_region != NULL && !has_aborted());
ysr@777 4278
ysr@777 4279 if (!has_aborted()) {
ysr@777 4280 // We cannot check whether the global stack is empty, since other
iveresov@778 4281 // tasks might be pushing objects to it concurrently. We also cannot
iveresov@778 4282 // check if the region stack is empty because if a thread is aborting
iveresov@778 4283 // it can push a partially done region back.
tonyp@1458 4284 assert(_cm->out_of_regions(),
tonyp@1458 4285 "at this point we should be out of regions");
ysr@777 4286
ysr@777 4287 if (_cm->verbose_low())
ysr@777 4288 gclog_or_tty->print_cr("[%d] all regions claimed", _task_id);
ysr@777 4289
ysr@777 4290 // Try to reduce the number of available SATB buffers so that
ysr@777 4291 // remark has less work to do.
ysr@777 4292 drain_satb_buffers();
ysr@777 4293 }
ysr@777 4294
ysr@777 4295 // Since we've done everything else, we can now totally drain the
ysr@777 4296 // local queue and global stack.
ysr@777 4297 drain_local_queue(false);
ysr@777 4298 drain_global_stack(false);
ysr@777 4299
ysr@777 4300 // Attempt at work stealing from other tasks' queues.
johnc@2494 4301 if (do_stealing && !has_aborted()) {
ysr@777 4302 // We have not aborted. This means that we have finished all that
ysr@777 4303 // we could. Let's try to do some stealing...
ysr@777 4304
ysr@777 4305 // We cannot check whether the global stack is empty, since other
iveresov@778 4306 // tasks might be pushing objects to it concurrently. We also cannot
iveresov@778 4307 // check if the region stack is empty because if a thread is aborting
iveresov@778 4308 // it can push a partially done region back.
tonyp@1458 4309 assert(_cm->out_of_regions() && _task_queue->size() == 0,
tonyp@1458 4310 "only way to reach here");
ysr@777 4311
ysr@777 4312 if (_cm->verbose_low())
ysr@777 4313 gclog_or_tty->print_cr("[%d] starting to steal", _task_id);
ysr@777 4314
ysr@777 4315 while (!has_aborted()) {
ysr@777 4316 oop obj;
ysr@777 4317 statsOnly( ++_steal_attempts );
ysr@777 4318
ysr@777 4319 if (_cm->try_stealing(_task_id, &_hash_seed, obj)) {
ysr@777 4320 if (_cm->verbose_medium())
ysr@777 4321 gclog_or_tty->print_cr("[%d] stolen "PTR_FORMAT" successfully",
ysr@777 4322 _task_id, (void*) obj);
ysr@777 4323
ysr@777 4324 statsOnly( ++_steals );
ysr@777 4325
tonyp@1458 4326 assert(_nextMarkBitMap->isMarked((HeapWord*) obj),
tonyp@1458 4327 "any stolen object should be marked");
ysr@777 4328 scan_object(obj);
ysr@777 4329
ysr@777 4330 // And since we're towards the end, let's totally drain the
ysr@777 4331 // local queue and global stack.
ysr@777 4332 drain_local_queue(false);
ysr@777 4333 drain_global_stack(false);
ysr@777 4334 } else {
ysr@777 4335 break;
ysr@777 4336 }
ysr@777 4337 }
ysr@777 4338 }
ysr@777 4339
tonyp@2848 4340 // If we are about to wrap up and go into termination, check if we
tonyp@2848 4341 // should raise the overflow flag.
tonyp@2848 4342 if (do_termination && !has_aborted()) {
tonyp@2848 4343 if (_cm->force_overflow()->should_force()) {
tonyp@2848 4344 _cm->set_has_overflown();
tonyp@2848 4345 regular_clock_call();
tonyp@2848 4346 }
tonyp@2848 4347 }
tonyp@2848 4348
ysr@777 4349 // We still haven't aborted. Now, let's try to get into the
ysr@777 4350 // termination protocol.
johnc@2494 4351 if (do_termination && !has_aborted()) {
ysr@777 4352 // We cannot check whether the global stack is empty, since other
iveresov@778 4353 // tasks might be concurrently pushing objects on it. We also cannot
iveresov@778 4354 // check if the region stack is empty because if a thread is aborting
iveresov@778 4355 // it can push a partially done region back.
tonyp@1458 4356 // Separated the asserts so that we know which one fires.
tonyp@1458 4357 assert(_cm->out_of_regions(), "only way to reach here");
tonyp@1458 4358 assert(_task_queue->size() == 0, "only way to reach here");
ysr@777 4359
ysr@777 4360 if (_cm->verbose_low())
ysr@777 4361 gclog_or_tty->print_cr("[%d] starting termination protocol", _task_id);
ysr@777 4362
ysr@777 4363 _termination_start_time_ms = os::elapsedVTime() * 1000.0;
ysr@777 4364 // The CMTask class also extends the TerminatorTerminator class,
ysr@777 4365 // hence its should_exit_termination() method will also decide
ysr@777 4366 // whether to exit the termination protocol or not.
ysr@777 4367 bool finished = _cm->terminator()->offer_termination(this);
ysr@777 4368 double termination_end_time_ms = os::elapsedVTime() * 1000.0;
ysr@777 4369 _termination_time_ms +=
ysr@777 4370 termination_end_time_ms - _termination_start_time_ms;
ysr@777 4371
ysr@777 4372 if (finished) {
ysr@777 4373 // We're all done.
ysr@777 4374
ysr@777 4375 if (_task_id == 0) {
ysr@777 4376 // let's allow task 0 to do this
ysr@777 4377 if (concurrent()) {
tonyp@1458 4378 assert(_cm->concurrent_marking_in_progress(), "invariant");
ysr@777 4379 // we need to set this to false before the next
ysr@777 4380 // safepoint. This way we ensure that the marking phase
ysr@777 4381 // doesn't observe any more heap expansions.
ysr@777 4382 _cm->clear_concurrent_marking_in_progress();
ysr@777 4383 }
ysr@777 4384 }
ysr@777 4385
ysr@777 4386 // We can now guarantee that the global stack is empty, since
tonyp@1458 4387 // all other tasks have finished. We separated the guarantees so
tonyp@1458 4388 // that, if a condition is false, we can immediately find out
tonyp@1458 4389 // which one.
tonyp@1458 4390 guarantee(_cm->out_of_regions(), "only way to reach here");
johnc@2190 4391 guarantee(_aborted_region.is_empty(), "only way to reach here");
tonyp@1458 4392 guarantee(_cm->region_stack_empty(), "only way to reach here");
tonyp@1458 4393 guarantee(_cm->mark_stack_empty(), "only way to reach here");
tonyp@1458 4394 guarantee(_task_queue->size() == 0, "only way to reach here");
tonyp@1458 4395 guarantee(!_cm->has_overflown(), "only way to reach here");
tonyp@1458 4396 guarantee(!_cm->mark_stack_overflow(), "only way to reach here");
tonyp@1458 4397 guarantee(!_cm->region_stack_overflow(), "only way to reach here");
ysr@777 4398
ysr@777 4399 if (_cm->verbose_low())
ysr@777 4400 gclog_or_tty->print_cr("[%d] all tasks terminated", _task_id);
ysr@777 4401 } else {
ysr@777 4402 // Apparently there's more work to do. Let's abort this task. Its
ysr@777 4403 // caller will restart it and we can hopefully find more things to do.
ysr@777 4404
ysr@777 4405 if (_cm->verbose_low())
ysr@777 4406 gclog_or_tty->print_cr("[%d] apparently there is more work to do", _task_id);
ysr@777 4407
ysr@777 4408 set_has_aborted();
ysr@777 4409 statsOnly( ++_aborted_termination );
ysr@777 4410 }
ysr@777 4411 }
ysr@777 4412
ysr@777 4413 // Mainly for debugging purposes to make sure that a pointer to the
ysr@777 4414 // closure which was statically allocated in this frame doesn't
ysr@777 4415 // escape it by accident.
ysr@777 4416 set_oop_closure(NULL);
ysr@777 4417 double end_time_ms = os::elapsedVTime() * 1000.0;
ysr@777 4418 double elapsed_time_ms = end_time_ms - _start_time_ms;
ysr@777 4419 // Update the step history.
ysr@777 4420 _step_times_ms.add(elapsed_time_ms);
ysr@777 4421
ysr@777 4422 if (has_aborted()) {
ysr@777 4423 // The task was aborted for some reason.
ysr@777 4424
ysr@777 4425 statsOnly( ++_aborted );
ysr@777 4426
johnc@2494 4427 if (_has_timed_out) {
ysr@777 4428 double diff_ms = elapsed_time_ms - _time_target_ms;
ysr@777 4429 // Keep statistics of how well we did with respect to hitting
ysr@777 4430 // our target only if we actually timed out (if we aborted for
ysr@777 4431 // other reasons, then the results might get skewed).
ysr@777 4432 _marking_step_diffs_ms.add(diff_ms);
ysr@777 4433 }
ysr@777 4434
ysr@777 4435 if (_cm->has_overflown()) {
ysr@777 4436 // This is the interesting one. We aborted because a global
ysr@777 4437 // overflow was raised. This means we have to restart the
ysr@777 4438 // marking phase and start iterating over regions. However, in
ysr@777 4439 // order to do this we have to make sure that all tasks stop
ysr@777 4440 // what they are doing and re-initialise in a safe manner. We
ysr@777 4441 // will achieve this with the use of two barrier sync points.
ysr@777 4442
ysr@777 4443 if (_cm->verbose_low())
ysr@777 4444 gclog_or_tty->print_cr("[%d] detected overflow", _task_id);
ysr@777 4445
ysr@777 4446 _cm->enter_first_sync_barrier(_task_id);
ysr@777 4447 // When we exit this sync barrier we know that all tasks have
ysr@777 4448 // stopped doing marking work. So, it's now safe to
ysr@777 4449 // re-initialise our data structures. At the end of this method,
ysr@777 4450 // task 0 will clear the global data structures.
ysr@777 4451
ysr@777 4452 statsOnly( ++_aborted_overflow );
ysr@777 4453
ysr@777 4454 // We clear the local state of this task...
ysr@777 4455 clear_region_fields();
ysr@777 4456
ysr@777 4457 // ...and enter the second barrier.
ysr@777 4458 _cm->enter_second_sync_barrier(_task_id);
ysr@777 4459 // At this point everything has been re-initialised and we're
ysr@777 4460 // ready to restart.
ysr@777 4461 }
ysr@777 4462
ysr@777 4463 if (_cm->verbose_low()) {
ysr@777 4464 gclog_or_tty->print_cr("[%d] <<<<<<<<<< ABORTING, target = %1.2lfms, "
ysr@777 4465 "elapsed = %1.2lfms <<<<<<<<<<",
ysr@777 4466 _task_id, _time_target_ms, elapsed_time_ms);
ysr@777 4467 if (_cm->has_aborted())
ysr@777 4468 gclog_or_tty->print_cr("[%d] ========== MARKING ABORTED ==========",
ysr@777 4469 _task_id);
ysr@777 4470 }
ysr@777 4471 } else {
ysr@777 4472 if (_cm->verbose_low())
ysr@777 4473 gclog_or_tty->print_cr("[%d] <<<<<<<<<< FINISHED, target = %1.2lfms, "
ysr@777 4474 "elapsed = %1.2lfms <<<<<<<<<<",
ysr@777 4475 _task_id, _time_target_ms, elapsed_time_ms);
ysr@777 4476 }
ysr@777 4477
ysr@777 4478 _claimed = false;
ysr@777 4479 }
ysr@777 4480
ysr@777 4481 CMTask::CMTask(int task_id,
ysr@777 4482 ConcurrentMark* cm,
ysr@777 4483 CMTaskQueue* task_queue,
ysr@777 4484 CMTaskQueueSet* task_queues)
ysr@777 4485 : _g1h(G1CollectedHeap::heap()),
ysr@777 4486 _task_id(task_id), _cm(cm),
ysr@777 4487 _claimed(false),
ysr@777 4488 _nextMarkBitMap(NULL), _hash_seed(17),
ysr@777 4489 _task_queue(task_queue),
ysr@777 4490 _task_queues(task_queues),
johnc@2190 4491 _oop_closure(NULL),
johnc@2190 4492 _aborted_region(MemRegion()) {
tonyp@1458 4493 guarantee(task_queue != NULL, "invariant");
tonyp@1458 4494 guarantee(task_queues != NULL, "invariant");
ysr@777 4495
ysr@777 4496 statsOnly( _clock_due_to_scanning = 0;
ysr@777 4497 _clock_due_to_marking = 0 );
ysr@777 4498
ysr@777 4499 _marking_step_diffs_ms.add(0.5);
ysr@777 4500 }
tonyp@2717 4501
tonyp@2717 4502 // These are formatting macros that are used below to ensure
tonyp@2717 4503 // consistent formatting. The *_H_* versions are used to format the
tonyp@2717 4504 // header for a particular value and they should be kept consistent
tonyp@2717 4505 // with the corresponding macro. Also note that most of the macros add
tonyp@2717 4506 // the necessary white space (as a prefix) which makes them a bit
tonyp@2717 4507 // easier to compose.
tonyp@2717 4508
tonyp@2717 4509 // All the output lines are prefixed with this string to be able to
tonyp@2717 4510 // identify them easily in a large log file.
tonyp@2717 4511 #define G1PPRL_LINE_PREFIX "###"
tonyp@2717 4512
tonyp@2717 4513 #define G1PPRL_ADDR_BASE_FORMAT " "PTR_FORMAT"-"PTR_FORMAT
tonyp@2717 4514 #ifdef _LP64
tonyp@2717 4515 #define G1PPRL_ADDR_BASE_H_FORMAT " %37s"
tonyp@2717 4516 #else // _LP64
tonyp@2717 4517 #define G1PPRL_ADDR_BASE_H_FORMAT " %21s"
tonyp@2717 4518 #endif // _LP64
tonyp@2717 4519
tonyp@2717 4520 // For per-region info
tonyp@2717 4521 #define G1PPRL_TYPE_FORMAT " %-4s"
tonyp@2717 4522 #define G1PPRL_TYPE_H_FORMAT " %4s"
tonyp@2717 4523 #define G1PPRL_BYTE_FORMAT " "SIZE_FORMAT_W(9)
tonyp@2717 4524 #define G1PPRL_BYTE_H_FORMAT " %9s"
tonyp@2717 4525 #define G1PPRL_DOUBLE_FORMAT " %14.1f"
tonyp@2717 4526 #define G1PPRL_DOUBLE_H_FORMAT " %14s"
tonyp@2717 4527
tonyp@2717 4528 // For summary info
tonyp@2717 4529 #define G1PPRL_SUM_ADDR_FORMAT(tag) " "tag":"G1PPRL_ADDR_BASE_FORMAT
tonyp@2717 4530 #define G1PPRL_SUM_BYTE_FORMAT(tag) " "tag": "SIZE_FORMAT
tonyp@2717 4531 #define G1PPRL_SUM_MB_FORMAT(tag) " "tag": %1.2f MB"
tonyp@2717 4532 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag)" / %1.2f %%"
tonyp@2717 4533
tonyp@2717 4534 G1PrintRegionLivenessInfoClosure::
tonyp@2717 4535 G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name)
tonyp@2717 4536 : _out(out),
tonyp@2717 4537 _total_used_bytes(0), _total_capacity_bytes(0),
tonyp@2717 4538 _total_prev_live_bytes(0), _total_next_live_bytes(0),
tonyp@2717 4539 _hum_used_bytes(0), _hum_capacity_bytes(0),
tonyp@2717 4540 _hum_prev_live_bytes(0), _hum_next_live_bytes(0) {
tonyp@2717 4541 G1CollectedHeap* g1h = G1CollectedHeap::heap();
tonyp@2717 4542 MemRegion g1_committed = g1h->g1_committed();
tonyp@2717 4543 MemRegion g1_reserved = g1h->g1_reserved();
tonyp@2717 4544 double now = os::elapsedTime();
tonyp@2717 4545
tonyp@2717 4546 // Print the header of the output.
tonyp@2717 4547 _out->cr();
tonyp@2717 4548 _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
tonyp@2717 4549 _out->print_cr(G1PPRL_LINE_PREFIX" HEAP"
tonyp@2717 4550 G1PPRL_SUM_ADDR_FORMAT("committed")
tonyp@2717 4551 G1PPRL_SUM_ADDR_FORMAT("reserved")
tonyp@2717 4552 G1PPRL_SUM_BYTE_FORMAT("region-size"),
tonyp@2717 4553 g1_committed.start(), g1_committed.end(),
tonyp@2717 4554 g1_reserved.start(), g1_reserved.end(),
tonyp@2717 4555 HeapRegion::GrainBytes);
tonyp@2717 4556 _out->print_cr(G1PPRL_LINE_PREFIX);
tonyp@2717 4557 _out->print_cr(G1PPRL_LINE_PREFIX
tonyp@2717 4558 G1PPRL_TYPE_H_FORMAT
tonyp@2717 4559 G1PPRL_ADDR_BASE_H_FORMAT
tonyp@2717 4560 G1PPRL_BYTE_H_FORMAT
tonyp@2717 4561 G1PPRL_BYTE_H_FORMAT
tonyp@2717 4562 G1PPRL_BYTE_H_FORMAT
tonyp@2717 4563 G1PPRL_DOUBLE_H_FORMAT,
tonyp@2717 4564 "type", "address-range",
tonyp@2717 4565 "used", "prev-live", "next-live", "gc-eff");
tonyp@2717 4566 }
tonyp@2717 4567
tonyp@2717 4568 // It takes as a parameter a reference to one of the _hum_* fields,
tonyp@2717 4569 // deduces the corresponding value for a region in a humongous region
tonyp@2717 4570 // series (either the region size, or what's left if the _hum_* field
tonyp@2717 4571 // is < the region size), and updates the _hum_* field accordingly.
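// For example (hypothetical numbers): with HeapRegion::GrainBytes = 1M
// and _hum_used_bytes set up as 2.5M for a humongous series, three
// successive calls return 1M, 1M and 0.5M, leaving the field at 0.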
tonyp@2717 4572 size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) {
tonyp@2717 4573 size_t bytes = 0;
tonyp@2717 4574 // The > 0 check is to deal with the prev and next live bytes which
tonyp@2717 4575 // could be 0.
tonyp@2717 4576 if (*hum_bytes > 0) {
tonyp@2717 4577 bytes = MIN2((size_t) HeapRegion::GrainBytes, *hum_bytes);
tonyp@2717 4578 *hum_bytes -= bytes;
tonyp@2717 4579 }
tonyp@2717 4580 return bytes;
tonyp@2717 4581 }
tonyp@2717 4582
tonyp@2717 4583 // It deduces the values for a region in a humongous region series
tonyp@2717 4584 // from the _hum_* fields and updates those accordingly. It assumes
tonyp@2717 4585 // that the _hum_* fields have already been set up from the "starts
tonyp@2717 4586 // humongous" region and that we visit the regions in address order.
tonyp@2717 4587 void G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* used_bytes,
tonyp@2717 4588 size_t* capacity_bytes,
tonyp@2717 4589 size_t* prev_live_bytes,
tonyp@2717 4590 size_t* next_live_bytes) {
tonyp@2717 4591 assert(_hum_used_bytes > 0 && _hum_capacity_bytes > 0, "pre-condition");
tonyp@2717 4592 *used_bytes = get_hum_bytes(&_hum_used_bytes);
tonyp@2717 4593 *capacity_bytes = get_hum_bytes(&_hum_capacity_bytes);
tonyp@2717 4594 *prev_live_bytes = get_hum_bytes(&_hum_prev_live_bytes);
tonyp@2717 4595 *next_live_bytes = get_hum_bytes(&_hum_next_live_bytes);
tonyp@2717 4596 }
tonyp@2717 4597
tonyp@2717 4598 bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
tonyp@2717 4599 const char* type = "";
tonyp@2717 4600 HeapWord* bottom = r->bottom();
tonyp@2717 4601 HeapWord* end = r->end();
tonyp@2717 4602 size_t capacity_bytes = r->capacity();
tonyp@2717 4603 size_t used_bytes = r->used();
tonyp@2717 4604 size_t prev_live_bytes = r->live_bytes();
tonyp@2717 4605 size_t next_live_bytes = r->next_live_bytes();
tonyp@2717 4606 double gc_eff = r->gc_efficiency();
tonyp@2717 4607 if (r->used() == 0) {
tonyp@2717 4608 type = "FREE";
tonyp@2717 4609 } else if (r->is_survivor()) {
tonyp@2717 4610 type = "SURV";
tonyp@2717 4611 } else if (r->is_young()) {
tonyp@2717 4612 type = "EDEN";
tonyp@2717 4613 } else if (r->startsHumongous()) {
tonyp@2717 4614 type = "HUMS";
tonyp@2717 4615
tonyp@2717 4616 assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 &&
tonyp@2717 4617 _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0,
tonyp@2717 4618 "they should have been zeroed after the last time we used them");
tonyp@2717 4619 // Set up the _hum_* fields.
tonyp@2717 4620 _hum_capacity_bytes = capacity_bytes;
tonyp@2717 4621 _hum_used_bytes = used_bytes;
tonyp@2717 4622 _hum_prev_live_bytes = prev_live_bytes;
tonyp@2717 4623 _hum_next_live_bytes = next_live_bytes;
tonyp@2717 4624 get_hum_bytes(&used_bytes, &capacity_bytes,
tonyp@2717 4625 &prev_live_bytes, &next_live_bytes);
tonyp@2717 4626 end = bottom + HeapRegion::GrainWords;
tonyp@2717 4627 } else if (r->continuesHumongous()) {
tonyp@2717 4628 type = "HUMC";
tonyp@2717 4629 get_hum_bytes(&used_bytes, &capacity_bytes,
tonyp@2717 4630 &prev_live_bytes, &next_live_bytes);
tonyp@2717 4631 assert(end == bottom + HeapRegion::GrainWords, "invariant");
tonyp@2717 4632 } else {
tonyp@2717 4633 type = "OLD";
tonyp@2717 4634 }
tonyp@2717 4635
tonyp@2717 4636 _total_used_bytes += used_bytes;
tonyp@2717 4637 _total_capacity_bytes += capacity_bytes;
tonyp@2717 4638 _total_prev_live_bytes += prev_live_bytes;
tonyp@2717 4639 _total_next_live_bytes += next_live_bytes;
tonyp@2717 4640
tonyp@2717 4641 // Print a line for this particular region.
tonyp@2717 4642 _out->print_cr(G1PPRL_LINE_PREFIX
tonyp@2717 4643 G1PPRL_TYPE_FORMAT
tonyp@2717 4644 G1PPRL_ADDR_BASE_FORMAT
tonyp@2717 4645 G1PPRL_BYTE_FORMAT
tonyp@2717 4646 G1PPRL_BYTE_FORMAT
tonyp@2717 4647 G1PPRL_BYTE_FORMAT
tonyp@2717 4648 G1PPRL_DOUBLE_FORMAT,
tonyp@2717 4649 type, bottom, end,
tonyp@2717 4650 used_bytes, prev_live_bytes, next_live_bytes, gc_eff);
tonyp@2717 4651
tonyp@2717 4652 return false;
tonyp@2717 4653 }
tonyp@2717 4654
tonyp@2717 4655 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
tonyp@2717 4656 // Print the footer of the output.
tonyp@2717 4657 _out->print_cr(G1PPRL_LINE_PREFIX);
tonyp@2717 4658 _out->print_cr(G1PPRL_LINE_PREFIX
tonyp@2717 4659 " SUMMARY"
tonyp@2717 4660 G1PPRL_SUM_MB_FORMAT("capacity")
tonyp@2717 4661 G1PPRL_SUM_MB_PERC_FORMAT("used")
tonyp@2717 4662 G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
tonyp@2717 4663 G1PPRL_SUM_MB_PERC_FORMAT("next-live"),
tonyp@2717 4664 bytes_to_mb(_total_capacity_bytes),
tonyp@2717 4665 bytes_to_mb(_total_used_bytes),
tonyp@2717 4666 perc(_total_used_bytes, _total_capacity_bytes),
tonyp@2717 4667 bytes_to_mb(_total_prev_live_bytes),
tonyp@2717 4668 perc(_total_prev_live_bytes, _total_capacity_bytes),
tonyp@2717 4669 bytes_to_mb(_total_next_live_bytes),
tonyp@2717 4670 perc(_total_next_live_bytes, _total_capacity_bytes));
tonyp@2717 4671 _out->cr();
tonyp@2717 4672 }
