Thu, 07 Aug 2014 22:28:53 +0200
8054341: Remove some obsolete code in G1CollectedHeap class
Summary: Remove dead code.
Reviewed-by: stefank, brutisso
ysr@777 | 1 | /* |
drchase@6680 | 2 | * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved. |
ysr@777 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
ysr@777 | 4 | * |
ysr@777 | 5 | * This code is free software; you can redistribute it and/or modify it |
ysr@777 | 6 | * under the terms of the GNU General Public License version 2 only, as |
ysr@777 | 7 | * published by the Free Software Foundation. |
ysr@777 | 8 | * |
ysr@777 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
ysr@777 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
ysr@777 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
ysr@777 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
ysr@777 | 13 | * accompanied this code). |
ysr@777 | 14 | * |
ysr@777 | 15 | * You should have received a copy of the GNU General Public License version |
ysr@777 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
ysr@777 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
ysr@777 | 18 | * |
trims@1907 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
trims@1907 | 20 | * or visit www.oracle.com if you need additional information or have any |
trims@1907 | 21 | * questions. |
ysr@777 | 22 | * |
ysr@777 | 23 | */ |
ysr@777 | 24 | |
stefank@2314 | 25 | #include "precompiled.hpp" |
stefank@2314 | 26 | #include "classfile/symbolTable.hpp" |
stefank@6992 | 27 | #include "code/codeCache.hpp" |
tonyp@2968 | 28 | #include "gc_implementation/g1/concurrentMark.inline.hpp" |
stefank@2314 | 29 | #include "gc_implementation/g1/concurrentMarkThread.inline.hpp" |
stefank@2314 | 30 | #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" |
stefank@2314 | 31 | #include "gc_implementation/g1/g1CollectorPolicy.hpp" |
tonyp@3114 | 32 | #include "gc_implementation/g1/g1ErgoVerbose.hpp" |
brutisso@3710 | 33 | #include "gc_implementation/g1/g1Log.hpp" |
tonyp@2968 | 34 | #include "gc_implementation/g1/g1OopClosures.inline.hpp" |
stefank@2314 | 35 | #include "gc_implementation/g1/g1RemSet.hpp" |
tonyp@3416 | 36 | #include "gc_implementation/g1/heapRegion.inline.hpp" |
stefank@2314 | 37 | #include "gc_implementation/g1/heapRegionRemSet.hpp" |
stefank@2314 | 38 | #include "gc_implementation/g1/heapRegionSeq.inline.hpp" |
kamg@2445 | 39 | #include "gc_implementation/shared/vmGCOperations.hpp" |
sla@5237 | 40 | #include "gc_implementation/shared/gcTimer.hpp" |
sla@5237 | 41 | #include "gc_implementation/shared/gcTrace.hpp" |
sla@5237 | 42 | #include "gc_implementation/shared/gcTraceTime.hpp" |
stefank@6992 | 43 | #include "memory/allocation.hpp" |
stefank@2314 | 44 | #include "memory/genOopClosures.inline.hpp" |
stefank@2314 | 45 | #include "memory/referencePolicy.hpp" |
stefank@2314 | 46 | #include "memory/resourceArea.hpp" |
stefank@2314 | 47 | #include "oops/oop.inline.hpp" |
stefank@2314 | 48 | #include "runtime/handles.inline.hpp" |
stefank@2314 | 49 | #include "runtime/java.hpp" |
goetz@6912 | 50 | #include "runtime/prefetch.inline.hpp" |
zgu@3900 | 51 | #include "services/memTracker.hpp" |
ysr@777 | 52 | |
brutisso@3455 | 53 | // Concurrent marking bit map wrapper |
ysr@777 | 54 | |
johnc@4333 | 55 | CMBitMapRO::CMBitMapRO(int shifter) : |
johnc@4333 | 56 | _bm(), |
ysr@777 | 57 | _shifter(shifter) { |
johnc@4333 | 58 | _bmStartWord = 0; |
johnc@4333 | 59 | _bmWordSize = 0; |
ysr@777 | 60 | } |
ysr@777 | 61 | |
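// Note: the bit map covers the heap at object-alignment granularity: each bit
// corresponds to 2^_shifter heap words (with _shifter constructed from
// log2 of MinObjAlignment), so heapWordToOffset()/offsetToHeapWord()
// translate between heap addresses and bit indices by shifting the word
// distance from _bmStartWord.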
stefank@6992 | 62 | HeapWord* CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr, |
stefank@6992 | 63 | const HeapWord* limit) const { |
ysr@777 | 64 | // First we must round addr *up* to a possible object boundary. |
ysr@777 | 65 | addr = (HeapWord*)align_size_up((intptr_t)addr, |
ysr@777 | 66 | HeapWordSize << _shifter); |
ysr@777 | 67 | size_t addrOffset = heapWordToOffset(addr); |
tonyp@2973 | 68 | if (limit == NULL) { |
tonyp@2973 | 69 | limit = _bmStartWord + _bmWordSize; |
tonyp@2973 | 70 | } |
ysr@777 | 71 | size_t limitOffset = heapWordToOffset(limit); |
ysr@777 | 72 | size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset); |
ysr@777 | 73 | HeapWord* nextAddr = offsetToHeapWord(nextOffset); |
ysr@777 | 74 | assert(nextAddr >= addr, "get_next_one postcondition"); |
ysr@777 | 75 | assert(nextAddr == limit || isMarked(nextAddr), |
ysr@777 | 76 | "get_next_one postcondition"); |
ysr@777 | 77 | return nextAddr; |
ysr@777 | 78 | } |
ysr@777 | 79 | |
stefank@6992 | 80 | HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(const HeapWord* addr, |
stefank@6992 | 81 | const HeapWord* limit) const { |
ysr@777 | 82 | size_t addrOffset = heapWordToOffset(addr); |
tonyp@2973 | 83 | if (limit == NULL) { |
tonyp@2973 | 84 | limit = _bmStartWord + _bmWordSize; |
tonyp@2973 | 85 | } |
ysr@777 | 86 | size_t limitOffset = heapWordToOffset(limit); |
ysr@777 | 87 | size_t nextOffset = _bm.get_next_zero_offset(addrOffset, limitOffset); |
ysr@777 | 88 | HeapWord* nextAddr = offsetToHeapWord(nextOffset); |
ysr@777 | 89 | assert(nextAddr >= addr, "get_next_one postcondition"); |
ysr@777 | 90 | assert(nextAddr == limit || !isMarked(nextAddr), |
ysr@777 | 91 | "get_next_one postcondition"); |
ysr@777 | 92 | return nextAddr; |
ysr@777 | 93 | } |
ysr@777 | 94 | |
ysr@777 | 95 | int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const { |
ysr@777 | 96 | assert((diff & ((1 << _shifter) - 1)) == 0, "argument check"); |
ysr@777 | 97 | return (int) (diff >> _shifter); |
ysr@777 | 98 | } |
ysr@777 | 99 | |
ysr@777 | 100 | #ifndef PRODUCT |
johnc@4333 | 101 | bool CMBitMapRO::covers(ReservedSpace heap_rs) const { |
ysr@777 | 102 | // assert(_bm.map() == _virtual_space.low(), "map inconsistency"); |
brutisso@4061 | 103 | assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize, |
ysr@777 | 104 | "size inconsistency"); |
johnc@4333 | 105 | return _bmStartWord == (HeapWord*)(heap_rs.base()) && |
johnc@4333 | 106 | _bmWordSize == heap_rs.size()>>LogHeapWordSize; |
ysr@777 | 107 | } |
ysr@777 | 108 | #endif |
ysr@777 | 109 | |
stefank@4904 | 110 | void CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const { |
stefank@4904 | 111 | _bm.print_on_error(st, prefix); |
stefank@4904 | 112 | } |
stefank@4904 | 113 | |
johnc@4333 | 114 | bool CMBitMap::allocate(ReservedSpace heap_rs) { |
johnc@4333 | 115 | _bmStartWord = (HeapWord*)(heap_rs.base()); |
johnc@4333 | 116 | _bmWordSize = heap_rs.size()/HeapWordSize; // heap_rs.size() is in bytes |
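  // One bit is needed for every 2^_shifter heap words, so the backing store
  // needs (_bmWordSize >> (_shifter + LogBitsPerByte)) bytes; the extra byte
  // covers any rounding remainder.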
johnc@4333 | 117 | ReservedSpace brs(ReservedSpace::allocation_align_size_up( |
johnc@4333 | 118 | (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1)); |
johnc@4333 | 119 | if (!brs.is_reserved()) { |
johnc@4333 | 120 | warning("ConcurrentMark marking bit map allocation failure"); |
johnc@4333 | 121 | return false; |
johnc@4333 | 122 | } |
johnc@4333 | 123 | MemTracker::record_virtual_memory_type((address)brs.base(), mtGC); |
johnc@4333 | 124 | // For now we'll just commit all of the bit map up front. |
johnc@4333 | 125 | // Later on we'll try to be more parsimonious with swap. |
johnc@4333 | 126 | if (!_virtual_space.initialize(brs, brs.size())) { |
johnc@4333 | 127 | warning("ConcurrentMark marking bit map backing store failure"); |
johnc@4333 | 128 | return false; |
johnc@4333 | 129 | } |
johnc@4333 | 130 | assert(_virtual_space.committed_size() == brs.size(), |
johnc@4333 | 131 | "didn't reserve backing store for all of concurrent marking bit map?"); |
tschatzl@6935 | 132 | _bm.set_map((BitMap::bm_word_t*)_virtual_space.low()); |
johnc@4333 | 133 | assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >= |
johnc@4333 | 134 | _bmWordSize, "inconsistency in bit map sizing"); |
johnc@4333 | 135 | _bm.set_size(_bmWordSize >> _shifter); |
johnc@4333 | 136 | return true; |
johnc@4333 | 137 | } |
johnc@4333 | 138 | |
ysr@777 | 139 | void CMBitMap::clearAll() { |
ysr@777 | 140 | _bm.clear(); |
ysr@777 | 141 | return; |
ysr@777 | 142 | } |
ysr@777 | 143 | |
ysr@777 | 144 | void CMBitMap::markRange(MemRegion mr) { |
ysr@777 | 145 | mr.intersection(MemRegion(_bmStartWord, _bmWordSize)); |
ysr@777 | 146 | assert(!mr.is_empty(), "unexpected empty region"); |
ysr@777 | 147 | assert((offsetToHeapWord(heapWordToOffset(mr.end())) == |
ysr@777 | 148 | ((HeapWord *) mr.end())), |
ysr@777 | 149 | "markRange memory region end is not card aligned"); |
ysr@777 | 150 | // convert address range into offset range |
ysr@777 | 151 | _bm.at_put_range(heapWordToOffset(mr.start()), |
ysr@777 | 152 | heapWordToOffset(mr.end()), true); |
ysr@777 | 153 | } |
ysr@777 | 154 | |
ysr@777 | 155 | void CMBitMap::clearRange(MemRegion mr) { |
ysr@777 | 156 | mr.intersection(MemRegion(_bmStartWord, _bmWordSize)); |
ysr@777 | 157 | assert(!mr.is_empty(), "unexpected empty region"); |
ysr@777 | 158 | // convert address range into offset range |
ysr@777 | 159 | _bm.at_put_range(heapWordToOffset(mr.start()), |
ysr@777 | 160 | heapWordToOffset(mr.end()), false); |
ysr@777 | 161 | } |
ysr@777 | 162 | |
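// Returns the first maximal run of marked bits in [addr, end_addr) as a
// MemRegion and clears that range in the bit map; if no marked word exists
// below end_addr the returned region is empty.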
ysr@777 | 163 | MemRegion CMBitMap::getAndClearMarkedRegion(HeapWord* addr, |
ysr@777 | 164 | HeapWord* end_addr) { |
ysr@777 | 165 | HeapWord* start = getNextMarkedWordAddress(addr); |
ysr@777 | 166 | start = MIN2(start, end_addr); |
ysr@777 | 167 | HeapWord* end = getNextUnmarkedWordAddress(start); |
ysr@777 | 168 | end = MIN2(end, end_addr); |
ysr@777 | 169 | assert(start <= end, "Consistency check"); |
ysr@777 | 170 | MemRegion mr(start, end); |
ysr@777 | 171 | if (!mr.is_empty()) { |
ysr@777 | 172 | clearRange(mr); |
ysr@777 | 173 | } |
ysr@777 | 174 | return mr; |
ysr@777 | 175 | } |
ysr@777 | 176 | |
ysr@777 | 177 | CMMarkStack::CMMarkStack(ConcurrentMark* cm) : |
ysr@777 | 178 | _base(NULL), _cm(cm) |
ysr@777 | 179 | #ifdef ASSERT |
ysr@777 | 180 | , _drain_in_progress(false) |
ysr@777 | 181 | , _drain_in_progress_yields(false) |
ysr@777 | 182 | #endif |
ysr@777 | 183 | {} |
ysr@777 | 184 | |
johnc@4333 | 185 | bool CMMarkStack::allocate(size_t capacity) { |
johnc@4333 | 186 | // allocate a stack of the requisite depth |
johnc@4333 | 187 | ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop))); |
johnc@4333 | 188 | if (!rs.is_reserved()) { |
johnc@4333 | 189 | warning("ConcurrentMark MarkStack allocation failure"); |
johnc@4333 | 190 | return false; |
tonyp@2973 | 191 | } |
johnc@4333 | 192 | MemTracker::record_virtual_memory_type((address)rs.base(), mtGC); |
johnc@4333 | 193 | if (!_virtual_space.initialize(rs, rs.size())) { |
johnc@4333 | 194 | warning("ConcurrentMark MarkStack backing store failure"); |
johnc@4333 | 195 | // Release the virtual memory reserved for the marking stack |
johnc@4333 | 196 | rs.release(); |
johnc@4333 | 197 | return false; |
johnc@4333 | 198 | } |
johnc@4333 | 199 | assert(_virtual_space.committed_size() == rs.size(), |
johnc@4333 | 200 | "Didn't reserve backing store for all of ConcurrentMark stack?"); |
johnc@4333 | 201 | _base = (oop*) _virtual_space.low(); |
johnc@4333 | 202 | setEmpty(); |
johnc@4333 | 203 | _capacity = (jint) capacity; |
tonyp@3416 | 204 | _saved_index = -1; |
johnc@4386 | 205 | _should_expand = false; |
ysr@777 | 206 | NOT_PRODUCT(_max_depth = 0); |
johnc@4333 | 207 | return true; |
johnc@4333 | 208 | } |
johnc@4333 | 209 | |
johnc@4333 | 210 | void CMMarkStack::expand() { |
johnc@4333 | 211 | // Called, during remark, if we've overflown the marking stack during marking. |
johnc@4333 | 212 | assert(isEmpty(), "stack should have been emptied while handling overflow"); |

johnc@4333 | 213 | assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted"); |
johnc@4333 | 214 | // Clear expansion flag |
johnc@4333 | 215 | _should_expand = false; |
johnc@4333 | 216 | if (_capacity == (jint) MarkStackSizeMax) { |
johnc@4333 | 217 | if (PrintGCDetails && Verbose) { |
johnc@4333 | 218 | gclog_or_tty->print_cr(" (benign) Can't expand marking stack capacity, at max size limit"); |
johnc@4333 | 219 | } |
johnc@4333 | 220 | return; |
johnc@4333 | 221 | } |
johnc@4333 | 222 | // Double capacity if possible |
johnc@4333 | 223 | jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax); |
johnc@4333 | 224 | // Do not give up existing stack until we have managed to |
johnc@4333 | 225 | // get the double capacity that we desired. |
johnc@4333 | 226 | ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity * |
johnc@4333 | 227 | sizeof(oop))); |
johnc@4333 | 228 | if (rs.is_reserved()) { |
johnc@4333 | 229 | // Release the backing store associated with old stack |
johnc@4333 | 230 | _virtual_space.release(); |
johnc@4333 | 231 | // Reinitialize virtual space for new stack |
johnc@4333 | 232 | if (!_virtual_space.initialize(rs, rs.size())) { |
johnc@4333 | 233 | fatal("Not enough swap for expanded marking stack capacity"); |
johnc@4333 | 234 | } |
johnc@4333 | 235 | _base = (oop*)(_virtual_space.low()); |
johnc@4333 | 236 | _index = 0; |
johnc@4333 | 237 | _capacity = new_capacity; |
johnc@4333 | 238 | } else { |
johnc@4333 | 239 | if (PrintGCDetails && Verbose) { |
johnc@4333 | 240 | // Failed to double capacity, continue; |
johnc@4333 | 241 | gclog_or_tty->print(" (benign) Failed to expand marking stack capacity from " |
johnc@4333 | 242 | SIZE_FORMAT"K to " SIZE_FORMAT"K", |
johnc@4333 | 243 | _capacity / K, new_capacity / K); |
johnc@4333 | 244 | } |
johnc@4333 | 245 | } |
johnc@4333 | 246 | } |
johnc@4333 | 247 | |
johnc@4333 | 248 | void CMMarkStack::set_should_expand() { |
johnc@4333 | 249 | // If we're resetting the marking state because of a |
johnc@4333 | 250 | // marking stack overflow, record that we should, if |
johnc@4333 | 251 | // possible, expand the stack. |
johnc@4333 | 252 | _should_expand = _cm->has_overflown(); |
ysr@777 | 253 | } |
ysr@777 | 254 | |
ysr@777 | 255 | CMMarkStack::~CMMarkStack() { |
tonyp@2973 | 256 | if (_base != NULL) { |
johnc@4333 | 257 | _base = NULL; |
johnc@4333 | 258 | _virtual_space.release(); |
tonyp@2973 | 259 | } |
ysr@777 | 260 | } |
ysr@777 | 261 | |
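// Lock-free push: claim a slot by advancing _index with a CAS, then store the
// entry into the claimed slot; if the CAS fails another thread won that slot,
// so retry. When the stack is full we set _overflow instead of pushing.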
ysr@777 | 262 | void CMMarkStack::par_push(oop ptr) { |
ysr@777 | 263 | while (true) { |
ysr@777 | 264 | if (isFull()) { |
ysr@777 | 265 | _overflow = true; |
ysr@777 | 266 | return; |
ysr@777 | 267 | } |
ysr@777 | 268 | // Otherwise... |
ysr@777 | 269 | jint index = _index; |
ysr@777 | 270 | jint next_index = index+1; |
ysr@777 | 271 | jint res = Atomic::cmpxchg(next_index, &_index, index); |
ysr@777 | 272 | if (res == index) { |
ysr@777 | 273 | _base[index] = ptr; |
ysr@777 | 274 | // Note that we don't maintain this atomically. We could, but it |
ysr@777 | 275 | // doesn't seem necessary. |
ysr@777 | 276 | NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index)); |
ysr@777 | 277 | return; |
ysr@777 | 278 | } |
ysr@777 | 279 | // Otherwise, we need to try again. |
ysr@777 | 280 | } |
ysr@777 | 281 | } |
ysr@777 | 282 | |
ysr@777 | 283 | void CMMarkStack::par_adjoin_arr(oop* ptr_arr, int n) { |
ysr@777 | 284 | while (true) { |
ysr@777 | 285 | if (isFull()) { |
ysr@777 | 286 | _overflow = true; |
ysr@777 | 287 | return; |
ysr@777 | 288 | } |
ysr@777 | 289 | // Otherwise... |
ysr@777 | 290 | jint index = _index; |
ysr@777 | 291 | jint next_index = index + n; |
ysr@777 | 292 | if (next_index > _capacity) { |
ysr@777 | 293 | _overflow = true; |
ysr@777 | 294 | return; |
ysr@777 | 295 | } |
ysr@777 | 296 | jint res = Atomic::cmpxchg(next_index, &_index, index); |
ysr@777 | 297 | if (res == index) { |
ysr@777 | 298 | for (int i = 0; i < n; i++) { |
johnc@4333 | 299 | int ind = index + i; |
ysr@777 | 300 | assert(ind < _capacity, "By overflow test above."); |
ysr@777 | 301 | _base[ind] = ptr_arr[i]; |
ysr@777 | 302 | } |
ysr@777 | 303 | NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index)); |
ysr@777 | 304 | return; |
ysr@777 | 305 | } |
ysr@777 | 306 | // Otherwise, we need to try again. |
ysr@777 | 307 | } |
ysr@777 | 308 | } |
ysr@777 | 309 | |
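// Unlike the CAS-based par_push()/par_adjoin_arr() above, the bulk push and
// pop below serialize on ParGCRareEvent_lock; as the lock name suggests,
// these operations are expected to be rare.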
ysr@777 | 310 | void CMMarkStack::par_push_arr(oop* ptr_arr, int n) { |
ysr@777 | 311 | MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); |
ysr@777 | 312 | jint start = _index; |
ysr@777 | 313 | jint next_index = start + n; |
ysr@777 | 314 | if (next_index > _capacity) { |
ysr@777 | 315 | _overflow = true; |
ysr@777 | 316 | return; |
ysr@777 | 317 | } |
ysr@777 | 318 | // Otherwise. |
ysr@777 | 319 | _index = next_index; |
ysr@777 | 320 | for (int i = 0; i < n; i++) { |
ysr@777 | 321 | int ind = start + i; |
tonyp@1458 | 322 | assert(ind < _capacity, "By overflow test above."); |
ysr@777 | 323 | _base[ind] = ptr_arr[i]; |
ysr@777 | 324 | } |
johnc@4333 | 325 | NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index)); |
ysr@777 | 326 | } |
ysr@777 | 327 | |
ysr@777 | 328 | bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) { |
ysr@777 | 329 | MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); |
ysr@777 | 330 | jint index = _index; |
ysr@777 | 331 | if (index == 0) { |
ysr@777 | 332 | *n = 0; |
ysr@777 | 333 | return false; |
ysr@777 | 334 | } else { |
ysr@777 | 335 | int k = MIN2(max, index); |
johnc@4333 | 336 | jint new_ind = index - k; |
ysr@777 | 337 | for (int j = 0; j < k; j++) { |
ysr@777 | 338 | ptr_arr[j] = _base[new_ind + j]; |
ysr@777 | 339 | } |
ysr@777 | 340 | _index = new_ind; |
ysr@777 | 341 | *n = k; |
ysr@777 | 342 | return true; |
ysr@777 | 343 | } |
ysr@777 | 344 | } |
ysr@777 | 345 | |
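// Drains the stack by applying cl to every popped (grey) object. If
// yield_after is true the drain may stop early at a yield point, in which
// case it returns false; otherwise it returns true once the stack is empty.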
ysr@777 | 346 | template<class OopClosureClass> |
ysr@777 | 347 | bool CMMarkStack::drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after) { |
ysr@777 | 348 | assert(!_drain_in_progress || !_drain_in_progress_yields || yield_after |
ysr@777 | 349 | || SafepointSynchronize::is_at_safepoint(), |
ysr@777 | 350 | "Drain recursion must be yield-safe."); |
ysr@777 | 351 | bool res = true; |
ysr@777 | 352 | debug_only(_drain_in_progress = true); |
ysr@777 | 353 | debug_only(_drain_in_progress_yields = yield_after); |
ysr@777 | 354 | while (!isEmpty()) { |
ysr@777 | 355 | oop newOop = pop(); |
ysr@777 | 356 | assert(G1CollectedHeap::heap()->is_in_reserved(newOop), "Bad pop"); |
ysr@777 | 357 | assert(newOop->is_oop(), "Expected an oop"); |
ysr@777 | 358 | assert(bm == NULL || bm->isMarked((HeapWord*)newOop), |
ysr@777 | 359 | "only grey objects on this stack"); |
ysr@777 | 360 | newOop->oop_iterate(cl); |
ysr@777 | 361 | if (yield_after && _cm->do_yield_check()) { |
tonyp@2973 | 362 | res = false; |
tonyp@2973 | 363 | break; |
ysr@777 | 364 | } |
ysr@777 | 365 | } |
ysr@777 | 366 | debug_only(_drain_in_progress = false); |
ysr@777 | 367 | return res; |
ysr@777 | 368 | } |
ysr@777 | 369 | |
tonyp@3416 | 370 | void CMMarkStack::note_start_of_gc() { |
tonyp@3416 | 371 | assert(_saved_index == -1, |
tonyp@3416 | 372 | "note_start_of_gc()/end_of_gc() bracketed incorrectly"); |
tonyp@3416 | 373 | _saved_index = _index; |
tonyp@3416 | 374 | } |
tonyp@3416 | 375 | |
tonyp@3416 | 376 | void CMMarkStack::note_end_of_gc() { |
tonyp@3416 | 377 | // This is intentionally a guarantee, instead of an assert. If we |
tonyp@3416 | 378 | // accidentally add something to the mark stack during GC, it |
tonyp@3416 | 379 | // will be a correctness issue so it's better if we crash. We'll |
tonyp@3416 | 380 | // only check this once per GC anyway, so it won't be a performance |
tonyp@3416 | 381 | // issue in any way. |
tonyp@3416 | 382 | guarantee(_saved_index == _index, |
tonyp@3416 | 383 | err_msg("saved index: %d index: %d", _saved_index, _index)); |
tonyp@3416 | 384 | _saved_index = -1; |
tonyp@3416 | 385 | } |
tonyp@3416 | 386 | |
ysr@777 | 387 | void CMMarkStack::oops_do(OopClosure* f) { |
tonyp@3416 | 388 | assert(_saved_index == _index, |
tonyp@3416 | 389 | err_msg("saved index: %d index: %d", _saved_index, _index)); |
tonyp@3416 | 390 | for (int i = 0; i < _index; i += 1) { |
ysr@777 | 391 | f->do_oop(&_base[i]); |
ysr@777 | 392 | } |
ysr@777 | 393 | } |
ysr@777 | 394 | |
ysr@777 | 395 | bool ConcurrentMark::not_yet_marked(oop obj) const { |
coleenp@4037 | 396 | return _g1h->is_obj_ill(obj); |
ysr@777 | 397 | } |
ysr@777 | 398 | |
tonyp@3464 | 399 | CMRootRegions::CMRootRegions() : |
tonyp@3464 | 400 | _young_list(NULL), _cm(NULL), _scan_in_progress(false), |
tonyp@3464 | 401 | _should_abort(false), _next_survivor(NULL) { } |
tonyp@3464 | 402 | |
tonyp@3464 | 403 | void CMRootRegions::init(G1CollectedHeap* g1h, ConcurrentMark* cm) { |
tonyp@3464 | 404 | _young_list = g1h->young_list(); |
tonyp@3464 | 405 | _cm = cm; |
tonyp@3464 | 406 | } |
tonyp@3464 | 407 | |
tonyp@3464 | 408 | void CMRootRegions::prepare_for_scan() { |
tonyp@3464 | 409 | assert(!scan_in_progress(), "pre-condition"); |
tonyp@3464 | 410 | |
tonyp@3464 | 411 | // Currently, only survivors can be root regions. |
tonyp@3464 | 412 | assert(_next_survivor == NULL, "pre-condition"); |
tonyp@3464 | 413 | _next_survivor = _young_list->first_survivor_region(); |
tonyp@3464 | 414 | _scan_in_progress = (_next_survivor != NULL); |
tonyp@3464 | 415 | _should_abort = false; |
tonyp@3464 | 416 | } |
tonyp@3464 | 417 | |
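// Claim protocol: _next_survivor is first read without the lock; the re-read
// under RootRegionScan_lock decides which worker actually claims the region
// and advances _next_survivor (or sets it to NULL at the last survivor).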
tonyp@3464 | 418 | HeapRegion* CMRootRegions::claim_next() { |
tonyp@3464 | 419 | if (_should_abort) { |
tonyp@3464 | 420 | // If someone has set the should_abort flag, we return NULL to |
tonyp@3464 | 421 | // force the caller to bail out of their loop. |
tonyp@3464 | 422 | return NULL; |
tonyp@3464 | 423 | } |
tonyp@3464 | 424 | |
tonyp@3464 | 425 | // Currently, only survivors can be root regions. |
tonyp@3464 | 426 | HeapRegion* res = _next_survivor; |
tonyp@3464 | 427 | if (res != NULL) { |
tonyp@3464 | 428 | MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag); |
tonyp@3464 | 429 | // Read it again in case it changed while we were waiting for the lock. |
tonyp@3464 | 430 | res = _next_survivor; |
tonyp@3464 | 431 | if (res != NULL) { |
tonyp@3464 | 432 | if (res == _young_list->last_survivor_region()) { |
tonyp@3464 | 433 | // We just claimed the last survivor so store NULL to indicate |
tonyp@3464 | 434 | // that we're done. |
tonyp@3464 | 435 | _next_survivor = NULL; |
tonyp@3464 | 436 | } else { |
tonyp@3464 | 437 | _next_survivor = res->get_next_young_region(); |
tonyp@3464 | 438 | } |
tonyp@3464 | 439 | } else { |
tonyp@3464 | 440 | // Someone else claimed the last survivor while we were trying |
tonyp@3464 | 441 | // to take the lock so nothing else to do. |
tonyp@3464 | 442 | } |
tonyp@3464 | 443 | } |
tonyp@3464 | 444 | assert(res == NULL || res->is_survivor(), "post-condition"); |
tonyp@3464 | 445 | |
tonyp@3464 | 446 | return res; |
tonyp@3464 | 447 | } |
tonyp@3464 | 448 | |
tonyp@3464 | 449 | void CMRootRegions::scan_finished() { |
tonyp@3464 | 450 | assert(scan_in_progress(), "pre-condition"); |
tonyp@3464 | 451 | |
tonyp@3464 | 452 | // Currently, only survivors can be root regions. |
tonyp@3464 | 453 | if (!_should_abort) { |
tonyp@3464 | 454 | assert(_next_survivor == NULL, "we should have claimed all survivors"); |
tonyp@3464 | 455 | } |
tonyp@3464 | 456 | _next_survivor = NULL; |
tonyp@3464 | 457 | |
tonyp@3464 | 458 | { |
tonyp@3464 | 459 | MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag); |
tonyp@3464 | 460 | _scan_in_progress = false; |
tonyp@3464 | 461 | RootRegionScan_lock->notify_all(); |
tonyp@3464 | 462 | } |
tonyp@3464 | 463 | } |
tonyp@3464 | 464 | |
tonyp@3464 | 465 | bool CMRootRegions::wait_until_scan_finished() { |
tonyp@3464 | 466 | if (!scan_in_progress()) return false; |
tonyp@3464 | 467 | |
tonyp@3464 | 468 | { |
tonyp@3464 | 469 | MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag); |
tonyp@3464 | 470 | while (scan_in_progress()) { |
tonyp@3464 | 471 | RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag); |
tonyp@3464 | 472 | } |
tonyp@3464 | 473 | } |
tonyp@3464 | 474 | return true; |
tonyp@3464 | 475 | } |
tonyp@3464 | 476 | |
ysr@777 | 477 | #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away |
ysr@777 | 478 | #pragma warning( disable:4355 ) // 'this' : used in base member initializer list |
ysr@777 | 479 | #endif // _MSC_VER |
ysr@777 | 480 | |
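// Roughly one marking thread for every four parallel GC threads, rounded,
// with a minimum of one.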
jmasa@3357 | 481 | uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) { |
jmasa@3357 | 482 | return MAX2((n_par_threads + 2) / 4, 1U); |
jmasa@3294 | 483 | } |
jmasa@3294 | 484 | |
johnc@4333 | 485 | ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) : |
johnc@4333 | 486 | _g1h(g1h), |
tschatzl@5697 | 487 | _markBitMap1(log2_intptr(MinObjAlignment)), |
tschatzl@5697 | 488 | _markBitMap2(log2_intptr(MinObjAlignment)), |
ysr@777 | 489 | _parallel_marking_threads(0), |
jmasa@3294 | 490 | _max_parallel_marking_threads(0), |
ysr@777 | 491 | _sleep_factor(0.0), |
ysr@777 | 492 | _marking_task_overhead(1.0), |
ysr@777 | 493 | _cleanup_sleep_factor(0.0), |
ysr@777 | 494 | _cleanup_task_overhead(1.0), |
tonyp@2472 | 495 | _cleanup_list("Cleanup List"), |
johnc@4333 | 496 | _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/), |
johnc@4333 | 497 | _card_bm((heap_rs.size() + CardTableModRefBS::card_size - 1) >> |
johnc@4333 | 498 | CardTableModRefBS::card_shift, |
johnc@4333 | 499 | false /* in_resource_area*/), |
johnc@3463 | 500 | |
ysr@777 | 501 | _prevMarkBitMap(&_markBitMap1), |
ysr@777 | 502 | _nextMarkBitMap(&_markBitMap2), |
ysr@777 | 503 | |
ysr@777 | 504 | _markStack(this), |
ysr@777 | 505 | // _finger set in set_non_marking_state |
ysr@777 | 506 | |
johnc@4173 | 507 | _max_worker_id(MAX2((uint)ParallelGCThreads, 1U)), |
ysr@777 | 508 | // _active_tasks set in set_non_marking_state |
ysr@777 | 509 | // _tasks set inside the constructor |
johnc@4173 | 510 | _task_queues(new CMTaskQueueSet((int) _max_worker_id)), |
johnc@4173 | 511 | _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)), |
ysr@777 | 512 | |
ysr@777 | 513 | _has_overflown(false), |
ysr@777 | 514 | _concurrent(false), |
tonyp@1054 | 515 | _has_aborted(false), |
brutisso@6904 | 516 | _aborted_gc_id(GCId::undefined()), |
tonyp@1054 | 517 | _restart_for_overflow(false), |
tonyp@1054 | 518 | _concurrent_marking_in_progress(false), |
ysr@777 | 519 | |
ysr@777 | 520 | // _verbose_level set below |
ysr@777 | 521 | |
ysr@777 | 522 | _init_times(), |
ysr@777 | 523 | _remark_times(), _remark_mark_times(), _remark_weak_ref_times(), |
ysr@777 | 524 | _cleanup_times(), |
ysr@777 | 525 | _total_counting_time(0.0), |
ysr@777 | 526 | _total_rs_scrub_time(0.0), |
johnc@3463 | 527 | |
johnc@3463 | 528 | _parallel_workers(NULL), |
johnc@3463 | 529 | |
johnc@3463 | 530 | _count_card_bitmaps(NULL), |
johnc@4333 | 531 | _count_marked_bytes(NULL), |
johnc@4333 | 532 | _completed_initialization(false) { |
tonyp@2973 | 533 | CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel; |
tonyp@2973 | 534 | if (verbose_level < no_verbose) { |
ysr@777 | 535 | verbose_level = no_verbose; |
tonyp@2973 | 536 | } |
tonyp@2973 | 537 | if (verbose_level > high_verbose) { |
ysr@777 | 538 | verbose_level = high_verbose; |
tonyp@2973 | 539 | } |
ysr@777 | 540 | _verbose_level = verbose_level; |
ysr@777 | 541 | |
tonyp@2973 | 542 | if (verbose_low()) { |
ysr@777 | 543 | gclog_or_tty->print_cr("[global] init, heap start = "PTR_FORMAT", " |
drchase@6680 | 544 | "heap end = " INTPTR_FORMAT, p2i(_heap_start), p2i(_heap_end)); |
tonyp@2973 | 545 | } |
ysr@777 | 546 | |
johnc@4333 | 547 | if (!_markBitMap1.allocate(heap_rs)) { |
johnc@4333 | 548 | warning("Failed to allocate first CM bit map"); |
johnc@4333 | 549 | return; |
johnc@4333 | 550 | } |
johnc@4333 | 551 | if (!_markBitMap2.allocate(heap_rs)) { |
johnc@4333 | 552 | warning("Failed to allocate second CM bit map"); |
johnc@4333 | 553 | return; |
johnc@4333 | 554 | } |
ysr@777 | 555 | |
ysr@777 | 556 | // Create & start a ConcurrentMark thread. |
ysr@1280 | 557 | _cmThread = new ConcurrentMarkThread(this); |
ysr@1280 | 558 | assert(cmThread() != NULL, "CM Thread should have been created"); |
ysr@1280 | 559 | assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm"); |
ehelin@6168 | 560 | if (_cmThread->osthread() == NULL) { |
ehelin@6168 | 561 | vm_shutdown_during_initialization("Could not create ConcurrentMarkThread"); |
ehelin@6168 | 562 | } |
ysr@1280 | 563 | |
ysr@777 | 564 | assert(CGC_lock != NULL, "Where's the CGC_lock?"); |
johnc@4333 | 565 | assert(_markBitMap1.covers(heap_rs), "_markBitMap1 inconsistency"); |
johnc@4333 | 566 | assert(_markBitMap2.covers(heap_rs), "_markBitMap2 inconsistency"); |
ysr@777 | 567 | |
ysr@777 | 568 | SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set(); |
tonyp@1717 | 569 | satb_qs.set_buffer_size(G1SATBBufferSize); |
ysr@777 | 570 | |
tonyp@3464 | 571 | _root_regions.init(_g1h, this); |
tonyp@3464 | 572 | |
jmasa@1719 | 573 | if (ConcGCThreads > ParallelGCThreads) { |
drchase@6680 | 574 | warning("Can't have more ConcGCThreads (" UINTX_FORMAT ") " |
drchase@6680 | 575 | "than ParallelGCThreads (" UINTX_FORMAT ").", |
johnc@4333 | 576 | ConcGCThreads, ParallelGCThreads); |
johnc@4333 | 577 | return; |
ysr@777 | 578 | } |
ysr@777 | 579 | if (ParallelGCThreads == 0) { |
ysr@777 | 580 | // if we are not running with any parallel GC threads we will not |
ysr@777 | 581 | // spawn any marking threads either |
jmasa@3294 | 582 | _parallel_marking_threads = 0; |
jmasa@3294 | 583 | _max_parallel_marking_threads = 0; |
jmasa@3294 | 584 | _sleep_factor = 0.0; |
jmasa@3294 | 585 | _marking_task_overhead = 1.0; |
ysr@777 | 586 | } else { |
johnc@4547 | 587 | if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) { |
johnc@4547 | 588 | // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent |
ysr@777 | 589 | // if both are set |
ysr@777 | 590 | _sleep_factor = 0.0; |
ysr@777 | 591 | _marking_task_overhead = 1.0; |
johnc@1186 | 592 | } else if (G1MarkingOverheadPercent > 0) { |
johnc@4547 | 593 | // We will calculate the number of parallel marking threads based |
johnc@4547 | 594 | // on a target overhead with respect to the soft real-time goal |
johnc@1186 | 595 | double marking_overhead = (double) G1MarkingOverheadPercent / 100.0; |
ysr@777 | 596 | double overall_cm_overhead = |
johnc@1186 | 597 | (double) MaxGCPauseMillis * marking_overhead / |
johnc@1186 | 598 | (double) GCPauseIntervalMillis; |
ysr@777 | 599 | double cpu_ratio = 1.0 / (double) os::processor_count(); |
ysr@777 | 600 | double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio); |
ysr@777 | 601 | double marking_task_overhead = |
ysr@777 | 602 | overall_cm_overhead / marking_thread_num * |
ysr@777 | 603 | (double) os::processor_count(); |
ysr@777 | 604 | double sleep_factor = |
ysr@777 | 605 | (1.0 - marking_task_overhead) / marking_task_overhead; |
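      // For example, assuming MaxGCPauseMillis=200, GCPauseIntervalMillis=1000,
      // G1MarkingOverheadPercent=10 and 8 processors: overall_cm_overhead is
      // 200 * 0.10 / 1000 = 0.02 and cpu_ratio is 1/8, so one marking thread
      // is used, marking_task_overhead is 0.02 * 8 = 0.16 and sleep_factor is
      // (1 - 0.16) / 0.16 = 5.25.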
ysr@777 | 606 | |
johnc@4547 | 607 | FLAG_SET_ERGO(uintx, ConcGCThreads, (uint) marking_thread_num); |
ysr@777 | 608 | _sleep_factor = sleep_factor; |
ysr@777 | 609 | _marking_task_overhead = marking_task_overhead; |
ysr@777 | 610 | } else { |
johnc@4547 | 611 | // Calculate the number of parallel marking threads by scaling |
johnc@4547 | 612 | // the number of parallel GC threads. |
johnc@4547 | 613 | uint marking_thread_num = scale_parallel_threads((uint) ParallelGCThreads); |
johnc@4547 | 614 | FLAG_SET_ERGO(uintx, ConcGCThreads, marking_thread_num); |
ysr@777 | 615 | _sleep_factor = 0.0; |
ysr@777 | 616 | _marking_task_overhead = 1.0; |
ysr@777 | 617 | } |
ysr@777 | 618 | |
johnc@4547 | 619 | assert(ConcGCThreads > 0, "Should have been set"); |
johnc@4547 | 620 | _parallel_marking_threads = (uint) ConcGCThreads; |
johnc@4547 | 621 | _max_parallel_marking_threads = _parallel_marking_threads; |
johnc@4547 | 622 | |
tonyp@2973 | 623 | if (parallel_marking_threads() > 1) { |
ysr@777 | 624 | _cleanup_task_overhead = 1.0; |
tonyp@2973 | 625 | } else { |
ysr@777 | 626 | _cleanup_task_overhead = marking_task_overhead(); |
tonyp@2973 | 627 | } |
ysr@777 | 628 | _cleanup_sleep_factor = |
ysr@777 | 629 | (1.0 - cleanup_task_overhead()) / cleanup_task_overhead(); |
ysr@777 | 630 | |
ysr@777 | 631 | #if 0 |
ysr@777 | 632 | gclog_or_tty->print_cr("Marking Threads %d", parallel_marking_threads()); |
ysr@777 | 633 | gclog_or_tty->print_cr("CM Marking Task Overhead %1.4lf", marking_task_overhead()); |
ysr@777 | 634 | gclog_or_tty->print_cr("CM Sleep Factor %1.4lf", sleep_factor()); |
ysr@777 | 635 | gclog_or_tty->print_cr("CL Marking Task Overhead %1.4lf", cleanup_task_overhead()); |
ysr@777 | 636 | gclog_or_tty->print_cr("CL Sleep Factor %1.4lf", cleanup_sleep_factor()); |
ysr@777 | 637 | #endif |
ysr@777 | 638 | |
tonyp@1458 | 639 | guarantee(parallel_marking_threads() > 0, "peace of mind"); |
jmasa@2188 | 640 | _parallel_workers = new FlexibleWorkGang("G1 Parallel Marking Threads", |
jmasa@3357 | 641 | _max_parallel_marking_threads, false, true); |
jmasa@2188 | 642 | if (_parallel_workers == NULL) { |
ysr@777 | 643 | vm_exit_during_initialization("Failed necessary allocation."); |
jmasa@2188 | 644 | } else { |
jmasa@2188 | 645 | _parallel_workers->initialize_workers(); |
jmasa@2188 | 646 | } |
ysr@777 | 647 | } |
ysr@777 | 648 | |
johnc@4333 | 649 | if (FLAG_IS_DEFAULT(MarkStackSize)) { |
johnc@4333 | 650 | uintx mark_stack_size = |
johnc@4333 | 651 | MIN2(MarkStackSizeMax, |
johnc@4333 | 652 | MAX2(MarkStackSize, (uintx) (parallel_marking_threads() * TASKQUEUE_SIZE))); |
johnc@4333 | 653 | // Verify that the calculated value for MarkStackSize is in range. |
johnc@4333 | 654 | // It would be nice to use the private utility routine from Arguments. |
johnc@4333 | 655 | if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) { |
johnc@4333 | 656 | warning("Invalid value calculated for MarkStackSize (" UINTX_FORMAT "): " |
johnc@4333 | 657 | "must be between " UINTX_FORMAT " and " UINTX_FORMAT, |
drchase@6680 | 658 | mark_stack_size, (uintx) 1, MarkStackSizeMax); |
johnc@4333 | 659 | return; |
johnc@4333 | 660 | } |
johnc@4333 | 661 | FLAG_SET_ERGO(uintx, MarkStackSize, mark_stack_size); |
johnc@4333 | 662 | } else { |
johnc@4333 | 663 | // Verify MarkStackSize is in range. |
johnc@4333 | 664 | if (FLAG_IS_CMDLINE(MarkStackSize)) { |
johnc@4333 | 665 | if (FLAG_IS_DEFAULT(MarkStackSizeMax)) { |
johnc@4333 | 666 | if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) { |
johnc@4333 | 667 | warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT "): " |
johnc@4333 | 668 | "must be between " UINTX_FORMAT " and " UINTX_FORMAT, |
drchase@6680 | 669 | MarkStackSize, (uintx) 1, MarkStackSizeMax); |
johnc@4333 | 670 | return; |
johnc@4333 | 671 | } |
johnc@4333 | 672 | } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) { |
johnc@4333 | 673 | if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) { |
johnc@4333 | 674 | warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT ")" |
johnc@4333 | 675 | " or for MarkStackSizeMax (" UINTX_FORMAT ")", |
johnc@4333 | 676 | MarkStackSize, MarkStackSizeMax); |
johnc@4333 | 677 | return; |
johnc@4333 | 678 | } |
johnc@4333 | 679 | } |
johnc@4333 | 680 | } |
johnc@4333 | 681 | } |
johnc@4333 | 682 | |
johnc@4333 | 683 | if (!_markStack.allocate(MarkStackSize)) { |
johnc@4333 | 684 | warning("Failed to allocate CM marking stack"); |
johnc@4333 | 685 | return; |
johnc@4333 | 686 | } |
johnc@4333 | 687 | |
johnc@4333 | 688 | _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_worker_id, mtGC); |
johnc@4333 | 689 | _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC); |
johnc@4333 | 690 | |
johnc@4333 | 691 | _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap, _max_worker_id, mtGC); |
johnc@4333 | 692 | _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_worker_id, mtGC); |
johnc@4333 | 693 | |
johnc@4333 | 694 | BitMap::idx_t card_bm_size = _card_bm.size(); |
johnc@4333 | 695 | |
johnc@4333 | 696 | // so that the assertion in MarkingTaskQueue::task_queue doesn't fail |
johnc@4333 | 697 | _active_tasks = _max_worker_id; |
johnc@4333 | 698 | |
johnc@4333 | 699 | size_t max_regions = (size_t) _g1h->max_regions(); |
johnc@4333 | 700 | for (uint i = 0; i < _max_worker_id; ++i) { |
johnc@4333 | 701 | CMTaskQueue* task_queue = new CMTaskQueue(); |
johnc@4333 | 702 | task_queue->initialize(); |
johnc@4333 | 703 | _task_queues->register_queue(i, task_queue); |
johnc@4333 | 704 | |
johnc@4333 | 705 | _count_card_bitmaps[i] = BitMap(card_bm_size, false); |
johnc@4333 | 706 | _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC); |
johnc@4333 | 707 | |
johnc@4333 | 708 | _tasks[i] = new CMTask(i, this, |
johnc@4333 | 709 | _count_marked_bytes[i], |
johnc@4333 | 710 | &_count_card_bitmaps[i], |
johnc@4333 | 711 | task_queue, _task_queues); |
johnc@4333 | 712 | |
johnc@4333 | 713 | _accum_task_vtime[i] = 0.0; |
johnc@4333 | 714 | } |
johnc@4333 | 715 | |
johnc@4333 | 716 | // Calculate the card number for the bottom of the heap. Used |
johnc@4333 | 717 | // in biasing indexes into the accounting card bitmaps. |
johnc@4333 | 718 | _heap_bottom_card_num = |
johnc@4333 | 719 | intptr_t(uintptr_t(_g1h->reserved_region().start()) >> |
johnc@4333 | 720 | CardTableModRefBS::card_shift); |
johnc@4333 | 721 | |
johnc@4333 | 722 | // Clear all the liveness counting data |
johnc@4333 | 723 | clear_all_count_data(); |
johnc@4333 | 724 | |
ysr@777 | 725 | // so that the call below can read a sensible value |
johnc@4333 | 726 | _heap_start = (HeapWord*) heap_rs.base(); |
ysr@777 | 727 | set_non_marking_state(); |
johnc@4333 | 728 | _completed_initialization = true; |
ysr@777 | 729 | } |
ysr@777 | 730 | |
ysr@777 | 731 | void ConcurrentMark::update_g1_committed(bool force) { |
ysr@777 | 732 | // If concurrent marking is not in progress, then we do not need to |
tonyp@3691 | 733 | // update _heap_end. |
tonyp@2973 | 734 | if (!concurrent_marking_in_progress() && !force) return; |
ysr@777 | 735 | |
ysr@777 | 736 | MemRegion committed = _g1h->g1_committed(); |
tonyp@1458 | 737 | assert(committed.start() == _heap_start, "start shouldn't change"); |
ysr@777 | 738 | HeapWord* new_end = committed.end(); |
ysr@777 | 739 | if (new_end > _heap_end) { |
ysr@777 | 740 | // The heap has been expanded. |
ysr@777 | 741 | |
ysr@777 | 742 | _heap_end = new_end; |
ysr@777 | 743 | } |
ysr@777 | 744 | // Notice that the heap can also shrink. However, this only happens |
ysr@777 | 745 | // during a Full GC (at least currently) and the entire marking |
ysr@777 | 746 | // phase will bail out and the task will not be restarted. So, let's |
ysr@777 | 747 | // do nothing. |
ysr@777 | 748 | } |
ysr@777 | 749 | |
ysr@777 | 750 | void ConcurrentMark::reset() { |
ysr@777 | 751 | // Starting values for these two. This should be called in a STW |
ysr@777 | 752 | // phase. CM will be notified of any future g1_committed expansions |
ysr@777 | 753 | // at the end of evacuation pauses, when tasks are |
ysr@777 | 754 | // inactive. |
ysr@777 | 755 | MemRegion committed = _g1h->g1_committed(); |
ysr@777 | 756 | _heap_start = committed.start(); |
ysr@777 | 757 | _heap_end = committed.end(); |
ysr@777 | 758 | |
tonyp@1458 | 759 | // Separated the asserts so that we know which one fires. |
tonyp@1458 | 760 | assert(_heap_start != NULL, "heap bounds should look ok"); |
tonyp@1458 | 761 | assert(_heap_end != NULL, "heap bounds should look ok"); |
tonyp@1458 | 762 | assert(_heap_start < _heap_end, "heap bounds should look ok"); |
ysr@777 | 763 | |
johnc@4386 | 764 | // Reset all the marking data structures and any necessary flags |
johnc@4386 | 765 | reset_marking_state(); |
ysr@777 | 766 | |
tonyp@2973 | 767 | if (verbose_low()) { |
ysr@777 | 768 | gclog_or_tty->print_cr("[global] resetting"); |
tonyp@2973 | 769 | } |
ysr@777 | 770 | |
ysr@777 | 771 | // We do reset all of them, since different phases will use |
ysr@777 | 772 | // different numbers of active threads. So, it's easiest to have all |
ysr@777 | 773 | // of them ready. |
johnc@4173 | 774 | for (uint i = 0; i < _max_worker_id; ++i) { |
ysr@777 | 775 | _tasks[i]->reset(_nextMarkBitMap); |
johnc@2190 | 776 | } |
ysr@777 | 777 | |
ysr@777 | 778 | // we need this to make sure that the flag is on during the evac |
ysr@777 | 779 | // pause with initial mark piggy-backed |
ysr@777 | 780 | set_concurrent_marking_in_progress(); |
ysr@777 | 781 | } |
ysr@777 | 782 | |
johnc@4386 | 783 | |
johnc@4386 | 784 | void ConcurrentMark::reset_marking_state(bool clear_overflow) { |
johnc@4386 | 785 | _markStack.set_should_expand(); |
johnc@4386 | 786 | _markStack.setEmpty(); // Also clears the _markStack overflow flag |
johnc@4386 | 787 | if (clear_overflow) { |
johnc@4386 | 788 | clear_has_overflown(); |
johnc@4386 | 789 | } else { |
johnc@4386 | 790 | assert(has_overflown(), "pre-condition"); |
johnc@4386 | 791 | } |
johnc@4386 | 792 | _finger = _heap_start; |
johnc@4386 | 793 | |
johnc@4386 | 794 | for (uint i = 0; i < _max_worker_id; ++i) { |
johnc@4386 | 795 | CMTaskQueue* queue = _task_queues->queue(i); |
johnc@4386 | 796 | queue->set_empty(); |
johnc@4386 | 797 | } |
johnc@4386 | 798 | } |
johnc@4386 | 799 | |
johnc@4788 | 800 | void ConcurrentMark::set_concurrency(uint active_tasks) { |
johnc@4173 | 801 | assert(active_tasks <= _max_worker_id, "we should not have more"); |
ysr@777 | 802 | |
ysr@777 | 803 | _active_tasks = active_tasks; |
ysr@777 | 804 | // Need to update the three data structures below according to the |
ysr@777 | 805 | // number of active threads for this phase. |
ysr@777 | 806 | _terminator = ParallelTaskTerminator((int) active_tasks, _task_queues); |
ysr@777 | 807 | _first_overflow_barrier_sync.set_n_workers((int) active_tasks); |
ysr@777 | 808 | _second_overflow_barrier_sync.set_n_workers((int) active_tasks); |
johnc@4788 | 809 | } |
johnc@4788 | 810 | |
johnc@4788 | 811 | void ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) { |
johnc@4788 | 812 | set_concurrency(active_tasks); |
ysr@777 | 813 | |
ysr@777 | 814 | _concurrent = concurrent; |
ysr@777 | 815 | // We propagate this to all tasks, not just the active ones. |
johnc@4173 | 816 | for (uint i = 0; i < _max_worker_id; ++i) |
ysr@777 | 817 | _tasks[i]->set_concurrent(concurrent); |
ysr@777 | 818 | |
ysr@777 | 819 | if (concurrent) { |
ysr@777 | 820 | set_concurrent_marking_in_progress(); |
ysr@777 | 821 | } else { |
ysr@777 | 822 | // We currently assume that the concurrent flag has been set to |
ysr@777 | 823 | // false before we start remark. At this point we should also be |
ysr@777 | 824 | // in a STW phase. |
tonyp@1458 | 825 | assert(!concurrent_marking_in_progress(), "invariant"); |
pliden@6693 | 826 | assert(out_of_regions(), |
johnc@4788 | 827 | err_msg("only way to get here: _finger: "PTR_FORMAT", _heap_end: "PTR_FORMAT, |
drchase@6680 | 828 | p2i(_finger), p2i(_heap_end))); |
ysr@777 | 829 | update_g1_committed(true); |
ysr@777 | 830 | } |
ysr@777 | 831 | } |
ysr@777 | 832 | |
ysr@777 | 833 | void ConcurrentMark::set_non_marking_state() { |
ysr@777 | 834 | // We set the global marking state to some default values when we're |
ysr@777 | 835 | // not doing marking. |
johnc@4386 | 836 | reset_marking_state(); |
ysr@777 | 837 | _active_tasks = 0; |
ysr@777 | 838 | clear_concurrent_marking_in_progress(); |
ysr@777 | 839 | } |
ysr@777 | 840 | |
ysr@777 | 841 | ConcurrentMark::~ConcurrentMark() { |
stefank@3364 | 842 | // The ConcurrentMark instance is never freed. |
stefank@3364 | 843 | ShouldNotReachHere(); |
ysr@777 | 844 | } |
ysr@777 | 845 | |
ysr@777 | 846 | void ConcurrentMark::clearNextBitmap() { |
tonyp@1794 | 847 | G1CollectedHeap* g1h = G1CollectedHeap::heap(); |
tonyp@1794 | 848 | G1CollectorPolicy* g1p = g1h->g1_policy(); |
tonyp@1794 | 849 | |
tonyp@1794 | 850 | // Make sure that the concurrent mark thread looks to still be in |
tonyp@1794 | 851 | // the current cycle. |
tonyp@1794 | 852 | guarantee(cmThread()->during_cycle(), "invariant"); |
tonyp@1794 | 853 | |
tonyp@1794 | 854 | // We are finishing up the current cycle by clearing the next |
tonyp@1794 | 855 | // marking bitmap and getting it ready for the next cycle. During |
tonyp@1794 | 856 | // this time no other cycle can start. So, let's make sure that this |
tonyp@1794 | 857 | // is the case. |
tonyp@1794 | 858 | guarantee(!g1h->mark_in_progress(), "invariant"); |
tonyp@1794 | 859 | |
tonyp@1794 | 860 | // clear the mark bitmap (no grey objects to start with). |
tonyp@1794 | 861 | // We need to do this in chunks and offer to yield in between |
tonyp@1794 | 862 | // each chunk. |
tonyp@1794 | 863 | HeapWord* start = _nextMarkBitMap->startWord(); |
tonyp@1794 | 864 | HeapWord* end = _nextMarkBitMap->endWord(); |
tonyp@1794 | 865 | HeapWord* cur = start; |
tonyp@1794 | 866 | size_t chunkSize = M; |
tonyp@1794 | 867 | while (cur < end) { |
tonyp@1794 | 868 | HeapWord* next = cur + chunkSize; |
tonyp@2973 | 869 | if (next > end) { |
tonyp@1794 | 870 | next = end; |
tonyp@2973 | 871 | } |
tonyp@1794 | 872 | MemRegion mr(cur,next); |
tonyp@1794 | 873 | _nextMarkBitMap->clearRange(mr); |
tonyp@1794 | 874 | cur = next; |
tonyp@1794 | 875 | do_yield_check(); |
tonyp@1794 | 876 | |
tonyp@1794 | 877 | // Repeat the asserts from above. We'll do them as asserts here to |
tonyp@1794 | 878 | // minimize their overhead on the product. However, we'll have |
tonyp@1794 | 879 | // them as guarantees at the beginning / end of the bitmap |
tonyp@1794 | 880 | // clearing to get some checking in the product. |
tonyp@1794 | 881 | assert(cmThread()->during_cycle(), "invariant"); |
tonyp@1794 | 882 | assert(!g1h->mark_in_progress(), "invariant"); |
tonyp@1794 | 883 | } |
tonyp@1794 | 884 | |
johnc@3463 | 885 | // Clear the liveness counting data |
johnc@3463 | 886 | clear_all_count_data(); |
johnc@3463 | 887 | |
tonyp@1794 | 888 | // Repeat the asserts from above. |
tonyp@1794 | 889 | guarantee(cmThread()->during_cycle(), "invariant"); |
tonyp@1794 | 890 | guarantee(!g1h->mark_in_progress(), "invariant"); |
ysr@777 | 891 | } |
ysr@777 | 892 | |
tschatzl@7007 | 893 | bool ConcurrentMark::nextMarkBitmapIsClear() { |
tschatzl@7007 | 894 | return _nextMarkBitMap->getNextMarkedWordAddress(_heap_start, _heap_end) == _heap_end; |
tschatzl@7007 | 895 | } |
tschatzl@7007 | 896 | |
ysr@777 | 897 | class NoteStartOfMarkHRClosure: public HeapRegionClosure { |
ysr@777 | 898 | public: |
ysr@777 | 899 | bool doHeapRegion(HeapRegion* r) { |
ysr@777 | 900 | if (!r->continuesHumongous()) { |
tonyp@3416 | 901 | r->note_start_of_marking(); |
ysr@777 | 902 | } |
ysr@777 | 903 | return false; |
ysr@777 | 904 | } |
ysr@777 | 905 | }; |
ysr@777 | 906 | |
ysr@777 | 907 | void ConcurrentMark::checkpointRootsInitialPre() { |
ysr@777 | 908 | G1CollectedHeap* g1h = G1CollectedHeap::heap(); |
ysr@777 | 909 | G1CollectorPolicy* g1p = g1h->g1_policy(); |
ysr@777 | 910 | |
ysr@777 | 911 | _has_aborted = false; |
ysr@777 | 912 | |
jcoomes@1902 | 913 | #ifndef PRODUCT |
tonyp@1479 | 914 | if (G1PrintReachableAtInitialMark) { |
tonyp@1823 | 915 | print_reachable("at-cycle-start", |
johnc@2969 | 916 | VerifyOption_G1UsePrevMarking, true /* all */); |
tonyp@1479 | 917 | } |
jcoomes@1902 | 918 | #endif |
ysr@777 | 919 | |
ysr@777 | 920 | // Initialise marking structures. This has to be done in a STW phase. |
ysr@777 | 921 | reset(); |
tonyp@3416 | 922 | |
tonyp@3416 | 923 | // For each region note start of marking. |
tonyp@3416 | 924 | NoteStartOfMarkHRClosure startcl; |
tonyp@3416 | 925 | g1h->heap_region_iterate(&startcl); |
ysr@777 | 926 | } |
ysr@777 | 927 | |
ysr@777 | 928 | |
ysr@777 | 929 | void ConcurrentMark::checkpointRootsInitialPost() { |
ysr@777 | 930 | G1CollectedHeap* g1h = G1CollectedHeap::heap(); |
ysr@777 | 931 | |
tonyp@2848 | 932 | // If we force an overflow during remark, the remark operation will |
tonyp@2848 | 933 | // actually abort and we'll restart concurrent marking. If we always |
tonyp@2848 | 934 | // force an overflow during remark we'll never actually complete the |
tonyp@2848 | 935 | // marking phase. So, we initialize this here, at the start of the |
tonyp@2848 | 936 | // cycle, so that the remaining overflow number will decrease at |
tonyp@2848 | 937 | // every remark and we'll eventually not need to cause one. |
tonyp@2848 | 938 | force_overflow_stw()->init(); |
tonyp@2848 | 939 | |
johnc@3175 | 940 | // Start Concurrent Marking weak-reference discovery. |
johnc@3175 | 941 | ReferenceProcessor* rp = g1h->ref_processor_cm(); |
johnc@3175 | 942 | // enable ("weak") refs discovery |
johnc@3175 | 943 | rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/); |
ysr@892 | 944 | rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle |
ysr@777 | 945 | |
ysr@777 | 946 | SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); |
tonyp@1752 | 947 | // This is the start of the marking cycle; we expect all |
tonyp@1752 | 948 | // threads to have SATB queues with active set to false. |
tonyp@1752 | 949 | satb_mq_set.set_active_all_threads(true, /* new active value */ |
tonyp@1752 | 950 | false /* expected_active */); |
ysr@777 | 951 | |
tonyp@3464 | 952 | _root_regions.prepare_for_scan(); |
tonyp@3464 | 953 | |
ysr@777 | 954 | // update_g1_committed() will be called at the end of an evac pause |
ysr@777 | 955 | // when marking is on. So, it's also called at the end of the |
ysr@777 | 956 | // initial-mark pause to update the heap end, if the heap expands |
ysr@777 | 957 | // during it. No need to call it here. |
ysr@777 | 958 | } |
ysr@777 | 959 | |
ysr@777 | 960 | /* |
tonyp@2848 | 961 | * Notice that in the next two methods, we actually leave the STS |
tonyp@2848 | 962 | * during the barrier sync and join it immediately afterwards. If we |
tonyp@2848 | 963 | * do not do this, the following deadlock can occur: one thread could |
tonyp@2848 | 964 | * be in the barrier sync code, waiting for the other thread to also |
tonyp@2848 | 965 | * sync up, whereas another one could be trying to yield, while also |
tonyp@2848 | 966 | * waiting for the other threads to sync up too. |
tonyp@2848 | 967 | * |
tonyp@2848 | 968 | * Note, however, that this code is also used during remark and in |
tonyp@2848 | 969 | * this case we should not attempt to leave / enter the STS, otherwise |
tonyp@2848 | 970 | * we'll either hit an assert (debug / fastdebug) or deadlock |
tonyp@2848 | 971 | * (product). So we should only leave / enter the STS if we are |
tonyp@2848 | 972 | * operating concurrently. |
tonyp@2848 | 973 | * |
tonyp@2848 | 974 | * Because the thread that does the sync barrier has left the STS, it |
tonyp@2848 | 975 | * is possible for it to be suspended for a Full GC or for an evacuation |
tonyp@2848 | 976 | * pause to occur. This is actually safe, since entering the sync |
tonyp@2848 | 977 | * barrier is one of the last things do_marking_step() does, and it |
tonyp@2848 | 978 | * doesn't manipulate any data structures afterwards. |
tonyp@2848 | 979 | */ |
ysr@777 | 980 | |
johnc@4173 | 981 | void ConcurrentMark::enter_first_sync_barrier(uint worker_id) { |
tonyp@2973 | 982 | if (verbose_low()) { |
johnc@4173 | 983 | gclog_or_tty->print_cr("[%u] entering first barrier", worker_id); |
tonyp@2973 | 984 | } |
ysr@777 | 985 | |
tonyp@2848 | 986 | if (concurrent()) { |
pliden@6906 | 987 | SuspendibleThreadSet::leave(); |
tonyp@2848 | 988 | } |
pliden@6692 | 989 | |
pliden@6692 | 990 | bool barrier_aborted = !_first_overflow_barrier_sync.enter(); |
pliden@6692 | 991 | |
tonyp@2848 | 992 | if (concurrent()) { |
pliden@6906 | 993 | SuspendibleThreadSet::join(); |
tonyp@2848 | 994 | } |
ysr@777 | 995 | // at this point everyone should have synced up and not be doing any |
ysr@777 | 996 | // more work |
ysr@777 | 997 | |
tonyp@2973 | 998 | if (verbose_low()) { |
pliden@6692 | 999 | if (barrier_aborted) { |
pliden@6692 | 1000 | gclog_or_tty->print_cr("[%u] aborted first barrier", worker_id); |
pliden@6692 | 1001 | } else { |
pliden@6692 | 1002 | gclog_or_tty->print_cr("[%u] leaving first barrier", worker_id); |
pliden@6692 | 1003 | } |
pliden@6692 | 1004 | } |
pliden@6692 | 1005 | |
pliden@6692 | 1006 | if (barrier_aborted) { |
pliden@6692 | 1007 | // If the barrier aborted we ignore the overflow condition and |
pliden@6692 | 1008 | // just abort the whole marking phase as quickly as possible. |
pliden@6692 | 1009 | return; |
tonyp@2973 | 1010 | } |
ysr@777 | 1011 | |
johnc@4788 | 1012 | // If we're executing the concurrent phase of marking, reset the marking |
johnc@4788 | 1013 | // state; otherwise the marking state is reset after reference processing, |
johnc@4788 | 1014 | // during the remark pause. |
johnc@4788 | 1015 | // If we reset here as a result of an overflow during the remark we will |
johnc@4788 | 1016 | // see assertion failures from any subsequent set_concurrency_and_phase() |
johnc@4788 | 1017 | // calls. |
johnc@4788 | 1018 | if (concurrent()) { |
johnc@4788 | 1019 | // let the task associated with worker 0 do this |
johnc@4788 | 1020 | if (worker_id == 0) { |
johnc@4788 | 1021 | // task 0 is responsible for clearing the global data structures |
johnc@4788 | 1022 | // We should be here because of an overflow. During STW we should |
johnc@4788 | 1023 | // not clear the overflow flag since we rely on it being true when |
johnc@4788 | 1024 | // we exit this method to abort the pause and restart concurrent |
johnc@4788 | 1025 | // marking. |
johnc@4788 | 1026 | reset_marking_state(true /* clear_overflow */); |
johnc@4788 | 1027 | force_overflow()->update(); |
johnc@4788 | 1028 | |
johnc@4788 | 1029 | if (G1Log::fine()) { |
brutisso@6904 | 1030 | gclog_or_tty->gclog_stamp(concurrent_gc_id()); |
johnc@4788 | 1031 | gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]"); |
johnc@4788 | 1032 | } |
ysr@777 | 1033 | } |
ysr@777 | 1034 | } |
ysr@777 | 1035 | |
ysr@777 | 1036 | // after this, each task should reset its own data structures and |
ysr@777 | 1037 | // then go into the second barrier |
ysr@777 | 1038 | } |
ysr@777 | 1039 | |
johnc@4173 | 1040 | void ConcurrentMark::enter_second_sync_barrier(uint worker_id) { |
tonyp@2973 | 1041 | if (verbose_low()) { |
johnc@4173 | 1042 | gclog_or_tty->print_cr("[%u] entering second barrier", worker_id); |
tonyp@2973 | 1043 | } |
ysr@777 | 1044 | |
tonyp@2848 | 1045 | if (concurrent()) { |
pliden@6906 | 1046 | SuspendibleThreadSet::leave(); |
tonyp@2848 | 1047 | } |
pliden@6692 | 1048 | |
pliden@6692 | 1049 | bool barrier_aborted = !_second_overflow_barrier_sync.enter(); |
pliden@6692 | 1050 | |
tonyp@2848 | 1051 | if (concurrent()) { |
pliden@6906 | 1052 | SuspendibleThreadSet::join(); |
tonyp@2848 | 1053 | } |
johnc@4788 | 1054 | // at this point everything should be re-initialized and ready to go |
ysr@777 | 1055 | |
tonyp@2973 | 1056 | if (verbose_low()) { |
pliden@6692 | 1057 | if (barrier_aborted) { |
pliden@6692 | 1058 | gclog_or_tty->print_cr("[%u] aborted second barrier", worker_id); |
pliden@6692 | 1059 | } else { |
pliden@6692 | 1060 | gclog_or_tty->print_cr("[%u] leaving second barrier", worker_id); |
pliden@6692 | 1061 | } |
tonyp@2973 | 1062 | } |
ysr@777 | 1063 | } |
ysr@777 | 1064 | |
tonyp@2848 | 1065 | #ifndef PRODUCT |
tonyp@2848 | 1066 | void ForceOverflowSettings::init() { |
tonyp@2848 | 1067 | _num_remaining = G1ConcMarkForceOverflow; |
tonyp@2848 | 1068 | _force = false; |
tonyp@2848 | 1069 | update(); |
tonyp@2848 | 1070 | } |
tonyp@2848 | 1071 | |
tonyp@2848 | 1072 | void ForceOverflowSettings::update() { |
tonyp@2848 | 1073 | if (_num_remaining > 0) { |
tonyp@2848 | 1074 | _num_remaining -= 1; |
tonyp@2848 | 1075 | _force = true; |
tonyp@2848 | 1076 | } else { |
tonyp@2848 | 1077 | _force = false; |
tonyp@2848 | 1078 | } |
tonyp@2848 | 1079 | } |
tonyp@2848 | 1080 | |
tonyp@2848 | 1081 | bool ForceOverflowSettings::should_force() { |
tonyp@2848 | 1082 | if (_force) { |
tonyp@2848 | 1083 | _force = false; |
tonyp@2848 | 1084 | return true; |
tonyp@2848 | 1085 | } else { |
tonyp@2848 | 1086 | return false; |
tonyp@2848 | 1087 | } |
tonyp@2848 | 1088 | } |
tonyp@2848 | 1089 | #endif // !PRODUCT |
tonyp@2848 | 1090 | |
ysr@777 | 1091 | class CMConcurrentMarkingTask: public AbstractGangTask { |
ysr@777 | 1092 | private: |
ysr@777 | 1093 | ConcurrentMark* _cm; |
ysr@777 | 1094 | ConcurrentMarkThread* _cmt; |
ysr@777 | 1095 | |
ysr@777 | 1096 | public: |
jmasa@3357 | 1097 | void work(uint worker_id) { |
tonyp@1458 | 1098 | assert(Thread::current()->is_ConcurrentGC_thread(), |
tonyp@1458 | 1099 | "this should only be done by a conc GC thread"); |
johnc@2316 | 1100 | ResourceMark rm; |
ysr@777 | 1101 | |
ysr@777 | 1102 | double start_vtime = os::elapsedVTime(); |
ysr@777 | 1103 | |
pliden@6906 | 1104 | SuspendibleThreadSet::join(); |
ysr@777 | 1105 | |
jmasa@3357 | 1106 | assert(worker_id < _cm->active_tasks(), "invariant"); |
jmasa@3357 | 1107 | CMTask* the_task = _cm->task(worker_id); |
ysr@777 | 1108 | the_task->record_start_time(); |
ysr@777 | 1109 | if (!_cm->has_aborted()) { |
ysr@777 | 1110 | do { |
ysr@777 | 1111 | double start_vtime_sec = os::elapsedVTime(); |
ysr@777 | 1112 | double start_time_sec = os::elapsedTime(); |
johnc@2494 | 1113 | double mark_step_duration_ms = G1ConcMarkStepDurationMillis; |
johnc@2494 | 1114 | |
johnc@2494 | 1115 | the_task->do_marking_step(mark_step_duration_ms, |
johnc@4787 | 1116 | true /* do_termination */, |
johnc@4787 | 1117 | false /* is_serial*/); |
johnc@2494 | 1118 | |
ysr@777 | 1119 | double end_time_sec = os::elapsedTime(); |
ysr@777 | 1120 | double end_vtime_sec = os::elapsedVTime(); |
ysr@777 | 1121 | double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec; |
ysr@777 | 1122 | double elapsed_time_sec = end_time_sec - start_time_sec; |
ysr@777 | 1123 | _cm->clear_has_overflown(); |
ysr@777 | 1124 | |
jmasa@3357 | 1125 | bool ret = _cm->do_yield_check(worker_id); |
ysr@777 | 1126 | |
ysr@777 | 1127 | jlong sleep_time_ms; |
ysr@777 | 1128 | if (!_cm->has_aborted() && the_task->has_aborted()) { |
ysr@777 | 1129 | sleep_time_ms = |
ysr@777 | 1130 | (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0); |
pliden@6906 | 1131 | SuspendibleThreadSet::leave(); |
ysr@777 | 1132 | os::sleep(Thread::current(), sleep_time_ms, false); |
pliden@6906 | 1133 | SuspendibleThreadSet::join(); |
ysr@777 | 1134 | } |
ysr@777 | 1135 | double end_time2_sec = os::elapsedTime(); |
ysr@777 | 1136 | double elapsed_time2_sec = end_time2_sec - start_time_sec; |
ysr@777 | 1137 | |
ysr@777 | 1138 | #if 0 |
ysr@777 | 1139 | gclog_or_tty->print_cr("CM: elapsed %1.4lf ms, sleep %1.4lf ms, " |
ysr@777 | 1140 | "overhead %1.4lf", |
ysr@777 | 1141 | elapsed_vtime_sec * 1000.0, (double) sleep_time_ms, |
ysr@777 | 1142 | the_task->conc_overhead(os::elapsedTime()) * 8.0); |
ysr@777 | 1143 | gclog_or_tty->print_cr("elapsed time %1.4lf ms, time 2: %1.4lf ms", |
ysr@777 | 1144 | elapsed_time_sec * 1000.0, elapsed_time2_sec * 1000.0); |
ysr@777 | 1145 | #endif |
ysr@777 | 1146 | } while (!_cm->has_aborted() && the_task->has_aborted()); |
ysr@777 | 1147 | } |
ysr@777 | 1148 | the_task->record_end_time(); |
tonyp@1458 | 1149 | guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant"); |
ysr@777 | 1150 | |
pliden@6906 | 1151 | SuspendibleThreadSet::leave(); |
ysr@777 | 1152 | |
ysr@777 | 1153 | double end_vtime = os::elapsedVTime(); |
jmasa@3357 | 1154 | _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime); |
ysr@777 | 1155 | } |
ysr@777 | 1156 | |
ysr@777 | 1157 | CMConcurrentMarkingTask(ConcurrentMark* cm, |
ysr@777 | 1158 | ConcurrentMarkThread* cmt) : |
ysr@777 | 1159 | AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { } |
ysr@777 | 1160 | |
ysr@777 | 1161 | ~CMConcurrentMarkingTask() { } |
ysr@777 | 1162 | }; |
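// Each concurrent marking worker above runs do_marking_step() in a loop:
// every step is bounded by G1ConcMarkStepDurationMillis, after which the
// worker performs a yield check and, if its step aborted while marking as a
// whole is still in progress, sleeps for a time proportional to the step's
// vtime (scaled by _cm->sleep_factor()) outside the suspendible thread set
// before retrying. The loop ends when either the task completes or the
// whole marking cycle has been aborted.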
ysr@777 | 1163 | |
jmasa@3294 | 1164 | // Calculates the number of active workers for a concurrent |
jmasa@3294 | 1165 | // phase. |
jmasa@3357 | 1166 | uint ConcurrentMark::calc_parallel_marking_threads() { |
johnc@3338 | 1167 | if (G1CollectedHeap::use_parallel_gc_threads()) { |
jmasa@3357 | 1168 | uint n_conc_workers = 0; |
jmasa@3294 | 1169 | if (!UseDynamicNumberOfGCThreads || |
jmasa@3294 | 1170 | (!FLAG_IS_DEFAULT(ConcGCThreads) && |
jmasa@3294 | 1171 | !ForceDynamicNumberOfGCThreads)) { |
jmasa@3294 | 1172 | n_conc_workers = max_parallel_marking_threads(); |
jmasa@3294 | 1173 | } else { |
jmasa@3294 | 1174 | n_conc_workers = |
jmasa@3294 | 1175 | AdaptiveSizePolicy::calc_default_active_workers( |
jmasa@3294 | 1176 | max_parallel_marking_threads(), |
jmasa@3294 | 1177 | 1, /* Minimum workers */ |
jmasa@3294 | 1178 | parallel_marking_threads(), |
jmasa@3294 | 1179 | Threads::number_of_non_daemon_threads()); |
jmasa@3294 | 1180 | // Don't scale down "n_conc_workers" by scale_parallel_threads() because |
jmasa@3294 | 1181 | // that scaling has already gone into "_max_parallel_marking_threads". |
jmasa@3294 | 1182 | } |
johnc@3338 | 1183 | assert(n_conc_workers > 0, "Always need at least 1"); |
johnc@3338 | 1184 | return n_conc_workers; |
jmasa@3294 | 1185 | } |
johnc@3338 | 1186 | // If we are not running with any parallel GC threads we will not |
johnc@3338 | 1187 | // have spawned any marking threads either. Hence the number of |
johnc@3338 | 1188 | // concurrent workers should be 0. |
johnc@3338 | 1189 | return 0; |
jmasa@3294 | 1190 | } |
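// In short: if dynamic GC thread counts are disabled, or ConcGCThreads was
// set explicitly (and dynamic counts are not being forced), every concurrent
// phase runs with max_parallel_marking_threads(); otherwise the active
// worker count is recomputed per phase by
// AdaptiveSizePolicy::calc_default_active_workers(), bounded between 1 and
// max_parallel_marking_threads().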
jmasa@3294 | 1191 | |
tonyp@3464 | 1192 | void ConcurrentMark::scanRootRegion(HeapRegion* hr, uint worker_id) { |
tonyp@3464 | 1193 | // Currently, only survivors can be root regions. |
tonyp@3464 | 1194 | assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant"); |
tonyp@3464 | 1195 | G1RootRegionScanClosure cl(_g1h, this, worker_id); |
tonyp@3464 | 1196 | |
tonyp@3464 | 1197 | const uintx interval = PrefetchScanIntervalInBytes; |
tonyp@3464 | 1198 | HeapWord* curr = hr->bottom(); |
tonyp@3464 | 1199 | const HeapWord* end = hr->top(); |
tonyp@3464 | 1200 | while (curr < end) { |
tonyp@3464 | 1201 | Prefetch::read(curr, interval); |
tonyp@3464 | 1202 | oop obj = oop(curr); |
tonyp@3464 | 1203 | int size = obj->oop_iterate(&cl); |
tonyp@3464 | 1204 | assert(size == obj->size(), "sanity"); |
tonyp@3464 | 1205 | curr += size; |
tonyp@3464 | 1206 | } |
tonyp@3464 | 1207 | } |
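// The loop above walks a root (survivor) region object by object: every
// object between bottom() and top() is parsable because the region's NTAMS
// is at bottom() (asserted above), so obj->oop_iterate(&cl) can visit each
// reference field in turn while Prefetch::read() warms the cache
// PrefetchScanIntervalInBytes ahead of the current scan position.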
tonyp@3464 | 1208 | |
tonyp@3464 | 1209 | class CMRootRegionScanTask : public AbstractGangTask { |
tonyp@3464 | 1210 | private: |
tonyp@3464 | 1211 | ConcurrentMark* _cm; |
tonyp@3464 | 1212 | |
tonyp@3464 | 1213 | public: |
tonyp@3464 | 1214 | CMRootRegionScanTask(ConcurrentMark* cm) : |
tonyp@3464 | 1215 | AbstractGangTask("Root Region Scan"), _cm(cm) { } |
tonyp@3464 | 1216 | |
tonyp@3464 | 1217 | void work(uint worker_id) { |
tonyp@3464 | 1218 | assert(Thread::current()->is_ConcurrentGC_thread(), |
tonyp@3464 | 1219 | "this should only be done by a conc GC thread"); |
tonyp@3464 | 1220 | |
tonyp@3464 | 1221 | CMRootRegions* root_regions = _cm->root_regions(); |
tonyp@3464 | 1222 | HeapRegion* hr = root_regions->claim_next(); |
tonyp@3464 | 1223 | while (hr != NULL) { |
tonyp@3464 | 1224 | _cm->scanRootRegion(hr, worker_id); |
tonyp@3464 | 1225 | hr = root_regions->claim_next(); |
tonyp@3464 | 1226 | } |
tonyp@3464 | 1227 | } |
tonyp@3464 | 1228 | }; |
tonyp@3464 | 1229 | |
tonyp@3464 | 1230 | void ConcurrentMark::scanRootRegions() { |
stefank@6992 | 1231 | // Start of concurrent marking. |
stefank@6992 | 1232 | ClassLoaderDataGraph::clear_claimed_marks(); |
stefank@6992 | 1233 | |
tonyp@3464 | 1234 | // scan_in_progress() will have been set to true only if there was |
tonyp@3464 | 1235 | // at least one root region to scan. So, if it's false, we |
tonyp@3464 | 1236 | // should not attempt to do any further work. |
tonyp@3464 | 1237 | if (root_regions()->scan_in_progress()) { |
tonyp@3464 | 1238 | _parallel_marking_threads = calc_parallel_marking_threads(); |
tonyp@3464 | 1239 | assert(parallel_marking_threads() <= max_parallel_marking_threads(), |
tonyp@3464 | 1240 | "Maximum number of marking threads exceeded"); |
tonyp@3464 | 1241 | uint active_workers = MAX2(1U, parallel_marking_threads()); |
tonyp@3464 | 1242 | |
tonyp@3464 | 1243 | CMRootRegionScanTask task(this); |
johnc@4549 | 1244 | if (use_parallel_marking_threads()) { |
tonyp@3464 | 1245 | _parallel_workers->set_active_workers((int) active_workers); |
tonyp@3464 | 1246 | _parallel_workers->run_task(&task); |
tonyp@3464 | 1247 | } else { |
tonyp@3464 | 1248 | task.work(0); |
tonyp@3464 | 1249 | } |
tonyp@3464 | 1250 | |
tonyp@3464 | 1251 | // It's possible that has_aborted() is true here without actually |
tonyp@3464 | 1252 | // aborting the survivor scan earlier. This is OK as it's |
tonyp@3464 | 1253 | // mainly used for sanity checking. |
tonyp@3464 | 1254 | root_regions()->scan_finished(); |
tonyp@3464 | 1255 | } |
tonyp@3464 | 1256 | } |
tonyp@3464 | 1257 | |
ysr@777 | 1258 | void ConcurrentMark::markFromRoots() { |
ysr@777 | 1259 | // we might be tempted to assert that: |
ysr@777 | 1260 | // assert(asynch == !SafepointSynchronize::is_at_safepoint(), |
ysr@777 | 1261 | // "inconsistent argument?"); |
ysr@777 | 1262 | // However that wouldn't be right, because it's possible that |
ysr@777 | 1263 | // a safepoint is indeed in progress as a younger generation |
ysr@777 | 1264 | // stop-the-world GC happens even as we mark in this generation. |
ysr@777 | 1265 | |
ysr@777 | 1266 | _restart_for_overflow = false; |
tonyp@2848 | 1267 | force_overflow_conc()->init(); |
jmasa@3294 | 1268 | |
jmasa@3294 | 1269 | // _g1h has _n_par_threads |
jmasa@3294 | 1270 | _parallel_marking_threads = calc_parallel_marking_threads(); |
jmasa@3294 | 1271 | assert(parallel_marking_threads() <= max_parallel_marking_threads(), |
jmasa@3294 | 1272 | "Maximum number of marking threads exceeded"); |
johnc@3338 | 1273 | |
jmasa@3357 | 1274 | uint active_workers = MAX2(1U, parallel_marking_threads()); |
johnc@3338 | 1275 | |
johnc@4788 | 1276 | // Parallel task terminator is set in "set_concurrency_and_phase()" |
johnc@4788 | 1277 | set_concurrency_and_phase(active_workers, true /* concurrent */); |
ysr@777 | 1278 | |
ysr@777 | 1279 | CMConcurrentMarkingTask markingTask(this, cmThread()); |
johnc@4549 | 1280 | if (use_parallel_marking_threads()) { |
johnc@3338 | 1281 | _parallel_workers->set_active_workers((int)active_workers); |
stefank@6992 | 1282 | // Don't set _n_par_threads because it affects MT in process_roots() |
johnc@3338 | 1283 | // and the decisions on that MT processing is made elsewhere. |
johnc@3338 | 1284 | assert(_parallel_workers->active_workers() > 0, "Should have been set"); |
ysr@777 | 1285 | _parallel_workers->run_task(&markingTask); |
tonyp@2973 | 1286 | } else { |
ysr@777 | 1287 | markingTask.work(0); |
tonyp@2973 | 1288 | } |
ysr@777 | 1289 | print_stats(); |
ysr@777 | 1290 | } |
ysr@777 | 1291 | |
ysr@777 | 1292 | void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) { |
ysr@777 | 1293 | // world is stopped at this checkpoint |
ysr@777 | 1294 | assert(SafepointSynchronize::is_at_safepoint(), |
ysr@777 | 1295 | "world should be stopped"); |
johnc@3175 | 1296 | |
ysr@777 | 1297 | G1CollectedHeap* g1h = G1CollectedHeap::heap(); |
ysr@777 | 1298 | |
ysr@777 | 1299 | // If a full collection has happened, we shouldn't do this. |
ysr@777 | 1300 | if (has_aborted()) { |
ysr@777 | 1301 | g1h->set_marking_complete(); // So bitmap clearing isn't confused |
ysr@777 | 1302 | return; |
ysr@777 | 1303 | } |
ysr@777 | 1304 | |
kamg@2445 | 1305 | SvcGCMarker sgcm(SvcGCMarker::OTHER); |
kamg@2445 | 1306 | |
ysr@1280 | 1307 | if (VerifyDuringGC) { |
ysr@1280 | 1308 | HandleMark hm; // handle scope |
ysr@1280 | 1309 | Universe::heap()->prepare_for_verify(); |
stefank@5018 | 1310 | Universe::verify(VerifyOption_G1UsePrevMarking, |
stefank@5018 | 1311 | " VerifyDuringGC:(before)"); |
ysr@1280 | 1312 | } |
brutisso@7005 | 1313 | g1h->check_bitmaps("Remark Start"); |
ysr@1280 | 1314 | |
ysr@777 | 1315 | G1CollectorPolicy* g1p = g1h->g1_policy(); |
ysr@777 | 1316 | g1p->record_concurrent_mark_remark_start(); |
ysr@777 | 1317 | |
ysr@777 | 1318 | double start = os::elapsedTime(); |
ysr@777 | 1319 | |
ysr@777 | 1320 | checkpointRootsFinalWork(); |
ysr@777 | 1321 | |
ysr@777 | 1322 | double mark_work_end = os::elapsedTime(); |
ysr@777 | 1323 | |
ysr@777 | 1324 | weakRefsWork(clear_all_soft_refs); |
ysr@777 | 1325 | |
ysr@777 | 1326 | if (has_overflown()) { |
ysr@777 | 1327 | // Oops. We overflowed. Restart concurrent marking. |
ysr@777 | 1328 | _restart_for_overflow = true; |
johnc@4789 | 1329 | if (G1TraceMarkStackOverflow) { |
johnc@4789 | 1330 | gclog_or_tty->print_cr("\nRemark led to restart for overflow."); |
johnc@4789 | 1331 | } |
johnc@4789 | 1332 | |
johnc@4789 | 1333 | // Verify the heap w.r.t. the previous marking bitmap. |
johnc@4789 | 1334 | if (VerifyDuringGC) { |
johnc@4789 | 1335 | HandleMark hm; // handle scope |
johnc@4789 | 1336 | Universe::heap()->prepare_for_verify(); |
stefank@5018 | 1337 | Universe::verify(VerifyOption_G1UsePrevMarking, |
stefank@5018 | 1338 | " VerifyDuringGC:(overflow)"); |
johnc@4789 | 1339 | } |
johnc@4789 | 1340 | |
johnc@4386 | 1341 | // Clear the marking state because we will be restarting |
johnc@4386 | 1342 | // marking due to overflowing the global mark stack. |
johnc@4386 | 1343 | reset_marking_state(); |
ysr@777 | 1344 | } else { |
johnc@3463 | 1345 | // Aggregate the per-task counting data that we have accumulated |
johnc@3463 | 1346 | // while marking. |
johnc@3463 | 1347 | aggregate_count_data(); |
johnc@3463 | 1348 | |
tonyp@2469 | 1349 | SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); |
ysr@777 | 1350 | // We're done with marking. |
tonyp@1752 | 1351 |     // This is the end of the marking cycle; we expect all
tonyp@1752 | 1352 |     // threads to have SATB queues with active set to true.
tonyp@2469 | 1353 | satb_mq_set.set_active_all_threads(false, /* new active value */ |
tonyp@2469 | 1354 | true /* expected_active */); |
tonyp@1246 | 1355 | |
tonyp@1246 | 1356 | if (VerifyDuringGC) { |
ysr@1280 | 1357 | HandleMark hm; // handle scope |
ysr@1280 | 1358 | Universe::heap()->prepare_for_verify(); |
stefank@5018 | 1359 | Universe::verify(VerifyOption_G1UseNextMarking, |
stefank@5018 | 1360 | " VerifyDuringGC:(after)"); |
tonyp@1246 | 1361 | } |
brutisso@7005 | 1362 | g1h->check_bitmaps("Remark End"); |
johnc@2494 | 1363 | assert(!restart_for_overflow(), "sanity"); |
johnc@4386 | 1364 | // Completely reset the marking state since marking completed |
johnc@4386 | 1365 | set_non_marking_state(); |
johnc@2494 | 1366 | } |
johnc@2494 | 1367 | |
johnc@4333 | 1368 | // Expand the marking stack, if we have to and if we can. |
johnc@4333 | 1369 | if (_markStack.should_expand()) { |
johnc@4333 | 1370 | _markStack.expand(); |
johnc@4333 | 1371 | } |
johnc@4333 | 1372 | |
ysr@777 | 1373 | // Statistics |
ysr@777 | 1374 | double now = os::elapsedTime(); |
ysr@777 | 1375 | _remark_mark_times.add((mark_work_end - start) * 1000.0); |
ysr@777 | 1376 | _remark_weak_ref_times.add((now - mark_work_end) * 1000.0); |
ysr@777 | 1377 | _remark_times.add((now - start) * 1000.0); |
ysr@777 | 1378 | |
ysr@777 | 1379 | g1p->record_concurrent_mark_remark_end(); |
sla@5237 | 1380 | |
sla@5237 | 1381 | G1CMIsAliveClosure is_alive(g1h); |
sla@5237 | 1382 | g1h->gc_tracer_cm()->report_object_count_after_gc(&is_alive); |
ysr@777 | 1383 | } |
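// Remark (checkpointRootsFinal) therefore has two exits: if the global mark
// stack overflowed we only reset the marking state and rely on a subsequent
// concurrent-mark-reset-for-overflow restart; otherwise we aggregate the
// per-worker liveness counts, deactivate the SATB queues and fully reset to
// the non-marking state before the cleanup pause.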
ysr@777 | 1384 | |
johnc@3731 | 1385 | // Base class of the closures that finalize and verify the |
johnc@3731 | 1386 | // liveness counting data. |
johnc@3731 | 1387 | class CMCountDataClosureBase: public HeapRegionClosure { |
johnc@3731 | 1388 | protected: |
johnc@4123 | 1389 | G1CollectedHeap* _g1h; |
ysr@777 | 1390 | ConcurrentMark* _cm; |
johnc@4123 | 1391 | CardTableModRefBS* _ct_bs; |
johnc@4123 | 1392 | |
johnc@3463 | 1393 | BitMap* _region_bm; |
johnc@3463 | 1394 | BitMap* _card_bm; |
johnc@3463 | 1395 | |
johnc@4123 | 1396 | // Takes a region that's not empty (i.e., it has at least one |
tonyp@1264 | 1397 |   // live object in it) and sets its corresponding bit on the region
tonyp@1264 | 1398 | // bitmap to 1. If the region is "starts humongous" it will also set |
tonyp@1264 | 1399 | // to 1 the bits on the region bitmap that correspond to its |
tonyp@1264 | 1400 | // associated "continues humongous" regions. |
tonyp@1264 | 1401 | void set_bit_for_region(HeapRegion* hr) { |
tonyp@1264 | 1402 | assert(!hr->continuesHumongous(), "should have filtered those out"); |
tonyp@1264 | 1403 | |
tonyp@3713 | 1404 | BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index(); |
tonyp@1264 | 1405 | if (!hr->startsHumongous()) { |
tonyp@1264 | 1406 | // Normal (non-humongous) case: just set the bit. |
tonyp@3713 | 1407 | _region_bm->par_at_put(index, true); |
tonyp@1264 | 1408 | } else { |
tonyp@1264 | 1409 | // Starts humongous case: calculate how many regions are part of |
johnc@3463 | 1410 | // this humongous region and then set the bit range. |
tonyp@3957 | 1411 | BitMap::idx_t end_index = (BitMap::idx_t) hr->last_hc_index(); |
tonyp@3713 | 1412 | _region_bm->par_at_put_range(index, end_index, true); |
tonyp@1264 | 1413 | } |
tonyp@1264 | 1414 | } |
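  // Example: for a humongous object that starts in region 10 and continues
  // through regions 11 and 12, set_bit_for_region() is called only for the
  // "starts humongous" region and marks the whole range in a single
  // par_at_put_range() call - i.e. bits 10, 11 and 12, assuming
  // last_hc_index() returns the index just past the last "continues
  // humongous" region.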
tonyp@1264 | 1415 | |
johnc@3731 | 1416 | public: |
johnc@4123 | 1417 | CMCountDataClosureBase(G1CollectedHeap* g1h, |
johnc@3731 | 1418 | BitMap* region_bm, BitMap* card_bm): |
johnc@4123 | 1419 | _g1h(g1h), _cm(g1h->concurrent_mark()), |
johnc@4123 | 1420 | _ct_bs((CardTableModRefBS*) (g1h->barrier_set())), |
johnc@4123 | 1421 | _region_bm(region_bm), _card_bm(card_bm) { } |
johnc@3731 | 1422 | }; |
johnc@3731 | 1423 | |
johnc@3731 | 1424 | // Closure that calculates the # live objects per region. Used |
johnc@3731 | 1425 | // for verification purposes during the cleanup pause. |
johnc@3731 | 1426 | class CalcLiveObjectsClosure: public CMCountDataClosureBase { |
johnc@3731 | 1427 | CMBitMapRO* _bm; |
johnc@3731 | 1428 | size_t _region_marked_bytes; |
johnc@3731 | 1429 | |
johnc@3731 | 1430 | public: |
johnc@4123 | 1431 | CalcLiveObjectsClosure(CMBitMapRO *bm, G1CollectedHeap* g1h, |
johnc@3731 | 1432 | BitMap* region_bm, BitMap* card_bm) : |
johnc@4123 | 1433 | CMCountDataClosureBase(g1h, region_bm, card_bm), |
johnc@3731 | 1434 | _bm(bm), _region_marked_bytes(0) { } |
johnc@3731 | 1435 | |
ysr@777 | 1436 | bool doHeapRegion(HeapRegion* hr) { |
ysr@777 | 1437 | |
iveresov@1074 | 1438 | if (hr->continuesHumongous()) { |
tonyp@1264 | 1439 | // We will ignore these here and process them when their |
tonyp@1264 | 1440 | // associated "starts humongous" region is processed (see |
tonyp@1264 | 1441 |       // set_bit_for_region()). Note that we cannot rely on their
tonyp@1264 | 1442 | // associated "starts humongous" region to have their bit set to |
tonyp@1264 | 1443 | // 1 since, due to the region chunking in the parallel region |
tonyp@1264 | 1444 | // iteration, a "continues humongous" region might be visited |
tonyp@1264 | 1445 | // before its associated "starts humongous". |
iveresov@1074 | 1446 | return false; |
iveresov@1074 | 1447 | } |
ysr@777 | 1448 | |
johnc@4123 | 1449 | HeapWord* ntams = hr->next_top_at_mark_start(); |
johnc@4123 | 1450 | HeapWord* start = hr->bottom(); |
johnc@4123 | 1451 | |
johnc@4123 | 1452 | assert(start <= hr->end() && start <= ntams && ntams <= hr->end(), |
johnc@3463 | 1453 | err_msg("Preconditions not met - " |
johnc@4123 | 1454 | "start: "PTR_FORMAT", ntams: "PTR_FORMAT", end: "PTR_FORMAT, |
drchase@6680 | 1455 | p2i(start), p2i(ntams), p2i(hr->end()))); |
johnc@3463 | 1456 | |
ysr@777 | 1457 | // Find the first marked object at or after "start". |
johnc@4123 | 1458 | start = _bm->getNextMarkedWordAddress(start, ntams); |
johnc@3463 | 1459 | |
ysr@777 | 1460 | size_t marked_bytes = 0; |
ysr@777 | 1461 | |
johnc@4123 | 1462 | while (start < ntams) { |
ysr@777 | 1463 | oop obj = oop(start); |
ysr@777 | 1464 | int obj_sz = obj->size(); |
johnc@4123 | 1465 | HeapWord* obj_end = start + obj_sz; |
johnc@3731 | 1466 | |
johnc@3731 | 1467 | BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start); |
johnc@4123 | 1468 | BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end); |
johnc@4123 | 1469 | |
johnc@4123 | 1470 | // Note: if we're looking at the last region in heap - obj_end |
johnc@4123 | 1471 | // could be actually just beyond the end of the heap; end_idx |
johnc@4123 | 1472 | // will then correspond to a (non-existent) card that is also |
johnc@4123 | 1473 | // just beyond the heap. |
johnc@4123 | 1474 | if (_g1h->is_in_g1_reserved(obj_end) && !_ct_bs->is_card_aligned(obj_end)) { |
johnc@4123 | 1475 | // end of object is not card aligned - increment to cover |
johnc@4123 | 1476 | // all the cards spanned by the object |
johnc@4123 | 1477 | end_idx += 1; |
johnc@4123 | 1478 | } |
johnc@4123 | 1479 | |
johnc@4123 | 1480 | // Set the bits in the card BM for the cards spanned by this object. |
johnc@4123 | 1481 | _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */); |
johnc@3731 | 1482 | |
johnc@3731 | 1483 | // Add the size of this object to the number of marked bytes. |
apetrusenko@1465 | 1484 | marked_bytes += (size_t)obj_sz * HeapWordSize; |
johnc@3463 | 1485 | |
ysr@777 | 1486 | // Find the next marked object after this one. |
johnc@4123 | 1487 | start = _bm->getNextMarkedWordAddress(obj_end, ntams); |
tonyp@2973 | 1488 | } |
johnc@3463 | 1489 | |
johnc@3463 | 1490 | // Mark the allocated-since-marking portion... |
johnc@3463 | 1491 | HeapWord* top = hr->top(); |
johnc@4123 | 1492 | if (ntams < top) { |
johnc@4123 | 1493 | BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams); |
johnc@4123 | 1494 | BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top); |
johnc@4123 | 1495 | |
johnc@4123 | 1496 | // Note: if we're looking at the last region in heap - top |
johnc@4123 | 1497 | // could be actually just beyond the end of the heap; end_idx |
johnc@4123 | 1498 | // will then correspond to a (non-existent) card that is also |
johnc@4123 | 1499 | // just beyond the heap. |
johnc@4123 | 1500 | if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) { |
johnc@4123 | 1501 | // end of object is not card aligned - increment to cover |
johnc@4123 | 1502 | // all the cards spanned by the object |
johnc@4123 | 1503 | end_idx += 1; |
johnc@4123 | 1504 | } |
johnc@4123 | 1505 | _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */); |
johnc@3463 | 1506 | |
johnc@3463 | 1507 | // This definitely means the region has live objects. |
johnc@3463 | 1508 | set_bit_for_region(hr); |
ysr@777 | 1509 | } |
ysr@777 | 1510 | |
ysr@777 | 1511 | // Update the live region bitmap. |
ysr@777 | 1512 | if (marked_bytes > 0) { |
tonyp@1264 | 1513 | set_bit_for_region(hr); |
ysr@777 | 1514 | } |
johnc@3463 | 1515 | |
johnc@3463 | 1516 | // Set the marked bytes for the current region so that |
johnc@3463 | 1517 |     // it can be queried by a calling verification routine
johnc@3463 | 1518 | _region_marked_bytes = marked_bytes; |
johnc@3463 | 1519 | |
johnc@3463 | 1520 | return false; |
johnc@3463 | 1521 | } |
johnc@3463 | 1522 | |
johnc@3463 | 1523 | size_t region_marked_bytes() const { return _region_marked_bytes; } |
johnc@3463 | 1524 | }; |
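// Worked example for the card interval arithmetic above: with the usual
// 512-byte cards, an 800-byte object that starts exactly on a card boundary
// ends 288 bytes into the following card. card_bitmap_index_for() then
// yields start_idx = i and end_idx = i + 1, and because the object's end is
// not card aligned end_idx is bumped to i + 2, so set_card_bitmap_range()
// marks both cards i and i + 1 as containing live data.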
johnc@3463 | 1525 | |
johnc@3463 | 1526 | // Heap region closure used for verifying the counting data |
johnc@3463 | 1527 | // that was accumulated concurrently and aggregated during |
johnc@3463 | 1528 | // the remark pause. This closure is applied to the heap |
johnc@3463 | 1529 | // regions during the STW cleanup pause. |
johnc@3463 | 1530 | |
johnc@3463 | 1531 | class VerifyLiveObjectDataHRClosure: public HeapRegionClosure { |
johnc@4123 | 1532 | G1CollectedHeap* _g1h; |
johnc@3463 | 1533 | ConcurrentMark* _cm; |
johnc@3463 | 1534 | CalcLiveObjectsClosure _calc_cl; |
johnc@3463 | 1535 | BitMap* _region_bm; // Region BM to be verified |
johnc@3463 | 1536 | BitMap* _card_bm; // Card BM to be verified |
johnc@3463 | 1537 | bool _verbose; // verbose output? |
johnc@3463 | 1538 | |
johnc@3463 | 1539 | BitMap* _exp_region_bm; // Expected Region BM values |
johnc@3463 | 1540 | BitMap* _exp_card_bm; // Expected card BM values |
johnc@3463 | 1541 | |
johnc@3463 | 1542 | int _failures; |
johnc@3463 | 1543 | |
johnc@3463 | 1544 | public: |
johnc@4123 | 1545 | VerifyLiveObjectDataHRClosure(G1CollectedHeap* g1h, |
johnc@3463 | 1546 | BitMap* region_bm, |
johnc@3463 | 1547 | BitMap* card_bm, |
johnc@3463 | 1548 | BitMap* exp_region_bm, |
johnc@3463 | 1549 | BitMap* exp_card_bm, |
johnc@3463 | 1550 | bool verbose) : |
johnc@4123 | 1551 | _g1h(g1h), _cm(g1h->concurrent_mark()), |
johnc@4123 | 1552 | _calc_cl(_cm->nextMarkBitMap(), g1h, exp_region_bm, exp_card_bm), |
johnc@3463 | 1553 | _region_bm(region_bm), _card_bm(card_bm), _verbose(verbose), |
johnc@3463 | 1554 | _exp_region_bm(exp_region_bm), _exp_card_bm(exp_card_bm), |
johnc@3463 | 1555 | _failures(0) { } |
johnc@3463 | 1556 | |
johnc@3463 | 1557 | int failures() const { return _failures; } |
johnc@3463 | 1558 | |
johnc@3463 | 1559 | bool doHeapRegion(HeapRegion* hr) { |
johnc@3463 | 1560 | if (hr->continuesHumongous()) { |
johnc@3463 | 1561 | // We will ignore these here and process them when their |
johnc@3463 | 1562 | // associated "starts humongous" region is processed (see |
johnc@3463 | 1563 |       // set_bit_for_region()). Note that we cannot rely on their
johnc@3463 | 1564 | // associated "starts humongous" region to have their bit set to |
johnc@3463 | 1565 | // 1 since, due to the region chunking in the parallel region |
johnc@3463 | 1566 | // iteration, a "continues humongous" region might be visited |
johnc@3463 | 1567 | // before its associated "starts humongous". |
johnc@3463 | 1568 | return false; |
johnc@3463 | 1569 | } |
johnc@3463 | 1570 | |
johnc@3463 | 1571 | int failures = 0; |
johnc@3463 | 1572 | |
johnc@3463 | 1573 | // Call the CalcLiveObjectsClosure to walk the marking bitmap for |
johnc@3463 | 1574 | // this region and set the corresponding bits in the expected region |
johnc@3463 | 1575 | // and card bitmaps. |
johnc@3463 | 1576 | bool res = _calc_cl.doHeapRegion(hr); |
johnc@3463 | 1577 | assert(res == false, "should be continuing"); |
johnc@3463 | 1578 | |
johnc@3463 | 1579 | MutexLockerEx x((_verbose ? ParGCRareEvent_lock : NULL), |
johnc@3463 | 1580 | Mutex::_no_safepoint_check_flag); |
johnc@3463 | 1581 | |
johnc@3463 | 1582 | // Verify the marked bytes for this region. |
johnc@3463 | 1583 | size_t exp_marked_bytes = _calc_cl.region_marked_bytes(); |
johnc@3463 | 1584 | size_t act_marked_bytes = hr->next_marked_bytes(); |
johnc@3463 | 1585 | |
johnc@3463 | 1586 | // We're not OK if expected marked bytes > actual marked bytes. It means |
johnc@3463 | 1587 | // we have missed accounting some objects during the actual marking. |
johnc@3463 | 1588 | if (exp_marked_bytes > act_marked_bytes) { |
johnc@3463 | 1589 | if (_verbose) { |
tonyp@3713 | 1590 | gclog_or_tty->print_cr("Region %u: marked bytes mismatch: " |
johnc@3463 | 1591 | "expected: " SIZE_FORMAT ", actual: " SIZE_FORMAT, |
johnc@3463 | 1592 | hr->hrs_index(), exp_marked_bytes, act_marked_bytes); |
johnc@3463 | 1593 | } |
johnc@3463 | 1594 | failures += 1; |
johnc@3463 | 1595 | } |
johnc@3463 | 1596 | |
johnc@3463 | 1597 | // Verify the bit, for this region, in the actual and expected |
johnc@3463 | 1598 | // (which was just calculated) region bit maps. |
johnc@3463 | 1599 | // We're not OK if the bit in the calculated expected region |
johnc@3463 | 1600 | // bitmap is set and the bit in the actual region bitmap is not. |
tonyp@3713 | 1601 | BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index(); |
johnc@3463 | 1602 | |
johnc@3463 | 1603 | bool expected = _exp_region_bm->at(index); |
johnc@3463 | 1604 | bool actual = _region_bm->at(index); |
johnc@3463 | 1605 | if (expected && !actual) { |
johnc@3463 | 1606 | if (_verbose) { |
tonyp@3713 | 1607 | gclog_or_tty->print_cr("Region %u: region bitmap mismatch: " |
tonyp@3713 | 1608 | "expected: %s, actual: %s", |
tonyp@3713 | 1609 | hr->hrs_index(), |
tonyp@3713 | 1610 | BOOL_TO_STR(expected), BOOL_TO_STR(actual)); |
johnc@3463 | 1611 | } |
johnc@3463 | 1612 | failures += 1; |
johnc@3463 | 1613 | } |
johnc@3463 | 1614 | |
johnc@3463 | 1615 | // Verify that the card bit maps for the cards spanned by the current |
johnc@3463 | 1616 | // region match. We have an error if we have a set bit in the expected |
johnc@3463 | 1617 | // bit map and the corresponding bit in the actual bitmap is not set. |
johnc@3463 | 1618 | |
johnc@3463 | 1619 | BitMap::idx_t start_idx = _cm->card_bitmap_index_for(hr->bottom()); |
johnc@3463 | 1620 | BitMap::idx_t end_idx = _cm->card_bitmap_index_for(hr->top()); |
johnc@3463 | 1621 | |
johnc@3463 | 1622 | for (BitMap::idx_t i = start_idx; i < end_idx; i+=1) { |
johnc@3463 | 1623 | expected = _exp_card_bm->at(i); |
johnc@3463 | 1624 | actual = _card_bm->at(i); |
johnc@3463 | 1625 | |
johnc@3463 | 1626 | if (expected && !actual) { |
johnc@3463 | 1627 | if (_verbose) { |
tonyp@3713 | 1628 | gclog_or_tty->print_cr("Region %u: card bitmap mismatch at " SIZE_FORMAT ": " |
tonyp@3713 | 1629 | "expected: %s, actual: %s", |
tonyp@3713 | 1630 | hr->hrs_index(), i, |
tonyp@3713 | 1631 | BOOL_TO_STR(expected), BOOL_TO_STR(actual)); |
ysr@777 | 1632 | } |
johnc@3463 | 1633 | failures += 1; |
ysr@777 | 1634 | } |
ysr@777 | 1635 | } |
ysr@777 | 1636 | |
johnc@3463 | 1637 | if (failures > 0 && _verbose) { |
johnc@3463 | 1638 | gclog_or_tty->print_cr("Region " HR_FORMAT ", ntams: " PTR_FORMAT ", " |
johnc@3463 | 1639 | "marked_bytes: calc/actual " SIZE_FORMAT "/" SIZE_FORMAT, |
drchase@6680 | 1640 | HR_FORMAT_PARAMS(hr), p2i(hr->next_top_at_mark_start()), |
johnc@3463 | 1641 | _calc_cl.region_marked_bytes(), hr->next_marked_bytes()); |
johnc@3463 | 1642 | } |
johnc@3463 | 1643 | |
johnc@3463 | 1644 | _failures += failures; |
johnc@3463 | 1645 | |
johnc@3463 | 1646 | // We could stop iteration over the heap when we |
johnc@3731 | 1647 | // find the first violating region by returning true. |
ysr@777 | 1648 | return false; |
ysr@777 | 1649 | } |
ysr@777 | 1650 | }; |
ysr@777 | 1651 | |
johnc@3463 | 1652 | class G1ParVerifyFinalCountTask: public AbstractGangTask { |
johnc@3463 | 1653 | protected: |
johnc@3463 | 1654 | G1CollectedHeap* _g1h; |
johnc@3463 | 1655 | ConcurrentMark* _cm; |
johnc@3463 | 1656 | BitMap* _actual_region_bm; |
johnc@3463 | 1657 | BitMap* _actual_card_bm; |
johnc@3463 | 1658 | |
johnc@3463 | 1659 | uint _n_workers; |
johnc@3463 | 1660 | |
johnc@3463 | 1661 | BitMap* _expected_region_bm; |
johnc@3463 | 1662 | BitMap* _expected_card_bm; |
johnc@3463 | 1663 | |
johnc@3463 | 1664 | int _failures; |
johnc@3463 | 1665 | bool _verbose; |
johnc@3463 | 1666 | |
johnc@3463 | 1667 | public: |
johnc@3463 | 1668 | G1ParVerifyFinalCountTask(G1CollectedHeap* g1h, |
johnc@3463 | 1669 | BitMap* region_bm, BitMap* card_bm, |
johnc@3463 | 1670 | BitMap* expected_region_bm, BitMap* expected_card_bm) |
johnc@3463 | 1671 | : AbstractGangTask("G1 verify final counting"), |
johnc@3463 | 1672 | _g1h(g1h), _cm(_g1h->concurrent_mark()), |
johnc@3463 | 1673 | _actual_region_bm(region_bm), _actual_card_bm(card_bm), |
johnc@3463 | 1674 | _expected_region_bm(expected_region_bm), _expected_card_bm(expected_card_bm), |
johnc@3463 | 1675 | _failures(0), _verbose(false), |
johnc@3463 | 1676 | _n_workers(0) { |
johnc@3463 | 1677 | assert(VerifyDuringGC, "don't call this otherwise"); |
johnc@3463 | 1678 | |
johnc@3463 | 1679 | // Use the value already set as the number of active threads |
johnc@3463 | 1680 | // in the call to run_task(). |
johnc@3463 | 1681 | if (G1CollectedHeap::use_parallel_gc_threads()) { |
johnc@3463 | 1682 | assert( _g1h->workers()->active_workers() > 0, |
johnc@3463 | 1683 | "Should have been previously set"); |
johnc@3463 | 1684 | _n_workers = _g1h->workers()->active_workers(); |
johnc@3463 | 1685 | } else { |
johnc@3463 | 1686 | _n_workers = 1; |
johnc@3463 | 1687 | } |
johnc@3463 | 1688 | |
johnc@3463 | 1689 | assert(_expected_card_bm->size() == _actual_card_bm->size(), "sanity"); |
johnc@3463 | 1690 | assert(_expected_region_bm->size() == _actual_region_bm->size(), "sanity"); |
johnc@3463 | 1691 | |
johnc@3463 | 1692 | _verbose = _cm->verbose_medium(); |
johnc@3463 | 1693 | } |
johnc@3463 | 1694 | |
johnc@3463 | 1695 | void work(uint worker_id) { |
johnc@3463 | 1696 | assert(worker_id < _n_workers, "invariant"); |
johnc@3463 | 1697 | |
johnc@4123 | 1698 | VerifyLiveObjectDataHRClosure verify_cl(_g1h, |
johnc@3463 | 1699 | _actual_region_bm, _actual_card_bm, |
johnc@3463 | 1700 | _expected_region_bm, |
johnc@3463 | 1701 | _expected_card_bm, |
johnc@3463 | 1702 | _verbose); |
johnc@3463 | 1703 | |
johnc@3463 | 1704 | if (G1CollectedHeap::use_parallel_gc_threads()) { |
johnc@3463 | 1705 | _g1h->heap_region_par_iterate_chunked(&verify_cl, |
johnc@3463 | 1706 | worker_id, |
johnc@3463 | 1707 | _n_workers, |
johnc@3463 | 1708 | HeapRegion::VerifyCountClaimValue); |
johnc@3463 | 1709 | } else { |
johnc@3463 | 1710 | _g1h->heap_region_iterate(&verify_cl); |
johnc@3463 | 1711 | } |
johnc@3463 | 1712 | |
johnc@3463 | 1713 | Atomic::add(verify_cl.failures(), &_failures); |
johnc@3463 | 1714 | } |
johnc@3463 | 1715 | |
johnc@3463 | 1716 | int failures() const { return _failures; } |
johnc@3463 | 1717 | }; |
johnc@3463 | 1718 | |
johnc@3731 | 1719 | // Closure that finalizes the liveness counting data. |
johnc@3731 | 1720 | // Used during the cleanup pause. |
johnc@3731 | 1721 | // Sets the bits corresponding to the interval [NTAMS, top] |
johnc@3731 | 1722 | // (which contains the implicitly live objects) in the |
johnc@3731 | 1723 | // card liveness bitmap. Also sets the bit for each region, |
johnc@3731 | 1724 | // containing live data, in the region liveness bitmap. |
johnc@3731 | 1725 | |
johnc@3731 | 1726 | class FinalCountDataUpdateClosure: public CMCountDataClosureBase { |
johnc@3463 | 1727 | public: |
johnc@4123 | 1728 | FinalCountDataUpdateClosure(G1CollectedHeap* g1h, |
johnc@3463 | 1729 | BitMap* region_bm, |
johnc@3463 | 1730 | BitMap* card_bm) : |
johnc@4123 | 1731 | CMCountDataClosureBase(g1h, region_bm, card_bm) { } |
johnc@3463 | 1732 | |
johnc@3463 | 1733 | bool doHeapRegion(HeapRegion* hr) { |
johnc@3463 | 1734 | |
johnc@3463 | 1735 | if (hr->continuesHumongous()) { |
johnc@3463 | 1736 | // We will ignore these here and process them when their |
johnc@3463 | 1737 | // associated "starts humongous" region is processed (see |
johnc@3463 | 1738 |       // set_bit_for_region()). Note that we cannot rely on their
johnc@3463 | 1739 | // associated "starts humongous" region to have their bit set to |
johnc@3463 | 1740 | // 1 since, due to the region chunking in the parallel region |
johnc@3463 | 1741 | // iteration, a "continues humongous" region might be visited |
johnc@3463 | 1742 | // before its associated "starts humongous". |
johnc@3463 | 1743 | return false; |
johnc@3463 | 1744 | } |
johnc@3463 | 1745 | |
johnc@3463 | 1746 | HeapWord* ntams = hr->next_top_at_mark_start(); |
johnc@3463 | 1747 | HeapWord* top = hr->top(); |
johnc@3463 | 1748 | |
johnc@3731 | 1749 | assert(hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions."); |
johnc@3463 | 1750 | |
johnc@3463 | 1751 | // Mark the allocated-since-marking portion... |
johnc@3463 | 1752 | if (ntams < top) { |
johnc@3463 | 1753 | // This definitely means the region has live objects. |
johnc@3463 | 1754 | set_bit_for_region(hr); |
johnc@4123 | 1755 | |
johnc@4123 | 1756 | // Now set the bits in the card bitmap for [ntams, top) |
johnc@4123 | 1757 | BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams); |
johnc@4123 | 1758 | BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top); |
johnc@4123 | 1759 | |
johnc@4123 | 1760 | // Note: if we're looking at the last region in heap - top |
johnc@4123 | 1761 | // could be actually just beyond the end of the heap; end_idx |
johnc@4123 | 1762 | // will then correspond to a (non-existent) card that is also |
johnc@4123 | 1763 | // just beyond the heap. |
johnc@4123 | 1764 | if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) { |
johnc@4123 | 1765 | // end of object is not card aligned - increment to cover |
johnc@4123 | 1766 | // all the cards spanned by the object |
johnc@4123 | 1767 | end_idx += 1; |
johnc@4123 | 1768 | } |
johnc@4123 | 1769 | |
johnc@4123 | 1770 | assert(end_idx <= _card_bm->size(), |
johnc@4123 | 1771 | err_msg("oob: end_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT, |
johnc@4123 | 1772 | end_idx, _card_bm->size())); |
johnc@4123 | 1773 | assert(start_idx < _card_bm->size(), |
johnc@4123 | 1774 | err_msg("oob: start_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT, |
johnc@4123 | 1775 | start_idx, _card_bm->size())); |
johnc@4123 | 1776 | |
johnc@4123 | 1777 | _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */); |
coleenp@4037 | 1778 | } |
johnc@3463 | 1779 | |
johnc@3463 | 1780 | // Set the bit for the region if it contains live data |
johnc@3463 | 1781 | if (hr->next_marked_bytes() > 0) { |
johnc@3463 | 1782 | set_bit_for_region(hr); |
johnc@3463 | 1783 | } |
johnc@3463 | 1784 | |
johnc@3463 | 1785 | return false; |
johnc@3463 | 1786 | } |
johnc@3463 | 1787 | }; |
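// Unlike CalcLiveObjectsClosure above, this closure does not walk the mark
// bitmap at all: the [bottom, ntams) part of each region was already covered
// by the per-worker counting data aggregated at remark, so here we only add
// the implicitly live [ntams, top) interval to the card bitmap and set the
// per-region "contains live data" bit.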
ysr@777 | 1788 | |
ysr@777 | 1789 | class G1ParFinalCountTask: public AbstractGangTask { |
ysr@777 | 1790 | protected: |
ysr@777 | 1791 | G1CollectedHeap* _g1h; |
johnc@3463 | 1792 | ConcurrentMark* _cm; |
johnc@3463 | 1793 | BitMap* _actual_region_bm; |
johnc@3463 | 1794 | BitMap* _actual_card_bm; |
johnc@3463 | 1795 | |
jmasa@3357 | 1796 | uint _n_workers; |
johnc@3463 | 1797 | |
ysr@777 | 1798 | public: |
johnc@3463 | 1799 | G1ParFinalCountTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm) |
johnc@3463 | 1800 | : AbstractGangTask("G1 final counting"), |
johnc@3463 | 1801 | _g1h(g1h), _cm(_g1h->concurrent_mark()), |
johnc@3463 | 1802 | _actual_region_bm(region_bm), _actual_card_bm(card_bm), |
johnc@3463 | 1803 | _n_workers(0) { |
jmasa@3294 | 1804 | // Use the value already set as the number of active threads |
tonyp@3714 | 1805 | // in the call to run_task(). |
jmasa@3294 | 1806 | if (G1CollectedHeap::use_parallel_gc_threads()) { |
jmasa@3294 | 1807 | assert( _g1h->workers()->active_workers() > 0, |
jmasa@3294 | 1808 | "Should have been previously set"); |
jmasa@3294 | 1809 | _n_workers = _g1h->workers()->active_workers(); |
tonyp@2973 | 1810 | } else { |
ysr@777 | 1811 | _n_workers = 1; |
tonyp@2973 | 1812 | } |
ysr@777 | 1813 | } |
ysr@777 | 1814 | |
jmasa@3357 | 1815 | void work(uint worker_id) { |
johnc@3463 | 1816 | assert(worker_id < _n_workers, "invariant"); |
johnc@3463 | 1817 | |
johnc@4123 | 1818 | FinalCountDataUpdateClosure final_update_cl(_g1h, |
johnc@3463 | 1819 | _actual_region_bm, |
johnc@3463 | 1820 | _actual_card_bm); |
johnc@3463 | 1821 | |
jmasa@2188 | 1822 | if (G1CollectedHeap::use_parallel_gc_threads()) { |
johnc@3463 | 1823 | _g1h->heap_region_par_iterate_chunked(&final_update_cl, |
johnc@3463 | 1824 | worker_id, |
johnc@3463 | 1825 | _n_workers, |
tonyp@790 | 1826 | HeapRegion::FinalCountClaimValue); |
ysr@777 | 1827 | } else { |
johnc@3463 | 1828 | _g1h->heap_region_iterate(&final_update_cl); |
ysr@777 | 1829 | } |
ysr@777 | 1830 | } |
ysr@777 | 1831 | }; |
ysr@777 | 1832 | |
ysr@777 | 1833 | class G1ParNoteEndTask; |
ysr@777 | 1834 | |
ysr@777 | 1835 | class G1NoteEndOfConcMarkClosure : public HeapRegionClosure { |
ysr@777 | 1836 | G1CollectedHeap* _g1; |
ysr@777 | 1837 | size_t _max_live_bytes; |
tonyp@3713 | 1838 | uint _regions_claimed; |
ysr@777 | 1839 | size_t _freed_bytes; |
tonyp@2493 | 1840 | FreeRegionList* _local_cleanup_list; |
brutisso@6385 | 1841 | HeapRegionSetCount _old_regions_removed; |
brutisso@6385 | 1842 | HeapRegionSetCount _humongous_regions_removed; |
tonyp@2493 | 1843 | HRRSCleanupTask* _hrrs_cleanup_task; |
ysr@777 | 1844 | double _claimed_region_time; |
ysr@777 | 1845 | double _max_region_time; |
ysr@777 | 1846 | |
ysr@777 | 1847 | public: |
ysr@777 | 1848 | G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1, |
tonyp@2493 | 1849 | FreeRegionList* local_cleanup_list, |
johnc@3292 | 1850 | HRRSCleanupTask* hrrs_cleanup_task) : |
vkempik@6552 | 1851 | _g1(g1), |
johnc@3292 | 1852 | _max_live_bytes(0), _regions_claimed(0), |
johnc@3292 | 1853 | _freed_bytes(0), |
johnc@3292 | 1854 | _claimed_region_time(0.0), _max_region_time(0.0), |
johnc@3292 | 1855 | _local_cleanup_list(local_cleanup_list), |
brutisso@6385 | 1856 | _old_regions_removed(), |
brutisso@6385 | 1857 | _humongous_regions_removed(), |
johnc@3292 | 1858 | _hrrs_cleanup_task(hrrs_cleanup_task) { } |
johnc@3292 | 1859 | |
ysr@777 | 1860 | size_t freed_bytes() { return _freed_bytes; } |
brutisso@6385 | 1861 | const HeapRegionSetCount& old_regions_removed() { return _old_regions_removed; } |
brutisso@6385 | 1862 | const HeapRegionSetCount& humongous_regions_removed() { return _humongous_regions_removed; } |
ysr@777 | 1863 | |
johnc@3292 | 1864 | bool doHeapRegion(HeapRegion *hr) { |
tonyp@3957 | 1865 | if (hr->continuesHumongous()) { |
tonyp@3957 | 1866 | return false; |
tonyp@3957 | 1867 | } |
johnc@3292 | 1868 | // We use a claim value of zero here because all regions |
johnc@3292 | 1869 | // were claimed with value 1 in the FinalCount task. |
tonyp@3957 | 1870 | _g1->reset_gc_time_stamps(hr); |
tonyp@3957 | 1871 | double start = os::elapsedTime(); |
tonyp@3957 | 1872 | _regions_claimed++; |
tonyp@3957 | 1873 | hr->note_end_of_marking(); |
tonyp@3957 | 1874 | _max_live_bytes += hr->max_live_bytes(); |
brutisso@6385 | 1875 | |
brutisso@6385 | 1876 | if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) { |
brutisso@6385 | 1877 | _freed_bytes += hr->used(); |
brutisso@6385 | 1878 | hr->set_containing_set(NULL); |
brutisso@6385 | 1879 | if (hr->isHumongous()) { |
brutisso@6385 | 1880 | assert(hr->startsHumongous(), "we should only see starts humongous"); |
brutisso@6385 | 1881 | _humongous_regions_removed.increment(1u, hr->capacity()); |
brutisso@6385 | 1882 | _g1->free_humongous_region(hr, _local_cleanup_list, true); |
brutisso@6385 | 1883 | } else { |
brutisso@6385 | 1884 | _old_regions_removed.increment(1u, hr->capacity()); |
brutisso@6385 | 1885 | _g1->free_region(hr, _local_cleanup_list, true); |
brutisso@6385 | 1886 | } |
brutisso@6385 | 1887 | } else { |
brutisso@6385 | 1888 | hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task); |
brutisso@6385 | 1889 | } |
brutisso@6385 | 1890 | |
tonyp@3957 | 1891 | double region_time = (os::elapsedTime() - start); |
tonyp@3957 | 1892 | _claimed_region_time += region_time; |
tonyp@3957 | 1893 | if (region_time > _max_region_time) { |
tonyp@3957 | 1894 | _max_region_time = region_time; |
johnc@3292 | 1895 | } |
johnc@3292 | 1896 | return false; |
johnc@3292 | 1897 | } |
ysr@777 | 1898 | |
ysr@777 | 1899 | size_t max_live_bytes() { return _max_live_bytes; } |
tonyp@3713 | 1900 | uint regions_claimed() { return _regions_claimed; } |
ysr@777 | 1901 | double claimed_region_time_sec() { return _claimed_region_time; } |
ysr@777 | 1902 | double max_region_time_sec() { return _max_region_time; } |
ysr@777 | 1903 | }; |
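// The closure above is where completely empty regions are reclaimed at
// cleanup: a non-young region with used() > 0 but no live bytes is detached
// from its containing set and freed into the worker's local cleanup list
// (humongous and old regions counted separately), while regions that still
// hold live data merely get their remembered sets queued for cleanup work.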
ysr@777 | 1904 | |
ysr@777 | 1905 | class G1ParNoteEndTask: public AbstractGangTask { |
ysr@777 | 1906 | friend class G1NoteEndOfConcMarkClosure; |
tonyp@2472 | 1907 | |
ysr@777 | 1908 | protected: |
ysr@777 | 1909 | G1CollectedHeap* _g1h; |
ysr@777 | 1910 | size_t _max_live_bytes; |
ysr@777 | 1911 | size_t _freed_bytes; |
tonyp@2472 | 1912 | FreeRegionList* _cleanup_list; |
tonyp@2472 | 1913 | |
ysr@777 | 1914 | public: |
ysr@777 | 1915 | G1ParNoteEndTask(G1CollectedHeap* g1h, |
tonyp@2472 | 1916 | FreeRegionList* cleanup_list) : |
ysr@777 | 1917 | AbstractGangTask("G1 note end"), _g1h(g1h), |
tonyp@2472 | 1918 | _max_live_bytes(0), _freed_bytes(0), _cleanup_list(cleanup_list) { } |
ysr@777 | 1919 | |
jmasa@3357 | 1920 | void work(uint worker_id) { |
ysr@777 | 1921 | double start = os::elapsedTime(); |
tonyp@2493 | 1922 | FreeRegionList local_cleanup_list("Local Cleanup List"); |
tonyp@2493 | 1923 | HRRSCleanupTask hrrs_cleanup_task; |
vkempik@6552 | 1924 | G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list, |
tonyp@2493 | 1925 | &hrrs_cleanup_task); |
jmasa@2188 | 1926 | if (G1CollectedHeap::use_parallel_gc_threads()) { |
jmasa@3357 | 1927 | _g1h->heap_region_par_iterate_chunked(&g1_note_end, worker_id, |
jmasa@3294 | 1928 | _g1h->workers()->active_workers(), |
tonyp@790 | 1929 | HeapRegion::NoteEndClaimValue); |
ysr@777 | 1930 | } else { |
ysr@777 | 1931 | _g1h->heap_region_iterate(&g1_note_end); |
ysr@777 | 1932 | } |
ysr@777 | 1933 | assert(g1_note_end.complete(), "Shouldn't have yielded!"); |
ysr@777 | 1934 | |
tonyp@2472 | 1935 | // Now update the lists |
brutisso@6385 | 1936 | _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed()); |
ysr@777 | 1937 | { |
ysr@777 | 1938 | MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); |
brutisso@6385 | 1939 | _g1h->decrement_summary_bytes(g1_note_end.freed_bytes()); |
ysr@777 | 1940 | _max_live_bytes += g1_note_end.max_live_bytes(); |
ysr@777 | 1941 | _freed_bytes += g1_note_end.freed_bytes(); |
tonyp@2472 | 1942 | |
tonyp@2975 | 1943 | // If we iterate over the global cleanup list at the end of |
tonyp@2975 | 1944 | // cleanup to do this printing we will not guarantee to only |
tonyp@2975 | 1945 | // generate output for the newly-reclaimed regions (the list |
tonyp@2975 | 1946 | // might not be empty at the beginning of cleanup; we might |
tonyp@2975 | 1947 | // still be working on its previous contents). So we do the |
tonyp@2975 | 1948 | // printing here, before we append the new regions to the global |
tonyp@2975 | 1949 | // cleanup list. |
tonyp@2975 | 1950 | |
tonyp@2975 | 1951 | G1HRPrinter* hr_printer = _g1h->hr_printer(); |
tonyp@2975 | 1952 | if (hr_printer->is_active()) { |
brutisso@6385 | 1953 | FreeRegionListIterator iter(&local_cleanup_list); |
tonyp@2975 | 1954 | while (iter.more_available()) { |
tonyp@2975 | 1955 | HeapRegion* hr = iter.get_next(); |
tonyp@2975 | 1956 | hr_printer->cleanup(hr); |
tonyp@2975 | 1957 | } |
tonyp@2975 | 1958 | } |
tonyp@2975 | 1959 | |
jwilhelm@6422 | 1960 | _cleanup_list->add_ordered(&local_cleanup_list); |
tonyp@2493 | 1961 | assert(local_cleanup_list.is_empty(), "post-condition"); |
tonyp@2493 | 1962 | |
tonyp@2493 | 1963 | HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task); |
ysr@777 | 1964 | } |
ysr@777 | 1965 | } |
ysr@777 | 1966 | size_t max_live_bytes() { return _max_live_bytes; } |
ysr@777 | 1967 | size_t freed_bytes() { return _freed_bytes; } |
ysr@777 | 1968 | }; |
ysr@777 | 1969 | |
ysr@777 | 1970 | class G1ParScrubRemSetTask: public AbstractGangTask { |
ysr@777 | 1971 | protected: |
ysr@777 | 1972 | G1RemSet* _g1rs; |
ysr@777 | 1973 | BitMap* _region_bm; |
ysr@777 | 1974 | BitMap* _card_bm; |
ysr@777 | 1975 | public: |
ysr@777 | 1976 | G1ParScrubRemSetTask(G1CollectedHeap* g1h, |
ysr@777 | 1977 | BitMap* region_bm, BitMap* card_bm) : |
ysr@777 | 1978 | AbstractGangTask("G1 ScrubRS"), _g1rs(g1h->g1_rem_set()), |
johnc@3463 | 1979 | _region_bm(region_bm), _card_bm(card_bm) { } |
ysr@777 | 1980 | |
jmasa@3357 | 1981 | void work(uint worker_id) { |
jmasa@2188 | 1982 | if (G1CollectedHeap::use_parallel_gc_threads()) { |
jmasa@3357 | 1983 | _g1rs->scrub_par(_region_bm, _card_bm, worker_id, |
tonyp@790 | 1984 | HeapRegion::ScrubRemSetClaimValue); |
ysr@777 | 1985 | } else { |
ysr@777 | 1986 | _g1rs->scrub(_region_bm, _card_bm); |
ysr@777 | 1987 | } |
ysr@777 | 1988 | } |
ysr@777 | 1989 | |
ysr@777 | 1990 | }; |
ysr@777 | 1991 | |
ysr@777 | 1992 | void ConcurrentMark::cleanup() { |
ysr@777 | 1993 | // world is stopped at this checkpoint |
ysr@777 | 1994 | assert(SafepointSynchronize::is_at_safepoint(), |
ysr@777 | 1995 | "world should be stopped"); |
ysr@777 | 1996 | G1CollectedHeap* g1h = G1CollectedHeap::heap(); |
ysr@777 | 1997 | |
ysr@777 | 1998 | // If a full collection has happened, we shouldn't do this. |
ysr@777 | 1999 | if (has_aborted()) { |
ysr@777 | 2000 | g1h->set_marking_complete(); // So bitmap clearing isn't confused |
ysr@777 | 2001 | return; |
ysr@777 | 2002 | } |
ysr@777 | 2003 | |
tonyp@2472 | 2004 | g1h->verify_region_sets_optional(); |
tonyp@2472 | 2005 | |
ysr@1280 | 2006 | if (VerifyDuringGC) { |
ysr@1280 | 2007 | HandleMark hm; // handle scope |
ysr@1280 | 2008 | Universe::heap()->prepare_for_verify(); |
stefank@5018 | 2009 | Universe::verify(VerifyOption_G1UsePrevMarking, |
stefank@5018 | 2010 | " VerifyDuringGC:(before)"); |
ysr@1280 | 2011 | } |
brutisso@7005 | 2012 | g1h->check_bitmaps("Cleanup Start"); |
ysr@1280 | 2013 | |
ysr@777 | 2014 | G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy(); |
ysr@777 | 2015 | g1p->record_concurrent_mark_cleanup_start(); |
ysr@777 | 2016 | |
ysr@777 | 2017 | double start = os::elapsedTime(); |
ysr@777 | 2018 | |
tonyp@2493 | 2019 | HeapRegionRemSet::reset_for_cleanup_tasks(); |
tonyp@2493 | 2020 | |
jmasa@3357 | 2021 | uint n_workers; |
jmasa@3294 | 2022 | |
ysr@777 | 2023 | // Do counting once more with the world stopped for good measure. |
johnc@3463 | 2024 | G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm); |
johnc@3463 | 2025 | |
jmasa@2188 | 2026 | if (G1CollectedHeap::use_parallel_gc_threads()) { |
johnc@3463 | 2027 | assert(g1h->check_heap_region_claim_values(HeapRegion::InitialClaimValue), |
tonyp@790 | 2028 | "sanity check"); |
tonyp@790 | 2029 | |
johnc@3338 | 2030 | g1h->set_par_threads(); |
johnc@3338 | 2031 | n_workers = g1h->n_par_threads(); |
jmasa@3357 | 2032 | assert(g1h->n_par_threads() == n_workers, |
johnc@3338 | 2033 | "Should not have been reset"); |
ysr@777 | 2034 | g1h->workers()->run_task(&g1_par_count_task); |
jmasa@3294 | 2035 | // Done with the parallel phase so reset to 0. |
ysr@777 | 2036 | g1h->set_par_threads(0); |
tonyp@790 | 2037 | |
johnc@3463 | 2038 | assert(g1h->check_heap_region_claim_values(HeapRegion::FinalCountClaimValue), |
tonyp@790 | 2039 | "sanity check"); |
ysr@777 | 2040 | } else { |
johnc@3338 | 2041 | n_workers = 1; |
ysr@777 | 2042 | g1_par_count_task.work(0); |
ysr@777 | 2043 | } |
ysr@777 | 2044 | |
johnc@3463 | 2045 | if (VerifyDuringGC) { |
johnc@3463 | 2046 | // Verify that the counting data accumulated during marking matches |
johnc@3463 | 2047 | // that calculated by walking the marking bitmap. |
johnc@3463 | 2048 | |
johnc@3463 | 2049 | // Bitmaps to hold expected values |
mgerdin@6977 | 2050 | BitMap expected_region_bm(_region_bm.size(), true); |
mgerdin@6977 | 2051 | BitMap expected_card_bm(_card_bm.size(), true); |
johnc@3463 | 2052 | |
johnc@3463 | 2053 | G1ParVerifyFinalCountTask g1_par_verify_task(g1h, |
johnc@3463 | 2054 | &_region_bm, |
johnc@3463 | 2055 | &_card_bm, |
johnc@3463 | 2056 | &expected_region_bm, |
johnc@3463 | 2057 | &expected_card_bm); |
johnc@3463 | 2058 | |
johnc@3463 | 2059 | if (G1CollectedHeap::use_parallel_gc_threads()) { |
johnc@3463 | 2060 | g1h->set_par_threads((int)n_workers); |
johnc@3463 | 2061 | g1h->workers()->run_task(&g1_par_verify_task); |
johnc@3463 | 2062 | // Done with the parallel phase so reset to 0. |
johnc@3463 | 2063 | g1h->set_par_threads(0); |
johnc@3463 | 2064 | |
johnc@3463 | 2065 | assert(g1h->check_heap_region_claim_values(HeapRegion::VerifyCountClaimValue), |
johnc@3463 | 2066 | "sanity check"); |
johnc@3463 | 2067 | } else { |
johnc@3463 | 2068 | g1_par_verify_task.work(0); |
johnc@3463 | 2069 | } |
johnc@3463 | 2070 | |
johnc@3463 | 2071 | guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures"); |
johnc@3463 | 2072 | } |
johnc@3463 | 2073 | |
ysr@777 | 2074 | size_t start_used_bytes = g1h->used(); |
ysr@777 | 2075 | g1h->set_marking_complete(); |
ysr@777 | 2076 | |
ysr@777 | 2077 | double count_end = os::elapsedTime(); |
ysr@777 | 2078 | double this_final_counting_time = (count_end - start); |
ysr@777 | 2079 | _total_counting_time += this_final_counting_time; |
ysr@777 | 2080 | |
tonyp@2717 | 2081 | if (G1PrintRegionLivenessInfo) { |
tonyp@2717 | 2082 | G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking"); |
tonyp@2717 | 2083 | _g1h->heap_region_iterate(&cl); |
tonyp@2717 | 2084 | } |
tonyp@2717 | 2085 | |
ysr@777 | 2086 | // Install newly created mark bitMap as "prev". |
ysr@777 | 2087 | swapMarkBitMaps(); |
ysr@777 | 2088 | |
ysr@777 | 2089 | g1h->reset_gc_time_stamp(); |
ysr@777 | 2090 | |
ysr@777 | 2091 | // Note end of marking in all heap regions. |
tonyp@2472 | 2092 | G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list); |
jmasa@2188 | 2093 | if (G1CollectedHeap::use_parallel_gc_threads()) { |
jmasa@3294 | 2094 | g1h->set_par_threads((int)n_workers); |
ysr@777 | 2095 | g1h->workers()->run_task(&g1_par_note_end_task); |
ysr@777 | 2096 | g1h->set_par_threads(0); |
tonyp@790 | 2097 | |
tonyp@790 | 2098 | assert(g1h->check_heap_region_claim_values(HeapRegion::NoteEndClaimValue), |
tonyp@790 | 2099 | "sanity check"); |
ysr@777 | 2100 | } else { |
ysr@777 | 2101 | g1_par_note_end_task.work(0); |
ysr@777 | 2102 | } |
tonyp@3957 | 2103 | g1h->check_gc_time_stamps(); |
tonyp@2472 | 2104 | |
tonyp@2472 | 2105 | if (!cleanup_list_is_empty()) { |
tonyp@2472 | 2106 | // The cleanup list is not empty, so we'll have to process it |
tonyp@2472 | 2107 | // concurrently. Notify anyone else that might be wanting free |
tonyp@2472 | 2108 | // regions that there will be more free regions coming soon. |
tonyp@2472 | 2109 | g1h->set_free_regions_coming(); |
tonyp@2472 | 2110 | } |
ysr@777 | 2111 | |
ysr@777 | 2112 |   // Scrub the remembered sets before the record_concurrent_mark_cleanup_end()
ysr@777 | 2113 |   // call below, since it affects the metric by which we sort the heap regions.
ysr@777 | 2114 | if (G1ScrubRemSets) { |
ysr@777 | 2115 | double rs_scrub_start = os::elapsedTime(); |
ysr@777 | 2116 | G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm); |
jmasa@2188 | 2117 | if (G1CollectedHeap::use_parallel_gc_threads()) { |
jmasa@3294 | 2118 | g1h->set_par_threads((int)n_workers); |
ysr@777 | 2119 | g1h->workers()->run_task(&g1_par_scrub_rs_task); |
ysr@777 | 2120 | g1h->set_par_threads(0); |
tonyp@790 | 2121 | |
tonyp@790 | 2122 | assert(g1h->check_heap_region_claim_values( |
tonyp@790 | 2123 | HeapRegion::ScrubRemSetClaimValue), |
tonyp@790 | 2124 | "sanity check"); |
ysr@777 | 2125 | } else { |
ysr@777 | 2126 | g1_par_scrub_rs_task.work(0); |
ysr@777 | 2127 | } |
ysr@777 | 2128 | |
ysr@777 | 2129 | double rs_scrub_end = os::elapsedTime(); |
ysr@777 | 2130 | double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start); |
ysr@777 | 2131 | _total_rs_scrub_time += this_rs_scrub_time; |
ysr@777 | 2132 | } |
ysr@777 | 2133 | |
ysr@777 | 2134 | // this will also free any regions totally full of garbage objects, |
ysr@777 | 2135 | // and sort the regions. |
jmasa@3294 | 2136 | g1h->g1_policy()->record_concurrent_mark_cleanup_end((int)n_workers); |
ysr@777 | 2137 | |
ysr@777 | 2138 | // Statistics. |
ysr@777 | 2139 | double end = os::elapsedTime(); |
ysr@777 | 2140 | _cleanup_times.add((end - start) * 1000.0); |
ysr@777 | 2141 | |
brutisso@3710 | 2142 | if (G1Log::fine()) { |
ysr@777 | 2143 | g1h->print_size_transition(gclog_or_tty, |
ysr@777 | 2144 | start_used_bytes, |
ysr@777 | 2145 | g1h->used(), |
ysr@777 | 2146 | g1h->capacity()); |
ysr@777 | 2147 | } |
ysr@777 | 2148 | |
johnc@3175 | 2149 | // Clean up will have freed any regions completely full of garbage. |
johnc@3175 | 2150 | // Update the soft reference policy with the new heap occupancy. |
johnc@3175 | 2151 | Universe::update_heap_info_at_gc(); |
johnc@3175 | 2152 | |
johnc@1186 | 2153 | if (VerifyDuringGC) { |
ysr@1280 | 2154 | HandleMark hm; // handle scope |
ysr@1280 | 2155 | Universe::heap()->prepare_for_verify(); |
stefank@5018 | 2156 | Universe::verify(VerifyOption_G1UsePrevMarking, |
stefank@5018 | 2157 | " VerifyDuringGC:(after)"); |
ysr@777 | 2158 | } |
brutisso@7005 | 2159 | g1h->check_bitmaps("Cleanup End"); |
tonyp@2472 | 2160 | |
tonyp@2472 | 2161 | g1h->verify_region_sets_optional(); |
stefank@6992 | 2162 | |
stefank@6992 | 2163 | // We need to make this be a "collection" so any collection pause that |
stefank@6992 | 2164 | // races with it goes around and waits for completeCleanup to finish. |
stefank@6992 | 2165 | g1h->increment_total_collections(); |
stefank@6992 | 2166 | |
stefank@6992 | 2167 | // Clean out dead classes and update Metaspace sizes. |
stefank@6996 | 2168 | if (ClassUnloadingWithConcurrentMark) { |
stefank@6996 | 2169 | ClassLoaderDataGraph::purge(); |
stefank@6996 | 2170 | } |
stefank@6992 | 2171 | MetaspaceGC::compute_new_size(); |
stefank@6992 | 2172 | |
stefank@6992 | 2173 | // We reclaimed old regions so we should calculate the sizes to make |
stefank@6992 | 2174 | // sure we update the old gen/space data. |
stefank@6992 | 2175 | g1h->g1mm()->update_sizes(); |
stefank@6992 | 2176 | |
sla@5237 | 2177 | g1h->trace_heap_after_concurrent_cycle(); |
ysr@777 | 2178 | } |
ysr@777 | 2179 | |
ysr@777 | 2180 | void ConcurrentMark::completeCleanup() { |
ysr@777 | 2181 | if (has_aborted()) return; |
ysr@777 | 2182 | |
tonyp@2472 | 2183 | G1CollectedHeap* g1h = G1CollectedHeap::heap(); |
tonyp@2472 | 2184 | |
jwilhelm@6549 | 2185 | _cleanup_list.verify_optional(); |
tonyp@2643 | 2186 | FreeRegionList tmp_free_list("Tmp Free List"); |
tonyp@2472 | 2187 | |
tonyp@2472 | 2188 | if (G1ConcRegionFreeingVerbose) { |
tonyp@2472 | 2189 | gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : " |
tonyp@3713 | 2190 | "cleanup list has %u entries", |
tonyp@2472 | 2191 | _cleanup_list.length()); |
tonyp@2472 | 2192 | } |
tonyp@2472 | 2193 | |
tonyp@2472 | 2194 |   // No one else should be accessing the _cleanup_list at this point,
tonyp@2472 | 2195 | // so it's not necessary to take any locks |
tonyp@2472 | 2196 | while (!_cleanup_list.is_empty()) { |
tonyp@2472 | 2197 | HeapRegion* hr = _cleanup_list.remove_head(); |
jwilhelm@6422 | 2198 | assert(hr != NULL, "Got NULL from a non-empty list"); |
tonyp@2849 | 2199 | hr->par_clear(); |
jwilhelm@6422 | 2200 | tmp_free_list.add_ordered(hr); |
tonyp@2472 | 2201 | |
tonyp@2472 | 2202 | // Instead of adding one region at a time to the secondary_free_list, |
tonyp@2472 | 2203 | // we accumulate them in the local list and move them a few at a |
tonyp@2472 | 2204 | // time. This also cuts down on the number of notify_all() calls |
tonyp@2472 | 2205 | // we do during this process. We'll also append the local list when |
tonyp@2472 | 2206 | // _cleanup_list is empty (which means we just removed the last |
tonyp@2472 | 2207 | // region from the _cleanup_list). |
tonyp@2643 | 2208 | if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) || |
tonyp@2472 | 2209 | _cleanup_list.is_empty()) { |
tonyp@2472 | 2210 | if (G1ConcRegionFreeingVerbose) { |
tonyp@2472 | 2211 | gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : " |
tonyp@3713 | 2212 | "appending %u entries to the secondary_free_list, " |
tonyp@3713 | 2213 | "cleanup list still has %u entries", |
tonyp@2643 | 2214 | tmp_free_list.length(), |
tonyp@2472 | 2215 | _cleanup_list.length()); |
ysr@777 | 2216 | } |
tonyp@2472 | 2217 | |
tonyp@2472 | 2218 | { |
tonyp@2472 | 2219 | MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag); |
jwilhelm@6422 | 2220 | g1h->secondary_free_list_add(&tmp_free_list); |
tonyp@2472 | 2221 | SecondaryFreeList_lock->notify_all(); |
tonyp@2472 | 2222 | } |
tonyp@2472 | 2223 | |
tonyp@2472 | 2224 | if (G1StressConcRegionFreeing) { |
tonyp@2472 | 2225 | for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) { |
tonyp@2472 | 2226 | os::sleep(Thread::current(), (jlong) 1, false); |
tonyp@2472 | 2227 | } |
tonyp@2472 | 2228 | } |
ysr@777 | 2229 | } |
ysr@777 | 2230 | } |
tonyp@2643 | 2231 | assert(tmp_free_list.is_empty(), "post-condition"); |
ysr@777 | 2232 | } |
ysr@777 | 2233 | |
johnc@4555 | 2234 | // Supporting Object and Oop closures for reference discovery |
johnc@4555 | 2235 | // and processing during marking.
johnc@2494 | 2236 | |
johnc@2379 | 2237 | bool G1CMIsAliveClosure::do_object_b(oop obj) { |
johnc@2379 | 2238 | HeapWord* addr = (HeapWord*)obj; |
johnc@2379 | 2239 | return addr != NULL && |
johnc@2379 | 2240 | (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj)); |
johnc@2379 | 2241 | } |
ysr@777 | 2242 | |
johnc@4555 | 2243 | // 'Keep Alive' oop closure used by both serial and parallel reference processing.
johnc@4555 | 2244 | // Uses the CMTask associated with a worker thread (for serial reference |
johnc@4555 | 2245 | // processing the CMTask for worker 0 is used) to preserve (mark) and |
johnc@4555 | 2246 | // trace referent objects. |
johnc@4555 | 2247 | // |
johnc@4555 | 2248 | // Using the CMTask and embedded local queues avoids having the worker |
johnc@4555 | 2249 | // threads operating on the global mark stack. This reduces the risk |
johnc@4555 | 2250 | // of overflowing the stack - which we would rather avoid at this late
johnc@4555 | 2251 | // stage. Also, using the tasks' local queues removes the potential
johnc@4555 | 2252 | // for the workers to interfere with each other, which could occur if
johnc@4555 | 2253 | // they operated on the global stack.
johnc@4555 | 2254 | |
johnc@4555 | 2255 | class G1CMKeepAliveAndDrainClosure: public OopClosure { |
johnc@4787 | 2256 | ConcurrentMark* _cm; |
johnc@4787 | 2257 | CMTask* _task; |
johnc@4787 | 2258 | int _ref_counter_limit; |
johnc@4787 | 2259 | int _ref_counter; |
johnc@4787 | 2260 | bool _is_serial; |
johnc@2494 | 2261 | public: |
johnc@4787 | 2262 | G1CMKeepAliveAndDrainClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) : |
johnc@4787 | 2263 | _cm(cm), _task(task), _is_serial(is_serial), |
johnc@4787 | 2264 | _ref_counter_limit(G1RefProcDrainInterval) { |
johnc@2494 | 2265 | assert(_ref_counter_limit > 0, "sanity"); |
johnc@4787 | 2266 | assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code"); |
johnc@2494 | 2267 | _ref_counter = _ref_counter_limit; |
johnc@2494 | 2268 | } |
johnc@2494 | 2269 | |
johnc@2494 | 2270 | virtual void do_oop(narrowOop* p) { do_oop_work(p); } |
johnc@2494 | 2271 | virtual void do_oop( oop* p) { do_oop_work(p); } |
johnc@2494 | 2272 | |
johnc@2494 | 2273 | template <class T> void do_oop_work(T* p) { |
johnc@2494 | 2274 | if (!_cm->has_overflown()) { |
johnc@2494 | 2275 | oop obj = oopDesc::load_decode_heap_oop(p); |
tonyp@2973 | 2276 | if (_cm->verbose_high()) { |
johnc@4173 | 2277 | gclog_or_tty->print_cr("\t[%u] we're looking at location " |
johnc@2494 | 2278 | "*"PTR_FORMAT" = "PTR_FORMAT, |
drchase@6680 | 2279 | _task->worker_id(), p2i(p), p2i((void*) obj)); |
tonyp@2973 | 2280 | } |
johnc@2494 | 2281 | |
johnc@2494 | 2282 | _task->deal_with_reference(obj); |
johnc@2494 | 2283 | _ref_counter--; |
johnc@2494 | 2284 | |
johnc@2494 | 2285 | if (_ref_counter == 0) { |
johnc@4555 | 2286 | // We have dealt with _ref_counter_limit references, pushing them |
johnc@4555 | 2287 | // and objects reachable from them on to the local stack (and |
johnc@4555 | 2288 | // possibly the global stack). Call CMTask::do_marking_step() to |
johnc@4555 | 2289 | // process these entries. |
johnc@4555 | 2290 | // |
johnc@4555 | 2291 | // We call CMTask::do_marking_step() in a loop, which we'll exit if |
johnc@4555 | 2292 | // there's nothing more to do (i.e. we're done with the entries that |
johnc@4555 | 2293 | // were pushed as a result of the CMTask::deal_with_reference() calls |
johnc@4555 | 2294 | // above) or we overflow. |
johnc@4555 | 2295 | // |
johnc@4555 | 2296 | // Note: CMTask::do_marking_step() can set the CMTask::has_aborted() |
johnc@4555 | 2297 | // flag while there may still be some work to do. (See the comment at |
johnc@4555 | 2298 | // the beginning of CMTask::do_marking_step() for those conditions - |
johnc@4555 | 2299 | // one of which is reaching the specified time target.) It is only |
johnc@4555 | 2300 | // when CMTask::do_marking_step() returns without setting the |
johnc@4555 | 2301 | // has_aborted() flag that the marking step has completed. |
johnc@2494 | 2302 | do { |
johnc@2494 | 2303 | double mark_step_duration_ms = G1ConcMarkStepDurationMillis; |
johnc@2494 | 2304 | _task->do_marking_step(mark_step_duration_ms, |
johnc@4787 | 2305 | false /* do_termination */, |
johnc@4787 | 2306 | _is_serial); |
johnc@2494 | 2307 | } while (_task->has_aborted() && !_cm->has_overflown()); |
johnc@2494 | 2308 | _ref_counter = _ref_counter_limit; |
johnc@2494 | 2309 | } |
johnc@2494 | 2310 | } else { |
tonyp@2973 | 2311 | if (_cm->verbose_high()) { |
johnc@4173 | 2312 | gclog_or_tty->print_cr("\t[%u] CM Overflow", _task->worker_id()); |
tonyp@2973 | 2313 | } |
johnc@2494 | 2314 | } |
johnc@2494 | 2315 | } |
johnc@2494 | 2316 | }; |
johnc@2494 | 2317 | |
johnc@4555 | 2318 | // 'Drain' oop closure used by both serial and parallel reference processing. |
johnc@4555 | 2319 | // Uses the CMTask associated with a given worker thread (for serial |
johnc@4555 | 2320 | // reference processing the CMTask for worker 0 is used). Calls the
johnc@4555 | 2321 | // do_marking_step routine, with an unbelievably large timeout value, |
johnc@4555 | 2322 | // to drain the marking data structures of the remaining entries |
johnc@4555 | 2323 | // added by the 'keep alive' oop closure above. |
johnc@4555 | 2324 | |
johnc@4555 | 2325 | class G1CMDrainMarkingStackClosure: public VoidClosure { |
johnc@2494 | 2326 | ConcurrentMark* _cm; |
johnc@4555 | 2327 | CMTask* _task; |
johnc@4787 | 2328 | bool _is_serial; |
johnc@2494 | 2329 | public: |
johnc@4787 | 2330 | G1CMDrainMarkingStackClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) : |
johnc@4787 | 2331 | _cm(cm), _task(task), _is_serial(is_serial) { |
johnc@4787 | 2332 | assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code"); |
johnc@4555 | 2333 | } |
johnc@2494 | 2334 | |
johnc@2494 | 2335 | void do_void() { |
johnc@2494 | 2336 | do { |
tonyp@2973 | 2337 | if (_cm->verbose_high()) { |
johnc@4787 | 2338 | gclog_or_tty->print_cr("\t[%u] Drain: Calling do_marking_step - serial: %s", |
johnc@4787 | 2339 | _task->worker_id(), BOOL_TO_STR(_is_serial)); |
tonyp@2973 | 2340 | } |
johnc@2494 | 2341 | |
johnc@4555 | 2342 | // We call CMTask::do_marking_step() to completely drain the local |
johnc@4555 | 2343 | // and global marking stacks of entries pushed by the 'keep alive' |
johnc@4555 | 2344 | // oop closure (an instance of G1CMKeepAliveAndDrainClosure above). |
johnc@4555 | 2345 | // |
johnc@4555 | 2346 | // CMTask::do_marking_step() is called in a loop, which we'll exit |
johnc@4555 | 2347 | // if there's nothing more to do (i.e. we've completely drained the
johnc@4555 | 2348 | // entries that were pushed as a result of applying the 'keep alive'
johnc@4555 | 2349 | // closure to the entries on the discovered ref lists) or we overflow |
johnc@4555 | 2350 | // the global marking stack. |
johnc@4555 | 2351 | // |
johnc@4555 | 2352 | // Note: CMTask::do_marking_step() can set the CMTask::has_aborted() |
johnc@4555 | 2353 | // flag while there may still be some work to do. (See the comment at |
johnc@4555 | 2354 | // the beginning of CMTask::do_marking_step() for those conditions - |
johnc@4555 | 2355 | // one of which is reaching the specified time target.) It is only |
johnc@4555 | 2356 | // when CMTask::do_marking_step() returns without setting the |
johnc@4555 | 2357 | // has_aborted() flag that the marking step has completed. |
johnc@2494 | 2358 | |
johnc@2494 | 2359 | _task->do_marking_step(1000000000.0 /* something very large */, |
johnc@4787 | 2360 | true /* do_termination */, |
johnc@4787 | 2361 | _is_serial); |
johnc@2494 | 2362 | } while (_task->has_aborted() && !_cm->has_overflown()); |
johnc@2494 | 2363 | } |
johnc@2494 | 2364 | }; |
johnc@2494 | 2365 | |
johnc@3175 | 2366 | // Implementation of AbstractRefProcTaskExecutor for parallel |
johnc@3175 | 2367 | // reference processing at the end of G1 concurrent marking |
johnc@3175 | 2368 | |
johnc@3175 | 2369 | class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor { |
johnc@2494 | 2370 | private: |
johnc@2494 | 2371 | G1CollectedHeap* _g1h; |
johnc@2494 | 2372 | ConcurrentMark* _cm; |
johnc@2494 | 2373 | WorkGang* _workers; |
johnc@2494 | 2374 | int _active_workers; |
johnc@2494 | 2375 | |
johnc@2494 | 2376 | public: |
johnc@3175 | 2377 | G1CMRefProcTaskExecutor(G1CollectedHeap* g1h, |
johnc@2494 | 2378 | ConcurrentMark* cm, |
johnc@2494 | 2379 | WorkGang* workers, |
johnc@2494 | 2380 | int n_workers) : |
johnc@3292 | 2381 | _g1h(g1h), _cm(cm), |
johnc@3292 | 2382 | _workers(workers), _active_workers(n_workers) { } |
johnc@2494 | 2383 | |
johnc@2494 | 2384 | // Executes the given task using concurrent marking worker threads. |
johnc@2494 | 2385 | virtual void execute(ProcessTask& task); |
johnc@2494 | 2386 | virtual void execute(EnqueueTask& task); |
johnc@2494 | 2387 | }; |
johnc@2494 | 2388 | |
johnc@3175 | 2389 | class G1CMRefProcTaskProxy: public AbstractGangTask { |
johnc@2494 | 2390 | typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask; |
johnc@2494 | 2391 | ProcessTask& _proc_task; |
johnc@2494 | 2392 | G1CollectedHeap* _g1h; |
johnc@2494 | 2393 | ConcurrentMark* _cm; |
johnc@2494 | 2394 | |
johnc@2494 | 2395 | public: |
johnc@3175 | 2396 | G1CMRefProcTaskProxy(ProcessTask& proc_task, |
johnc@2494 | 2397 | G1CollectedHeap* g1h, |
johnc@3292 | 2398 | ConcurrentMark* cm) : |
johnc@2494 | 2399 | AbstractGangTask("Process reference objects in parallel"), |
johnc@4555 | 2400 | _proc_task(proc_task), _g1h(g1h), _cm(cm) { |
johnc@4787 | 2401 | ReferenceProcessor* rp = _g1h->ref_processor_cm(); |
johnc@4787 | 2402 | assert(rp->processing_is_mt(), "shouldn't be here otherwise"); |
johnc@4787 | 2403 | } |
johnc@2494 | 2404 | |
jmasa@3357 | 2405 | virtual void work(uint worker_id) { |
mdoerr@7011 | 2406 | ResourceMark rm; |
mdoerr@7011 | 2407 | HandleMark hm; |
johnc@4787 | 2408 | CMTask* task = _cm->task(worker_id); |
johnc@2494 | 2409 | G1CMIsAliveClosure g1_is_alive(_g1h); |
johnc@4787 | 2410 | G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */); |
johnc@4787 | 2411 | G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */); |
johnc@2494 | 2412 | |
jmasa@3357 | 2413 | _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain); |
johnc@2494 | 2414 | } |
johnc@2494 | 2415 | }; |
johnc@2494 | 2416 | |
johnc@3175 | 2417 | void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) { |
johnc@2494 | 2418 | assert(_workers != NULL, "Need parallel worker threads."); |
johnc@4555 | 2419 | assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT"); |
johnc@2494 | 2420 | |
johnc@3292 | 2421 | G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm); |
johnc@2494 | 2422 | |
johnc@4788 | 2423 | // We need to reset the concurrency level before each |
johnc@4788 | 2424 | // proxy task execution, so that the termination protocol |
johnc@4788 | 2425 | // and overflow handling in CMTask::do_marking_step() know
johnc@4788 | 2426 | // how many workers to wait for. |
johnc@4788 | 2427 | _cm->set_concurrency(_active_workers); |
johnc@2494 | 2428 | _g1h->set_par_threads(_active_workers); |
johnc@2494 | 2429 | _workers->run_task(&proc_task_proxy); |
johnc@2494 | 2430 | _g1h->set_par_threads(0); |
johnc@2494 | 2431 | } |
johnc@2494 | 2432 | |
johnc@3175 | 2433 | class G1CMRefEnqueueTaskProxy: public AbstractGangTask { |
johnc@2494 | 2434 | typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask; |
johnc@2494 | 2435 | EnqueueTask& _enq_task; |
johnc@2494 | 2436 | |
johnc@2494 | 2437 | public: |
johnc@3175 | 2438 | G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) : |
johnc@2494 | 2439 | AbstractGangTask("Enqueue reference objects in parallel"), |
johnc@3292 | 2440 | _enq_task(enq_task) { } |
johnc@2494 | 2441 | |
jmasa@3357 | 2442 | virtual void work(uint worker_id) { |
jmasa@3357 | 2443 | _enq_task.work(worker_id); |
johnc@2494 | 2444 | } |
johnc@2494 | 2445 | }; |
johnc@2494 | 2446 | |
johnc@3175 | 2447 | void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) { |
johnc@2494 | 2448 | assert(_workers != NULL, "Need parallel worker threads."); |
johnc@4555 | 2449 | assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT"); |
johnc@2494 | 2450 | |
johnc@3175 | 2451 | G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task); |
johnc@2494 | 2452 | |
johnc@4788 | 2453 | // Not strictly necessary but... |
johnc@4788 | 2454 | // |
johnc@4788 | 2455 | // We need to reset the concurrency level before each |
johnc@4788 | 2456 | // proxy task execution, so that the termination protocol |
johnc@4788 | 2457 | // and overflow handling in CMTask::do_marking_step() know
johnc@4788 | 2458 | // how many workers to wait for. |
johnc@4788 | 2459 | _cm->set_concurrency(_active_workers); |
johnc@2494 | 2460 | _g1h->set_par_threads(_active_workers); |
johnc@2494 | 2461 | _workers->run_task(&enq_task_proxy); |
johnc@2494 | 2462 | _g1h->set_par_threads(0); |
johnc@2494 | 2463 | } |
johnc@2494 | 2464 | |
stefank@6992 | 2465 | void ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) { |
stefank@6992 | 2466 | G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes); |
stefank@6992 | 2467 | } |
stefank@6992 | 2468 | |
stefank@6992 | 2469 | // Helper class to get rid of some boilerplate code. |
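// The extra space emitted by doit_and_prepend() is presumably there to
// separate the nested timing output from the preceding text on the same
// log line. Typical use, as in weakRefsWork() below:
//   G1RemarkGCTraceTime trace("Unloading", G1Log::finer());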
stefank@6992 | 2470 | class G1RemarkGCTraceTime : public GCTraceTime { |
stefank@6992 | 2471 | static bool doit_and_prepend(bool doit) { |
stefank@6992 | 2472 | if (doit) { |
stefank@6992 | 2473 | gclog_or_tty->put(' '); |
stefank@6992 | 2474 | } |
stefank@6992 | 2475 | return doit; |
stefank@6992 | 2476 | } |
stefank@6992 | 2477 | |
stefank@6992 | 2478 | public: |
stefank@6992 | 2479 | G1RemarkGCTraceTime(const char* title, bool doit) |
stefank@6992 | 2480 | : GCTraceTime(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm(), |
stefank@6992 | 2481 | G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id()) { |
stefank@6992 | 2482 | } |
stefank@6992 | 2483 | }; |
stefank@6992 | 2484 | |
ysr@777 | 2485 | void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) { |
johnc@4788 | 2486 | if (has_overflown()) { |
johnc@4788 | 2487 | // Skip processing the discovered references if we have |
johnc@4788 | 2488 | // overflown the global marking stack. Reference objects |
johnc@4788 | 2489 | // only get discovered once so it is OK to not |
johnc@4788 | 2490 | // de-populate the discovered reference lists. We could have, |
johnc@4788 | 2491 | // but the only benefit would be that, when marking restarts, |
johnc@4788 | 2492 | // fewer reference objects are discovered.
johnc@4788 | 2493 | return; |
johnc@4788 | 2494 | } |
johnc@4788 | 2495 | |
ysr@777 | 2496 | ResourceMark rm; |
ysr@777 | 2497 | HandleMark hm; |
johnc@3171 | 2498 | |
johnc@3171 | 2499 | G1CollectedHeap* g1h = G1CollectedHeap::heap(); |
johnc@3171 | 2500 | |
johnc@3171 | 2501 | // Is alive closure. |
johnc@3171 | 2502 | G1CMIsAliveClosure g1_is_alive(g1h); |
johnc@3171 | 2503 | |
johnc@3171 | 2504 | // Inner scope to exclude the cleaning of the string and symbol |
johnc@3171 | 2505 | // tables from the displayed time. |
johnc@3171 | 2506 | { |
brutisso@3710 | 2507 | if (G1Log::finer()) { |
johnc@3171 | 2508 | gclog_or_tty->put(' '); |
johnc@3171 | 2509 | } |
brutisso@6904 | 2510 | GCTraceTime t("GC ref-proc", G1Log::finer(), false, g1h->gc_timer_cm(), concurrent_gc_id()); |
johnc@3171 | 2511 | |
johnc@3175 | 2512 | ReferenceProcessor* rp = g1h->ref_processor_cm(); |
johnc@3171 | 2513 | |
johnc@3171 | 2514 | // See the comment in G1CollectedHeap::ref_processing_init() |
johnc@3171 | 2515 | // about how reference processing currently works in G1. |
johnc@3171 | 2516 | |
johnc@4555 | 2517 | // Set the soft reference policy |
johnc@3171 | 2518 | rp->setup_policy(clear_all_soft_refs); |
johnc@3171 | 2519 | assert(_markStack.isEmpty(), "mark stack should be empty"); |
johnc@3171 | 2520 | |
johnc@4787 | 2521 | // Instances of the 'Keep Alive' and 'Complete GC' closures used |
johnc@4787 | 2522 | // in serial reference processing. Note these closures are also |
johnc@4787 | 2523 | // used for serially processing (by the current thread) the
johnc@4787 | 2524 | // JNI references during parallel reference processing. |
johnc@4787 | 2525 | // |
johnc@4787 | 2526 | // These closures do not need to synchronize with the worker |
johnc@4787 | 2527 | // threads involved in parallel reference processing as these |
johnc@4787 | 2528 | // instances are executed serially by the current thread (e.g. |
johnc@4787 | 2529 | // reference processing is not multi-threaded and is thus |
johnc@4787 | 2530 | // performed by the current thread instead of a gang worker). |
johnc@4787 | 2531 | // |
johnc@4787 | 2532 | // The gang tasks involved in parallel reference processing create
johnc@4787 | 2533 | // their own instances of these closures, which do their own |
johnc@4787 | 2534 | // synchronization among themselves. |
johnc@4787 | 2535 | G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */); |
johnc@4787 | 2536 | G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */); |
johnc@4787 | 2537 | |
johnc@4787 | 2538 | // We need at least one active thread. If reference processing |
johnc@4787 | 2539 | // is not multi-threaded we use the current (VMThread) thread, |
johnc@4787 | 2540 | // otherwise we use the work gang from the G1CollectedHeap and |
johnc@4787 | 2541 | // we utilize all the worker threads we can. |
johnc@4787 | 2542 | bool processing_is_mt = rp->processing_is_mt() && g1h->workers() != NULL; |
johnc@4787 | 2543 | uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U); |
johnc@4173 | 2544 | active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U); |
johnc@3171 | 2545 | |
johnc@4787 | 2546 | // Parallel processing task executor. |
johnc@3292 | 2547 | G1CMRefProcTaskExecutor par_task_executor(g1h, this, |
johnc@3175 | 2548 | g1h->workers(), active_workers); |
johnc@4787 | 2549 | AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL); |
johnc@4555 | 2550 | |
johnc@4788 | 2551 | // Set the concurrency level. The phase was already set prior to |
johnc@4788 | 2552 | // executing the remark task. |
johnc@4788 | 2553 | set_concurrency(active_workers); |
johnc@4788 | 2554 | |
johnc@4555 | 2555 | // Set the degree of MT processing here. If the discovery was done MT, |
johnc@4555 | 2556 | // the number of threads involved during discovery could differ from |
johnc@4555 | 2557 | // the number of active workers. This is OK as long as the discovered |
johnc@4555 | 2558 | // Reference lists are balanced (see balance_all_queues() and balance_queues()). |
johnc@4555 | 2559 | rp->set_active_mt_degree(active_workers); |
johnc@4555 | 2560 | |
johnc@4555 | 2561 | // Process the weak references. |
sla@5237 | 2562 | const ReferenceProcessorStats& stats = |
sla@5237 | 2563 | rp->process_discovered_references(&g1_is_alive, |
sla@5237 | 2564 | &g1_keep_alive, |
sla@5237 | 2565 | &g1_drain_mark_stack, |
sla@5237 | 2566 | executor, |
brutisso@6904 | 2567 | g1h->gc_timer_cm(), |
brutisso@6904 | 2568 | concurrent_gc_id()); |
sla@5237 | 2569 | g1h->gc_tracer_cm()->report_gc_reference_stats(stats); |
johnc@4555 | 2570 | |
johnc@4555 | 2571 | // The do_oop work routines of the keep_alive and drain_marking_stack |
johnc@4555 | 2572 | // oop closures will set the has_overflown flag if we overflow the |
johnc@4555 | 2573 | // global marking stack. |
johnc@3171 | 2574 | |
johnc@3171 | 2575 | assert(_markStack.overflow() || _markStack.isEmpty(), |
johnc@3171 | 2576 | "mark stack should be empty (unless it overflowed)"); |
johnc@4787 | 2577 | |
johnc@3171 | 2578 | if (_markStack.overflow()) { |
johnc@4555 | 2579 | // This should have been done already when we tried to push an |
johnc@3171 | 2580 | // entry on to the global mark stack. But let's do it again. |
johnc@3171 | 2581 | set_has_overflown(); |
johnc@3171 | 2582 | } |
johnc@3171 | 2583 | |
johnc@4555 | 2584 | assert(rp->num_q() == active_workers, "why not"); |
johnc@4555 | 2585 | |
johnc@4555 | 2586 | rp->enqueue_discovered_references(executor); |
johnc@3171 | 2587 | |
johnc@3171 | 2588 | rp->verify_no_references_recorded(); |
johnc@3175 | 2589 | assert(!rp->discovery_enabled(), "Post condition"); |
johnc@2494 | 2590 | } |
johnc@2494 | 2591 | |
pliden@6399 | 2592 | if (has_overflown()) { |
pliden@6399 | 2593 | // We can not trust g1_is_alive if the marking stack overflowed |
pliden@6399 | 2594 | return; |
pliden@6399 | 2595 | } |
pliden@6399 | 2596 | |
stefank@6992 | 2597 | assert(_markStack.isEmpty(), "Marking should have completed"); |
stefank@6992 | 2598 | |
stefank@6992 | 2599 | // Unload Klasses, Strings, Symbols, Code Cache, etc.
stefank@6992 | 2600 | { |
stefank@6996 | 2601 | G1RemarkGCTraceTime trace("Unloading", G1Log::finer()); |
stefank@6996 | 2602 | |
stefank@6996 | 2603 | if (ClassUnloadingWithConcurrentMark) { |
stefank@6996 | 2604 | bool purged_classes; |
stefank@6996 | 2605 | |
stefank@6996 | 2606 | { |
stefank@6996 | 2607 | G1RemarkGCTraceTime trace("System Dictionary Unloading", G1Log::finest()); |
stefank@6996 | 2608 | purged_classes = SystemDictionary::do_unloading(&g1_is_alive); |
stefank@6996 | 2609 | } |
stefank@6996 | 2610 | |
stefank@6996 | 2611 | { |
stefank@6996 | 2612 | G1RemarkGCTraceTime trace("Parallel Unloading", G1Log::finest()); |
stefank@6996 | 2613 | weakRefsWorkParallelPart(&g1_is_alive, purged_classes); |
stefank@6996 | 2614 | } |
stefank@6996 | 2615 | } |
stefank@6996 | 2616 | |
stefank@6996 | 2617 | if (G1StringDedup::is_enabled()) { |
stefank@6996 | 2618 | G1RemarkGCTraceTime trace("String Deduplication Unlink", G1Log::finest()); |
stefank@6996 | 2619 | G1StringDedup::unlink(&g1_is_alive); |
stefank@6996 | 2620 | } |
stefank@6992 | 2621 | } |
ysr@777 | 2622 | } |
ysr@777 | 2623 | |
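// Swaps the roles of the two marking bitmaps: the "next" bitmap populated
// by the marking cycle that has just completed becomes the new "prev"
// bitmap, and the old "prev" bitmap becomes the "next" bitmap to be
// cleared and reused by the following cycle.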
ysr@777 | 2624 | void ConcurrentMark::swapMarkBitMaps() { |
ysr@777 | 2625 | CMBitMapRO* temp = _prevMarkBitMap; |
ysr@777 | 2626 | _prevMarkBitMap = (CMBitMapRO*)_nextMarkBitMap; |
ysr@777 | 2627 | _nextMarkBitMap = (CMBitMap*) temp; |
ysr@777 | 2628 | } |
ysr@777 | 2629 | |
stefank@6992 | 2630 | class CMObjectClosure; |
stefank@6992 | 2631 | |
stefank@6992 | 2632 | // Closure for iterating over objects, currently only used for |
stefank@6992 | 2633 | // processing SATB buffers. |
stefank@6992 | 2634 | class CMObjectClosure : public ObjectClosure { |
stefank@6992 | 2635 | private: |
stefank@6992 | 2636 | CMTask* _task; |
stefank@6992 | 2637 | |
stefank@6992 | 2638 | public: |
stefank@6992 | 2639 | void do_object(oop obj) { |
stefank@6992 | 2640 | _task->deal_with_reference(obj); |
stefank@6992 | 2641 | } |
stefank@6992 | 2642 | |
stefank@6992 | 2643 | CMObjectClosure(CMTask* task) : _task(task) { } |
stefank@6992 | 2644 | }; |
stefank@6992 | 2645 | |
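// Thread closure applied during remark: for every Java thread it manages
// to claim, it walks the thread's nmethods for marking roots and drains
// the thread's SATB buffer; for the VM thread it drains the shared SATB
// queue.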
stefank@6992 | 2646 | class G1RemarkThreadsClosure : public ThreadClosure { |
stefank@6992 | 2647 | CMObjectClosure _cm_obj; |
stefank@6992 | 2648 | G1CMOopClosure _cm_cl; |
stefank@6992 | 2649 | MarkingCodeBlobClosure _code_cl; |
stefank@6992 | 2650 | int _thread_parity; |
stefank@6992 | 2651 | bool _is_par; |
stefank@6992 | 2652 | |
stefank@6992 | 2653 | public: |
stefank@6992 | 2654 | G1RemarkThreadsClosure(G1CollectedHeap* g1h, CMTask* task, bool is_par) : |
stefank@6992 | 2655 | _cm_obj(task), _cm_cl(g1h, g1h->concurrent_mark(), task), _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations), |
stefank@6992 | 2656 | _thread_parity(SharedHeap::heap()->strong_roots_parity()), _is_par(is_par) {} |
stefank@6992 | 2657 | |
stefank@6992 | 2658 | void do_thread(Thread* thread) { |
stefank@6992 | 2659 | if (thread->is_Java_thread()) { |
stefank@6992 | 2660 | if (thread->claim_oops_do(_is_par, _thread_parity)) { |
stefank@6992 | 2661 | JavaThread* jt = (JavaThread*)thread; |
stefank@6992 | 2662 | |
stefank@6992 | 2663 | // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking;
stefank@6992 | 2664 | // however, oops reachable from nmethods have very complex lifecycles:
stefank@6992 | 2665 | // * Alive if on the stack of an executing method |
stefank@6992 | 2666 | // * Weakly reachable otherwise |
stefank@6992 | 2667 | // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver should be |
stefank@6992 | 2668 | // live by the SATB invariant but other oops recorded in nmethods may behave differently. |
stefank@6992 | 2669 | jt->nmethods_do(&_code_cl); |
stefank@6992 | 2670 | |
stefank@6992 | 2671 | jt->satb_mark_queue().apply_closure_and_empty(&_cm_obj); |
stefank@6992 | 2672 | } |
stefank@6992 | 2673 | } else if (thread->is_VM_thread()) { |
stefank@6992 | 2674 | if (thread->claim_oops_do(_is_par, _thread_parity)) { |
stefank@6992 | 2675 | JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_obj); |
stefank@6992 | 2676 | } |
stefank@6992 | 2677 | } |
stefank@6992 | 2678 | } |
stefank@6992 | 2679 | }; |
stefank@6992 | 2680 | |
ysr@777 | 2681 | class CMRemarkTask: public AbstractGangTask { |
ysr@777 | 2682 | private: |
johnc@4787 | 2683 | ConcurrentMark* _cm; |
johnc@4787 | 2684 | bool _is_serial; |
ysr@777 | 2685 | public: |
jmasa@3357 | 2686 | void work(uint worker_id) { |
ysr@777 | 2687 | // Since all available tasks are actually started, we should |
ysr@777 | 2688 | // only proceed if we're supposed to be active.
jmasa@3357 | 2689 | if (worker_id < _cm->active_tasks()) { |
jmasa@3357 | 2690 | CMTask* task = _cm->task(worker_id); |
ysr@777 | 2691 | task->record_start_time(); |
stefank@6992 | 2692 | { |
stefank@6992 | 2693 | ResourceMark rm; |
stefank@6992 | 2694 | HandleMark hm; |
stefank@6992 | 2695 | |
stefank@6992 | 2696 | G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task, !_is_serial); |
stefank@6992 | 2697 | Threads::threads_do(&threads_f); |
stefank@6992 | 2698 | } |
stefank@6992 | 2699 | |
ysr@777 | 2700 | do { |
johnc@2494 | 2701 | task->do_marking_step(1000000000.0 /* something very large */, |
johnc@4787 | 2702 | true /* do_termination */, |
johnc@4787 | 2703 | _is_serial); |
ysr@777 | 2704 | } while (task->has_aborted() && !_cm->has_overflown()); |
ysr@777 | 2705 | // If we overflow, then we do not want to restart. We instead |
ysr@777 | 2706 | // want to abort remark and do concurrent marking again. |
ysr@777 | 2707 | task->record_end_time(); |
ysr@777 | 2708 | } |
ysr@777 | 2709 | } |
ysr@777 | 2710 | |
johnc@4787 | 2711 | CMRemarkTask(ConcurrentMark* cm, int active_workers, bool is_serial) : |
johnc@4787 | 2712 | AbstractGangTask("Par Remark"), _cm(cm), _is_serial(is_serial) { |
johnc@3338 | 2713 | _cm->terminator()->reset_for_reuse(active_workers); |
jmasa@3294 | 2714 | } |
ysr@777 | 2715 | }; |
ysr@777 | 2716 | |
ysr@777 | 2717 | void ConcurrentMark::checkpointRootsFinalWork() { |
ysr@777 | 2718 | ResourceMark rm; |
ysr@777 | 2719 | HandleMark hm; |
ysr@777 | 2720 | G1CollectedHeap* g1h = G1CollectedHeap::heap(); |
ysr@777 | 2721 | |
stefank@6992 | 2722 | G1RemarkGCTraceTime trace("Finalize Marking", G1Log::finer()); |
stefank@6992 | 2723 | |
ysr@777 | 2724 | g1h->ensure_parsability(false); |
ysr@777 | 2725 | |
jmasa@2188 | 2726 | if (G1CollectedHeap::use_parallel_gc_threads()) { |
jrose@1424 | 2727 | G1CollectedHeap::StrongRootsScope srs(g1h); |
jmasa@3294 | 2728 | // this is remark, so we'll use up all active threads |
jmasa@3357 | 2729 | uint active_workers = g1h->workers()->active_workers(); |
jmasa@3294 | 2730 | if (active_workers == 0) { |
jmasa@3294 | 2731 | assert(active_workers > 0, "Should have been set earlier"); |
jmasa@3357 | 2732 | active_workers = (uint) ParallelGCThreads; |
jmasa@3294 | 2733 | g1h->workers()->set_active_workers(active_workers); |
jmasa@3294 | 2734 | } |
johnc@4788 | 2735 | set_concurrency_and_phase(active_workers, false /* concurrent */); |
jmasa@3294 | 2736 | // Leave _parallel_marking_threads at its
jmasa@3294 | 2737 | // value originally calculated in the ConcurrentMark |
jmasa@3294 | 2738 | // constructor and pass values of the active workers |
jmasa@3294 | 2739 | // through the gang in the task. |
ysr@777 | 2740 | |
johnc@4787 | 2741 | CMRemarkTask remarkTask(this, active_workers, false /* is_serial */); |
johnc@4787 | 2742 | // We will start all available threads, even if we decide that the |
johnc@4787 | 2743 | // active_workers will be fewer. The extra ones will just bail out |
johnc@4787 | 2744 | // immediately. |
jmasa@3294 | 2745 | g1h->set_par_threads(active_workers); |
ysr@777 | 2746 | g1h->workers()->run_task(&remarkTask); |
ysr@777 | 2747 | g1h->set_par_threads(0); |
ysr@777 | 2748 | } else { |
jrose@1424 | 2749 | G1CollectedHeap::StrongRootsScope srs(g1h); |
jmasa@3357 | 2750 | uint active_workers = 1; |
johnc@4788 | 2751 | set_concurrency_and_phase(active_workers, false /* concurrent */); |
ysr@777 | 2752 | |
johnc@4787 | 2753 | // Note - if there's no work gang then the VMThread will be |
johnc@4787 | 2754 | // the thread to execute the remark - serially. We have |
johnc@4787 | 2755 | // to pass true for the is_serial parameter so that |
johnc@4787 | 2756 | // CMTask::do_marking_step() doesn't enter the sync |
johnc@4787 | 2757 | // barriers in the event of an overflow. Doing so will |
johnc@4787 | 2758 | // cause an assert that the current thread is not a |
johnc@4787 | 2759 | // concurrent GC thread. |
johnc@4787 | 2760 | CMRemarkTask remarkTask(this, active_workers, true /* is_serial */);
ysr@777 | 2761 | remarkTask.work(0); |
ysr@777 | 2762 | } |
tonyp@1458 | 2763 | SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); |
johnc@4789 | 2764 | guarantee(has_overflown() || |
johnc@4789 | 2765 | satb_mq_set.completed_buffers_num() == 0, |
johnc@4789 | 2766 | err_msg("Invariant: has_overflown = %s, num buffers = %d", |
johnc@4789 | 2767 | BOOL_TO_STR(has_overflown()), |
johnc@4789 | 2768 | satb_mq_set.completed_buffers_num())); |
ysr@777 | 2769 | |
ysr@777 | 2770 | print_stats(); |
ysr@777 | 2771 | } |
ysr@777 | 2772 | |
tonyp@1479 | 2773 | #ifndef PRODUCT |
tonyp@1479 | 2774 | |
tonyp@1823 | 2775 | class PrintReachableOopClosure: public OopClosure { |
ysr@777 | 2776 | private: |
ysr@777 | 2777 | G1CollectedHeap* _g1h; |
ysr@777 | 2778 | outputStream* _out; |
johnc@2969 | 2779 | VerifyOption _vo; |
tonyp@1823 | 2780 | bool _all; |
ysr@777 | 2781 | |
ysr@777 | 2782 | public: |
johnc@2969 | 2783 | PrintReachableOopClosure(outputStream* out, |
johnc@2969 | 2784 | VerifyOption vo, |
tonyp@1823 | 2785 | bool all) : |
tonyp@1479 | 2786 | _g1h(G1CollectedHeap::heap()), |
johnc@2969 | 2787 | _out(out), _vo(vo), _all(all) { } |
ysr@777 | 2788 | |
ysr@1280 | 2789 | void do_oop(narrowOop* p) { do_oop_work(p); } |
ysr@1280 | 2790 | void do_oop( oop* p) { do_oop_work(p); } |
ysr@1280 | 2791 | |
ysr@1280 | 2792 | template <class T> void do_oop_work(T* p) { |
ysr@1280 | 2793 | oop obj = oopDesc::load_decode_heap_oop(p); |
ysr@777 | 2794 | const char* str = NULL; |
ysr@777 | 2795 | const char* str2 = ""; |
ysr@777 | 2796 | |
tonyp@1823 | 2797 | if (obj == NULL) { |
tonyp@1823 | 2798 | str = ""; |
tonyp@1823 | 2799 | } else if (!_g1h->is_in_g1_reserved(obj)) { |
tonyp@1823 | 2800 | str = " O"; |
tonyp@1823 | 2801 | } else { |
ysr@777 | 2802 | HeapRegion* hr = _g1h->heap_region_containing(obj); |
tonyp@1458 | 2803 | guarantee(hr != NULL, "invariant"); |
tonyp@3957 | 2804 | bool over_tams = _g1h->allocated_since_marking(obj, hr, _vo); |
tonyp@3957 | 2805 | bool marked = _g1h->is_marked(obj, _vo); |
tonyp@1479 | 2806 | |
tonyp@1479 | 2807 | if (over_tams) { |
tonyp@1823 | 2808 | str = " >"; |
tonyp@1823 | 2809 | if (marked) { |
ysr@777 | 2810 | str2 = " AND MARKED"; |
tonyp@1479 | 2811 | } |
tonyp@1823 | 2812 | } else if (marked) { |
tonyp@1823 | 2813 | str = " M"; |
tonyp@1479 | 2814 | } else { |
tonyp@1823 | 2815 | str = " NOT"; |
tonyp@1479 | 2816 | } |
ysr@777 | 2817 | } |
ysr@777 | 2818 | |
tonyp@1823 | 2819 | _out->print_cr(" "PTR_FORMAT": "PTR_FORMAT"%s%s", |
drchase@6680 | 2820 | p2i(p), p2i((void*) obj), str, str2); |
ysr@777 | 2821 | } |
ysr@777 | 2822 | }; |
ysr@777 | 2823 | |
tonyp@1823 | 2824 | class PrintReachableObjectClosure : public ObjectClosure { |
ysr@777 | 2825 | private: |
johnc@2969 | 2826 | G1CollectedHeap* _g1h; |
johnc@2969 | 2827 | outputStream* _out; |
johnc@2969 | 2828 | VerifyOption _vo; |
johnc@2969 | 2829 | bool _all; |
johnc@2969 | 2830 | HeapRegion* _hr; |
ysr@777 | 2831 | |
ysr@777 | 2832 | public: |
johnc@2969 | 2833 | PrintReachableObjectClosure(outputStream* out, |
johnc@2969 | 2834 | VerifyOption vo, |
tonyp@1823 | 2835 | bool all, |
tonyp@1823 | 2836 | HeapRegion* hr) : |
johnc@2969 | 2837 | _g1h(G1CollectedHeap::heap()), |
johnc@2969 | 2838 | _out(out), _vo(vo), _all(all), _hr(hr) { } |
tonyp@1823 | 2839 | |
tonyp@1823 | 2840 | void do_object(oop o) { |
tonyp@3957 | 2841 | bool over_tams = _g1h->allocated_since_marking(o, _hr, _vo); |
tonyp@3957 | 2842 | bool marked = _g1h->is_marked(o, _vo); |
tonyp@1823 | 2843 | bool print_it = _all || over_tams || marked; |
tonyp@1823 | 2844 | |
tonyp@1823 | 2845 | if (print_it) { |
tonyp@1823 | 2846 | _out->print_cr(" "PTR_FORMAT"%s", |
drchase@6680 | 2847 | p2i((void *)o), (over_tams) ? " >" : (marked) ? " M" : ""); |
johnc@2969 | 2848 | PrintReachableOopClosure oopCl(_out, _vo, _all); |
coleenp@4037 | 2849 | o->oop_iterate_no_header(&oopCl); |
tonyp@1823 | 2850 | } |
ysr@777 | 2851 | } |
ysr@777 | 2852 | }; |
ysr@777 | 2853 | |
tonyp@1823 | 2854 | class PrintReachableRegionClosure : public HeapRegionClosure { |
ysr@777 | 2855 | private: |
tonyp@3957 | 2856 | G1CollectedHeap* _g1h; |
tonyp@3957 | 2857 | outputStream* _out; |
tonyp@3957 | 2858 | VerifyOption _vo; |
tonyp@3957 | 2859 | bool _all; |
ysr@777 | 2860 | |
ysr@777 | 2861 | public: |
ysr@777 | 2862 | bool doHeapRegion(HeapRegion* hr) { |
ysr@777 | 2863 | HeapWord* b = hr->bottom(); |
ysr@777 | 2864 | HeapWord* e = hr->end(); |
ysr@777 | 2865 | HeapWord* t = hr->top(); |
tonyp@3957 | 2866 | HeapWord* p = _g1h->top_at_mark_start(hr, _vo); |
ysr@777 | 2867 | _out->print_cr("** ["PTR_FORMAT", "PTR_FORMAT"] top: "PTR_FORMAT" " |
drchase@6680 | 2868 | "TAMS: " PTR_FORMAT, p2i(b), p2i(e), p2i(t), p2i(p)); |
tonyp@1823 | 2869 | _out->cr(); |
tonyp@1823 | 2870 | |
tonyp@1823 | 2871 | HeapWord* from = b; |
tonyp@1823 | 2872 | HeapWord* to = t; |
tonyp@1823 | 2873 | |
tonyp@1823 | 2874 | if (to > from) { |
drchase@6680 | 2875 | _out->print_cr("Objects in [" PTR_FORMAT ", " PTR_FORMAT "]", p2i(from), p2i(to)); |
tonyp@1823 | 2876 | _out->cr(); |
johnc@2969 | 2877 | PrintReachableObjectClosure ocl(_out, _vo, _all, hr); |
tonyp@1823 | 2878 | hr->object_iterate_mem_careful(MemRegion(from, to), &ocl); |
tonyp@1823 | 2879 | _out->cr(); |
tonyp@1823 | 2880 | } |
ysr@777 | 2881 | |
ysr@777 | 2882 | return false; |
ysr@777 | 2883 | } |
ysr@777 | 2884 | |
johnc@2969 | 2885 | PrintReachableRegionClosure(outputStream* out, |
johnc@2969 | 2886 | VerifyOption vo, |
tonyp@1823 | 2887 | bool all) : |
tonyp@3957 | 2888 | _g1h(G1CollectedHeap::heap()), _out(out), _vo(vo), _all(all) { } |
ysr@777 | 2889 | }; |
ysr@777 | 2890 | |
tonyp@1823 | 2891 | void ConcurrentMark::print_reachable(const char* str, |
johnc@2969 | 2892 | VerifyOption vo, |
tonyp@1823 | 2893 | bool all) { |
tonyp@1823 | 2894 | gclog_or_tty->cr(); |
tonyp@1823 | 2895 | gclog_or_tty->print_cr("== Doing heap dump... "); |
tonyp@1479 | 2896 | |
tonyp@1479 | 2897 | if (G1PrintReachableBaseFile == NULL) { |
tonyp@1479 | 2898 | gclog_or_tty->print_cr(" #### error: no base file defined"); |
tonyp@1479 | 2899 | return; |
tonyp@1479 | 2900 | } |
tonyp@1479 | 2901 | |
tonyp@1479 | 2902 | if (strlen(G1PrintReachableBaseFile) + 1 + strlen(str) > |
tonyp@1479 | 2903 | (JVM_MAXPATHLEN - 1)) { |
tonyp@1479 | 2904 | gclog_or_tty->print_cr(" #### error: file name too long"); |
tonyp@1479 | 2905 | return; |
tonyp@1479 | 2906 | } |
tonyp@1479 | 2907 | |
tonyp@1479 | 2908 | char file_name[JVM_MAXPATHLEN]; |
tonyp@1479 | 2909 | sprintf(file_name, "%s.%s", G1PrintReachableBaseFile, str); |
tonyp@1479 | 2910 | gclog_or_tty->print_cr(" dumping to file %s", file_name); |
tonyp@1479 | 2911 | |
tonyp@1479 | 2912 | fileStream fout(file_name); |
tonyp@1479 | 2913 | if (!fout.is_open()) { |
tonyp@1479 | 2914 | gclog_or_tty->print_cr(" #### error: could not open file"); |
tonyp@1479 | 2915 | return; |
tonyp@1479 | 2916 | } |
tonyp@1479 | 2917 | |
tonyp@1479 | 2918 | outputStream* out = &fout; |
tonyp@3957 | 2919 | out->print_cr("-- USING %s", _g1h->top_at_mark_start_str(vo)); |
tonyp@1479 | 2920 | out->cr(); |
tonyp@1479 | 2921 | |
tonyp@1823 | 2922 | out->print_cr("--- ITERATING OVER REGIONS"); |
tonyp@1479 | 2923 | out->cr(); |
johnc@2969 | 2924 | PrintReachableRegionClosure rcl(out, vo, all); |
ysr@777 | 2925 | _g1h->heap_region_iterate(&rcl); |
tonyp@1479 | 2926 | out->cr(); |
tonyp@1479 | 2927 | |
tonyp@1479 | 2928 | gclog_or_tty->print_cr(" done"); |
tonyp@1823 | 2929 | gclog_or_tty->flush(); |
ysr@777 | 2930 | } |
ysr@777 | 2931 | |
tonyp@1479 | 2932 | #endif // PRODUCT |
tonyp@1479 | 2933 | |
tonyp@3416 | 2934 | void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) { |
ysr@777 | 2935 | // Note we are overriding the read-only view of the prev map here, via |
ysr@777 | 2936 | // the cast. |
ysr@777 | 2937 | ((CMBitMap*)_prevMarkBitMap)->clearRange(mr); |
tonyp@3416 | 2938 | } |
tonyp@3416 | 2939 | |
tonyp@3416 | 2940 | void ConcurrentMark::clearRangeNextBitmap(MemRegion mr) { |
ysr@777 | 2941 | _nextMarkBitMap->clearRange(mr); |
ysr@777 | 2942 | } |
ysr@777 | 2943 | |
tonyp@3416 | 2944 | void ConcurrentMark::clearRangeBothBitmaps(MemRegion mr) { |
tonyp@3416 | 2945 | clearRangePrevBitmap(mr); |
tonyp@3416 | 2946 | clearRangeNextBitmap(mr); |
tonyp@3416 | 2947 | } |
tonyp@3416 | 2948 | |
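// Claims the next region to be scanned by a marking worker by atomically
// bumping the global finger from the start of that region to its end.
// Returns NULL either when the claimed region turns out to be empty (the
// caller is expected to retry) or when the finger has reached the end of
// the heap.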
ysr@777 | 2949 | HeapRegion* |
johnc@4173 | 2950 | ConcurrentMark::claim_region(uint worker_id) { |
ysr@777 | 2951 | // "checkpoint" the finger |
ysr@777 | 2952 | HeapWord* finger = _finger; |
ysr@777 | 2953 | |
ysr@777 | 2954 | // _heap_end will not change underneath our feet; it only changes at |
ysr@777 | 2955 | // yield points. |
ysr@777 | 2956 | while (finger < _heap_end) { |
tonyp@1458 | 2957 | assert(_g1h->is_in_g1_reserved(finger), "invariant"); |
ysr@777 | 2958 | |
tonyp@2968 | 2959 | // Note on how this code handles humongous regions. In the |
tonyp@2968 | 2960 | // normal case the finger will reach the start of a "starts |
tonyp@2968 | 2961 | // humongous" (SH) region. Its end will either be the end of the |
tonyp@2968 | 2962 | // last "continues humongous" (CH) region in the sequence, or the |
tonyp@2968 | 2963 | // standard end of the SH region (if the SH is the only region in |
tonyp@2968 | 2964 | // the sequence). That way claim_region() will skip over the CH |
tonyp@2968 | 2965 | // regions. However, there is a subtle race between a CM thread |
tonyp@2968 | 2966 | // executing this method and a mutator thread doing a humongous |
tonyp@2968 | 2967 | // object allocation. The two are not mutually exclusive as the CM |
tonyp@2968 | 2968 | // thread does not need to hold the Heap_lock when it gets |
tonyp@2968 | 2969 | // here. So there is a chance that claim_region() will come across |
tonyp@2968 | 2970 | // a free region that's in the progress of becoming a SH or a CH |
tonyp@2968 | 2971 | // region. In the former case, it will either |
tonyp@2968 | 2972 | // a) Miss the update to the region's end, in which case it will |
tonyp@2968 | 2973 | // visit every subsequent CH region, will find their bitmaps |
tonyp@2968 | 2974 | // empty, and do nothing, or |
tonyp@2968 | 2975 | // b) Will observe the update of the region's end (in which case |
tonyp@2968 | 2976 | // it will skip the subsequent CH regions). |
tonyp@2968 | 2977 | // If it comes across a region that suddenly becomes CH, the |
tonyp@2968 | 2978 | // scenario will be similar to b). So, the race between |
tonyp@2968 | 2979 | // claim_region() and a humongous object allocation might force us |
tonyp@2968 | 2980 | // to do a bit of unnecessary work (due to some unnecessary bitmap |
tonyp@2968 | 2981 | // iterations) but it should not introduce any correctness issues.
tonyp@2968 | 2982 | HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger); |
ysr@777 | 2983 | HeapWord* bottom = curr_region->bottom(); |
ysr@777 | 2984 | HeapWord* end = curr_region->end(); |
ysr@777 | 2985 | HeapWord* limit = curr_region->next_top_at_mark_start(); |
ysr@777 | 2986 | |
tonyp@2968 | 2987 | if (verbose_low()) { |
johnc@4173 | 2988 | gclog_or_tty->print_cr("[%u] curr_region = "PTR_FORMAT" " |
ysr@777 | 2989 | "["PTR_FORMAT", "PTR_FORMAT"), " |
ysr@777 | 2990 | "limit = "PTR_FORMAT, |
drchase@6680 | 2991 | worker_id, p2i(curr_region), p2i(bottom), p2i(end), p2i(limit)); |
tonyp@2968 | 2992 | } |
tonyp@2968 | 2993 | |
tonyp@2968 | 2994 | // Is the gap between reading the finger and doing the CAS too long? |
tonyp@2968 | 2995 | HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger); |
ysr@777 | 2996 | if (res == finger) { |
ysr@777 | 2997 | // we succeeded |
ysr@777 | 2998 | |
ysr@777 | 2999 | // notice that _finger == end cannot be guaranteed here since
ysr@777 | 3000 | // someone else might have moved the finger even further |
tonyp@1458 | 3001 | assert(_finger >= end, "the finger should have moved forward"); |
ysr@777 | 3002 | |
tonyp@2973 | 3003 | if (verbose_low()) { |
johnc@4173 | 3004 | gclog_or_tty->print_cr("[%u] we were successful with region = " |
drchase@6680 | 3005 | PTR_FORMAT, worker_id, p2i(curr_region)); |
tonyp@2973 | 3006 | } |
ysr@777 | 3007 | |
ysr@777 | 3008 | if (limit > bottom) { |
tonyp@2973 | 3009 | if (verbose_low()) { |
johnc@4173 | 3010 | gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is not empty, " |
drchase@6680 | 3011 | "returning it ", worker_id, p2i(curr_region)); |
tonyp@2973 | 3012 | } |
ysr@777 | 3013 | return curr_region; |
ysr@777 | 3014 | } else { |
tonyp@1458 | 3015 | assert(limit == bottom, |
tonyp@1458 | 3016 | "the region limit should be at bottom"); |
tonyp@2973 | 3017 | if (verbose_low()) { |
johnc@4173 | 3018 | gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is empty, " |
drchase@6680 | 3019 | "returning NULL", worker_id, p2i(curr_region)); |
tonyp@2973 | 3020 | } |
ysr@777 | 3021 | // we return NULL and the caller should try calling |
ysr@777 | 3022 | // claim_region() again. |
ysr@777 | 3023 | return NULL; |
ysr@777 | 3024 | } |
ysr@777 | 3025 | } else { |
tonyp@1458 | 3026 | assert(_finger > finger, "the finger should have moved forward"); |
tonyp@2973 | 3027 | if (verbose_low()) { |
johnc@4173 | 3028 | gclog_or_tty->print_cr("[%u] somebody else moved the finger, " |
ysr@777 | 3029 | "global finger = "PTR_FORMAT", " |
ysr@777 | 3030 | "our finger = "PTR_FORMAT, |
drchase@6680 | 3031 | worker_id, p2i(_finger), p2i(finger)); |
tonyp@2973 | 3032 | } |
ysr@777 | 3033 | |
ysr@777 | 3034 | // read it again |
ysr@777 | 3035 | finger = _finger; |
ysr@777 | 3036 | } |
ysr@777 | 3037 | } |
ysr@777 | 3038 | |
ysr@777 | 3039 | return NULL; |
ysr@777 | 3040 | } |
ysr@777 | 3041 | |
tonyp@3416 | 3042 | #ifndef PRODUCT |
tonyp@3416 | 3043 | enum VerifyNoCSetOopsPhase { |
tonyp@3416 | 3044 | VerifyNoCSetOopsStack, |
tonyp@3416 | 3045 | VerifyNoCSetOopsQueues, |
tonyp@3416 | 3046 | VerifyNoCSetOopsSATBCompleted, |
tonyp@3416 | 3047 | VerifyNoCSetOopsSATBThread |
tonyp@3416 | 3048 | }; |
tonyp@3416 | 3049 | |
tonyp@3416 | 3050 | class VerifyNoCSetOopsClosure : public OopClosure, public ObjectClosure { |
tonyp@3416 | 3051 | private: |
tonyp@3416 | 3052 | G1CollectedHeap* _g1h; |
tonyp@3416 | 3053 | VerifyNoCSetOopsPhase _phase; |
tonyp@3416 | 3054 | int _info; |
tonyp@3416 | 3055 | |
tonyp@3416 | 3056 | const char* phase_str() { |
tonyp@3416 | 3057 | switch (_phase) { |
tonyp@3416 | 3058 | case VerifyNoCSetOopsStack: return "Stack"; |
tonyp@3416 | 3059 | case VerifyNoCSetOopsQueues: return "Queue"; |
tonyp@3416 | 3060 | case VerifyNoCSetOopsSATBCompleted: return "Completed SATB Buffers"; |
tonyp@3416 | 3061 | case VerifyNoCSetOopsSATBThread: return "Thread SATB Buffers"; |
tonyp@3416 | 3062 | default: ShouldNotReachHere(); |
tonyp@3416 | 3063 | } |
tonyp@3416 | 3064 | return NULL; |
ysr@777 | 3065 | } |
johnc@2190 | 3066 | |
tonyp@3416 | 3067 | void do_object_work(oop obj) { |
tonyp@3416 | 3068 | guarantee(!_g1h->obj_in_cs(obj), |
tonyp@3416 | 3069 | err_msg("obj: "PTR_FORMAT" in CSet, phase: %s, info: %d", |
drchase@6680 | 3070 | p2i((void*) obj), phase_str(), _info)); |
johnc@2190 | 3071 | } |
johnc@2190 | 3072 | |
tonyp@3416 | 3073 | public: |
tonyp@3416 | 3074 | VerifyNoCSetOopsClosure() : _g1h(G1CollectedHeap::heap()) { } |
tonyp@3416 | 3075 | |
tonyp@3416 | 3076 | void set_phase(VerifyNoCSetOopsPhase phase, int info = -1) { |
tonyp@3416 | 3077 | _phase = phase; |
tonyp@3416 | 3078 | _info = info; |
tonyp@3416 | 3079 | } |
tonyp@3416 | 3080 | |
tonyp@3416 | 3081 | virtual void do_oop(oop* p) { |
tonyp@3416 | 3082 | oop obj = oopDesc::load_decode_heap_oop(p); |
tonyp@3416 | 3083 | do_object_work(obj); |
tonyp@3416 | 3084 | } |
tonyp@3416 | 3085 | |
tonyp@3416 | 3086 | virtual void do_oop(narrowOop* p) { |
tonyp@3416 | 3087 | // We should not come across narrow oops while scanning marking |
tonyp@3416 | 3088 | // stacks and SATB buffers. |
tonyp@3416 | 3089 | ShouldNotReachHere(); |
tonyp@3416 | 3090 | } |
tonyp@3416 | 3091 | |
tonyp@3416 | 3092 | virtual void do_object(oop obj) { |
tonyp@3416 | 3093 | do_object_work(obj); |
tonyp@3416 | 3094 | } |
tonyp@3416 | 3095 | }; |
tonyp@3416 | 3096 | |
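// Verifies, at a safepoint while marking is in progress, that no entry on
// the global mark stack, the per-task queues, or the SATB buffers refers
// to an object in the collection set, and that the global and per-task
// fingers point at region boundaries rather than into collection set
// regions. Each group of checks can be enabled via the boolean parameters.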
tonyp@3416 | 3097 | void ConcurrentMark::verify_no_cset_oops(bool verify_stacks, |
tonyp@3416 | 3098 | bool verify_enqueued_buffers, |
tonyp@3416 | 3099 | bool verify_thread_buffers, |
tonyp@3416 | 3100 | bool verify_fingers) { |
tonyp@3416 | 3101 | assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint"); |
tonyp@3416 | 3102 | if (!G1CollectedHeap::heap()->mark_in_progress()) { |
tonyp@3416 | 3103 | return; |
tonyp@3416 | 3104 | } |
tonyp@3416 | 3105 | |
tonyp@3416 | 3106 | VerifyNoCSetOopsClosure cl; |
tonyp@3416 | 3107 | |
tonyp@3416 | 3108 | if (verify_stacks) { |
tonyp@3416 | 3109 | // Verify entries on the global mark stack |
tonyp@3416 | 3110 | cl.set_phase(VerifyNoCSetOopsStack); |
tonyp@3416 | 3111 | _markStack.oops_do(&cl); |
tonyp@3416 | 3112 | |
tonyp@3416 | 3113 | // Verify entries on the task queues |
johnc@4173 | 3114 | for (uint i = 0; i < _max_worker_id; i += 1) { |
tonyp@3416 | 3115 | cl.set_phase(VerifyNoCSetOopsQueues, i); |
johnc@4333 | 3116 | CMTaskQueue* queue = _task_queues->queue(i); |
tonyp@3416 | 3117 | queue->oops_do(&cl); |
tonyp@3416 | 3118 | } |
tonyp@3416 | 3119 | } |
tonyp@3416 | 3120 | |
tonyp@3416 | 3121 | SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set(); |
tonyp@3416 | 3122 | |
tonyp@3416 | 3123 | // Verify entries on the enqueued SATB buffers |
tonyp@3416 | 3124 | if (verify_enqueued_buffers) { |
tonyp@3416 | 3125 | cl.set_phase(VerifyNoCSetOopsSATBCompleted); |
tonyp@3416 | 3126 | satb_qs.iterate_completed_buffers_read_only(&cl); |
tonyp@3416 | 3127 | } |
tonyp@3416 | 3128 | |
tonyp@3416 | 3129 | // Verify entries on the per-thread SATB buffers |
tonyp@3416 | 3130 | if (verify_thread_buffers) { |
tonyp@3416 | 3131 | cl.set_phase(VerifyNoCSetOopsSATBThread); |
tonyp@3416 | 3132 | satb_qs.iterate_thread_buffers_read_only(&cl); |
tonyp@3416 | 3133 | } |
tonyp@3416 | 3134 | |
tonyp@3416 | 3135 | if (verify_fingers) { |
tonyp@3416 | 3136 | // Verify the global finger |
tonyp@3416 | 3137 | HeapWord* global_finger = finger(); |
tonyp@3416 | 3138 | if (global_finger != NULL && global_finger < _heap_end) { |
tonyp@3416 | 3139 | // The global finger always points to a heap region boundary. We |
tonyp@3416 | 3140 | // use heap_region_containing_raw() to get the containing region |
tonyp@3416 | 3141 | // given that the global finger could be pointing to a free region |
tonyp@3416 | 3142 | // which subsequently becomes a continues humongous region. If that
tonyp@3416 | 3143 | // happens, heap_region_containing() will return the bottom of the |
tonyp@3416 | 3144 | // corresponding starts humongous region and the check below will |
tonyp@3416 | 3145 | // not hold any more. |
tonyp@3416 | 3146 | HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger); |
tonyp@3416 | 3147 | guarantee(global_finger == global_hr->bottom(), |
tonyp@3416 | 3148 | err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT, |
drchase@6680 | 3149 | p2i(global_finger), HR_FORMAT_PARAMS(global_hr))); |
tonyp@3416 | 3150 | } |
tonyp@3416 | 3151 | |
tonyp@3416 | 3152 | // Verify the task fingers |
johnc@4173 | 3153 | assert(parallel_marking_threads() <= _max_worker_id, "sanity"); |
tonyp@3416 | 3154 | for (int i = 0; i < (int) parallel_marking_threads(); i += 1) { |
tonyp@3416 | 3155 | CMTask* task = _tasks[i]; |
tonyp@3416 | 3156 | HeapWord* task_finger = task->finger(); |
tonyp@3416 | 3157 | if (task_finger != NULL && task_finger < _heap_end) { |
tonyp@3416 | 3158 | // See above note on the global finger verification. |
tonyp@3416 | 3159 | HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger); |
tonyp@3416 | 3160 | guarantee(task_finger == task_hr->bottom() || |
tonyp@3416 | 3161 | !task_hr->in_collection_set(), |
tonyp@3416 | 3162 | err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT, |
drchase@6680 | 3163 | p2i(task_finger), HR_FORMAT_PARAMS(task_hr))); |
tonyp@3416 | 3164 | } |
tonyp@3416 | 3165 | } |
tonyp@3416 | 3166 | } |
ysr@777 | 3167 | } |
tonyp@3416 | 3168 | #endif // PRODUCT |
ysr@777 | 3169 | |
johnc@3463 | 3170 | // Aggregate the counting data that was constructed concurrently |
johnc@3463 | 3171 | // with marking. |
johnc@3463 | 3172 | class AggregateCountDataHRClosure: public HeapRegionClosure { |
johnc@4123 | 3173 | G1CollectedHeap* _g1h; |
johnc@3463 | 3174 | ConcurrentMark* _cm; |
johnc@4123 | 3175 | CardTableModRefBS* _ct_bs; |
johnc@3463 | 3176 | BitMap* _cm_card_bm; |
johnc@4173 | 3177 | uint _max_worker_id; |
johnc@3463 | 3178 | |
johnc@3463 | 3179 | public: |
johnc@4123 | 3180 | AggregateCountDataHRClosure(G1CollectedHeap* g1h, |
johnc@3463 | 3181 | BitMap* cm_card_bm, |
johnc@4173 | 3182 | uint max_worker_id) : |
johnc@4123 | 3183 | _g1h(g1h), _cm(g1h->concurrent_mark()), |
johnc@4123 | 3184 | _ct_bs((CardTableModRefBS*) (g1h->barrier_set())), |
johnc@4173 | 3185 | _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { } |
johnc@3463 | 3186 | |
johnc@3463 | 3187 | bool doHeapRegion(HeapRegion* hr) { |
johnc@3463 | 3188 | if (hr->continuesHumongous()) { |
johnc@3463 | 3189 | // We will ignore these here and process them when their |
johnc@3463 | 3190 | // associated "starts humongous" region is processed. |
johnc@3463 | 3191 | // Note that we cannot rely on their associated |
johnc@3463 | 3192 | // "starts humongous" region to have their bit set to 1 |
johnc@3463 | 3193 | // since, due to the region chunking in the parallel region |
johnc@3463 | 3194 | // iteration, a "continues humongous" region might be visited |
johnc@3463 | 3195 | // before its associated "starts humongous". |
johnc@3463 | 3196 | return false; |
johnc@3463 | 3197 | } |
johnc@3463 | 3198 | |
johnc@3463 | 3199 | HeapWord* start = hr->bottom(); |
johnc@3463 | 3200 | HeapWord* limit = hr->next_top_at_mark_start(); |
johnc@3463 | 3201 | HeapWord* end = hr->end(); |
johnc@3463 | 3202 | |
johnc@3463 | 3203 | assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(), |
johnc@3463 | 3204 | err_msg("Preconditions not met - " |
johnc@3463 | 3205 | "start: "PTR_FORMAT", limit: "PTR_FORMAT", " |
johnc@3463 | 3206 | "top: "PTR_FORMAT", end: "PTR_FORMAT, |
drchase@6680 | 3207 | p2i(start), p2i(limit), p2i(hr->top()), p2i(hr->end()))); |
johnc@3463 | 3208 | |
johnc@3463 | 3209 | assert(hr->next_marked_bytes() == 0, "Precondition"); |
johnc@3463 | 3210 | |
johnc@3463 | 3211 | if (start == limit) { |
johnc@3463 | 3212 | // NTAMS of this region has not been set so nothing to do. |
johnc@3463 | 3213 | return false; |
johnc@3463 | 3214 | } |
johnc@3463 | 3215 | |
johnc@4123 | 3216 | // 'start' should be in the heap. |
johnc@4123 | 3217 | assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity"); |
johnc@4123 | 3218 | // 'end' *may* be just beyond the end of the heap (if hr is the last region)
johnc@4123 | 3219 | assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity"); |
johnc@3463 | 3220 | |
johnc@3463 | 3221 | BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start); |
johnc@3463 | 3222 | BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit); |
johnc@3463 | 3223 | BitMap::idx_t end_idx = _cm->card_bitmap_index_for(end); |
johnc@3463 | 3224 | |
johnc@4123 | 3225 | // If ntams is not card aligned then we bump card bitmap index |
johnc@4123 | 3226 | // for limit so that we get all the cards spanned by
johnc@4123 | 3227 | // the object ending at ntams. |
johnc@4123 | 3228 | // Note: if this is the last region in the heap then ntams |
johnc@4123 | 3229 | // could actually be just beyond the end of the heap;
johnc@4123 | 3230 | // limit_idx will then correspond to a (non-existent) card |
johnc@4123 | 3231 | // that is also outside the heap. |
johnc@4123 | 3232 | if (_g1h->is_in_g1_reserved(limit) && !_ct_bs->is_card_aligned(limit)) { |
johnc@3463 | 3233 | limit_idx += 1; |
johnc@3463 | 3234 | } |
johnc@3463 | 3235 | |
johnc@3463 | 3236 | assert(limit_idx <= end_idx, "or else use atomics"); |
johnc@3463 | 3237 | |
johnc@3463 | 3238 | // Aggregate the "stripe" in the count data associated with hr. |
tonyp@3713 | 3239 | uint hrs_index = hr->hrs_index(); |
johnc@3463 | 3240 | size_t marked_bytes = 0; |
johnc@3463 | 3241 | |
johnc@4173 | 3242 | for (uint i = 0; i < _max_worker_id; i += 1) { |
johnc@3463 | 3243 | size_t* marked_bytes_array = _cm->count_marked_bytes_array_for(i); |
johnc@3463 | 3244 | BitMap* task_card_bm = _cm->count_card_bitmap_for(i); |
johnc@3463 | 3245 | |
johnc@3463 | 3246 | // Fetch the marked_bytes in this region for task i and |
johnc@3463 | 3247 | // add it to the running total for this region. |
johnc@3463 | 3248 | marked_bytes += marked_bytes_array[hrs_index]; |
johnc@3463 | 3249 | |
johnc@4173 | 3250 | // Now union the bitmaps[0,max_worker_id)[start_idx..limit_idx) |
johnc@3463 | 3251 | // into the global card bitmap. |
johnc@3463 | 3252 | BitMap::idx_t scan_idx = task_card_bm->get_next_one_offset(start_idx, limit_idx); |
johnc@3463 | 3253 | |
johnc@3463 | 3254 | while (scan_idx < limit_idx) { |
johnc@3463 | 3255 | assert(task_card_bm->at(scan_idx) == true, "should be"); |
johnc@3463 | 3256 | _cm_card_bm->set_bit(scan_idx); |
johnc@3463 | 3257 | assert(_cm_card_bm->at(scan_idx) == true, "should be"); |
johnc@3463 | 3258 | |
johnc@3463 | 3259 | // BitMap::get_next_one_offset() can handle the case when |
johnc@3463 | 3260 | // its left_offset parameter is greater than its right_offset |
johnc@4123 | 3261 | // parameter. It does, however, have an early exit if |
johnc@3463 | 3262 | // left_offset == right_offset. So let's limit the value |
johnc@3463 | 3263 | // passed in for left offset here. |
johnc@3463 | 3264 | BitMap::idx_t next_idx = MIN2(scan_idx + 1, limit_idx); |
johnc@3463 | 3265 | scan_idx = task_card_bm->get_next_one_offset(next_idx, limit_idx); |
johnc@3463 | 3266 | } |
johnc@3463 | 3267 | } |
johnc@3463 | 3268 | |
johnc@3463 | 3269 | // Update the marked bytes for this region. |
johnc@3463 | 3270 | hr->add_to_marked_bytes(marked_bytes); |
johnc@3463 | 3271 | |
johnc@3463 | 3272 | // Next heap region |
johnc@3463 | 3273 | return false; |
johnc@3463 | 3274 | } |
johnc@3463 | 3275 | }; |
johnc@3463 | 3276 | |
johnc@3463 | 3277 | class G1AggregateCountDataTask: public AbstractGangTask { |
johnc@3463 | 3278 | protected: |
johnc@3463 | 3279 | G1CollectedHeap* _g1h; |
johnc@3463 | 3280 | ConcurrentMark* _cm; |
johnc@3463 | 3281 | BitMap* _cm_card_bm; |
johnc@4173 | 3282 | uint _max_worker_id; |
johnc@3463 | 3283 | int _active_workers; |
johnc@3463 | 3284 | |
johnc@3463 | 3285 | public: |
johnc@3463 | 3286 | G1AggregateCountDataTask(G1CollectedHeap* g1h, |
johnc@3463 | 3287 | ConcurrentMark* cm, |
johnc@3463 | 3288 | BitMap* cm_card_bm, |
johnc@4173 | 3289 | uint max_worker_id, |
johnc@3463 | 3290 | int n_workers) : |
johnc@3463 | 3291 | AbstractGangTask("Count Aggregation"), |
johnc@3463 | 3292 | _g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm), |
johnc@4173 | 3293 | _max_worker_id(max_worker_id), |
johnc@3463 | 3294 | _active_workers(n_workers) { } |
johnc@3463 | 3295 | |
johnc@3463 | 3296 | void work(uint worker_id) { |
johnc@4173 | 3297 | AggregateCountDataHRClosure cl(_g1h, _cm_card_bm, _max_worker_id); |
johnc@3463 | 3298 | |
johnc@3463 | 3299 | if (G1CollectedHeap::use_parallel_gc_threads()) { |
johnc@3463 | 3300 | _g1h->heap_region_par_iterate_chunked(&cl, worker_id, |
johnc@3463 | 3301 | _active_workers, |
johnc@3463 | 3302 | HeapRegion::AggregateCountClaimValue); |
johnc@3463 | 3303 | } else { |
johnc@3463 | 3304 | _g1h->heap_region_iterate(&cl); |
johnc@3463 | 3305 | } |
johnc@3463 | 3306 | } |
johnc@3463 | 3307 | }; |
johnc@3463 | 3308 | |
johnc@3463 | 3309 | |
johnc@3463 | 3310 | void ConcurrentMark::aggregate_count_data() { |
johnc@3463 | 3311 | int n_workers = (G1CollectedHeap::use_parallel_gc_threads() ? |
johnc@3463 | 3312 | _g1h->workers()->active_workers() : |
johnc@3463 | 3313 | 1); |
johnc@3463 | 3314 | |
johnc@3463 | 3315 | G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm, |
johnc@4173 | 3316 | _max_worker_id, n_workers); |
johnc@3463 | 3317 | |
johnc@3463 | 3318 | if (G1CollectedHeap::use_parallel_gc_threads()) { |
johnc@3463 | 3319 | assert(_g1h->check_heap_region_claim_values(HeapRegion::InitialClaimValue), |
johnc@3463 | 3320 | "sanity check"); |
johnc@3463 | 3321 | _g1h->set_par_threads(n_workers); |
johnc@3463 | 3322 | _g1h->workers()->run_task(&g1_par_agg_task); |
johnc@3463 | 3323 | _g1h->set_par_threads(0); |
johnc@3463 | 3324 | |
johnc@3463 | 3325 | assert(_g1h->check_heap_region_claim_values(HeapRegion::AggregateCountClaimValue), |
johnc@3463 | 3326 | "sanity check"); |
johnc@3463 | 3327 | _g1h->reset_heap_region_claim_values(); |
johnc@3463 | 3328 | } else { |
johnc@3463 | 3329 | g1_par_agg_task.work(0); |
johnc@3463 | 3330 | } |
johnc@3463 | 3331 | } |
johnc@3463 | 3332 | |
johnc@3463 | 3333 | // Clear the per-worker arrays used to store the per-region counting data |
johnc@3463 | 3334 | void ConcurrentMark::clear_all_count_data() { |
johnc@3463 | 3335 | // Clear the global card bitmap - it will be filled during |
johnc@3463 | 3336 | // liveness count aggregation (during remark) and the |
johnc@3463 | 3337 | // final counting task. |
johnc@3463 | 3338 | _card_bm.clear(); |
johnc@3463 | 3339 | |
johnc@3463 | 3340 | // Clear the global region bitmap - it will be filled as part |
johnc@3463 | 3341 | // of the final counting task. |
johnc@3463 | 3342 | _region_bm.clear(); |
johnc@3463 | 3343 | |
tonyp@3713 | 3344 | uint max_regions = _g1h->max_regions(); |
johnc@4173 | 3345 | assert(_max_worker_id > 0, "uninitialized"); |
johnc@4173 | 3346 | |
johnc@4173 | 3347 | for (uint i = 0; i < _max_worker_id; i += 1) { |
johnc@3463 | 3348 | BitMap* task_card_bm = count_card_bitmap_for(i); |
johnc@3463 | 3349 | size_t* marked_bytes_array = count_marked_bytes_array_for(i); |
johnc@3463 | 3350 | |
johnc@3463 | 3351 | assert(task_card_bm->size() == _card_bm.size(), "size mismatch"); |
johnc@3463 | 3352 | assert(marked_bytes_array != NULL, "uninitialized"); |
johnc@3463 | 3353 | |
tonyp@3713 | 3354 | memset(marked_bytes_array, 0, (size_t) max_regions * sizeof(size_t)); |
johnc@3463 | 3355 | task_card_bm->clear(); |
johnc@3463 | 3356 | } |
johnc@3463 | 3357 | } |
johnc@3463 | 3358 | |
ysr@777 | 3359 | void ConcurrentMark::print_stats() { |
ysr@777 | 3360 | if (verbose_stats()) { |
ysr@777 | 3361 | gclog_or_tty->print_cr("---------------------------------------------------------------------"); |
ysr@777 | 3362 | for (size_t i = 0; i < _active_tasks; ++i) { |
ysr@777 | 3363 | _tasks[i]->print_stats(); |
ysr@777 | 3364 | gclog_or_tty->print_cr("---------------------------------------------------------------------"); |
ysr@777 | 3365 | } |
ysr@777 | 3366 | } |
ysr@777 | 3367 | } |
ysr@777 | 3368 | |
ysr@777 | 3369 | // abandon current marking iteration due to a Full GC |
ysr@777 | 3370 | void ConcurrentMark::abort() { |
tschatzl@7007 | 3371 | // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next |
tschatzl@7007 | 3372 | // concurrent bitmap clearing. |
ysr@777 | 3373 | _nextMarkBitMap->clearAll(); |
brutisso@7005 | 3374 | |
brutisso@7005 | 3375 | // Note we cannot clear the previous marking bitmap here |
brutisso@7005 | 3376 | // since VerifyDuringGC verifies the objects marked during |
brutisso@7005 | 3377 | // a full GC against the previous bitmap. |
brutisso@7005 | 3378 | |
johnc@3463 | 3379 | // Clear the liveness counting data |
johnc@3463 | 3380 | clear_all_count_data(); |
ysr@777 | 3381 | // Empty mark stack |
johnc@4386 | 3382 | reset_marking_state(); |
johnc@4173 | 3383 | for (uint i = 0; i < _max_worker_id; ++i) { |
ysr@777 | 3384 | _tasks[i]->clear_region_fields(); |
johnc@2190 | 3385 | } |
pliden@6692 | 3386 | _first_overflow_barrier_sync.abort(); |
pliden@6692 | 3387 | _second_overflow_barrier_sync.abort(); |
brutisso@6904 | 3388 | const GCId& gc_id = _g1h->gc_tracer_cm()->gc_id(); |
brutisso@6904 | 3389 | if (!gc_id.is_undefined()) { |
brutisso@6904 | 3390 | // We can do multiple full GCs before ConcurrentMarkThread::run() gets a chance |
brutisso@6904 | 3391 | // to detect that it was aborted. Only keep track of the first GC id that we aborted. |
brutisso@6904 | 3392 | _aborted_gc_id = gc_id; |
brutisso@6904 | 3393 | } |
ysr@777 | 3394 | _has_aborted = true; |
ysr@777 | 3395 | |
ysr@777 | 3396 | SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); |
ysr@777 | 3397 | satb_mq_set.abandon_partial_marking(); |
tonyp@1752 | 3398 | // This can be called either during or outside marking, we'll read |
tonyp@1752 | 3399 | // the expected_active value from the SATB queue set. |
tonyp@1752 | 3400 | satb_mq_set.set_active_all_threads( |
tonyp@1752 | 3401 | false, /* new active value */ |
tonyp@1752 | 3402 | satb_mq_set.is_active() /* expected_active */); |
sla@5237 | 3403 | |
sla@5237 | 3404 | _g1h->trace_heap_after_concurrent_cycle(); |
sla@5237 | 3405 | _g1h->register_concurrent_cycle_end(); |
ysr@777 | 3406 | } |
ysr@777 | 3407 | |
brutisso@6904 | 3408 | const GCId& ConcurrentMark::concurrent_gc_id() { |
brutisso@6904 | 3409 | if (has_aborted()) { |
brutisso@6904 | 3410 | return _aborted_gc_id; |
brutisso@6904 | 3411 | } |
brutisso@6904 | 3412 | return _g1h->gc_tracer_cm()->gc_id(); |
brutisso@6904 | 3413 | } |
brutisso@6904 | 3414 | |
ysr@777 | 3415 | static void print_ms_time_info(const char* prefix, const char* name, |
ysr@777 | 3416 | NumberSeq& ns) { |
ysr@777 | 3417 | gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).", |
ysr@777 | 3418 | prefix, ns.num(), name, ns.sum()/1000.0, ns.avg()); |
ysr@777 | 3419 | if (ns.num() > 0) { |
ysr@777 | 3420 | gclog_or_tty->print_cr("%s [std. dev = %8.2f ms, max = %8.2f ms]", |
ysr@777 | 3421 | prefix, ns.sd(), ns.maximum()); |
ysr@777 | 3422 | } |
ysr@777 | 3423 | } |
ysr@777 | 3424 | |
ysr@777 | 3425 | void ConcurrentMark::print_summary_info() { |
ysr@777 | 3426 | gclog_or_tty->print_cr(" Concurrent marking:"); |
ysr@777 | 3427 | print_ms_time_info(" ", "init marks", _init_times); |
ysr@777 | 3428 | print_ms_time_info(" ", "remarks", _remark_times); |
ysr@777 | 3429 | { |
ysr@777 | 3430 | print_ms_time_info(" ", "final marks", _remark_mark_times); |
ysr@777 | 3431 | print_ms_time_info(" ", "weak refs", _remark_weak_ref_times); |
ysr@777 | 3432 | |
ysr@777 | 3433 | } |
ysr@777 | 3434 | print_ms_time_info(" ", "cleanups", _cleanup_times); |
ysr@777 | 3435 | gclog_or_tty->print_cr(" Final counting total time = %8.2f s (avg = %8.2f ms).", |
ysr@777 | 3436 | _total_counting_time, |
ysr@777 | 3437 | (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 / |
ysr@777 | 3438 | (double)_cleanup_times.num() |
ysr@777 | 3439 | : 0.0)); |
ysr@777 | 3440 | if (G1ScrubRemSets) { |
ysr@777 | 3441 | gclog_or_tty->print_cr(" RS scrub total time = %8.2f s (avg = %8.2f ms).", |
ysr@777 | 3442 | _total_rs_scrub_time, |
ysr@777 | 3443 | (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 / |
ysr@777 | 3444 | (double)_cleanup_times.num() |
ysr@777 | 3445 | : 0.0)); |
ysr@777 | 3446 | } |
ysr@777 | 3447 | gclog_or_tty->print_cr(" Total stop_world time = %8.2f s.", |
ysr@777 | 3448 | (_init_times.sum() + _remark_times.sum() + |
ysr@777 | 3449 | _cleanup_times.sum())/1000.0); |
ysr@777 | 3450 | gclog_or_tty->print_cr(" Total concurrent time = %8.2f s " |
johnc@3463 | 3451 | "(%8.2f s marking).", |
ysr@777 | 3452 | cmThread()->vtime_accum(), |
johnc@3463 | 3453 | cmThread()->vtime_mark_accum()); |
ysr@777 | 3454 | } |
ysr@777 | 3455 | |
tonyp@1454 | 3456 | void ConcurrentMark::print_worker_threads_on(outputStream* st) const { |
johnc@4549 | 3457 | if (use_parallel_marking_threads()) { |
johnc@4549 | 3458 | _parallel_workers->print_worker_threads_on(st); |
johnc@4549 | 3459 | } |
tonyp@1454 | 3460 | } |
tonyp@1454 | 3461 | |
stefank@4904 | 3462 | void ConcurrentMark::print_on_error(outputStream* st) const { |
stefank@4904 | 3463 | st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT, |
drchase@6680 | 3464 | p2i(_prevMarkBitMap), p2i(_nextMarkBitMap)); |
stefank@4904 | 3465 | _prevMarkBitMap->print_on_error(st, " Prev Bits: "); |
stefank@4904 | 3466 | _nextMarkBitMap->print_on_error(st, " Next Bits: "); |
stefank@4904 | 3467 | } |
stefank@4904 | 3468 | |
ysr@777 | 3469 | // We take a break if someone is trying to stop the world. |
jmasa@3357 | 3470 | bool ConcurrentMark::do_yield_check(uint worker_id) { |
pliden@6906 | 3471 | if (SuspendibleThreadSet::should_yield()) { |
jmasa@3357 | 3472 | if (worker_id == 0) { |
ysr@777 | 3473 | _g1h->g1_policy()->record_concurrent_pause(); |
tonyp@2973 | 3474 | } |
pliden@6906 | 3475 | SuspendibleThreadSet::yield(); |
ysr@777 | 3476 | return true; |
ysr@777 | 3477 | } else { |
ysr@777 | 3478 | return false; |
ysr@777 | 3479 | } |
ysr@777 | 3480 | } |
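// Typical use (illustrative sketch only): concurrent marking loops call
// do_yield_check() periodically so that a pending safepoint is honoured:
//
//   while (more_work && !cm->has_aborted()) {
//     do_a_chunk_of_marking_work();
//     if (cm->do_yield_check(worker_id)) {
//       // We yielded to a safepoint; the heap may have changed, so
//       // re-check abort/overflow conditions before carrying on.
//     }
//   }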
ysr@777 | 3481 | |
ysr@777 | 3482 | bool ConcurrentMark::containing_card_is_marked(void* p) { |
ysr@777 | 3483 | size_t offset = pointer_delta(p, _g1h->reserved_region().start(), 1); |
ysr@777 | 3484 | return _card_bm.at(offset >> CardTableModRefBS::card_shift); |
ysr@777 | 3485 | } |
ysr@777 | 3486 | |
ysr@777 | 3487 | bool ConcurrentMark::containing_cards_are_marked(void* start, |
ysr@777 | 3488 | void* last) { |
tonyp@2973 | 3489 | return containing_card_is_marked(start) && |
tonyp@2973 | 3490 | containing_card_is_marked(last); |
ysr@777 | 3491 | } |
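// Usage sketch (illustrative only): for any address p in the heap, the
// covering card is looked up by its byte offset from the start of the
// reserved region, shifted by CardTableModRefBS::card_shift, e.g.
//
//   HeapWord* addr = ...;  // some address within the G1 reserved region
//   bool card_live  = cm->containing_card_is_marked(addr);
//   bool range_live = cm->containing_cards_are_marked(addr, addr + 1);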
ysr@777 | 3492 | |
ysr@777 | 3493 | #ifndef PRODUCT |
ysr@777 | 3494 | // for debugging purposes |
ysr@777 | 3495 | void ConcurrentMark::print_finger() { |
ysr@777 | 3496 | gclog_or_tty->print_cr("heap ["PTR_FORMAT", "PTR_FORMAT"), global finger = "PTR_FORMAT, |
drchase@6680 | 3497 | p2i(_heap_start), p2i(_heap_end), p2i(_finger)); |
johnc@4173 | 3498 | for (uint i = 0; i < _max_worker_id; ++i) { |
drchase@6680 | 3499 | gclog_or_tty->print(" %u: " PTR_FORMAT, i, p2i(_tasks[i]->finger())); |
ysr@777 | 3500 | } |
drchase@6680 | 3501 | gclog_or_tty->cr(); |
ysr@777 | 3502 | } |
ysr@777 | 3503 | #endif |
ysr@777 | 3504 | |
tonyp@2968 | 3505 | void CMTask::scan_object(oop obj) { |
tonyp@2968 | 3506 | assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant"); |
tonyp@2968 | 3507 | |
tonyp@2968 | 3508 | if (_cm->verbose_high()) { |
johnc@4173 | 3509 | gclog_or_tty->print_cr("[%u] we're scanning object "PTR_FORMAT, |
drchase@6680 | 3510 | _worker_id, p2i((void*) obj)); |
tonyp@2968 | 3511 | } |
tonyp@2968 | 3512 | |
tonyp@2968 | 3513 | size_t obj_size = obj->size(); |
tonyp@2968 | 3514 | _words_scanned += obj_size; |
tonyp@2968 | 3515 | |
tonyp@2968 | 3516 | obj->oop_iterate(_cm_oop_closure); |
tonyp@2968 | 3517 | statsOnly( ++_objs_scanned ); |
tonyp@2968 | 3518 | check_limits(); |
tonyp@2968 | 3519 | } |
tonyp@2968 | 3520 | |
ysr@777 | 3521 | // Closure for iteration over bitmaps |
ysr@777 | 3522 | class CMBitMapClosure : public BitMapClosure { |
ysr@777 | 3523 | private: |
ysr@777 | 3524 | // the bitmap that is being iterated over |
ysr@777 | 3525 | CMBitMap* _nextMarkBitMap; |
ysr@777 | 3526 | ConcurrentMark* _cm; |
ysr@777 | 3527 | CMTask* _task; |
ysr@777 | 3528 | |
ysr@777 | 3529 | public: |
tonyp@3691 | 3530 | CMBitMapClosure(CMTask *task, ConcurrentMark* cm, CMBitMap* nextMarkBitMap) : |
tonyp@3691 | 3531 | _task(task), _cm(cm), _nextMarkBitMap(nextMarkBitMap) { } |
ysr@777 | 3532 | |
ysr@777 | 3533 | bool do_bit(size_t offset) { |
ysr@777 | 3534 | HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset); |
tonyp@1458 | 3535 | assert(_nextMarkBitMap->isMarked(addr), "invariant"); |
tonyp@1458 | 3536 | assert( addr < _cm->finger(), "invariant"); |
ysr@777 | 3537 | |
tonyp@3691 | 3538 | statsOnly( _task->increase_objs_found_on_bitmap() ); |
tonyp@3691 | 3539 | assert(addr >= _task->finger(), "invariant"); |
tonyp@3691 | 3540 | |
tonyp@3691 | 3541 | // We move that task's local finger along. |
tonyp@3691 | 3542 | _task->move_finger_to(addr); |
ysr@777 | 3543 | |
ysr@777 | 3544 | _task->scan_object(oop(addr)); |
ysr@777 | 3545 | // we only partially drain the local queue and global stack |
ysr@777 | 3546 | _task->drain_local_queue(true); |
ysr@777 | 3547 | _task->drain_global_stack(true); |
ysr@777 | 3548 | |
ysr@777 | 3549 | // if the has_aborted flag has been raised, we need to bail out of |
ysr@777 | 3550 | // the iteration |
ysr@777 | 3551 | return !_task->has_aborted(); |
ysr@777 | 3552 | } |
ysr@777 | 3553 | }; |
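// How this closure is driven (illustrative sketch): do_marking_step()
// below hands it to the next mark bitmap for the task's current region,
// roughly:
//
//   CMBitMapClosure bitmap_closure(task, cm, nextMarkBitMap);
//   MemRegion mr(task->finger(), region_limit);
//   bool completed = nextMarkBitMap->iterate(&bitmap_closure, mr);
//   // 'completed' is false iff do_bit() returned false, i.e. the task
//   // raised has_aborted() and must revisit the region later.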
ysr@777 | 3554 | |
tonyp@2968 | 3555 | G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h, |
tonyp@2968 | 3556 | ConcurrentMark* cm, |
tonyp@2968 | 3557 | CMTask* task) |
tonyp@2968 | 3558 | : _g1h(g1h), _cm(cm), _task(task) { |
tonyp@2968 | 3559 | assert(_ref_processor == NULL, "should be initialized to NULL"); |
tonyp@2968 | 3560 | |
tonyp@2968 | 3561 | if (G1UseConcMarkReferenceProcessing) { |
johnc@3175 | 3562 | _ref_processor = g1h->ref_processor_cm(); |
tonyp@2968 | 3563 | assert(_ref_processor != NULL, "should not be NULL"); |
ysr@777 | 3564 | } |
tonyp@2968 | 3565 | } |
ysr@777 | 3566 | |
ysr@777 | 3567 | void CMTask::setup_for_region(HeapRegion* hr) { |
tonyp@1458 | 3568 | // Separated the asserts so that we know which one fires. |
tonyp@1458 | 3569 | assert(hr != NULL, |
tonyp@1458 | 3570 | "claim_region() should have filtered out NULL regions");
tonyp@1458 | 3571 | assert(!hr->continuesHumongous(), |
tonyp@1458 | 3572 | "claim_region() should have filtered out continues humongous regions"); |
ysr@777 | 3573 | |
tonyp@2973 | 3574 | if (_cm->verbose_low()) { |
johnc@4173 | 3575 | gclog_or_tty->print_cr("[%u] setting up for region "PTR_FORMAT, |
drchase@6680 | 3576 | _worker_id, p2i(hr)); |
tonyp@2973 | 3577 | } |
ysr@777 | 3578 | |
ysr@777 | 3579 | _curr_region = hr; |
ysr@777 | 3580 | _finger = hr->bottom(); |
ysr@777 | 3581 | update_region_limit(); |
ysr@777 | 3582 | } |
ysr@777 | 3583 | |
ysr@777 | 3584 | void CMTask::update_region_limit() { |
ysr@777 | 3585 | HeapRegion* hr = _curr_region; |
ysr@777 | 3586 | HeapWord* bottom = hr->bottom(); |
ysr@777 | 3587 | HeapWord* limit = hr->next_top_at_mark_start(); |
ysr@777 | 3588 | |
ysr@777 | 3589 | if (limit == bottom) { |
tonyp@2973 | 3590 | if (_cm->verbose_low()) { |
johnc@4173 | 3591 | gclog_or_tty->print_cr("[%u] found an empty region " |
ysr@777 | 3592 | "["PTR_FORMAT", "PTR_FORMAT")", |
drchase@6680 | 3593 | _worker_id, p2i(bottom), p2i(limit)); |
tonyp@2973 | 3594 | } |
ysr@777 | 3595 | // The region was collected underneath our feet. |
ysr@777 | 3596 | // We set the finger to bottom to ensure that the bitmap |
ysr@777 | 3597 | // iteration that will follow this will not do anything. |
ysr@777 | 3598 | // (this is not a condition that holds when we set the region up, |
ysr@777 | 3599 | // as the region is not supposed to be empty in the first place) |
ysr@777 | 3600 | _finger = bottom; |
ysr@777 | 3601 | } else if (limit >= _region_limit) { |
tonyp@1458 | 3602 | assert(limit >= _finger, "peace of mind"); |
ysr@777 | 3603 | } else { |
tonyp@1458 | 3604 | assert(limit < _region_limit, "only way to get here"); |
ysr@777 | 3605 | // This can happen under some pretty unusual circumstances. An |
ysr@777 | 3606 | // evacuation pause empties the region underneath our feet (NTAMS |
ysr@777 | 3607 | // at bottom). We then do some allocation in the region (NTAMS |
ysr@777 | 3608 | // stays at bottom), followed by the region being used as a GC |
ysr@777 | 3609 | // alloc region (NTAMS will move to top() and the objects |
ysr@777 | 3610 | // originally below it will be grayed). All objects now marked in |
ysr@777 | 3611 | // the region are explicitly grayed, if below the global finger, |
ysr@777 | 3612 | // and we do not need in fact to scan anything else. So, we simply |
ysr@777 | 3613 | // set _finger to be limit to ensure that the bitmap iteration |
ysr@777 | 3614 | // doesn't do anything. |
ysr@777 | 3615 | _finger = limit; |
ysr@777 | 3616 | } |
ysr@777 | 3617 | |
ysr@777 | 3618 | _region_limit = limit; |
ysr@777 | 3619 | } |
ysr@777 | 3620 | |
ysr@777 | 3621 | void CMTask::giveup_current_region() { |
tonyp@1458 | 3622 | assert(_curr_region != NULL, "invariant"); |
tonyp@2973 | 3623 | if (_cm->verbose_low()) { |
johnc@4173 | 3624 | gclog_or_tty->print_cr("[%u] giving up region "PTR_FORMAT, |
drchase@6680 | 3625 | _worker_id, p2i(_curr_region)); |
tonyp@2973 | 3626 | } |
ysr@777 | 3627 | clear_region_fields(); |
ysr@777 | 3628 | } |
ysr@777 | 3629 | |
ysr@777 | 3630 | void CMTask::clear_region_fields() { |
ysr@777 | 3631 | // Values for these three fields that indicate that we're not |
ysr@777 | 3632 | // holding on to a region. |
ysr@777 | 3633 | _curr_region = NULL; |
ysr@777 | 3634 | _finger = NULL; |
ysr@777 | 3635 | _region_limit = NULL; |
ysr@777 | 3636 | } |
ysr@777 | 3637 | |
tonyp@2968 | 3638 | void CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) { |
tonyp@2968 | 3639 | if (cm_oop_closure == NULL) { |
tonyp@2968 | 3640 | assert(_cm_oop_closure != NULL, "invariant"); |
tonyp@2968 | 3641 | } else { |
tonyp@2968 | 3642 | assert(_cm_oop_closure == NULL, "invariant"); |
tonyp@2968 | 3643 | } |
tonyp@2968 | 3644 | _cm_oop_closure = cm_oop_closure; |
tonyp@2968 | 3645 | } |
tonyp@2968 | 3646 | |
ysr@777 | 3647 | void CMTask::reset(CMBitMap* nextMarkBitMap) { |
tonyp@1458 | 3648 | guarantee(nextMarkBitMap != NULL, "invariant"); |
ysr@777 | 3649 | |
tonyp@2973 | 3650 | if (_cm->verbose_low()) { |
johnc@4173 | 3651 | gclog_or_tty->print_cr("[%u] resetting", _worker_id); |
tonyp@2973 | 3652 | } |
ysr@777 | 3653 | |
ysr@777 | 3654 | _nextMarkBitMap = nextMarkBitMap; |
ysr@777 | 3655 | clear_region_fields(); |
ysr@777 | 3656 | |
ysr@777 | 3657 | _calls = 0; |
ysr@777 | 3658 | _elapsed_time_ms = 0.0; |
ysr@777 | 3659 | _termination_time_ms = 0.0; |
ysr@777 | 3660 | _termination_start_time_ms = 0.0; |
ysr@777 | 3661 | |
ysr@777 | 3662 | #if _MARKING_STATS_ |
ysr@777 | 3663 | _local_pushes = 0; |
ysr@777 | 3664 | _local_pops = 0; |
ysr@777 | 3665 | _local_max_size = 0; |
ysr@777 | 3666 | _objs_scanned = 0; |
ysr@777 | 3667 | _global_pushes = 0; |
ysr@777 | 3668 | _global_pops = 0; |
ysr@777 | 3669 | _global_max_size = 0; |
ysr@777 | 3670 | _global_transfers_to = 0; |
ysr@777 | 3671 | _global_transfers_from = 0; |
ysr@777 | 3672 | _regions_claimed = 0; |
ysr@777 | 3673 | _objs_found_on_bitmap = 0; |
ysr@777 | 3674 | _satb_buffers_processed = 0; |
ysr@777 | 3675 | _steal_attempts = 0; |
ysr@777 | 3676 | _steals = 0; |
ysr@777 | 3677 | _aborted = 0; |
ysr@777 | 3678 | _aborted_overflow = 0; |
ysr@777 | 3679 | _aborted_cm_aborted = 0; |
ysr@777 | 3680 | _aborted_yield = 0; |
ysr@777 | 3681 | _aborted_timed_out = 0; |
ysr@777 | 3682 | _aborted_satb = 0; |
ysr@777 | 3683 | _aborted_termination = 0; |
ysr@777 | 3684 | #endif // _MARKING_STATS_ |
ysr@777 | 3685 | } |
ysr@777 | 3686 | |
ysr@777 | 3687 | bool CMTask::should_exit_termination() { |
ysr@777 | 3688 | regular_clock_call(); |
ysr@777 | 3689 | // This is called when we are in the termination protocol. We should |
ysr@777 | 3690 | // quit if, for some reason, this task wants to abort or the global |
ysr@777 | 3691 | // stack is not empty (this means that we can get work from it). |
ysr@777 | 3692 | return !_cm->mark_stack_empty() || has_aborted(); |
ysr@777 | 3693 | } |
ysr@777 | 3694 | |
ysr@777 | 3695 | void CMTask::reached_limit() { |
tonyp@1458 | 3696 | assert(_words_scanned >= _words_scanned_limit || |
tonyp@1458 | 3697 | _refs_reached >= _refs_reached_limit,
tonyp@1458 | 3698 | "shouldn't have been called otherwise"); |
ysr@777 | 3699 | regular_clock_call(); |
ysr@777 | 3700 | } |
ysr@777 | 3701 | |
ysr@777 | 3702 | void CMTask::regular_clock_call() { |
tonyp@2973 | 3703 | if (has_aborted()) return; |
ysr@777 | 3704 | |
ysr@777 | 3705 | // First, we need to recalculate the words scanned and refs reached |
ysr@777 | 3706 | // limits for the next clock call. |
ysr@777 | 3707 | recalculate_limits(); |
ysr@777 | 3708 | |
ysr@777 | 3709 | // During the regular clock call we do the following |
ysr@777 | 3710 | |
ysr@777 | 3711 | // (1) If an overflow has been flagged, then we abort. |
ysr@777 | 3712 | if (_cm->has_overflown()) { |
ysr@777 | 3713 | set_has_aborted(); |
ysr@777 | 3714 | return; |
ysr@777 | 3715 | } |
ysr@777 | 3716 | |
ysr@777 | 3717 | // If we are not concurrent (i.e. we're doing remark) we don't need |
ysr@777 | 3718 | // to check anything else. The other steps are only needed during |
ysr@777 | 3719 | // the concurrent marking phase. |
tonyp@2973 | 3720 | if (!concurrent()) return; |
ysr@777 | 3721 | |
ysr@777 | 3722 | // (2) If marking has been aborted for Full GC, then we also abort. |
ysr@777 | 3723 | if (_cm->has_aborted()) { |
ysr@777 | 3724 | set_has_aborted(); |
ysr@777 | 3725 | statsOnly( ++_aborted_cm_aborted ); |
ysr@777 | 3726 | return; |
ysr@777 | 3727 | } |
ysr@777 | 3728 | |
ysr@777 | 3729 | double curr_time_ms = os::elapsedVTime() * 1000.0; |
ysr@777 | 3730 | |
ysr@777 | 3731 | // (3) If marking stats are enabled, then we update the step history. |
ysr@777 | 3732 | #if _MARKING_STATS_ |
tonyp@2973 | 3733 | if (_words_scanned >= _words_scanned_limit) { |
ysr@777 | 3734 | ++_clock_due_to_scanning; |
tonyp@2973 | 3735 | } |
tonyp@2973 | 3736 | if (_refs_reached >= _refs_reached_limit) { |
ysr@777 | 3737 | ++_clock_due_to_marking; |
tonyp@2973 | 3738 | } |
ysr@777 | 3739 | |
ysr@777 | 3740 | double last_interval_ms = curr_time_ms - _interval_start_time_ms; |
ysr@777 | 3741 | _interval_start_time_ms = curr_time_ms; |
ysr@777 | 3742 | _all_clock_intervals_ms.add(last_interval_ms); |
ysr@777 | 3743 | |
ysr@777 | 3744 | if (_cm->verbose_medium()) { |
johnc@4173 | 3745 | gclog_or_tty->print_cr("[%u] regular clock, interval = %1.2lfms, " |
tonyp@2973 | 3746 | "scanned = %d%s, refs reached = %d%s", |
johnc@4173 | 3747 | _worker_id, last_interval_ms, |
tonyp@2973 | 3748 | _words_scanned, |
tonyp@2973 | 3749 | (_words_scanned >= _words_scanned_limit) ? " (*)" : "", |
tonyp@2973 | 3750 | _refs_reached, |
tonyp@2973 | 3751 | (_refs_reached >= _refs_reached_limit) ? " (*)" : ""); |
ysr@777 | 3752 | } |
ysr@777 | 3753 | #endif // _MARKING_STATS_ |
ysr@777 | 3754 | |
ysr@777 | 3755 | // (4) We check whether we should yield. If we have to, then we abort. |
pliden@6906 | 3756 | if (SuspendibleThreadSet::should_yield()) { |
ysr@777 | 3757 | // We should yield. To do this we abort the task. The caller is |
ysr@777 | 3758 | // responsible for yielding. |
ysr@777 | 3759 | set_has_aborted(); |
ysr@777 | 3760 | statsOnly( ++_aborted_yield ); |
ysr@777 | 3761 | return; |
ysr@777 | 3762 | } |
ysr@777 | 3763 | |
ysr@777 | 3764 | // (5) We check whether we've reached our time quota. If we have, |
ysr@777 | 3765 | // then we abort. |
ysr@777 | 3766 | double elapsed_time_ms = curr_time_ms - _start_time_ms; |
ysr@777 | 3767 | if (elapsed_time_ms > _time_target_ms) { |
ysr@777 | 3768 | set_has_aborted(); |
johnc@2494 | 3769 | _has_timed_out = true; |
ysr@777 | 3770 | statsOnly( ++_aborted_timed_out ); |
ysr@777 | 3771 | return; |
ysr@777 | 3772 | } |
ysr@777 | 3773 | |
ysr@777 | 3774 | // (6) Finally, we check whether there are enough completed SATB
ysr@777 | 3775 | // buffers available for processing. If there are, we abort. |
ysr@777 | 3776 | SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); |
ysr@777 | 3777 | if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) { |
tonyp@2973 | 3778 | if (_cm->verbose_low()) { |
johnc@4173 | 3779 | gclog_or_tty->print_cr("[%u] aborting to deal with pending SATB buffers", |
johnc@4173 | 3780 | _worker_id); |
tonyp@2973 | 3781 | } |
ysr@777 | 3782 | // We do need to process SATB buffers, so we'll abort and restart
ysr@777 | 3783 | // the marking task to do so.
ysr@777 | 3784 | set_has_aborted(); |
ysr@777 | 3785 | statsOnly( ++_aborted_satb ); |
ysr@777 | 3786 | return; |
ysr@777 | 3787 | } |
ysr@777 | 3788 | } |
ysr@777 | 3789 | |
ysr@777 | 3790 | void CMTask::recalculate_limits() { |
ysr@777 | 3791 | _real_words_scanned_limit = _words_scanned + words_scanned_period; |
ysr@777 | 3792 | _words_scanned_limit = _real_words_scanned_limit; |
ysr@777 | 3793 | |
ysr@777 | 3794 | _real_refs_reached_limit = _refs_reached + refs_reached_period; |
ysr@777 | 3795 | _refs_reached_limit = _real_refs_reached_limit; |
ysr@777 | 3796 | } |
ysr@777 | 3797 | |
ysr@777 | 3798 | void CMTask::decrease_limits() { |
ysr@777 | 3799 | // This is called when we believe that we're going to do an infrequent |
ysr@777 | 3800 | // operation which will increase the per byte scanned cost (i.e. move |
ysr@777 | 3801 | // entries to/from the global stack). It basically tries to decrease the |
ysr@777 | 3802 | // scanning limit so that the clock is called earlier. |
ysr@777 | 3803 | |
tonyp@2973 | 3804 | if (_cm->verbose_medium()) { |
johnc@4173 | 3805 | gclog_or_tty->print_cr("[%u] decreasing limits", _worker_id); |
tonyp@2973 | 3806 | } |
ysr@777 | 3807 | |
ysr@777 | 3808 | _words_scanned_limit = _real_words_scanned_limit - |
ysr@777 | 3809 | 3 * words_scanned_period / 4; |
ysr@777 | 3810 | _refs_reached_limit = _real_refs_reached_limit - |
ysr@777 | 3811 | 3 * refs_reached_period / 4; |
ysr@777 | 3812 | } |
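// Worked example (illustrative, assuming a scanning period of W words):
// right after recalculate_limits() the clock would fire once a further W
// words had been scanned; decrease_limits() pulls the limit back by
// 3*W/4, so only about W/4 more words are scanned before
// regular_clock_call() runs again. The refs_reached limit is treated the
// same way.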
ysr@777 | 3813 | |
ysr@777 | 3814 | void CMTask::move_entries_to_global_stack() { |
ysr@777 | 3815 | // local array where we'll store the entries that will be popped |
ysr@777 | 3816 | // from the local queue |
ysr@777 | 3817 | oop buffer[global_stack_transfer_size]; |
ysr@777 | 3818 | |
ysr@777 | 3819 | int n = 0; |
ysr@777 | 3820 | oop obj; |
ysr@777 | 3821 | while (n < global_stack_transfer_size && _task_queue->pop_local(obj)) { |
ysr@777 | 3822 | buffer[n] = obj; |
ysr@777 | 3823 | ++n; |
ysr@777 | 3824 | } |
ysr@777 | 3825 | |
ysr@777 | 3826 | if (n > 0) { |
ysr@777 | 3827 | // we popped at least one entry from the local queue |
ysr@777 | 3828 | |
ysr@777 | 3829 | statsOnly( ++_global_transfers_to; _local_pops += n ); |
ysr@777 | 3830 | |
ysr@777 | 3831 | if (!_cm->mark_stack_push(buffer, n)) { |
tonyp@2973 | 3832 | if (_cm->verbose_low()) { |
johnc@4173 | 3833 | gclog_or_tty->print_cr("[%u] aborting due to global stack overflow", |
johnc@4173 | 3834 | _worker_id); |
tonyp@2973 | 3835 | } |
ysr@777 | 3836 | set_has_aborted(); |
ysr@777 | 3837 | } else { |
ysr@777 | 3838 | // the transfer was successful |
ysr@777 | 3839 | |
tonyp@2973 | 3840 | if (_cm->verbose_medium()) { |
johnc@4173 | 3841 | gclog_or_tty->print_cr("[%u] pushed %d entries to the global stack", |
johnc@4173 | 3842 | _worker_id, n); |
tonyp@2973 | 3843 | } |
ysr@777 | 3844 | statsOnly( int tmp_size = _cm->mark_stack_size(); |
tonyp@2973 | 3845 | if (tmp_size > _global_max_size) { |
ysr@777 | 3846 | _global_max_size = tmp_size; |
tonyp@2973 | 3847 | } |
ysr@777 | 3848 | _global_pushes += n ); |
ysr@777 | 3849 | } |
ysr@777 | 3850 | } |
ysr@777 | 3851 | |
ysr@777 | 3852 | // this operation was quite expensive, so decrease the limits |
ysr@777 | 3853 | decrease_limits(); |
ysr@777 | 3854 | } |
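// Sketch of the transfer protocol (illustrative only): entries move
// between a task and the global mark stack in chunks of at most
// global_stack_transfer_size oops, in both directions, so the
// mutex-protected global stack is touched far less often than the
// lock-free local queue:
//
//   local queue --(move_entries_to_global_stack)--> global stack
//   local queue <--(get_entries_from_global_stack)-- global stack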
ysr@777 | 3855 | |
ysr@777 | 3856 | void CMTask::get_entries_from_global_stack() { |
ysr@777 | 3857 | // local array where we'll store the entries that will be popped |
ysr@777 | 3858 | // from the global stack. |
ysr@777 | 3859 | oop buffer[global_stack_transfer_size]; |
ysr@777 | 3860 | int n; |
ysr@777 | 3861 | _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n); |
tonyp@1458 | 3862 | assert(n <= global_stack_transfer_size, |
tonyp@1458 | 3863 | "we should not pop more than the given limit"); |
ysr@777 | 3864 | if (n > 0) { |
ysr@777 | 3865 | // yes, we did actually pop at least one entry |
ysr@777 | 3866 | |
ysr@777 | 3867 | statsOnly( ++_global_transfers_from; _global_pops += n ); |
tonyp@2973 | 3868 | if (_cm->verbose_medium()) { |
johnc@4173 | 3869 | gclog_or_tty->print_cr("[%u] popped %d entries from the global stack", |
johnc@4173 | 3870 | _worker_id, n); |
tonyp@2973 | 3871 | } |
ysr@777 | 3872 | for (int i = 0; i < n; ++i) { |
ysr@777 | 3873 | bool success = _task_queue->push(buffer[i]); |
ysr@777 | 3874 | // We only call this when the local queue is empty or under a |
ysr@777 | 3875 | // given target limit. So, we do not expect this push to fail. |
tonyp@1458 | 3876 | assert(success, "invariant"); |
ysr@777 | 3877 | } |
ysr@777 | 3878 | |
ysr@777 | 3879 | statsOnly( int tmp_size = _task_queue->size(); |
tonyp@2973 | 3880 | if (tmp_size > _local_max_size) { |
ysr@777 | 3881 | _local_max_size = tmp_size; |
tonyp@2973 | 3882 | } |
ysr@777 | 3883 | _local_pushes += n ); |
ysr@777 | 3884 | } |
ysr@777 | 3885 | |
ysr@777 | 3886 | // this operation was quite expensive, so decrease the limits |
ysr@777 | 3887 | decrease_limits(); |
ysr@777 | 3888 | } |
ysr@777 | 3889 | |
ysr@777 | 3890 | void CMTask::drain_local_queue(bool partially) { |
tonyp@2973 | 3891 | if (has_aborted()) return; |
ysr@777 | 3892 | |
ysr@777 | 3893 | // Decide what the target size is, depending on whether we're going to
ysr@777 | 3894 | // drain it partially (so that other tasks can steal if they run out |
ysr@777 | 3895 | // of things to do) or totally (at the very end). |
ysr@777 | 3896 | size_t target_size; |
tonyp@2973 | 3897 | if (partially) { |
ysr@777 | 3898 | target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize); |
tonyp@2973 | 3899 | } else { |
ysr@777 | 3900 | target_size = 0; |
tonyp@2973 | 3901 | } |
ysr@777 | 3902 | |
ysr@777 | 3903 | if (_task_queue->size() > target_size) { |
tonyp@2973 | 3904 | if (_cm->verbose_high()) { |
drchase@6680 | 3905 | gclog_or_tty->print_cr("[%u] draining local queue, target size = " SIZE_FORMAT, |
johnc@4173 | 3906 | _worker_id, target_size); |
tonyp@2973 | 3907 | } |
ysr@777 | 3908 | |
ysr@777 | 3909 | oop obj; |
ysr@777 | 3910 | bool ret = _task_queue->pop_local(obj); |
ysr@777 | 3911 | while (ret) { |
ysr@777 | 3912 | statsOnly( ++_local_pops ); |
ysr@777 | 3913 | |
tonyp@2973 | 3914 | if (_cm->verbose_high()) { |
johnc@4173 | 3915 | gclog_or_tty->print_cr("[%u] popped "PTR_FORMAT, _worker_id, |
drchase@6680 | 3916 | p2i((void*) obj)); |
tonyp@2973 | 3917 | } |
ysr@777 | 3918 | |
tonyp@1458 | 3919 | assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" ); |
tonyp@2643 | 3920 | assert(!_g1h->is_on_master_free_list( |
tonyp@2472 | 3921 | _g1h->heap_region_containing((HeapWord*) obj)), "invariant"); |
ysr@777 | 3922 | |
ysr@777 | 3923 | scan_object(obj); |
ysr@777 | 3924 | |
tonyp@2973 | 3925 | if (_task_queue->size() <= target_size || has_aborted()) { |
ysr@777 | 3926 | ret = false; |
tonyp@2973 | 3927 | } else { |
ysr@777 | 3928 | ret = _task_queue->pop_local(obj); |
tonyp@2973 | 3929 | } |
ysr@777 | 3930 | } |
ysr@777 | 3931 | |
tonyp@2973 | 3932 | if (_cm->verbose_high()) { |
johnc@4173 | 3933 | gclog_or_tty->print_cr("[%u] drained local queue, size = %d", |
johnc@4173 | 3934 | _worker_id, _task_queue->size()); |
tonyp@2973 | 3935 | } |
ysr@777 | 3936 | } |
ysr@777 | 3937 | } |
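// For illustration: when draining partially, the target is
// MIN2(max_elems / 3, GCDrainStackTargetSize); with a hypothetical queue
// capacity of 16K entries and a GCDrainStackTargetSize of, say, 64, the
// loop above stops once 64 entries remain, leaving work available for
// stealing. A full drain (partially == false) uses a target of 0 and
// empties the queue completely.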
ysr@777 | 3938 | |
ysr@777 | 3939 | void CMTask::drain_global_stack(bool partially) { |
tonyp@2973 | 3940 | if (has_aborted()) return; |
ysr@777 | 3941 | |
ysr@777 | 3942 | // We have a policy to drain the local queue before we attempt to |
ysr@777 | 3943 | // drain the global stack. |
tonyp@1458 | 3944 | assert(partially || _task_queue->size() == 0, "invariant"); |
ysr@777 | 3945 | |
ysr@777 | 3946 | // Decide what the target size is, depending on whether we're going to
ysr@777 | 3947 | // drain it partially (so that other tasks can steal if they run out |
ysr@777 | 3948 | // of things to do) or totally (at the very end). Notice that, |
ysr@777 | 3949 | // because we move entries from the global stack in chunks or |
ysr@777 | 3950 | // because another task might be doing the same, we might in fact |
ysr@777 | 3951 | // drop below the target. But, this is not a problem. |
ysr@777 | 3952 | size_t target_size; |
tonyp@2973 | 3953 | if (partially) { |
ysr@777 | 3954 | target_size = _cm->partial_mark_stack_size_target(); |
tonyp@2973 | 3955 | } else { |
ysr@777 | 3956 | target_size = 0; |
tonyp@2973 | 3957 | } |
ysr@777 | 3958 | |
ysr@777 | 3959 | if (_cm->mark_stack_size() > target_size) { |
tonyp@2973 | 3960 | if (_cm->verbose_low()) { |
drchase@6680 | 3961 | gclog_or_tty->print_cr("[%u] draining global_stack, target size " SIZE_FORMAT, |
johnc@4173 | 3962 | _worker_id, target_size); |
tonyp@2973 | 3963 | } |
ysr@777 | 3964 | |
ysr@777 | 3965 | while (!has_aborted() && _cm->mark_stack_size() > target_size) { |
ysr@777 | 3966 | get_entries_from_global_stack(); |
ysr@777 | 3967 | drain_local_queue(partially); |
ysr@777 | 3968 | } |
ysr@777 | 3969 | |
tonyp@2973 | 3970 | if (_cm->verbose_low()) { |
drchase@6680 | 3971 | gclog_or_tty->print_cr("[%u] drained global stack, size = " SIZE_FORMAT, |
johnc@4173 | 3972 | _worker_id, _cm->mark_stack_size()); |
tonyp@2973 | 3973 | } |
ysr@777 | 3974 | } |
ysr@777 | 3975 | } |
ysr@777 | 3976 | |
ysr@777 | 3977 | // The SATB queue set makes several assumptions about whether to call the
ysr@777 | 3978 | // par or non-par versions of its methods. This is why some of the code
ysr@777 | 3979 | // below is replicated. We should really get rid of the single-threaded
ysr@777 | 3980 | // version of the code to simplify things.
ysr@777 | 3981 | void CMTask::drain_satb_buffers() { |
tonyp@2973 | 3982 | if (has_aborted()) return; |
ysr@777 | 3983 | |
ysr@777 | 3984 | // We set this so that the regular clock knows that we're in the |
ysr@777 | 3985 | // middle of draining buffers and doesn't set the abort flag when it |
ysr@777 | 3986 | // notices that SATB buffers are available for draining. It'd be |
ysr@777 | 3987 | // very counterproductive if it did that. :-)
ysr@777 | 3988 | _draining_satb_buffers = true; |
ysr@777 | 3989 | |
ysr@777 | 3990 | CMObjectClosure oc(this); |
ysr@777 | 3991 | SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); |
tonyp@2973 | 3992 | if (G1CollectedHeap::use_parallel_gc_threads()) { |
johnc@4173 | 3993 | satb_mq_set.set_par_closure(_worker_id, &oc); |
tonyp@2973 | 3994 | } else { |
ysr@777 | 3995 | satb_mq_set.set_closure(&oc); |
tonyp@2973 | 3996 | } |
ysr@777 | 3997 | |
ysr@777 | 3998 | // This keeps claiming and applying the closure to completed buffers |
ysr@777 | 3999 | // until we run out of buffers or we need to abort. |
jmasa@2188 | 4000 | if (G1CollectedHeap::use_parallel_gc_threads()) { |
ysr@777 | 4001 | while (!has_aborted() && |
johnc@4173 | 4002 | satb_mq_set.par_apply_closure_to_completed_buffer(_worker_id)) { |
tonyp@2973 | 4003 | if (_cm->verbose_medium()) { |
johnc@4173 | 4004 | gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id); |
tonyp@2973 | 4005 | } |
ysr@777 | 4006 | statsOnly( ++_satb_buffers_processed ); |
ysr@777 | 4007 | regular_clock_call(); |
ysr@777 | 4008 | } |
ysr@777 | 4009 | } else { |
ysr@777 | 4010 | while (!has_aborted() && |
ysr@777 | 4011 | satb_mq_set.apply_closure_to_completed_buffer()) { |
tonyp@2973 | 4012 | if (_cm->verbose_medium()) { |
johnc@4173 | 4013 | gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id); |
tonyp@2973 | 4014 | } |
ysr@777 | 4015 | statsOnly( ++_satb_buffers_processed ); |
ysr@777 | 4016 | regular_clock_call(); |
ysr@777 | 4017 | } |
ysr@777 | 4018 | } |
ysr@777 | 4019 | |
ysr@777 | 4020 | _draining_satb_buffers = false; |
ysr@777 | 4021 | |
tonyp@1458 | 4022 | assert(has_aborted() || |
tonyp@1458 | 4023 | concurrent() || |
tonyp@1458 | 4024 | satb_mq_set.completed_buffers_num() == 0, "invariant"); |
ysr@777 | 4025 | |
tonyp@2973 | 4026 | if (G1CollectedHeap::use_parallel_gc_threads()) { |
johnc@4173 | 4027 | satb_mq_set.set_par_closure(_worker_id, NULL); |
tonyp@2973 | 4028 | } else { |
ysr@777 | 4029 | satb_mq_set.set_closure(NULL); |
tonyp@2973 | 4030 | } |
ysr@777 | 4031 | |
ysr@777 | 4032 | // again, this was a potentially expensive operation, decrease the |
ysr@777 | 4033 | // limits to get the regular clock call early |
ysr@777 | 4034 | decrease_limits(); |
ysr@777 | 4035 | } |
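// Note (illustrative cross-reference): the _draining_satb_buffers flag set
// above is what stops step (6) of regular_clock_call() from immediately
// re-aborting this task while it is already busy emptying the very
// buffers that condition checks for.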
ysr@777 | 4036 | |
ysr@777 | 4037 | void CMTask::print_stats() { |
johnc@4173 | 4038 | gclog_or_tty->print_cr("Marking Stats, task = %u, calls = %d", |
johnc@4173 | 4039 | _worker_id, _calls); |
ysr@777 | 4040 | gclog_or_tty->print_cr(" Elapsed time = %1.2lfms, Termination time = %1.2lfms", |
ysr@777 | 4041 | _elapsed_time_ms, _termination_time_ms); |
ysr@777 | 4042 | gclog_or_tty->print_cr(" Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms", |
ysr@777 | 4043 | _step_times_ms.num(), _step_times_ms.avg(), |
ysr@777 | 4044 | _step_times_ms.sd()); |
ysr@777 | 4045 | gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms", |
ysr@777 | 4046 | _step_times_ms.maximum(), _step_times_ms.sum()); |
ysr@777 | 4047 | |
ysr@777 | 4048 | #if _MARKING_STATS_ |
ysr@777 | 4049 | gclog_or_tty->print_cr(" Clock Intervals (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms", |
ysr@777 | 4050 | _all_clock_intervals_ms.num(), _all_clock_intervals_ms.avg(), |
ysr@777 | 4051 | _all_clock_intervals_ms.sd()); |
ysr@777 | 4052 | gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms", |
ysr@777 | 4053 | _all_clock_intervals_ms.maximum(), |
ysr@777 | 4054 | _all_clock_intervals_ms.sum()); |
ysr@777 | 4055 | gclog_or_tty->print_cr(" Clock Causes (cum): scanning = %d, marking = %d", |
ysr@777 | 4056 | _clock_due_to_scanning, _clock_due_to_marking); |
ysr@777 | 4057 | gclog_or_tty->print_cr(" Objects: scanned = %d, found on the bitmap = %d", |
ysr@777 | 4058 | _objs_scanned, _objs_found_on_bitmap); |
ysr@777 | 4059 | gclog_or_tty->print_cr(" Local Queue: pushes = %d, pops = %d, max size = %d", |
ysr@777 | 4060 | _local_pushes, _local_pops, _local_max_size); |
ysr@777 | 4061 | gclog_or_tty->print_cr(" Global Stack: pushes = %d, pops = %d, max size = %d", |
ysr@777 | 4062 | _global_pushes, _global_pops, _global_max_size); |
ysr@777 | 4063 | gclog_or_tty->print_cr(" transfers to = %d, transfers from = %d", |
ysr@777 | 4064 | _global_transfers_to, _global_transfers_from);
tonyp@3691 | 4065 | gclog_or_tty->print_cr(" Regions: claimed = %d", _regions_claimed); |
ysr@777 | 4066 | gclog_or_tty->print_cr(" SATB buffers: processed = %d", _satb_buffers_processed); |
ysr@777 | 4067 | gclog_or_tty->print_cr(" Steals: attempts = %d, successes = %d", |
ysr@777 | 4068 | _steal_attempts, _steals); |
ysr@777 | 4069 | gclog_or_tty->print_cr(" Aborted: %d, due to", _aborted); |
ysr@777 | 4070 | gclog_or_tty->print_cr(" overflow: %d, global abort: %d, yield: %d", |
ysr@777 | 4071 | _aborted_overflow, _aborted_cm_aborted, _aborted_yield); |
ysr@777 | 4072 | gclog_or_tty->print_cr(" time out: %d, SATB: %d, termination: %d", |
ysr@777 | 4073 | _aborted_timed_out, _aborted_satb, _aborted_termination); |
ysr@777 | 4074 | #endif // _MARKING_STATS_ |
ysr@777 | 4075 | } |
ysr@777 | 4076 | |
ysr@777 | 4077 | /***************************************************************************** |
ysr@777 | 4078 | |
johnc@4787 | 4079 | The do_marking_step(time_target_ms, ...) method is the building |
johnc@4787 | 4080 | block of the parallel marking framework. It can be called in parallel |
ysr@777 | 4081 | with other invocations of do_marking_step() on different tasks |
ysr@777 | 4082 | (but only one per task, obviously) and concurrently with the |
ysr@777 | 4083 | mutator threads, or during remark, hence it eliminates the need |
ysr@777 | 4084 | for two versions of the code. When called during remark, it will |
ysr@777 | 4085 | pick up from where the task left off during the concurrent marking |
ysr@777 | 4086 | phase. Interestingly, tasks are also claimable during evacuation |
ysr@777 | 4087 | pauses, since do_marking_step() ensures that it aborts before
ysr@777 | 4088 | it needs to yield. |
ysr@777 | 4089 | |
johnc@4787 | 4090 | The data structures that it uses to do marking work are the |
ysr@777 | 4091 | following: |
ysr@777 | 4092 | |
ysr@777 | 4093 | (1) Marking Bitmap. If there are gray objects that appear only |
ysr@777 | 4094 | on the bitmap (this happens either when dealing with an overflow |
ysr@777 | 4095 | or when the initial marking phase has simply marked the roots |
ysr@777 | 4096 | and didn't push them on the stack), then tasks claim heap |
ysr@777 | 4097 | regions whose bitmap they then scan to find gray objects. A |
ysr@777 | 4098 | global finger indicates where the end of the last claimed region |
ysr@777 | 4099 | is. A local finger indicates how far into the region a task has |
ysr@777 | 4100 | scanned. The two fingers are used to determine how to gray an |
ysr@777 | 4101 | object (i.e. whether simply marking it is OK, as it will be |
ysr@777 | 4102 | visited by a task in the future, or whether it also needs to be
ysr@777 | 4103 | pushed on a stack).
ysr@777 | 4104 | |
ysr@777 | 4105 | (2) Local Queue. The local queue of the task which is accessed |
ysr@777 | 4106 | reasonably efficiently by the task. Other tasks can steal from |
ysr@777 | 4107 | it when they run out of work. Throughout the marking phase, a |
ysr@777 | 4108 | task attempts to keep its local queue short but not totally |
ysr@777 | 4109 | empty, so that entries are available for stealing by other |
ysr@777 | 4110 | tasks. Only when there is no more work, a task will totally |
ysr@777 | 4111 | drain its local queue. |
ysr@777 | 4112 | |
ysr@777 | 4113 | (3) Global Mark Stack. This handles local queue overflow. During |
ysr@777 | 4114 | marking only sets of entries are moved between it and the local |
ysr@777 | 4115 | queues, as access to it requires a mutex and finer-grained
ysr@777 | 4116 | interaction with it, which might cause contention. If it
ysr@777 | 4117 | overflows, then the marking phase should restart and iterate |
ysr@777 | 4118 | over the bitmap to identify gray objects. Throughout the marking |
ysr@777 | 4119 | phase, tasks attempt to keep the global mark stack at a small |
ysr@777 | 4120 | length but not totally empty, so that entries are available for |
ysr@777 | 4121 | popping by other tasks. Only when there is no more work, tasks |
ysr@777 | 4122 | will totally drain the global mark stack. |
ysr@777 | 4123 | |
tonyp@3691 | 4124 | (4) SATB Buffer Queue. This is where completed SATB buffers are |
ysr@777 | 4125 | made available. Buffers are regularly removed from this queue |
ysr@777 | 4126 | and scanned for roots, so that the queue doesn't get too |
ysr@777 | 4127 | long. During remark, all completed buffers are processed, as |
ysr@777 | 4128 | well as the filled in parts of any uncompleted buffers. |
ysr@777 | 4129 | |
ysr@777 | 4130 | The do_marking_step() method tries to abort when the time target |
ysr@777 | 4131 | has been reached. There are a few other cases when the |
ysr@777 | 4132 | do_marking_step() method also aborts: |
ysr@777 | 4133 | |
ysr@777 | 4134 | (1) When the marking phase has been aborted (after a Full GC). |
ysr@777 | 4135 | |
tonyp@3691 | 4136 | (2) When a global overflow (on the global stack) has been |
tonyp@3691 | 4137 | triggered. Before the task aborts, it will actually sync up with |
tonyp@3691 | 4138 | the other tasks to ensure that all the marking data structures |
johnc@4788 | 4139 | (local queues, stacks, fingers etc.) are re-initialized so that |
tonyp@3691 | 4140 | when do_marking_step() completes, the marking phase can |
tonyp@3691 | 4141 | immediately restart. |
ysr@777 | 4142 | |
ysr@777 | 4143 | (3) When enough completed SATB buffers are available. The |
ysr@777 | 4144 | do_marking_step() method only tries to drain SATB buffers right |
ysr@777 | 4145 | at the beginning. So, if enough buffers are available, the |
ysr@777 | 4146 | marking step aborts and the SATB buffers are processed at |
ysr@777 | 4147 | the beginning of the next invocation. |
ysr@777 | 4148 | |
ysr@777 | 4149 | (4) To yield. When we have to yield, we abort and yield
ysr@777 | 4150 | right at the end of do_marking_step(). This saves us from a lot
ysr@777 | 4151 | of hassle as, by yielding, we might allow a Full GC. If this
ysr@777 | 4152 | happens then objects will be compacted underneath our feet, the |
ysr@777 | 4153 | heap might shrink, etc. We save checking for this by just |
ysr@777 | 4154 | aborting and doing the yield right at the end. |
ysr@777 | 4155 | |
ysr@777 | 4156 | From the above it follows that the do_marking_step() method should |
ysr@777 | 4157 | be called in a loop (or, otherwise, regularly) until it completes. |
ysr@777 | 4158 | |
ysr@777 | 4159 | If a marking step completes without its has_aborted() flag being |
ysr@777 | 4160 | true, it means it has completed the current marking phase (and |
ysr@777 | 4161 | also all other marking tasks have done so and have all synced up). |
ysr@777 | 4162 | |
ysr@777 | 4163 | A method called regular_clock_call() is invoked "regularly" (in |
ysr@777 | 4164 | sub ms intervals) throughout marking. It is this clock method that |
ysr@777 | 4165 | checks all the abort conditions which were mentioned above and |
ysr@777 | 4166 | decides when the task should abort. A work-based scheme is used to |
ysr@777 | 4167 | trigger this clock method: when the number of object words the |
ysr@777 | 4168 | marking phase has scanned or the number of references the marking |
ysr@777 | 4169 | phase has visited reach a given limit. Additional invocations of
ysr@777 | 4170 | the clock method have been planted in a few other strategic places
ysr@777 | 4171 | too. The initial reason for the clock method was to avoid calling |
ysr@777 | 4172 | vtime too regularly, as it is quite expensive. So, once it was in |
ysr@777 | 4173 | place, it was natural to piggy-back all the other conditions on it |
ysr@777 | 4174 | too and not constantly check them throughout the code. |
ysr@777 | 4175 | |
johnc@4787 | 4176 | If do_termination is true then do_marking_step will enter its |
johnc@4787 | 4177 | termination protocol. |
johnc@4787 | 4178 | |
johnc@4787 | 4179 | The value of is_serial must be true when do_marking_step is being |
johnc@4787 | 4180 | called serially (i.e. by the VMThread) and do_marking_step should |
johnc@4787 | 4181 | skip any synchronization in the termination and overflow code. |
johnc@4787 | 4182 | Examples include the serial remark code and the serial reference |
johnc@4787 | 4183 | processing closures. |
johnc@4787 | 4184 | |
johnc@4787 | 4185 | The value of is_serial must be false when do_marking_step is |
johnc@4787 | 4186 | being called by any of the worker threads in a work gang. |
johnc@4787 | 4187 | Examples include the concurrent marking code (CMMarkingTask), |
johnc@4787 | 4188 | the MT remark code, and the MT reference processing closures. |
johnc@4787 | 4189 | |
ysr@777 | 4190 | *****************************************************************************/ |
ysr@777 | 4191 | |
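// A simplified sketch of how callers typically drive do_marking_step()
// (illustrative only; the exact bookkeeping and parameter values differ
// between the concurrent, remark, and reference processing callers):
//
//   do {
//     task->do_marking_step(target_ms,
//                           true  /* do_termination */,
//                           false /* is_serial */);
//     // If the step aborted in order to yield, perform the actual yield
//     // here before looping around and calling it again.
//   } while (!cm->has_aborted() && task->has_aborted());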
johnc@2494 | 4192 | void CMTask::do_marking_step(double time_target_ms, |
johnc@4787 | 4193 | bool do_termination, |
johnc@4787 | 4194 | bool is_serial) { |
tonyp@1458 | 4195 | assert(time_target_ms >= 1.0, "minimum granularity is 1ms"); |
tonyp@1458 | 4196 | assert(concurrent() == _cm->concurrent(), "they should be the same"); |
tonyp@1458 | 4197 | |
ysr@777 | 4198 | G1CollectorPolicy* g1_policy = _g1h->g1_policy(); |
tonyp@1458 | 4199 | assert(_task_queues != NULL, "invariant"); |
tonyp@1458 | 4200 | assert(_task_queue != NULL, "invariant"); |
johnc@4173 | 4201 | assert(_task_queues->queue(_worker_id) == _task_queue, "invariant"); |
tonyp@1458 | 4202 | |
tonyp@1458 | 4203 | assert(!_claimed, |
tonyp@1458 | 4204 | "only one thread should claim this task at any one time"); |
ysr@777 | 4205 | |
ysr@777 | 4206 | // OK, this doesn't safeguard against all possible scenarios, as it is
ysr@777 | 4207 | // possible for two threads to set the _claimed flag at the same |
ysr@777 | 4208 | // time. But it is only for debugging purposes anyway and it will |
ysr@777 | 4209 | // catch most problems. |
ysr@777 | 4210 | _claimed = true; |
ysr@777 | 4211 | |
ysr@777 | 4212 | _start_time_ms = os::elapsedVTime() * 1000.0; |
ysr@777 | 4213 | statsOnly( _interval_start_time_ms = _start_time_ms ); |
ysr@777 | 4214 | |
johnc@4787 | 4215 | // If do_stealing is true then do_marking_step will attempt to |
johnc@4787 | 4216 | // steal work from the other CMTasks. It only makes sense to |
johnc@4787 | 4217 | // enable stealing when the termination protocol is enabled |
johnc@4787 | 4218 | // and do_marking_step() is not being called serially. |
johnc@4787 | 4219 | bool do_stealing = do_termination && !is_serial; |
johnc@4787 | 4220 | |
ysr@777 | 4221 | double diff_prediction_ms = |
ysr@777 | 4222 | g1_policy->get_new_prediction(&_marking_step_diffs_ms); |
ysr@777 | 4223 | _time_target_ms = time_target_ms - diff_prediction_ms; |
ysr@777 | 4224 | |
ysr@777 | 4225 | // set up the variables that are used in the work-based scheme to |
ysr@777 | 4226 | // call the regular clock method |
ysr@777 | 4227 | _words_scanned = 0; |
ysr@777 | 4228 | _refs_reached = 0; |
ysr@777 | 4229 | recalculate_limits(); |
ysr@777 | 4230 | |
ysr@777 | 4231 | // clear all flags |
ysr@777 | 4232 | clear_has_aborted(); |
johnc@2494 | 4233 | _has_timed_out = false; |
ysr@777 | 4234 | _draining_satb_buffers = false; |
ysr@777 | 4235 | |
ysr@777 | 4236 | ++_calls; |
ysr@777 | 4237 | |
tonyp@2973 | 4238 | if (_cm->verbose_low()) { |
johnc@4173 | 4239 | gclog_or_tty->print_cr("[%u] >>>>>>>>>> START, call = %d, " |
ysr@777 | 4240 | "target = %1.2lfms >>>>>>>>>>", |
johnc@4173 | 4241 | _worker_id, _calls, _time_target_ms); |
tonyp@2973 | 4242 | } |
ysr@777 | 4243 | |
ysr@777 | 4244 | // Set up the bitmap and oop closures. Anything that uses them is |
ysr@777 | 4245 | // eventually called from this method, so it is OK to allocate these |
ysr@777 | 4246 | // statically. |
ysr@777 | 4247 | CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap); |
tonyp@2968 | 4248 | G1CMOopClosure cm_oop_closure(_g1h, _cm, this); |
tonyp@2968 | 4249 | set_cm_oop_closure(&cm_oop_closure); |
ysr@777 | 4250 | |
ysr@777 | 4251 | if (_cm->has_overflown()) { |
tonyp@3691 | 4252 | // This can happen if the mark stack overflows during a GC pause |
tonyp@3691 | 4253 | // and this task, after a yield point, restarts. We have to abort |
tonyp@3691 | 4254 | // as we need to get into the overflow protocol which happens |
tonyp@3691 | 4255 | // right at the end of this task. |
ysr@777 | 4256 | set_has_aborted(); |
ysr@777 | 4257 | } |
ysr@777 | 4258 | |
ysr@777 | 4259 | // First drain any available SATB buffers. After this, we will not |
ysr@777 | 4260 | // look at SATB buffers before the next invocation of this method. |
ysr@777 | 4261 | // If enough completed SATB buffers are queued up, the regular clock |
ysr@777 | 4262 | // will abort this task so that it restarts. |
ysr@777 | 4263 | drain_satb_buffers(); |
ysr@777 | 4264 | // ...then partially drain the local queue and the global stack |
ysr@777 | 4265 | drain_local_queue(true); |
ysr@777 | 4266 | drain_global_stack(true); |
ysr@777 | 4267 | |
ysr@777 | 4268 | do { |
ysr@777 | 4269 | if (!has_aborted() && _curr_region != NULL) { |
ysr@777 | 4270 | // This means that we're already holding on to a region. |
tonyp@1458 | 4271 | assert(_finger != NULL, "if region is not NULL, then the finger " |
tonyp@1458 | 4272 | "should not be NULL either"); |
ysr@777 | 4273 | |
ysr@777 | 4274 | // We might have restarted this task after an evacuation pause |
ysr@777 | 4275 | // which might have evacuated the region we're holding on to |
ysr@777 | 4276 | // underneath our feet. Let's read its limit again to make sure |
ysr@777 | 4277 | // that we do not iterate over a region of the heap that |
ysr@777 | 4278 | // contains garbage (update_region_limit() will also move |
ysr@777 | 4279 | // _finger to the start of the region if it is found empty). |
ysr@777 | 4280 | update_region_limit(); |
ysr@777 | 4281 | // We will start from _finger not from the start of the region, |
ysr@777 | 4282 | // as we might be restarting this task after aborting half-way |
ysr@777 | 4283 | // through scanning this region. In this case, _finger points to |
ysr@777 | 4284 | // the address where we last found a marked object. If this is a |
ysr@777 | 4285 | // fresh region, _finger points to start(). |
ysr@777 | 4286 | MemRegion mr = MemRegion(_finger, _region_limit); |
ysr@777 | 4287 | |
tonyp@2973 | 4288 | if (_cm->verbose_low()) { |
johnc@4173 | 4289 | gclog_or_tty->print_cr("[%u] we're scanning part " |
ysr@777 | 4290 | "["PTR_FORMAT", "PTR_FORMAT") " |
johnc@4580 | 4291 | "of region "HR_FORMAT, |
drchase@6680 | 4292 | _worker_id, p2i(_finger), p2i(_region_limit), |
johnc@4580 | 4293 | HR_FORMAT_PARAMS(_curr_region)); |
tonyp@2973 | 4294 | } |
ysr@777 | 4295 | |
johnc@4580 | 4296 | assert(!_curr_region->isHumongous() || mr.start() == _curr_region->bottom(), |
johnc@4580 | 4297 | "humongous regions should go around loop once only"); |
johnc@4580 | 4298 | |
johnc@4580 | 4299 | // Some special cases: |
johnc@4580 | 4300 | // If the memory region is empty, we can just give up the region. |
johnc@4580 | 4301 | // If the current region is humongous then we only need to check |
johnc@4580 | 4302 | // the bitmap for the bit associated with the start of the object, |
johnc@4580 | 4303 | // scan the object if it's live, and give up the region. |
johnc@4580 | 4304 | // Otherwise, let's iterate over the bitmap of the part of the region |
johnc@4580 | 4305 | // that is left. |
johnc@4575 | 4306 | // If the iteration is successful, give up the region. |
johnc@4580 | 4307 | if (mr.is_empty()) { |
johnc@4580 | 4308 | giveup_current_region(); |
johnc@4580 | 4309 | regular_clock_call(); |
johnc@4580 | 4310 | } else if (_curr_region->isHumongous() && mr.start() == _curr_region->bottom()) { |
johnc@4580 | 4311 | if (_nextMarkBitMap->isMarked(mr.start())) { |
johnc@4580 | 4312 | // The object is marked - apply the closure |
johnc@4580 | 4313 | BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start()); |
johnc@4580 | 4314 | bitmap_closure.do_bit(offset); |
johnc@4580 | 4315 | } |
johnc@4580 | 4316 | // Even if this task aborted while scanning the humongous object |
johnc@4580 | 4317 | // we can (and should) give up the current region. |
johnc@4580 | 4318 | giveup_current_region(); |
johnc@4580 | 4319 | regular_clock_call(); |
johnc@4580 | 4320 | } else if (_nextMarkBitMap->iterate(&bitmap_closure, mr)) { |
ysr@777 | 4321 | giveup_current_region(); |
ysr@777 | 4322 | regular_clock_call(); |
ysr@777 | 4323 | } else { |
tonyp@1458 | 4324 | assert(has_aborted(), "currently the only way to do so"); |
ysr@777 | 4325 | // The only way to abort the bitmap iteration is to return |
ysr@777 | 4326 | // false from the do_bit() method. However, inside the |
ysr@777 | 4327 | // do_bit() method we move the _finger to point to the |
ysr@777 | 4328 | // object currently being looked at. So, if we bail out, we |
ysr@777 | 4329 | // have definitely set _finger to something non-null. |
tonyp@1458 | 4330 | assert(_finger != NULL, "invariant"); |
ysr@777 | 4331 | |
ysr@777 | 4332 | // Region iteration was actually aborted. So now _finger |
ysr@777 | 4333 | // points to the address of the object we last scanned. If we |
ysr@777 | 4334 | // leave it there, when we restart this task, we will rescan |
ysr@777 | 4335 | // the object. It is easy to avoid this. We move the finger by |
ysr@777 | 4336 | // enough to point to the next possible object header (the |
ysr@777 | 4337 | // bitmap knows by how much we need to move it as it knows its |
ysr@777 | 4338 | // granularity). |
apetrusenko@1749 | 4339 | assert(_finger < _region_limit, "invariant"); |
tamao@4733 | 4340 | HeapWord* new_finger = _nextMarkBitMap->nextObject(_finger); |
apetrusenko@1749 | 4341 | // Check if bitmap iteration was aborted while scanning the last object |
apetrusenko@1749 | 4342 | if (new_finger >= _region_limit) { |
tonyp@3691 | 4343 | giveup_current_region(); |
apetrusenko@1749 | 4344 | } else { |
tonyp@3691 | 4345 | move_finger_to(new_finger); |
apetrusenko@1749 | 4346 | } |
ysr@777 | 4347 | } |
ysr@777 | 4348 | } |
ysr@777 | 4349 | // At this point we have either completed iterating over the |
ysr@777 | 4350 | // region we were holding on to, or we have aborted. |
ysr@777 | 4351 | |
ysr@777 | 4352 | // We then partially drain the local queue and the global stack. |
ysr@777 | 4353 | // (Do we really need this?) |
ysr@777 | 4354 | drain_local_queue(true); |
ysr@777 | 4355 | drain_global_stack(true); |
ysr@777 | 4356 | |
ysr@777 | 4357 | // Read the note on the claim_region() method about why it might |
ysr@777 | 4358 | // return NULL with potentially more regions available for |
ysr@777 | 4359 | // claiming, and why we have to check out_of_regions() to determine |
ysr@777 | 4360 | // whether we're done or not. |
ysr@777 | 4361 | while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) { |
ysr@777 | 4362 | // We are going to try to claim a new region. We should have |
ysr@777 | 4363 | // given up on the previous one. |
tonyp@1458 | 4364 | // Separated the asserts so that we know which one fires. |
tonyp@1458 | 4365 | assert(_curr_region == NULL, "invariant"); |
tonyp@1458 | 4366 | assert(_finger == NULL, "invariant"); |
tonyp@1458 | 4367 | assert(_region_limit == NULL, "invariant"); |
tonyp@2973 | 4368 | if (_cm->verbose_low()) { |
johnc@4173 | 4369 | gclog_or_tty->print_cr("[%u] trying to claim a new region", _worker_id); |
tonyp@2973 | 4370 | } |
johnc@4173 | 4371 | HeapRegion* claimed_region = _cm->claim_region(_worker_id); |
ysr@777 | 4372 | if (claimed_region != NULL) { |
ysr@777 | 4373 | // Yes, we managed to claim one |
ysr@777 | 4374 | statsOnly( ++_regions_claimed ); |
ysr@777 | 4375 | |
tonyp@2973 | 4376 | if (_cm->verbose_low()) { |
johnc@4173 | 4377 | gclog_or_tty->print_cr("[%u] we successfully claimed " |
ysr@777 | 4378 | "region "PTR_FORMAT, |
drchase@6680 | 4379 | _worker_id, p2i(claimed_region)); |
tonyp@2973 | 4380 | } |
ysr@777 | 4381 | |
ysr@777 | 4382 | setup_for_region(claimed_region); |
tonyp@1458 | 4383 | assert(_curr_region == claimed_region, "invariant"); |
ysr@777 | 4384 | } |
ysr@777 | 4385 | // It is important to call the regular clock here. It might take |
ysr@777 | 4386 | // a while to claim a region if, for example, we hit a large |
ysr@777 | 4387 | // block of empty regions. So we need to call the regular clock |
ysr@777 | 4388 | // method once round the loop to make sure it's called |
ysr@777 | 4389 | // frequently enough. |
ysr@777 | 4390 | regular_clock_call(); |
ysr@777 | 4391 | } |
ysr@777 | 4392 | |
ysr@777 | 4393 | if (!has_aborted() && _curr_region == NULL) { |
tonyp@1458 | 4394 | assert(_cm->out_of_regions(), |
tonyp@1458 | 4395 | "at this point we should be out of regions"); |
ysr@777 | 4396 | } |
ysr@777 | 4397 | } while (_curr_region != NULL && !has_aborted()); |
ysr@777 | 4398 | |
ysr@777 | 4399 | if (!has_aborted()) { |
ysr@777 | 4400 | // We cannot check whether the global stack is empty, since other |
tonyp@3691 | 4401 | // tasks might be pushing objects to it concurrently. |
tonyp@1458 | 4402 | assert(_cm->out_of_regions(), |
tonyp@1458 | 4403 | "at this point we should be out of regions"); |
ysr@777 | 4404 | |
tonyp@2973 | 4405 | if (_cm->verbose_low()) { |
johnc@4173 | 4406 | gclog_or_tty->print_cr("[%u] all regions claimed", _worker_id); |
tonyp@2973 | 4407 | } |
ysr@777 | 4408 | |
ysr@777 | 4409 | // Try to reduce the number of available SATB buffers so that |
ysr@777 | 4410 | // remark has less work to do. |
ysr@777 | 4411 | drain_satb_buffers(); |
ysr@777 | 4412 | } |
ysr@777 | 4413 | |
ysr@777 | 4414 | // Since we've done everything else, we can now totally drain the |
ysr@777 | 4415 | // local queue and global stack. |
ysr@777 | 4416 | drain_local_queue(false); |
ysr@777 | 4417 | drain_global_stack(false); |
ysr@777 | 4418 | |
ysr@777 | 4419 | // Attempt to steal work from other tasks' queues. |
johnc@2494 | 4420 | if (do_stealing && !has_aborted()) { |
ysr@777 | 4421 | // We have not aborted. This means that we have finished all that |
ysr@777 | 4422 | // we could. Let's try to do some stealing... |
ysr@777 | 4423 | |
ysr@777 | 4424 | // We cannot check whether the global stack is empty, since other |
tonyp@3691 | 4425 | // tasks might be pushing objects to it concurrently. |
tonyp@1458 | 4426 | assert(_cm->out_of_regions() && _task_queue->size() == 0, |
tonyp@1458 | 4427 | "only way to reach here"); |
ysr@777 | 4428 | |
tonyp@2973 | 4429 | if (_cm->verbose_low()) { |
johnc@4173 | 4430 | gclog_or_tty->print_cr("[%u] starting to steal", _worker_id); |
tonyp@2973 | 4431 | } |
ysr@777 | 4432 | |
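// Keep trying to steal until either a steal attempt fails (there is
// no more work to take from the other queues) or this task has
// aborted (for example, because it ran over its time budget).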
ysr@777 | 4433 | while (!has_aborted()) { |
ysr@777 | 4434 | oop obj; |
ysr@777 | 4435 | statsOnly( ++_steal_attempts ); |
ysr@777 | 4436 | |
johnc@4173 | 4437 | if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) { |
tonyp@2973 | 4438 | if (_cm->verbose_medium()) { |
johnc@4173 | 4439 | gclog_or_tty->print_cr("[%u] stolen "PTR_FORMAT" successfully", |
drchase@6680 | 4440 | _worker_id, p2i((void*) obj)); |
tonyp@2973 | 4441 | } |
ysr@777 | 4442 | |
ysr@777 | 4443 | statsOnly( ++_steals ); |
ysr@777 | 4444 | |
tonyp@1458 | 4445 | assert(_nextMarkBitMap->isMarked((HeapWord*) obj), |
tonyp@1458 | 4446 | "any stolen object should be marked"); |
ysr@777 | 4447 | scan_object(obj); |
ysr@777 | 4448 | |
ysr@777 | 4449 | // And since we're towards the end, let's totally drain the |
ysr@777 | 4450 | // local queue and global stack. |
ysr@777 | 4451 | drain_local_queue(false); |
ysr@777 | 4452 | drain_global_stack(false); |
ysr@777 | 4453 | } else { |
ysr@777 | 4454 | break; |
ysr@777 | 4455 | } |
ysr@777 | 4456 | } |
ysr@777 | 4457 | } |
ysr@777 | 4458 | |
tonyp@2848 | 4459 | // If we are about to wrap up and go into termination, check if we |
tonyp@2848 | 4460 | // should raise the overflow flag. |
tonyp@2848 | 4461 | if (do_termination && !has_aborted()) { |
tonyp@2848 | 4462 | if (_cm->force_overflow()->should_force()) { |
tonyp@2848 | 4463 | _cm->set_has_overflown(); |
tonyp@2848 | 4464 | regular_clock_call(); |
tonyp@2848 | 4465 | } |
tonyp@2848 | 4466 | } |
tonyp@2848 | 4467 | |
ysr@777 | 4468 | // We still haven't aborted. Now, let's try to get into the |
ysr@777 | 4469 | // termination protocol. |
johnc@2494 | 4470 | if (do_termination && !has_aborted()) { |
ysr@777 | 4471 | // We cannot check whether the global stack is empty, since other |
tonyp@3691 | 4472 | // tasks might be concurrently pushing objects on it. |
tonyp@1458 | 4473 | // Separated the asserts so that we know which one fires. |
tonyp@1458 | 4474 | assert(_cm->out_of_regions(), "only way to reach here"); |
tonyp@1458 | 4475 | assert(_task_queue->size() == 0, "only way to reach here"); |
ysr@777 | 4476 | |
tonyp@2973 | 4477 | if (_cm->verbose_low()) { |
johnc@4173 | 4478 | gclog_or_tty->print_cr("[%u] starting termination protocol", _worker_id); |
tonyp@2973 | 4479 | } |
ysr@777 | 4480 | |
ysr@777 | 4481 | _termination_start_time_ms = os::elapsedVTime() * 1000.0; |
johnc@4787 | 4482 | |
ysr@777 | 4483 | // The CMTask class also extends the TerminatorTerminator class, |
ysr@777 | 4484 | // hence its should_exit_termination() method will also decide |
ysr@777 | 4485 | // whether to exit the termination protocol or not. |
johnc@4787 | 4486 | bool finished = (is_serial || |
johnc@4787 | 4487 | _cm->terminator()->offer_termination(this)); |
ysr@777 | 4488 | double termination_end_time_ms = os::elapsedVTime() * 1000.0; |
ysr@777 | 4489 | _termination_time_ms += |
ysr@777 | 4490 | termination_end_time_ms - _termination_start_time_ms; |
ysr@777 | 4491 | |
ysr@777 | 4492 | if (finished) { |
ysr@777 | 4493 | // We're all done. |
ysr@777 | 4494 | |
johnc@4173 | 4495 | if (_worker_id == 0) { |
ysr@777 | 4496 | // let's allow task 0 to do this |
ysr@777 | 4497 | if (concurrent()) { |
tonyp@1458 | 4498 | assert(_cm->concurrent_marking_in_progress(), "invariant"); |
ysr@777 | 4499 | // we need to set this to false before the next |
ysr@777 | 4500 | // safepoint. This way we ensure that the marking phase |
ysr@777 | 4501 | // doesn't observe any more heap expansions. |
ysr@777 | 4502 | _cm->clear_concurrent_marking_in_progress(); |
ysr@777 | 4503 | } |
ysr@777 | 4504 | } |
ysr@777 | 4505 | |
ysr@777 | 4506 | // We can now guarantee that the global stack is empty, since |
tonyp@1458 | 4507 | // all other tasks have finished. We separated the guarantees so |
tonyp@1458 | 4508 | // that, if a condition is false, we can immediately find out |
tonyp@1458 | 4509 | // which one. |
tonyp@1458 | 4510 | guarantee(_cm->out_of_regions(), "only way to reach here"); |
tonyp@1458 | 4511 | guarantee(_cm->mark_stack_empty(), "only way to reach here"); |
tonyp@1458 | 4512 | guarantee(_task_queue->size() == 0, "only way to reach here"); |
tonyp@1458 | 4513 | guarantee(!_cm->has_overflown(), "only way to reach here"); |
tonyp@1458 | 4514 | guarantee(!_cm->mark_stack_overflow(), "only way to reach here"); |
ysr@777 | 4515 | |
tonyp@2973 | 4516 | if (_cm->verbose_low()) { |
johnc@4173 | 4517 | gclog_or_tty->print_cr("[%u] all tasks terminated", _worker_id); |
tonyp@2973 | 4518 | } |
ysr@777 | 4519 | } else { |
ysr@777 | 4520 | // Apparently there's more work to do. Let's abort this task. It |
ysr@777 | 4521 | // will be restarted and we can hopefully find more things to do. |
ysr@777 | 4522 | |
tonyp@2973 | 4523 | if (_cm->verbose_low()) { |
johnc@4173 | 4524 | gclog_or_tty->print_cr("[%u] apparently there is more work to do", |
johnc@4173 | 4525 | _worker_id); |
tonyp@2973 | 4526 | } |
ysr@777 | 4527 | |
ysr@777 | 4528 | set_has_aborted(); |
ysr@777 | 4529 | statsOnly( ++_aborted_termination ); |
ysr@777 | 4530 | } |
ysr@777 | 4531 | } |
ysr@777 | 4532 | |
ysr@777 | 4533 | // Mainly for debugging purposes to make sure that a pointer to the |
ysr@777 | 4534 | // closure which was stack-allocated in this frame doesn't |
ysr@777 | 4535 | // escape it by accident. |
tonyp@2968 | 4536 | set_cm_oop_closure(NULL); |
ysr@777 | 4537 | double end_time_ms = os::elapsedVTime() * 1000.0; |
ysr@777 | 4538 | double elapsed_time_ms = end_time_ms - _start_time_ms; |
ysr@777 | 4539 | // Update the step history. |
ysr@777 | 4540 | _step_times_ms.add(elapsed_time_ms); |
ysr@777 | 4541 | |
ysr@777 | 4542 | if (has_aborted()) { |
ysr@777 | 4543 | // The task was aborted for some reason. |
ysr@777 | 4544 | |
ysr@777 | 4545 | statsOnly( ++_aborted ); |
ysr@777 | 4546 | |
johnc@2494 | 4547 | if (_has_timed_out) { |
ysr@777 | 4548 | double diff_ms = elapsed_time_ms - _time_target_ms; |
ysr@777 | 4549 | // Keep statistics of how well we did with respect to hitting |
ysr@777 | 4550 | // our target only if we actually timed out (if we aborted for |
ysr@777 | 4551 | // other reasons, then the results might get skewed). |
ysr@777 | 4552 | _marking_step_diffs_ms.add(diff_ms); |
ysr@777 | 4553 | } |
ysr@777 | 4554 | |
ysr@777 | 4555 | if (_cm->has_overflown()) { |
ysr@777 | 4556 | // This is the interesting one. We aborted because a global |
ysr@777 | 4557 | // overflow was raised. This means we have to restart the |
ysr@777 | 4558 | // marking phase and start iterating over regions. However, in |
ysr@777 | 4559 | // order to do this we have to make sure that all tasks stop |
ysr@777 | 4560 | // what they are doing and re-initialise in a safe manner. We |
ysr@777 | 4561 | // will achieve this with the use of two barrier sync points. |
ysr@777 | 4562 | |
tonyp@2973 | 4563 | if (_cm->verbose_low()) { |
johnc@4173 | 4564 | gclog_or_tty->print_cr("[%u] detected overflow", _worker_id); |
tonyp@2973 | 4565 | } |
ysr@777 | 4566 | |
johnc@4787 | 4567 | if (!is_serial) { |
johnc@4787 | 4568 | // We only need to enter the sync barrier if being called |
johnc@4787 | 4569 | // from a parallel context |
johnc@4787 | 4570 | _cm->enter_first_sync_barrier(_worker_id); |
johnc@4787 | 4571 | |
johnc@4787 | 4572 | // When we exit this sync barrier we know that all tasks have |
johnc@4787 | 4573 | // stopped doing marking work. So, it's now safe to |
johnc@4787 | 4574 | // re-initialise our data structures. At the end of this method, |
johnc@4787 | 4575 | // task 0 will clear the global data structures. |
johnc@4787 | 4576 | } |
ysr@777 | 4577 | |
ysr@777 | 4578 | statsOnly( ++_aborted_overflow ); |
ysr@777 | 4579 | |
ysr@777 | 4580 | // We clear the local state of this task... |
ysr@777 | 4581 | clear_region_fields(); |
ysr@777 | 4582 | |
johnc@4787 | 4583 | if (!is_serial) { |
johnc@4787 | 4584 | // ...and enter the second barrier. |
johnc@4787 | 4585 | _cm->enter_second_sync_barrier(_worker_id); |
johnc@4787 | 4586 | } |
johnc@4788 | 4587 | // At this point, if we're in the concurrent phase of |
johnc@4788 | 4588 | // marking, everything has been re-initialized and we're |
ysr@777 | 4589 | // ready to restart. |
ysr@777 | 4590 | } |
ysr@777 | 4591 | |
ysr@777 | 4592 | if (_cm->verbose_low()) { |
johnc@4173 | 4593 | gclog_or_tty->print_cr("[%u] <<<<<<<<<< ABORTING, target = %1.2lfms, " |
ysr@777 | 4594 | "elapsed = %1.2lfms <<<<<<<<<<", |
johnc@4173 | 4595 | _worker_id, _time_target_ms, elapsed_time_ms); |
tonyp@2973 | 4596 | if (_cm->has_aborted()) { |
johnc@4173 | 4597 | gclog_or_tty->print_cr("[%u] ========== MARKING ABORTED ==========", |
johnc@4173 | 4598 | _worker_id); |
tonyp@2973 | 4599 | } |
ysr@777 | 4600 | } |
ysr@777 | 4601 | } else { |
tonyp@2973 | 4602 | if (_cm->verbose_low()) { |
johnc@4173 | 4603 | gclog_or_tty->print_cr("[%u] <<<<<<<<<< FINISHED, target = %1.2lfms, " |
ysr@777 | 4604 | "elapsed = %1.2lfms <<<<<<<<<<", |
johnc@4173 | 4605 | _worker_id, _time_target_ms, elapsed_time_ms); |
tonyp@2973 | 4606 | } |
ysr@777 | 4607 | } |
ysr@777 | 4608 | |
ysr@777 | 4609 | _claimed = false; |
ysr@777 | 4610 | } |
ysr@777 | 4611 | |
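// CMTask constructor. This only wires up the per-worker marking state:
// the owning ConcurrentMark, the per-task liveness accounting structures
// (the marked bytes array and card bitmap), and the task queue / queue
// set used for work stealing. The marking bitmap and the CM oop closure
// are initialized to NULL here and installed separately before the task
// performs a marking step.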
johnc@4173 | 4612 | CMTask::CMTask(uint worker_id, |
ysr@777 | 4613 | ConcurrentMark* cm, |
johnc@3463 | 4614 | size_t* marked_bytes, |
johnc@3463 | 4615 | BitMap* card_bm, |
ysr@777 | 4616 | CMTaskQueue* task_queue, |
ysr@777 | 4617 | CMTaskQueueSet* task_queues) |
ysr@777 | 4618 | : _g1h(G1CollectedHeap::heap()), |
johnc@4173 | 4619 | _worker_id(worker_id), _cm(cm), |
ysr@777 | 4620 | _claimed(false), |
ysr@777 | 4621 | _nextMarkBitMap(NULL), _hash_seed(17), |
ysr@777 | 4622 | _task_queue(task_queue), |
ysr@777 | 4623 | _task_queues(task_queues), |
tonyp@2968 | 4624 | _cm_oop_closure(NULL), |
johnc@3463 | 4625 | _marked_bytes_array(marked_bytes), |
johnc@3463 | 4626 | _card_bm(card_bm) { |
tonyp@1458 | 4627 | guarantee(task_queue != NULL, "invariant"); |
tonyp@1458 | 4628 | guarantee(task_queues != NULL, "invariant"); |
ysr@777 | 4629 | |
ysr@777 | 4630 | statsOnly( _clock_due_to_scanning = 0; |
ysr@777 | 4631 | _clock_due_to_marking = 0 ); |
ysr@777 | 4632 | |
ysr@777 | 4633 | _marking_step_diffs_ms.add(0.5); |
ysr@777 | 4634 | } |
tonyp@2717 | 4635 | |
tonyp@2717 | 4636 | // These are formatting macros that are used below to ensure |
tonyp@2717 | 4637 | // consistent formatting. The *_H_* versions are used to format the |
tonyp@2717 | 4638 | // header for a particular value and they should be kept consistent |
tonyp@2717 | 4639 | // with the corresponding macro. Also note that most of the macros add |
tonyp@2717 | 4640 | // the necessary white space (as a prefix) which makes them a bit |
tonyp@2717 | 4641 | // easier to compose. |
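// The macros are composed into complete format strings in the print_cr()
// calls below: see the G1PrintRegionLivenessInfoClosure constructor for
// the header lines and doHeapRegion() for the per-region lines.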
tonyp@2717 | 4642 | |
tonyp@2717 | 4643 | // All the output lines are prefixed with this string to be able to |
tonyp@2717 | 4644 | // identify them easily in a large log file. |
tonyp@2717 | 4645 | #define G1PPRL_LINE_PREFIX "###" |
tonyp@2717 | 4646 | |
tonyp@2717 | 4647 | #define G1PPRL_ADDR_BASE_FORMAT " "PTR_FORMAT"-"PTR_FORMAT |
tonyp@2717 | 4648 | #ifdef _LP64 |
tonyp@2717 | 4649 | #define G1PPRL_ADDR_BASE_H_FORMAT " %37s" |
tonyp@2717 | 4650 | #else // _LP64 |
tonyp@2717 | 4651 | #define G1PPRL_ADDR_BASE_H_FORMAT " %21s" |
tonyp@2717 | 4652 | #endif // _LP64 |
tonyp@2717 | 4653 | |
tonyp@2717 | 4654 | // For per-region info |
tonyp@2717 | 4655 | #define G1PPRL_TYPE_FORMAT " %-4s" |
tonyp@2717 | 4656 | #define G1PPRL_TYPE_H_FORMAT " %4s" |
tonyp@2717 | 4657 | #define G1PPRL_BYTE_FORMAT " "SIZE_FORMAT_W(9) |
tonyp@2717 | 4658 | #define G1PPRL_BYTE_H_FORMAT " %9s" |
tonyp@2717 | 4659 | #define G1PPRL_DOUBLE_FORMAT " %14.1f" |
tonyp@2717 | 4660 | #define G1PPRL_DOUBLE_H_FORMAT " %14s" |
tonyp@2717 | 4661 | |
tonyp@2717 | 4662 | // For summary info |
tonyp@2717 | 4663 | #define G1PPRL_SUM_ADDR_FORMAT(tag) " "tag":"G1PPRL_ADDR_BASE_FORMAT |
tonyp@2717 | 4664 | #define G1PPRL_SUM_BYTE_FORMAT(tag) " "tag": "SIZE_FORMAT |
tonyp@2717 | 4665 | #define G1PPRL_SUM_MB_FORMAT(tag) " "tag": %1.2f MB" |
tonyp@2717 | 4666 | #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag)" / %1.2f %%" |
tonyp@2717 | 4667 | |
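// Prints the header of the region liveness report: the committed and
// reserved heap ranges, the region size, and the column titles for the
// per-region table that follows.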
tonyp@2717 | 4668 | G1PrintRegionLivenessInfoClosure:: |
tonyp@2717 | 4669 | G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name) |
tonyp@2717 | 4670 | : _out(out), |
tonyp@2717 | 4671 | _total_used_bytes(0), _total_capacity_bytes(0), |
tonyp@2717 | 4672 | _total_prev_live_bytes(0), _total_next_live_bytes(0), |
tonyp@2717 | 4673 | _hum_used_bytes(0), _hum_capacity_bytes(0), |
tschatzl@5122 | 4674 | _hum_prev_live_bytes(0), _hum_next_live_bytes(0), |
johnc@5548 | 4675 | _total_remset_bytes(0), _total_strong_code_roots_bytes(0) { |
tonyp@2717 | 4676 | G1CollectedHeap* g1h = G1CollectedHeap::heap(); |
tonyp@2717 | 4677 | MemRegion g1_committed = g1h->g1_committed(); |
tonyp@2717 | 4678 | MemRegion g1_reserved = g1h->g1_reserved(); |
tonyp@2717 | 4679 | double now = os::elapsedTime(); |
tonyp@2717 | 4680 | |
tonyp@2717 | 4681 | // Print the header of the output. |
tonyp@2717 | 4682 | _out->cr(); |
tonyp@2717 | 4683 | _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now); |
tonyp@2717 | 4684 | _out->print_cr(G1PPRL_LINE_PREFIX" HEAP" |
tonyp@2717 | 4685 | G1PPRL_SUM_ADDR_FORMAT("committed") |
tonyp@2717 | 4686 | G1PPRL_SUM_ADDR_FORMAT("reserved") |
tonyp@2717 | 4687 | G1PPRL_SUM_BYTE_FORMAT("region-size"), |
drchase@6680 | 4688 | p2i(g1_committed.start()), p2i(g1_committed.end()), |
drchase@6680 | 4689 | p2i(g1_reserved.start()), p2i(g1_reserved.end()), |
johnc@3182 | 4690 | HeapRegion::GrainBytes); |
tonyp@2717 | 4691 | _out->print_cr(G1PPRL_LINE_PREFIX); |
tonyp@2717 | 4692 | _out->print_cr(G1PPRL_LINE_PREFIX |
tschatzl@5122 | 4693 | G1PPRL_TYPE_H_FORMAT |
tschatzl@5122 | 4694 | G1PPRL_ADDR_BASE_H_FORMAT |
tschatzl@5122 | 4695 | G1PPRL_BYTE_H_FORMAT |
tschatzl@5122 | 4696 | G1PPRL_BYTE_H_FORMAT |
tschatzl@5122 | 4697 | G1PPRL_BYTE_H_FORMAT |
tschatzl@5122 | 4698 | G1PPRL_DOUBLE_H_FORMAT |
johnc@5548 | 4699 | G1PPRL_BYTE_H_FORMAT |
tschatzl@5122 | 4700 | G1PPRL_BYTE_H_FORMAT, |
tschatzl@5122 | 4701 | "type", "address-range", |
johnc@5548 | 4702 | "used", "prev-live", "next-live", "gc-eff", |
johnc@5548 | 4703 | "remset", "code-roots"); |
johnc@3173 | 4704 | _out->print_cr(G1PPRL_LINE_PREFIX |
tschatzl@5122 | 4705 | G1PPRL_TYPE_H_FORMAT |
tschatzl@5122 | 4706 | G1PPRL_ADDR_BASE_H_FORMAT |
tschatzl@5122 | 4707 | G1PPRL_BYTE_H_FORMAT |
tschatzl@5122 | 4708 | G1PPRL_BYTE_H_FORMAT |
tschatzl@5122 | 4709 | G1PPRL_BYTE_H_FORMAT |
tschatzl@5122 | 4710 | G1PPRL_DOUBLE_H_FORMAT |
johnc@5548 | 4711 | G1PPRL_BYTE_H_FORMAT |
tschatzl@5122 | 4712 | G1PPRL_BYTE_H_FORMAT, |
tschatzl@5122 | 4713 | "", "", |
johnc@5548 | 4714 | "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)", |
johnc@5548 | 4715 | "(bytes)", "(bytes)"); |
tonyp@2717 | 4716 | } |
tonyp@2717 | 4717 | |
tonyp@2717 | 4718 | // It takes a reference to one of the _hum_* fields as a parameter, |
tonyp@2717 | 4719 | // deduces the corresponding value for a region in a humongous region |
tonyp@2717 | 4720 | // series (either the region size, or what's left if the _hum_* field |
tonyp@2717 | 4721 | // is < the region size), and updates the _hum_* field accordingly. |
tonyp@2717 | 4722 | size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) { |
tonyp@2717 | 4723 | size_t bytes = 0; |
tonyp@2717 | 4724 | // The > 0 check is to deal with the prev and next live bytes which |
tonyp@2717 | 4725 | // could be 0. |
tonyp@2717 | 4726 | if (*hum_bytes > 0) { |
johnc@3182 | 4727 | bytes = MIN2(HeapRegion::GrainBytes, *hum_bytes); |
tonyp@2717 | 4728 | *hum_bytes -= bytes; |
tonyp@2717 | 4729 | } |
tonyp@2717 | 4730 | return bytes; |
tonyp@2717 | 4731 | } |
tonyp@2717 | 4732 | |
tonyp@2717 | 4733 | // It deduces the values for a region in a humongous region series |
tonyp@2717 | 4734 | // from the _hum_* fields and updates those accordingly. It assumes |
tonyp@2717 | 4735 | // that the _hum_* fields have already been set up from the "starts |
tonyp@2717 | 4736 | // humongous" region and that we visit the regions in address order. |
tonyp@2717 | 4737 | void G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* used_bytes, |
tonyp@2717 | 4738 | size_t* capacity_bytes, |
tonyp@2717 | 4739 | size_t* prev_live_bytes, |
tonyp@2717 | 4740 | size_t* next_live_bytes) { |
tonyp@2717 | 4741 | assert(_hum_used_bytes > 0 && _hum_capacity_bytes > 0, "pre-condition"); |
tonyp@2717 | 4742 | *used_bytes = get_hum_bytes(&_hum_used_bytes); |
tonyp@2717 | 4743 | *capacity_bytes = get_hum_bytes(&_hum_capacity_bytes); |
tonyp@2717 | 4744 | *prev_live_bytes = get_hum_bytes(&_hum_prev_live_bytes); |
tonyp@2717 | 4745 | *next_live_bytes = get_hum_bytes(&_hum_next_live_bytes); |
tonyp@2717 | 4746 | } |
tonyp@2717 | 4747 | |
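// Prints one formatted line of liveness information for the given region
// and adds the region's values to the running totals. For a humongous
// series the "starts humongous" region carries the values for the whole
// series, so they are re-distributed to the individual regions via the
// _hum_* fields. Always returns false so that the heap iteration is not
// terminated early.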
tonyp@2717 | 4748 | bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) { |
tonyp@2717 | 4749 | const char* type = ""; |
tonyp@2717 | 4750 | HeapWord* bottom = r->bottom(); |
tonyp@2717 | 4751 | HeapWord* end = r->end(); |
tonyp@2717 | 4752 | size_t capacity_bytes = r->capacity(); |
tonyp@2717 | 4753 | size_t used_bytes = r->used(); |
tonyp@2717 | 4754 | size_t prev_live_bytes = r->live_bytes(); |
tonyp@2717 | 4755 | size_t next_live_bytes = r->next_live_bytes(); |
tonyp@2717 | 4756 | double gc_eff = r->gc_efficiency(); |
tschatzl@5122 | 4757 | size_t remset_bytes = r->rem_set()->mem_size(); |
johnc@5548 | 4758 | size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size(); |
johnc@5548 | 4759 | |
tonyp@2717 | 4760 | if (r->used() == 0) { |
tonyp@2717 | 4761 | type = "FREE"; |
tonyp@2717 | 4762 | } else if (r->is_survivor()) { |
tonyp@2717 | 4763 | type = "SURV"; |
tonyp@2717 | 4764 | } else if (r->is_young()) { |
tonyp@2717 | 4765 | type = "EDEN"; |
tonyp@2717 | 4766 | } else if (r->startsHumongous()) { |
tonyp@2717 | 4767 | type = "HUMS"; |
tonyp@2717 | 4768 | |
tonyp@2717 | 4769 | assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 && |
tonyp@2717 | 4770 | _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0, |
tonyp@2717 | 4771 | "they should have been zeroed after the last time we used them"); |
tonyp@2717 | 4772 | // Set up the _hum_* fields. |
tonyp@2717 | 4773 | _hum_capacity_bytes = capacity_bytes; |
tonyp@2717 | 4774 | _hum_used_bytes = used_bytes; |
tonyp@2717 | 4775 | _hum_prev_live_bytes = prev_live_bytes; |
tonyp@2717 | 4776 | _hum_next_live_bytes = next_live_bytes; |
tonyp@2717 | 4777 | get_hum_bytes(&used_bytes, &capacity_bytes, |
tonyp@2717 | 4778 | &prev_live_bytes, &next_live_bytes); |
tonyp@2717 | 4779 | end = bottom + HeapRegion::GrainWords; |
tonyp@2717 | 4780 | } else if (r->continuesHumongous()) { |
tonyp@2717 | 4781 | type = "HUMC"; |
tonyp@2717 | 4782 | get_hum_bytes(&used_bytes, &capacity_bytes, |
tonyp@2717 | 4783 | &prev_live_bytes, &next_live_bytes); |
tonyp@2717 | 4784 | assert(end == bottom + HeapRegion::GrainWords, "invariant"); |
tonyp@2717 | 4785 | } else { |
tonyp@2717 | 4786 | type = "OLD"; |
tonyp@2717 | 4787 | } |
tonyp@2717 | 4788 | |
tonyp@2717 | 4789 | _total_used_bytes += used_bytes; |
tonyp@2717 | 4790 | _total_capacity_bytes += capacity_bytes; |
tonyp@2717 | 4791 | _total_prev_live_bytes += prev_live_bytes; |
tonyp@2717 | 4792 | _total_next_live_bytes += next_live_bytes; |
tschatzl@5122 | 4793 | _total_remset_bytes += remset_bytes; |
johnc@5548 | 4794 | _total_strong_code_roots_bytes += strong_code_roots_bytes; |
tonyp@2717 | 4795 | |
tonyp@2717 | 4796 | // Print a line for this particular region. |
tonyp@2717 | 4797 | _out->print_cr(G1PPRL_LINE_PREFIX |
tonyp@2717 | 4798 | G1PPRL_TYPE_FORMAT |
tonyp@2717 | 4799 | G1PPRL_ADDR_BASE_FORMAT |
tonyp@2717 | 4800 | G1PPRL_BYTE_FORMAT |
tonyp@2717 | 4801 | G1PPRL_BYTE_FORMAT |
tonyp@2717 | 4802 | G1PPRL_BYTE_FORMAT |
tschatzl@5122 | 4803 | G1PPRL_DOUBLE_FORMAT |
johnc@5548 | 4804 | G1PPRL_BYTE_FORMAT |
tschatzl@5122 | 4805 | G1PPRL_BYTE_FORMAT, |
drchase@6680 | 4806 | type, p2i(bottom), p2i(end), |
johnc@5548 | 4807 | used_bytes, prev_live_bytes, next_live_bytes, gc_eff, |
johnc@5548 | 4808 | remset_bytes, strong_code_roots_bytes); |
tonyp@2717 | 4809 | |
tonyp@2717 | 4810 | return false; |
tonyp@2717 | 4811 | } |
tonyp@2717 | 4812 | |
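// Prints the footer of the region liveness report: the totals accumulated
// over all regions, expressed in MB (and percentages of the total capacity
// where applicable).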
tonyp@2717 | 4813 | G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() { |
tschatzl@5122 | 4814 | // Add the static memory usage to the total remembered set size. |
tschatzl@5122 | 4815 | _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size(); |
tonyp@2717 | 4816 | // Print the footer of the output. |
tonyp@2717 | 4817 | _out->print_cr(G1PPRL_LINE_PREFIX); |
tonyp@2717 | 4818 | _out->print_cr(G1PPRL_LINE_PREFIX |
tonyp@2717 | 4819 | " SUMMARY" |
tonyp@2717 | 4820 | G1PPRL_SUM_MB_FORMAT("capacity") |
tonyp@2717 | 4821 | G1PPRL_SUM_MB_PERC_FORMAT("used") |
tonyp@2717 | 4822 | G1PPRL_SUM_MB_PERC_FORMAT("prev-live") |
tschatzl@5122 | 4823 | G1PPRL_SUM_MB_PERC_FORMAT("next-live") |
johnc@5548 | 4824 | G1PPRL_SUM_MB_FORMAT("remset") |
johnc@5548 | 4825 | G1PPRL_SUM_MB_FORMAT("code-roots"), |
tonyp@2717 | 4826 | bytes_to_mb(_total_capacity_bytes), |
tonyp@2717 | 4827 | bytes_to_mb(_total_used_bytes), |
tonyp@2717 | 4828 | perc(_total_used_bytes, _total_capacity_bytes), |
tonyp@2717 | 4829 | bytes_to_mb(_total_prev_live_bytes), |
tonyp@2717 | 4830 | perc(_total_prev_live_bytes, _total_capacity_bytes), |
tonyp@2717 | 4831 | bytes_to_mb(_total_next_live_bytes), |
tschatzl@5122 | 4832 | perc(_total_next_live_bytes, _total_capacity_bytes), |
johnc@5548 | 4833 | bytes_to_mb(_total_remset_bytes), |
johnc@5548 | 4834 | bytes_to_mb(_total_strong_code_roots_bytes)); |
tonyp@2717 | 4835 | _out->cr(); |
tonyp@2717 | 4836 | } |