Wed, 07 Sep 2011 12:21:23 -0400
7050392: G1: Introduce flag to generate a log of the G1 ergonomic decisions
Summary: Introduce ergonomic decision logging in G1 for the following heuristics: heap sizing, collection set construction, concurrent cycle initiation, and partially-young GC start/end. A bit of refactoring was needed in a few places to make the decision logging possible. The new logging also replaces ad-hoc alternatives that were previously scattered under different parameters and switches (G1_DEBUG, G1PolicyVerbose).
Reviewed-by: johnc, ysr
ysr@777 | 1 | /* |
tonyp@2469 | 2 | * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. |
ysr@777 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
ysr@777 | 4 | * |
ysr@777 | 5 | * This code is free software; you can redistribute it and/or modify it |
ysr@777 | 6 | * under the terms of the GNU General Public License version 2 only, as |
ysr@777 | 7 | * published by the Free Software Foundation. |
ysr@777 | 8 | * |
ysr@777 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
ysr@777 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
ysr@777 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
ysr@777 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
ysr@777 | 13 | * accompanied this code). |
ysr@777 | 14 | * |
ysr@777 | 15 | * You should have received a copy of the GNU General Public License version |
ysr@777 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
ysr@777 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
ysr@777 | 18 | * |
trims@1907 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
trims@1907 | 20 | * or visit www.oracle.com if you need additional information or have any |
trims@1907 | 21 | * questions. |
ysr@777 | 22 | * |
ysr@777 | 23 | */ |
ysr@777 | 24 | |
stefank@2314 | 25 | #include "precompiled.hpp" |
stefank@2314 | 26 | #include "classfile/symbolTable.hpp" |
tonyp@2968 | 27 | #include "gc_implementation/g1/concurrentMark.inline.hpp" |
stefank@2314 | 28 | #include "gc_implementation/g1/concurrentMarkThread.inline.hpp" |
stefank@2314 | 29 | #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" |
stefank@2314 | 30 | #include "gc_implementation/g1/g1CollectorPolicy.hpp" |
tonyp@3114 | 31 | #include "gc_implementation/g1/g1ErgoVerbose.hpp" |
tonyp@2968 | 32 | #include "gc_implementation/g1/g1OopClosures.inline.hpp" |
stefank@2314 | 33 | #include "gc_implementation/g1/g1RemSet.hpp" |
stefank@2314 | 34 | #include "gc_implementation/g1/heapRegionRemSet.hpp" |
stefank@2314 | 35 | #include "gc_implementation/g1/heapRegionSeq.inline.hpp" |
kamg@2445 | 36 | #include "gc_implementation/shared/vmGCOperations.hpp" |
stefank@2314 | 37 | #include "memory/genOopClosures.inline.hpp" |
stefank@2314 | 38 | #include "memory/referencePolicy.hpp" |
stefank@2314 | 39 | #include "memory/resourceArea.hpp" |
stefank@2314 | 40 | #include "oops/oop.inline.hpp" |
stefank@2314 | 41 | #include "runtime/handles.inline.hpp" |
stefank@2314 | 42 | #include "runtime/java.hpp" |
ysr@777 | 43 | |
ysr@777 | 44 | // |
ysr@777 | 45 | // CMS Bit Map Wrapper |
ysr@777 | 46 | |
ysr@777 | 47 | CMBitMapRO::CMBitMapRO(ReservedSpace rs, int shifter): |
ysr@777 | 48 | _bm((uintptr_t*)NULL,0), |
ysr@777 | 49 | _shifter(shifter) { |
ysr@777 | 50 | _bmStartWord = (HeapWord*)(rs.base()); |
ysr@777 | 51 | _bmWordSize = rs.size()/HeapWordSize; // rs.size() is in bytes |
ysr@777 | 52 | ReservedSpace brs(ReservedSpace::allocation_align_size_up( |
ysr@777 | 53 | (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1)); |
ysr@777 | 54 | |
ysr@777 | 55 | guarantee(brs.is_reserved(), "couldn't allocate CMS bit map"); |
ysr@777 | 56 | // For now we'll just commit all of the bit map up front. |
ysr@777 | 57 | // Later on we'll try to be more parsimonious with swap. |
ysr@777 | 58 | guarantee(_virtual_space.initialize(brs, brs.size()), |
ysr@777 | 59 | "couldn't reserve backing store for CMS bit map"); |
ysr@777 | 60 | assert(_virtual_space.committed_size() == brs.size(), |
ysr@777 | 61 | "didn't reserve backing store for all of CMS bit map?"); |
ysr@777 | 62 | _bm.set_map((uintptr_t*)_virtual_space.low()); |
ysr@777 | 63 | assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >= |
ysr@777 | 64 | _bmWordSize, "inconsistency in bit map sizing"); |
ysr@777 | 65 | _bm.set_size(_bmWordSize >> _shifter); |
ysr@777 | 66 | } |
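// Sizing sketch (illustrative note, not part of this change): one bit
// covers 2^_shifter heap words. ConcurrentMark below passes a shifter of
// MinObjAlignment - 1, typically 0 on a 64-bit VM, i.e. one bit per heap
// word. A 1 GB heap is then 2^27 heap words, so the map needs 2^27 bits
// == 16 MB of backing store; the "+ 1" above rounds the byte count up
// before alignment.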
ysr@777 | 67 | |
ysr@777 | 68 | HeapWord* CMBitMapRO::getNextMarkedWordAddress(HeapWord* addr, |
ysr@777 | 69 | HeapWord* limit) const { |
ysr@777 | 70 | // First we must round addr *up* to a possible object boundary. |
ysr@777 | 71 | addr = (HeapWord*)align_size_up((intptr_t)addr, |
ysr@777 | 72 | HeapWordSize << _shifter); |
ysr@777 | 73 | size_t addrOffset = heapWordToOffset(addr); |
tonyp@2973 | 74 | if (limit == NULL) { |
tonyp@2973 | 75 | limit = _bmStartWord + _bmWordSize; |
tonyp@2973 | 76 | } |
ysr@777 | 77 | size_t limitOffset = heapWordToOffset(limit); |
ysr@777 | 78 | size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset); |
ysr@777 | 79 | HeapWord* nextAddr = offsetToHeapWord(nextOffset); |
ysr@777 | 80 | assert(nextAddr >= addr, "get_next_one postcondition"); |
ysr@777 | 81 | assert(nextAddr == limit || isMarked(nextAddr), |
ysr@777 | 82 | "get_next_one postcondition"); |
ysr@777 | 83 | return nextAddr; |
ysr@777 | 84 | } |
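// Illustrative sketch (not part of this change): walking the marked
// objects in [start, end) with getNextMarkedWordAddress, assuming each
// marked bit denotes the start of an object:
#if 0
  HeapWord* cur = start;
  while (cur < end) {
    cur = bm->getNextMarkedWordAddress(cur, end);
    if (cur == end) break;        // no more marked words in the range
    oop obj = oop(cur);
    // ... process the marked object obj ...
    cur += obj->size();           // advance past it (size is in words)
  }
#endif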
ysr@777 | 85 | |
ysr@777 | 86 | HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(HeapWord* addr, |
ysr@777 | 87 | HeapWord* limit) const { |
ysr@777 | 88 | size_t addrOffset = heapWordToOffset(addr); |
tonyp@2973 | 89 | if (limit == NULL) { |
tonyp@2973 | 90 | limit = _bmStartWord + _bmWordSize; |
tonyp@2973 | 91 | } |
ysr@777 | 92 | size_t limitOffset = heapWordToOffset(limit); |
ysr@777 | 93 | size_t nextOffset = _bm.get_next_zero_offset(addrOffset, limitOffset); |
ysr@777 | 94 | HeapWord* nextAddr = offsetToHeapWord(nextOffset); |
ysr@777 | 95 | assert(nextAddr >= addr, "get_next_one postcondition"); |
ysr@777 | 96 | assert(nextAddr == limit || !isMarked(nextAddr), |
ysr@777 | 97 | "get_next_one postcondition"); |
ysr@777 | 98 | return nextAddr; |
ysr@777 | 99 | } |
ysr@777 | 100 | |
ysr@777 | 101 | int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const { |
ysr@777 | 102 | assert((diff & ((1 << _shifter) - 1)) == 0, "argument check"); |
ysr@777 | 103 | return (int) (diff >> _shifter); |
ysr@777 | 104 | } |
ysr@777 | 105 | |
ysr@777 | 106 | bool CMBitMapRO::iterate(BitMapClosure* cl, MemRegion mr) { |
ysr@777 | 107 | HeapWord* left = MAX2(_bmStartWord, mr.start()); |
ysr@777 | 108 | HeapWord* right = MIN2(_bmStartWord + _bmWordSize, mr.end()); |
ysr@777 | 109 | if (right > left) { |
ysr@777 | 110 | // Right-open interval [leftOffset, rightOffset). |
ysr@777 | 111 | return _bm.iterate(cl, heapWordToOffset(left), heapWordToOffset(right)); |
ysr@777 | 112 | } else { |
ysr@777 | 113 | return true; |
ysr@777 | 114 | } |
ysr@777 | 115 | } |
ysr@777 | 116 | |
ysr@777 | 117 | void CMBitMapRO::mostly_disjoint_range_union(BitMap* from_bitmap, |
ysr@777 | 118 | size_t from_start_index, |
ysr@777 | 119 | HeapWord* to_start_word, |
ysr@777 | 120 | size_t word_num) { |
ysr@777 | 121 | _bm.mostly_disjoint_range_union(from_bitmap, |
ysr@777 | 122 | from_start_index, |
ysr@777 | 123 | heapWordToOffset(to_start_word), |
ysr@777 | 124 | word_num); |
ysr@777 | 125 | } |
ysr@777 | 126 | |
ysr@777 | 127 | #ifndef PRODUCT |
ysr@777 | 128 | bool CMBitMapRO::covers(ReservedSpace rs) const { |
ysr@777 | 129 | // assert(_bm.map() == _virtual_space.low(), "map inconsistency"); |
kvn@1080 | 130 | assert(((size_t)_bm.size() * (size_t)(1 << _shifter)) == _bmWordSize, |
ysr@777 | 131 | "size inconsistency"); |
ysr@777 | 132 | return _bmStartWord == (HeapWord*)(rs.base()) && |
ysr@777 | 133 | _bmWordSize == rs.size()>>LogHeapWordSize; |
ysr@777 | 134 | } |
ysr@777 | 135 | #endif |
ysr@777 | 136 | |
ysr@777 | 137 | void CMBitMap::clearAll() { |
ysr@777 | 138 | _bm.clear(); |
ysr@777 | 139 | return; |
ysr@777 | 140 | } |
ysr@777 | 141 | |
ysr@777 | 142 | void CMBitMap::markRange(MemRegion mr) { |
ysr@777 | 143 | mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize)); |
ysr@777 | 144 | assert(!mr.is_empty(), "unexpected empty region"); |
ysr@777 | 145 | assert((offsetToHeapWord(heapWordToOffset(mr.end())) == |
ysr@777 | 146 | ((HeapWord *) mr.end())), |
ysr@777 | 147 | "markRange memory region end is not card aligned"); |
ysr@777 | 148 | // convert address range into offset range |
ysr@777 | 149 | _bm.at_put_range(heapWordToOffset(mr.start()), |
ysr@777 | 150 | heapWordToOffset(mr.end()), true); |
ysr@777 | 151 | } |
ysr@777 | 152 | |
ysr@777 | 153 | void CMBitMap::clearRange(MemRegion mr) { |
ysr@777 | 154 | mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize)); |
ysr@777 | 155 | assert(!mr.is_empty(), "unexpected empty region"); |
ysr@777 | 156 | // convert address range into offset range |
ysr@777 | 157 | _bm.at_put_range(heapWordToOffset(mr.start()), |
ysr@777 | 158 | heapWordToOffset(mr.end()), false); |
ysr@777 | 159 | } |
ysr@777 | 160 | |
ysr@777 | 161 | MemRegion CMBitMap::getAndClearMarkedRegion(HeapWord* addr, |
ysr@777 | 162 | HeapWord* end_addr) { |
ysr@777 | 163 | HeapWord* start = getNextMarkedWordAddress(addr); |
ysr@777 | 164 | start = MIN2(start, end_addr); |
ysr@777 | 165 | HeapWord* end = getNextUnmarkedWordAddress(start); |
ysr@777 | 166 | end = MIN2(end, end_addr); |
ysr@777 | 167 | assert(start <= end, "Consistency check"); |
ysr@777 | 168 | MemRegion mr(start, end); |
ysr@777 | 169 | if (!mr.is_empty()) { |
ysr@777 | 170 | clearRange(mr); |
ysr@777 | 171 | } |
ysr@777 | 172 | return mr; |
ysr@777 | 173 | } |
ysr@777 | 174 | |
ysr@777 | 175 | CMMarkStack::CMMarkStack(ConcurrentMark* cm) : |
ysr@777 | 176 | _base(NULL), _cm(cm) |
ysr@777 | 177 | #ifdef ASSERT |
ysr@777 | 178 | , _drain_in_progress(false) |
ysr@777 | 179 | , _drain_in_progress_yields(false) |
ysr@777 | 180 | #endif |
ysr@777 | 181 | {} |
ysr@777 | 182 | |
ysr@777 | 183 | void CMMarkStack::allocate(size_t size) { |
ysr@777 | 184 | _base = NEW_C_HEAP_ARRAY(oop, size); |
tonyp@2973 | 185 | if (_base == NULL) { |
ysr@777 | 186 | vm_exit_during_initialization("Failed to allocate " |
ysr@777 | 187 | "CM region mark stack"); |
tonyp@2973 | 188 | } |
ysr@777 | 189 | _index = 0; |
ysr@777 | 190 | _capacity = (jint) size; |
ysr@777 | 191 | _oops_do_bound = -1; |
ysr@777 | 192 | NOT_PRODUCT(_max_depth = 0); |
ysr@777 | 193 | } |
ysr@777 | 194 | |
ysr@777 | 195 | CMMarkStack::~CMMarkStack() { |
tonyp@2973 | 196 | if (_base != NULL) { |
tonyp@2973 | 197 | FREE_C_HEAP_ARRAY(oop, _base); |
tonyp@2973 | 198 | } |
ysr@777 | 199 | } |
ysr@777 | 200 | |
ysr@777 | 201 | void CMMarkStack::par_push(oop ptr) { |
ysr@777 | 202 | while (true) { |
ysr@777 | 203 | if (isFull()) { |
ysr@777 | 204 | _overflow = true; |
ysr@777 | 205 | return; |
ysr@777 | 206 | } |
ysr@777 | 207 | // Otherwise... |
ysr@777 | 208 | jint index = _index; |
ysr@777 | 209 | jint next_index = index+1; |
ysr@777 | 210 | jint res = Atomic::cmpxchg(next_index, &_index, index); |
ysr@777 | 211 | if (res == index) { |
ysr@777 | 212 | _base[index] = ptr; |
ysr@777 | 213 | // Note that we don't maintain this atomically. We could, but it |
ysr@777 | 214 | // doesn't seem necessary. |
ysr@777 | 215 | NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index)); |
ysr@777 | 216 | return; |
ysr@777 | 217 | } |
ysr@777 | 218 | // Otherwise, we need to try again. |
ysr@777 | 219 | } |
ysr@777 | 220 | } |
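// The loop above relies on the Atomic::cmpxchg contract: it returns the
// value _index held just before the exchange, so res == index means this
// thread won the race and exclusively owns slot index. A minimal sketch
// of the claim-then-fill pattern (illustrative only):
#if 0
  jint index = _index;
  jint res = Atomic::cmpxchg(index + 1, &_index, index);
  if (res == index) {
    _base[index] = ptr;   // slot claimed; safe to fill
  }                       // otherwise another thread won; retry
#endif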
ysr@777 | 221 | |
ysr@777 | 222 | void CMMarkStack::par_adjoin_arr(oop* ptr_arr, int n) { |
ysr@777 | 223 | while (true) { |
ysr@777 | 224 | if (isFull()) { |
ysr@777 | 225 | _overflow = true; |
ysr@777 | 226 | return; |
ysr@777 | 227 | } |
ysr@777 | 228 | // Otherwise... |
ysr@777 | 229 | jint index = _index; |
ysr@777 | 230 | jint next_index = index + n; |
ysr@777 | 231 | if (next_index > _capacity) { |
ysr@777 | 232 | _overflow = true; |
ysr@777 | 233 | return; |
ysr@777 | 234 | } |
ysr@777 | 235 | jint res = Atomic::cmpxchg(next_index, &_index, index); |
ysr@777 | 236 | if (res == index) { |
ysr@777 | 237 | for (int i = 0; i < n; i++) { |
ysr@777 | 238 | int ind = index + i; |
ysr@777 | 239 | assert(ind < _capacity, "By overflow test above."); |
ysr@777 | 240 | _base[ind] = ptr_arr[i]; |
ysr@777 | 241 | } |
ysr@777 | 242 | NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index)); |
ysr@777 | 243 | return; |
ysr@777 | 244 | } |
ysr@777 | 245 | // Otherwise, we need to try again. |
ysr@777 | 246 | } |
ysr@777 | 247 | } |
ysr@777 | 248 | |
ysr@777 | 249 | |
ysr@777 | 250 | void CMMarkStack::par_push_arr(oop* ptr_arr, int n) { |
ysr@777 | 251 | MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); |
ysr@777 | 252 | jint start = _index; |
ysr@777 | 253 | jint next_index = start + n; |
ysr@777 | 254 | if (next_index > _capacity) { |
ysr@777 | 255 | _overflow = true; |
ysr@777 | 256 | return; |
ysr@777 | 257 | } |
ysr@777 | 258 | // Otherwise. |
ysr@777 | 259 | _index = next_index; |
ysr@777 | 260 | for (int i = 0; i < n; i++) { |
ysr@777 | 261 | int ind = start + i; |
tonyp@1458 | 262 | assert(ind < _capacity, "By overflow test above."); |
ysr@777 | 263 | _base[ind] = ptr_arr[i]; |
ysr@777 | 264 | } |
ysr@777 | 265 | } |
ysr@777 | 266 | |
ysr@777 | 267 | |
ysr@777 | 268 | bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) { |
ysr@777 | 269 | MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); |
ysr@777 | 270 | jint index = _index; |
ysr@777 | 271 | if (index == 0) { |
ysr@777 | 272 | *n = 0; |
ysr@777 | 273 | return false; |
ysr@777 | 274 | } else { |
ysr@777 | 275 | int k = MIN2(max, index); |
ysr@777 | 276 | jint new_ind = index - k; |
ysr@777 | 277 | for (int j = 0; j < k; j++) { |
ysr@777 | 278 | ptr_arr[j] = _base[new_ind + j]; |
ysr@777 | 279 | } |
ysr@777 | 280 | _index = new_ind; |
ysr@777 | 281 | *n = k; |
ysr@777 | 282 | return true; |
ysr@777 | 283 | } |
ysr@777 | 284 | } |
ysr@777 | 285 | |
ysr@777 | 286 | |
ysr@777 | 287 | CMRegionStack::CMRegionStack() : _base(NULL) {} |
ysr@777 | 288 | |
ysr@777 | 289 | void CMRegionStack::allocate(size_t size) { |
ysr@777 | 290 | _base = NEW_C_HEAP_ARRAY(MemRegion, size); |
tonyp@2973 | 291 | if (_base == NULL) { |
tonyp@2973 | 292 | vm_exit_during_initialization("Failed to allocate CM region mark stack"); |
tonyp@2973 | 293 | } |
ysr@777 | 294 | _index = 0; |
ysr@777 | 295 | _capacity = (jint) size; |
ysr@777 | 296 | } |
ysr@777 | 297 | |
ysr@777 | 298 | CMRegionStack::~CMRegionStack() { |
tonyp@2973 | 299 | if (_base != NULL) { |
tonyp@2973 | 300 | FREE_C_HEAP_ARRAY(MemRegion, _base); |
tonyp@2973 | 301 | } |
ysr@777 | 302 | } |
ysr@777 | 303 | |
johnc@2190 | 304 | void CMRegionStack::push_lock_free(MemRegion mr) { |
ysr@777 | 305 | assert(mr.word_size() > 0, "Precondition"); |
ysr@777 | 306 | while (true) { |
johnc@2190 | 307 | jint index = _index; |
johnc@2190 | 308 | |
johnc@2190 | 309 | if (index >= _capacity) { |
ysr@777 | 310 | _overflow = true; |
ysr@777 | 311 | return; |
ysr@777 | 312 | } |
ysr@777 | 313 | // Otherwise... |
ysr@777 | 314 | jint next_index = index+1; |
ysr@777 | 315 | jint res = Atomic::cmpxchg(next_index, &_index, index); |
ysr@777 | 316 | if (res == index) { |
ysr@777 | 317 | _base[index] = mr; |
ysr@777 | 318 | return; |
ysr@777 | 319 | } |
ysr@777 | 320 | // Otherwise, we need to try again. |
ysr@777 | 321 | } |
ysr@777 | 322 | } |
ysr@777 | 323 | |
johnc@2190 | 324 | // Lock-free pop of the region stack. Called during the concurrent |
johnc@2190 | 325 | // marking / remark phases. Should only be called in tandem with |
johnc@2190 | 326 | // other lock-free pops. |
johnc@2190 | 327 | MemRegion CMRegionStack::pop_lock_free() { |
ysr@777 | 328 | while (true) { |
ysr@777 | 329 | jint index = _index; |
ysr@777 | 330 | |
ysr@777 | 331 | if (index == 0) { |
ysr@777 | 332 | return MemRegion(); |
ysr@777 | 333 | } |
johnc@2190 | 334 | // Otherwise... |
ysr@777 | 335 | jint next_index = index-1; |
ysr@777 | 336 | jint res = Atomic::cmpxchg(next_index, &_index, index); |
ysr@777 | 337 | if (res == index) { |
ysr@777 | 338 | MemRegion mr = _base[next_index]; |
ysr@777 | 339 | if (mr.start() != NULL) { |
tonyp@1458 | 340 | assert(mr.end() != NULL, "invariant"); |
tonyp@1458 | 341 | assert(mr.word_size() > 0, "invariant"); |
ysr@777 | 342 | return mr; |
ysr@777 | 343 | } else { |
ysr@777 | 344 | // that entry was invalidated... let's skip it |
tonyp@1458 | 345 | assert(mr.end() == NULL, "invariant"); |
ysr@777 | 346 | } |
ysr@777 | 347 | } |
ysr@777 | 348 | // Otherwise, we need to try again. |
ysr@777 | 349 | } |
ysr@777 | 350 | } |
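// Descriptive note: a MemRegion with a NULL start is the sentinel that
// invalidate_entries_into_cset() (below) stores for entries pointing
// into the collection set; pop_lock_free() simply skips such entries.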
johnc@2190 | 351 | |
johnc@2190 | 352 | #if 0 |
johnc@2190 | 353 | // The routines that manipulate the region stack with a lock are |
johnc@2190 | 354 | // not currently used. They should be retained, however, as a |
johnc@2190 | 355 | // diagnostic aid. |
tonyp@1793 | 356 | |
tonyp@1793 | 357 | void CMRegionStack::push_with_lock(MemRegion mr) { |
tonyp@1793 | 358 | assert(mr.word_size() > 0, "Precondition"); |
tonyp@1793 | 359 | MutexLockerEx x(CMRegionStack_lock, Mutex::_no_safepoint_check_flag); |
tonyp@1793 | 360 | |
tonyp@1793 | 361 | if (isFull()) { |
tonyp@1793 | 362 | _overflow = true; |
tonyp@1793 | 363 | return; |
tonyp@1793 | 364 | } |
tonyp@1793 | 365 | |
tonyp@1793 | 366 | _base[_index] = mr; |
tonyp@1793 | 367 | _index += 1; |
tonyp@1793 | 368 | } |
tonyp@1793 | 369 | |
tonyp@1793 | 370 | MemRegion CMRegionStack::pop_with_lock() { |
tonyp@1793 | 371 | MutexLockerEx x(CMRegionStack_lock, Mutex::_no_safepoint_check_flag); |
tonyp@1793 | 372 | |
tonyp@1793 | 373 | while (true) { |
tonyp@1793 | 374 | if (_index == 0) { |
tonyp@1793 | 375 | return MemRegion(); |
tonyp@1793 | 376 | } |
tonyp@1793 | 377 | _index -= 1; |
tonyp@1793 | 378 | |
tonyp@1793 | 379 | MemRegion mr = _base[_index]; |
tonyp@1793 | 380 | if (mr.start() != NULL) { |
tonyp@1793 | 381 | assert(mr.end() != NULL, "invariant"); |
tonyp@1793 | 382 | assert(mr.word_size() > 0, "invariant"); |
tonyp@1793 | 383 | return mr; |
tonyp@1793 | 384 | } else { |
tonyp@1793 | 385 | // that entry was invalidated... let's skip it |
tonyp@1793 | 386 | assert(mr.end() == NULL, "invariant"); |
tonyp@1793 | 387 | } |
tonyp@1793 | 388 | } |
tonyp@1793 | 389 | } |
johnc@2190 | 390 | #endif |
ysr@777 | 391 | |
ysr@777 | 392 | bool CMRegionStack::invalidate_entries_into_cset() { |
ysr@777 | 393 | bool result = false; |
ysr@777 | 394 | G1CollectedHeap* g1h = G1CollectedHeap::heap(); |
ysr@777 | 395 | for (int i = 0; i < _oops_do_bound; ++i) { |
ysr@777 | 396 | MemRegion mr = _base[i]; |
ysr@777 | 397 | if (mr.start() != NULL) { |
tonyp@1458 | 398 | assert(mr.end() != NULL, "invariant"); |
tonyp@1458 | 399 | assert(mr.word_size() > 0, "invariant"); |
ysr@777 | 400 | HeapRegion* hr = g1h->heap_region_containing(mr.start()); |
tonyp@1458 | 401 | assert(hr != NULL, "invariant"); |
ysr@777 | 402 | if (hr->in_collection_set()) { |
ysr@777 | 403 | // The region points into the collection set |
ysr@777 | 404 | _base[i] = MemRegion(); |
ysr@777 | 405 | result = true; |
ysr@777 | 406 | } |
ysr@777 | 407 | } else { |
ysr@777 | 408 | // that entry was invalidated... let's skip it |
tonyp@1458 | 409 | assert(mr.end() == NULL, "invariant"); |
ysr@777 | 410 | } |
ysr@777 | 411 | } |
ysr@777 | 412 | return result; |
ysr@777 | 413 | } |
ysr@777 | 414 | |
ysr@777 | 415 | template<class OopClosureClass> |
ysr@777 | 416 | bool CMMarkStack::drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after) { |
ysr@777 | 417 | assert(!_drain_in_progress || !_drain_in_progress_yields || yield_after |
ysr@777 | 418 | || SafepointSynchronize::is_at_safepoint(), |
ysr@777 | 419 | "Drain recursion must be yield-safe."); |
ysr@777 | 420 | bool res = true; |
ysr@777 | 421 | debug_only(_drain_in_progress = true); |
ysr@777 | 422 | debug_only(_drain_in_progress_yields = yield_after); |
ysr@777 | 423 | while (!isEmpty()) { |
ysr@777 | 424 | oop newOop = pop(); |
ysr@777 | 425 | assert(G1CollectedHeap::heap()->is_in_reserved(newOop), "Bad pop"); |
ysr@777 | 426 | assert(newOop->is_oop(), "Expected an oop"); |
ysr@777 | 427 | assert(bm == NULL || bm->isMarked((HeapWord*)newOop), |
ysr@777 | 428 | "only grey objects on this stack"); |
ysr@777 | 429 | // iterate over the oops in this oop, marking and pushing |
ysr@777 | 430 | // the ones in the CMS generation. |
ysr@777 | 431 | newOop->oop_iterate(cl); |
ysr@777 | 432 | if (yield_after && _cm->do_yield_check()) { |
tonyp@2973 | 433 | res = false; |
tonyp@2973 | 434 | break; |
ysr@777 | 435 | } |
ysr@777 | 436 | } |
ysr@777 | 437 | debug_only(_drain_in_progress = false); |
ysr@777 | 438 | return res; |
ysr@777 | 439 | } |
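// Illustrative use of drain() (a sketch, not part of this change; the
// closure name is hypothetical -- any OopClosureClass works):
#if 0
  SomeOopClosure cl;
  // Drain all grey objects, offering to yield between pops:
  bool completed = _markStack.drain(&cl, _nextMarkBitMap,
                                    true /* yield_after */);
  if (!completed) {
    // the drain stopped early because a yield was requested
  }
#endif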
ysr@777 | 440 | |
ysr@777 | 441 | void CMMarkStack::oops_do(OopClosure* f) { |
ysr@777 | 442 | if (_index == 0) return; |
ysr@777 | 443 | assert(_oops_do_bound != -1 && _oops_do_bound <= _index, |
ysr@777 | 444 | "Bound must be set."); |
ysr@777 | 445 | for (int i = 0; i < _oops_do_bound; i++) { |
ysr@777 | 446 | f->do_oop(&_base[i]); |
ysr@777 | 447 | } |
ysr@777 | 448 | _oops_do_bound = -1; |
ysr@777 | 449 | } |
ysr@777 | 450 | |
ysr@777 | 451 | bool ConcurrentMark::not_yet_marked(oop obj) const { |
ysr@777 | 452 | return (_g1h->is_obj_ill(obj) |
ysr@777 | 453 | || (_g1h->is_in_permanent(obj) |
ysr@777 | 454 | && !nextMarkBitMap()->isMarked((HeapWord*)obj))); |
ysr@777 | 455 | } |
ysr@777 | 456 | |
ysr@777 | 457 | #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away |
ysr@777 | 458 | #pragma warning( disable:4355 ) // 'this' : used in base member initializer list |
ysr@777 | 459 | #endif // _MSC_VER |
ysr@777 | 460 | |
ysr@777 | 461 | ConcurrentMark::ConcurrentMark(ReservedSpace rs, |
ysr@777 | 462 | int max_regions) : |
ysr@777 | 463 | _markBitMap1(rs, MinObjAlignment - 1), |
ysr@777 | 464 | _markBitMap2(rs, MinObjAlignment - 1), |
ysr@777 | 465 | |
ysr@777 | 466 | _parallel_marking_threads(0), |
ysr@777 | 467 | _sleep_factor(0.0), |
ysr@777 | 468 | _marking_task_overhead(1.0), |
ysr@777 | 469 | _cleanup_sleep_factor(0.0), |
ysr@777 | 470 | _cleanup_task_overhead(1.0), |
tonyp@2472 | 471 | _cleanup_list("Cleanup List"), |
ysr@777 | 472 | _region_bm(max_regions, false /* in_resource_area*/), |
ysr@777 | 473 | _card_bm((rs.size() + CardTableModRefBS::card_size - 1) >> |
ysr@777 | 474 | CardTableModRefBS::card_shift, |
ysr@777 | 475 | false /* in_resource_area*/), |
ysr@777 | 476 | _prevMarkBitMap(&_markBitMap1), |
ysr@777 | 477 | _nextMarkBitMap(&_markBitMap2), |
ysr@777 | 478 | _at_least_one_mark_complete(false), |
ysr@777 | 479 | |
ysr@777 | 480 | _markStack(this), |
ysr@777 | 481 | _regionStack(), |
ysr@777 | 482 | // _finger set in set_non_marking_state |
ysr@777 | 483 | |
ysr@777 | 484 | _max_task_num(MAX2(ParallelGCThreads, (size_t)1)), |
ysr@777 | 485 | // _active_tasks set in set_non_marking_state |
ysr@777 | 486 | // _tasks set inside the constructor |
ysr@777 | 487 | _task_queues(new CMTaskQueueSet((int) _max_task_num)), |
ysr@777 | 488 | _terminator(ParallelTaskTerminator((int) _max_task_num, _task_queues)), |
ysr@777 | 489 | |
ysr@777 | 490 | _has_overflown(false), |
ysr@777 | 491 | _concurrent(false), |
tonyp@1054 | 492 | _has_aborted(false), |
tonyp@1054 | 493 | _restart_for_overflow(false), |
tonyp@1054 | 494 | _concurrent_marking_in_progress(false), |
tonyp@1054 | 495 | _should_gray_objects(false), |
ysr@777 | 496 | |
ysr@777 | 497 | // _verbose_level set below |
ysr@777 | 498 | |
ysr@777 | 499 | _init_times(), |
ysr@777 | 500 | _remark_times(), _remark_mark_times(), _remark_weak_ref_times(), |
ysr@777 | 501 | _cleanup_times(), |
ysr@777 | 502 | _total_counting_time(0.0), |
ysr@777 | 503 | _total_rs_scrub_time(0.0), |
ysr@777 | 504 | |
tonyp@2973 | 505 | _parallel_workers(NULL) { |
tonyp@2973 | 506 | CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel; |
tonyp@2973 | 507 | if (verbose_level < no_verbose) { |
ysr@777 | 508 | verbose_level = no_verbose; |
tonyp@2973 | 509 | } |
tonyp@2973 | 510 | if (verbose_level > high_verbose) { |
ysr@777 | 511 | verbose_level = high_verbose; |
tonyp@2973 | 512 | } |
ysr@777 | 513 | _verbose_level = verbose_level; |
ysr@777 | 514 | |
tonyp@2973 | 515 | if (verbose_low()) { |
ysr@777 | 516 | gclog_or_tty->print_cr("[global] init, heap start = "PTR_FORMAT", " |
ysr@777 | 517 | "heap end = "PTR_FORMAT, _heap_start, _heap_end); |
tonyp@2973 | 518 | } |
ysr@777 | 519 | |
jmasa@1719 | 520 | _markStack.allocate(MarkStackSize); |
johnc@1186 | 521 | _regionStack.allocate(G1MarkRegionStackSize); |
ysr@777 | 522 | |
ysr@777 | 523 | // Create & start a ConcurrentMark thread. |
ysr@1280 | 524 | _cmThread = new ConcurrentMarkThread(this); |
ysr@1280 | 525 | assert(cmThread() != NULL, "CM Thread should have been created"); |
ysr@1280 | 526 | assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm"); |
ysr@1280 | 527 | |
ysr@777 | 528 | _g1h = G1CollectedHeap::heap(); |
ysr@777 | 529 | assert(CGC_lock != NULL, "Where's the CGC_lock?"); |
ysr@777 | 530 | assert(_markBitMap1.covers(rs), "_markBitMap1 inconsistency"); |
ysr@777 | 531 | assert(_markBitMap2.covers(rs), "_markBitMap2 inconsistency"); |
ysr@777 | 532 | |
ysr@777 | 533 | SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set(); |
tonyp@1717 | 534 | satb_qs.set_buffer_size(G1SATBBufferSize); |
ysr@777 | 535 | |
ysr@777 | 536 | _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_task_num); |
ysr@777 | 537 | _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_task_num); |
ysr@777 | 538 | |
ysr@777 | 539 | // so that the assertion in MarkingTaskQueue::task_queue doesn't fail |
ysr@777 | 540 | _active_tasks = _max_task_num; |
ysr@777 | 541 | for (int i = 0; i < (int) _max_task_num; ++i) { |
ysr@777 | 542 | CMTaskQueue* task_queue = new CMTaskQueue(); |
ysr@777 | 543 | task_queue->initialize(); |
ysr@777 | 544 | _task_queues->register_queue(i, task_queue); |
ysr@777 | 545 | |
ysr@777 | 546 | _tasks[i] = new CMTask(i, this, task_queue, _task_queues); |
ysr@777 | 547 | _accum_task_vtime[i] = 0.0; |
ysr@777 | 548 | } |
ysr@777 | 549 | |
jmasa@1719 | 550 | if (ConcGCThreads > ParallelGCThreads) { |
jmasa@1719 | 551 | vm_exit_during_initialization("Can't have more ConcGCThreads " |
ysr@777 | 552 | "than ParallelGCThreads."); |
ysr@777 | 553 | } |
ysr@777 | 554 | if (ParallelGCThreads == 0) { |
ysr@777 | 555 | // if we are not running with any parallel GC threads we will not |
ysr@777 | 556 | // spawn any marking threads either |
ysr@777 | 557 | _parallel_marking_threads = 0; |
ysr@777 | 558 | _sleep_factor = 0.0; |
ysr@777 | 559 | _marking_task_overhead = 1.0; |
ysr@777 | 560 | } else { |
jmasa@1719 | 561 | if (ConcGCThreads > 0) { |
jmasa@1719 | 562 | // notice that ConcGCThreads overrides G1MarkingOverheadPercent |
ysr@777 | 563 | // if both are set |
ysr@777 | 564 | |
jmasa@1719 | 565 | _parallel_marking_threads = ConcGCThreads; |
ysr@777 | 566 | _sleep_factor = 0.0; |
ysr@777 | 567 | _marking_task_overhead = 1.0; |
johnc@1186 | 568 | } else if (G1MarkingOverheadPercent > 0) { |
ysr@777 | 569 | // we will calculate the number of parallel marking threads |
ysr@777 | 570 | // based on a target overhead with respect to the soft real-time |
ysr@777 | 571 | // goal |
ysr@777 | 572 | |
johnc@1186 | 573 | double marking_overhead = (double) G1MarkingOverheadPercent / 100.0; |
ysr@777 | 574 | double overall_cm_overhead = |
johnc@1186 | 575 | (double) MaxGCPauseMillis * marking_overhead / |
johnc@1186 | 576 | (double) GCPauseIntervalMillis; |
ysr@777 | 577 | double cpu_ratio = 1.0 / (double) os::processor_count(); |
ysr@777 | 578 | double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio); |
ysr@777 | 579 | double marking_task_overhead = |
ysr@777 | 580 | overall_cm_overhead / marking_thread_num * |
ysr@777 | 581 | (double) os::processor_count(); |
ysr@777 | 582 | double sleep_factor = |
ysr@777 | 583 | (1.0 - marking_task_overhead) / marking_task_overhead; |
ysr@777 | 584 | |
ysr@777 | 585 | _parallel_marking_threads = (size_t) marking_thread_num; |
ysr@777 | 586 | _sleep_factor = sleep_factor; |
ysr@777 | 587 | _marking_task_overhead = marking_task_overhead; |
ysr@777 | 588 | } else { |
ysr@777 | 589 | _parallel_marking_threads = MAX2((ParallelGCThreads + 2) / 4, (size_t)1); |
ysr@777 | 590 | _sleep_factor = 0.0; |
ysr@777 | 591 | _marking_task_overhead = 1.0; |
ysr@777 | 592 | } |
ysr@777 | 593 | |
tonyp@2973 | 594 | if (parallel_marking_threads() > 1) { |
ysr@777 | 595 | _cleanup_task_overhead = 1.0; |
tonyp@2973 | 596 | } else { |
ysr@777 | 597 | _cleanup_task_overhead = marking_task_overhead(); |
tonyp@2973 | 598 | } |
ysr@777 | 599 | _cleanup_sleep_factor = |
ysr@777 | 600 | (1.0 - cleanup_task_overhead()) / cleanup_task_overhead(); |
ysr@777 | 601 | |
ysr@777 | 602 | #if 0 |
ysr@777 | 603 | gclog_or_tty->print_cr("Marking Threads %d", parallel_marking_threads()); |
ysr@777 | 604 | gclog_or_tty->print_cr("CM Marking Task Overhead %1.4lf", marking_task_overhead()); |
ysr@777 | 605 | gclog_or_tty->print_cr("CM Sleep Factor %1.4lf", sleep_factor()); |
ysr@777 | 606 | gclog_or_tty->print_cr("CL Marking Task Overhead %1.4lf", cleanup_task_overhead()); |
ysr@777 | 607 | gclog_or_tty->print_cr("CL Sleep Factor %1.4lf", cleanup_sleep_factor()); |
ysr@777 | 608 | #endif |
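// Worked example of the overhead math above (illustrative numbers):
// with G1MarkingOverheadPercent = 10, MaxGCPauseMillis = 200 and
// GCPauseIntervalMillis = 1000, overall_cm_overhead = 200 * 0.10 / 1000
// = 0.02, i.e. 2% of total CPU time. On an 8-way machine cpu_ratio =
// 0.125, so marking_thread_num = ceil(0.02 / 0.125) = 1 thread, and
// marking_task_overhead = 0.02 / 1 * 8 = 0.16; that thread then gets
// sleep_factor = (1 - 0.16) / 0.16 = 5.25, i.e. it sleeps about 5.25x
// as long as it runs.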
ysr@777 | 609 | |
tonyp@1458 | 610 | guarantee(parallel_marking_threads() > 0, "peace of mind"); |
jmasa@2188 | 611 | _parallel_workers = new FlexibleWorkGang("G1 Parallel Marking Threads", |
jmasa@2188 | 612 | (int) _parallel_marking_threads, false, true); |
jmasa@2188 | 613 | if (_parallel_workers == NULL) { |
ysr@777 | 614 | vm_exit_during_initialization("Failed necessary allocation."); |
jmasa@2188 | 615 | } else { |
jmasa@2188 | 616 | _parallel_workers->initialize_workers(); |
jmasa@2188 | 617 | } |
ysr@777 | 618 | } |
ysr@777 | 619 | |
ysr@777 | 620 | // so that the call below can read a sensible value |
ysr@777 | 621 | _heap_start = (HeapWord*) rs.base(); |
ysr@777 | 622 | set_non_marking_state(); |
ysr@777 | 623 | } |
ysr@777 | 624 | |
ysr@777 | 625 | void ConcurrentMark::update_g1_committed(bool force) { |
ysr@777 | 626 | // If concurrent marking is not in progress, then we do not need to |
ysr@777 | 627 | // update _heap_end. This has a subtle and important |
ysr@777 | 628 | // side-effect. Imagine that two evacuation pauses happen between |
ysr@777 | 629 | // marking completion and remark. The first one can grow the |
ysr@777 | 630 | // heap (hence now the finger is below the heap end). Then, the |
ysr@777 | 631 | // second one could unnecessarily push regions on the region |
ysr@777 | 632 | // stack. This causes the invariant that the region stack is empty |
ysr@777 | 633 | // at the beginning of remark to be false. By ensuring that we do |
ysr@777 | 634 | // not observe heap expansions after marking is complete, then we do |
ysr@777 | 635 | // not have this problem. |
tonyp@2973 | 636 | if (!concurrent_marking_in_progress() && !force) return; |
ysr@777 | 637 | |
ysr@777 | 638 | MemRegion committed = _g1h->g1_committed(); |
tonyp@1458 | 639 | assert(committed.start() == _heap_start, "start shouldn't change"); |
ysr@777 | 640 | HeapWord* new_end = committed.end(); |
ysr@777 | 641 | if (new_end > _heap_end) { |
ysr@777 | 642 | // The heap has been expanded. |
ysr@777 | 643 | |
ysr@777 | 644 | _heap_end = new_end; |
ysr@777 | 645 | } |
ysr@777 | 646 | // Notice that the heap can also shrink. However, this only happens |
ysr@777 | 647 | // during a Full GC (at least currently) and the entire marking |
ysr@777 | 648 | // phase will bail out and the task will not be restarted. So, let's |
ysr@777 | 649 | // do nothing. |
ysr@777 | 650 | } |
ysr@777 | 651 | |
ysr@777 | 652 | void ConcurrentMark::reset() { |
ysr@777 | 653 | // Starting values for these two. This should be called in a STW |
ysr@777 | 654 | // phase. CM will be notified of any future g1_committed expansions; |
ysr@777 | 655 | // these will be at the end of evacuation pauses, when tasks are |
ysr@777 | 656 | // inactive. |
ysr@777 | 657 | MemRegion committed = _g1h->g1_committed(); |
ysr@777 | 658 | _heap_start = committed.start(); |
ysr@777 | 659 | _heap_end = committed.end(); |
ysr@777 | 660 | |
tonyp@1458 | 661 | // Separated the asserts so that we know which one fires. |
tonyp@1458 | 662 | assert(_heap_start != NULL, "heap bounds should look ok"); |
tonyp@1458 | 663 | assert(_heap_end != NULL, "heap bounds should look ok"); |
tonyp@1458 | 664 | assert(_heap_start < _heap_end, "heap bounds should look ok"); |
ysr@777 | 665 | |
ysr@777 | 666 | // reset all the marking data structures and any necessary flags |
ysr@777 | 667 | clear_marking_state(); |
ysr@777 | 668 | |
tonyp@2973 | 669 | if (verbose_low()) { |
ysr@777 | 670 | gclog_or_tty->print_cr("[global] resetting"); |
tonyp@2973 | 671 | } |
ysr@777 | 672 | |
ysr@777 | 673 | // We do reset all of them, since different phases will use |
ysr@777 | 674 | // different number of active threads. So, it's easiest to have all |
ysr@777 | 675 | // of them ready. |
johnc@2190 | 676 | for (int i = 0; i < (int) _max_task_num; ++i) { |
ysr@777 | 677 | _tasks[i]->reset(_nextMarkBitMap); |
johnc@2190 | 678 | } |
ysr@777 | 679 | |
ysr@777 | 680 | // we need this to make sure that the flag is on during the evac |
ysr@777 | 681 | // pause with initial mark piggy-backed |
ysr@777 | 682 | set_concurrent_marking_in_progress(); |
ysr@777 | 683 | } |
ysr@777 | 684 | |
ysr@777 | 685 | void ConcurrentMark::set_phase(size_t active_tasks, bool concurrent) { |
tonyp@1458 | 686 | assert(active_tasks <= _max_task_num, "we should not have more"); |
ysr@777 | 687 | |
ysr@777 | 688 | _active_tasks = active_tasks; |
ysr@777 | 689 | // Need to update the three data structures below according to the |
ysr@777 | 690 | // number of active threads for this phase. |
ysr@777 | 691 | _terminator = ParallelTaskTerminator((int) active_tasks, _task_queues); |
ysr@777 | 692 | _first_overflow_barrier_sync.set_n_workers((int) active_tasks); |
ysr@777 | 693 | _second_overflow_barrier_sync.set_n_workers((int) active_tasks); |
ysr@777 | 694 | |
ysr@777 | 695 | _concurrent = concurrent; |
ysr@777 | 696 | // We propagate this to all tasks, not just the active ones. |
ysr@777 | 697 | for (int i = 0; i < (int) _max_task_num; ++i) |
ysr@777 | 698 | _tasks[i]->set_concurrent(concurrent); |
ysr@777 | 699 | |
ysr@777 | 700 | if (concurrent) { |
ysr@777 | 701 | set_concurrent_marking_in_progress(); |
ysr@777 | 702 | } else { |
ysr@777 | 703 | // We currently assume that the concurrent flag has been set to |
ysr@777 | 704 | // false before we start remark. At this point we should also be |
ysr@777 | 705 | // in a STW phase. |
tonyp@1458 | 706 | assert(!concurrent_marking_in_progress(), "invariant"); |
tonyp@1458 | 707 | assert(_finger == _heap_end, "only way to get here"); |
ysr@777 | 708 | update_g1_committed(true); |
ysr@777 | 709 | } |
ysr@777 | 710 | } |
ysr@777 | 711 | |
ysr@777 | 712 | void ConcurrentMark::set_non_marking_state() { |
ysr@777 | 713 | // We set the global marking state to some default values when we're |
ysr@777 | 714 | // not doing marking. |
ysr@777 | 715 | clear_marking_state(); |
ysr@777 | 716 | _active_tasks = 0; |
ysr@777 | 717 | clear_concurrent_marking_in_progress(); |
ysr@777 | 718 | } |
ysr@777 | 719 | |
ysr@777 | 720 | ConcurrentMark::~ConcurrentMark() { |
ysr@777 | 721 | for (int i = 0; i < (int) _max_task_num; ++i) { |
ysr@777 | 722 | delete _task_queues->queue(i); |
ysr@777 | 723 | delete _tasks[i]; |
ysr@777 | 724 | } |
ysr@777 | 725 | delete _task_queues; |
ysr@777 | 726 | FREE_C_HEAP_ARRAY(CMTask*, _tasks); |
ysr@777 | 727 | } |
ysr@777 | 728 | |
ysr@777 | 729 | // This closure is used to mark refs into the g1 generation |
ysr@777 | 730 | // from external roots in the CMS bit map. |
ysr@777 | 731 | // Called at the first checkpoint. |
ysr@777 | 732 | // |
ysr@777 | 733 | |
ysr@777 | 734 | void ConcurrentMark::clearNextBitmap() { |
tonyp@1794 | 735 | G1CollectedHeap* g1h = G1CollectedHeap::heap(); |
tonyp@1794 | 736 | G1CollectorPolicy* g1p = g1h->g1_policy(); |
tonyp@1794 | 737 | |
tonyp@1794 | 738 | // Make sure that the concurrent mark thread still appears to be in |
tonyp@1794 | 739 | // the current cycle. |
tonyp@1794 | 740 | guarantee(cmThread()->during_cycle(), "invariant"); |
tonyp@1794 | 741 | |
tonyp@1794 | 742 | // We are finishing up the current cycle by clearing the next |
tonyp@1794 | 743 | // marking bitmap and getting it ready for the next cycle. During |
tonyp@1794 | 744 | // this time no other cycle can start. So, let's make sure that this |
tonyp@1794 | 745 | // is the case. |
tonyp@1794 | 746 | guarantee(!g1h->mark_in_progress(), "invariant"); |
tonyp@1794 | 747 | |
tonyp@1794 | 748 | // clear the mark bitmap (no grey objects to start with). |
tonyp@1794 | 749 | // We need to do this in chunks and offer to yield in between |
tonyp@1794 | 750 | // each chunk. |
tonyp@1794 | 751 | HeapWord* start = _nextMarkBitMap->startWord(); |
tonyp@1794 | 752 | HeapWord* end = _nextMarkBitMap->endWord(); |
tonyp@1794 | 753 | HeapWord* cur = start; |
tonyp@1794 | 754 | size_t chunkSize = M; |
tonyp@1794 | 755 | while (cur < end) { |
tonyp@1794 | 756 | HeapWord* next = cur + chunkSize; |
tonyp@2973 | 757 | if (next > end) { |
tonyp@1794 | 758 | next = end; |
tonyp@2973 | 759 | } |
tonyp@1794 | 760 | MemRegion mr(cur,next); |
tonyp@1794 | 761 | _nextMarkBitMap->clearRange(mr); |
tonyp@1794 | 762 | cur = next; |
tonyp@1794 | 763 | do_yield_check(); |
tonyp@1794 | 764 | |
tonyp@1794 | 765 | // Repeat the asserts from above. We'll do them as asserts here to |
tonyp@1794 | 766 | // minimize their overhead on the product. However, we'll have |
tonyp@1794 | 767 | // them as guarantees at the beginning / end of the bitmap |
tonyp@1794 | 768 | // clearing to get some checking in the product. |
tonyp@1794 | 769 | assert(cmThread()->during_cycle(), "invariant"); |
tonyp@1794 | 770 | assert(!g1h->mark_in_progress(), "invariant"); |
tonyp@1794 | 771 | } |
tonyp@1794 | 772 | |
tonyp@1794 | 773 | // Repeat the asserts from above. |
tonyp@1794 | 774 | guarantee(cmThread()->during_cycle(), "invariant"); |
tonyp@1794 | 775 | guarantee(!g1h->mark_in_progress(), "invariant"); |
ysr@777 | 776 | } |
ysr@777 | 777 | |
ysr@777 | 778 | class NoteStartOfMarkHRClosure: public HeapRegionClosure { |
ysr@777 | 779 | public: |
ysr@777 | 780 | bool doHeapRegion(HeapRegion* r) { |
ysr@777 | 781 | if (!r->continuesHumongous()) { |
ysr@777 | 782 | r->note_start_of_marking(true); |
ysr@777 | 783 | } |
ysr@777 | 784 | return false; |
ysr@777 | 785 | } |
ysr@777 | 786 | }; |
ysr@777 | 787 | |
ysr@777 | 788 | void ConcurrentMark::checkpointRootsInitialPre() { |
ysr@777 | 789 | G1CollectedHeap* g1h = G1CollectedHeap::heap(); |
ysr@777 | 790 | G1CollectorPolicy* g1p = g1h->g1_policy(); |
ysr@777 | 791 | |
ysr@777 | 792 | _has_aborted = false; |
ysr@777 | 793 | |
jcoomes@1902 | 794 | #ifndef PRODUCT |
tonyp@1479 | 795 | if (G1PrintReachableAtInitialMark) { |
tonyp@1823 | 796 | print_reachable("at-cycle-start", |
johnc@2969 | 797 | VerifyOption_G1UsePrevMarking, true /* all */); |
tonyp@1479 | 798 | } |
jcoomes@1902 | 799 | #endif |
ysr@777 | 800 | |
ysr@777 | 801 | // Initialise marking structures. This has to be done in a STW phase. |
ysr@777 | 802 | reset(); |
ysr@777 | 803 | } |
ysr@777 | 804 | |
ysr@777 | 805 | |
ysr@777 | 806 | void ConcurrentMark::checkpointRootsInitialPost() { |
ysr@777 | 807 | G1CollectedHeap* g1h = G1CollectedHeap::heap(); |
ysr@777 | 808 | |
tonyp@2848 | 809 | // If we force an overflow during remark, the remark operation will |
tonyp@2848 | 810 | // actually abort and we'll restart concurrent marking. If we always |
tonyp@2848 | 811 | // force an overflow during remark we'll never actually complete the |
tonyp@2848 | 812 | // marking phase. So, we initialize this here, at the start of the |
tonyp@2848 | 813 | // cycle, so that the remaining overflow count will decrease at |
tonyp@2848 | 814 | // every remark and we'll eventually not need to cause one. |
tonyp@2848 | 815 | force_overflow_stw()->init(); |
tonyp@2848 | 816 | |
ysr@777 | 817 | // For each region note start of marking. |
ysr@777 | 818 | NoteStartOfMarkHRClosure startcl; |
ysr@777 | 819 | g1h->heap_region_iterate(&startcl); |
ysr@777 | 820 | |
ysr@777 | 821 | // Start weak-reference discovery. |
ysr@777 | 822 | ReferenceProcessor* rp = g1h->ref_processor(); |
ysr@777 | 823 | rp->verify_no_references_recorded(); |
ysr@777 | 824 | rp->enable_discovery(); // enable ("weak") refs discovery |
ysr@892 | 825 | rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle |
ysr@777 | 826 | |
ysr@777 | 827 | SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); |
tonyp@1752 | 828 | // This is the start of the marking cycle; we expect all |
tonyp@1752 | 829 | // threads to have SATB queues with active set to false. |
tonyp@1752 | 830 | satb_mq_set.set_active_all_threads(true, /* new active value */ |
tonyp@1752 | 831 | false /* expected_active */); |
ysr@777 | 832 | |
ysr@777 | 833 | // update_g1_committed() will be called at the end of an evac pause |
ysr@777 | 834 | // when marking is on. So, it's also called at the end of the |
ysr@777 | 835 | // initial-mark pause to update the heap end, if the heap expands |
ysr@777 | 836 | // during it. No need to call it here. |
ysr@777 | 837 | } |
ysr@777 | 838 | |
ysr@777 | 839 | /* |
tonyp@2848 | 840 | * Notice that in the next two methods, we actually leave the STS |
tonyp@2848 | 841 | * during the barrier sync and join it immediately afterwards. If we |
tonyp@2848 | 842 | * do not do this, the following deadlock can occur: one thread could |
tonyp@2848 | 843 | * be in the barrier sync code, waiting for the other thread to also |
tonyp@2848 | 844 | * sync up, whereas another one could be trying to yield, while also |
tonyp@2848 | 845 | * waiting for the other threads to sync up too. |
tonyp@2848 | 846 | * |
tonyp@2848 | 847 | * Note, however, that this code is also used during remark and in |
tonyp@2848 | 848 | * this case we should not attempt to leave / enter the STS, otherwise |
tonyp@2848 | 849 | * we'll either hit an assert (debug / fastdebug) or deadlock |
tonyp@2848 | 850 | * (product). So we should only leave / enter the STS if we are |
tonyp@2848 | 851 | * operating concurrently. |
tonyp@2848 | 852 | * |
tonyp@2848 | 853 | * Because the thread that does the sync barrier has left the STS, it |
tonyp@2848 | 854 | * is possible to be suspended for a Full GC or an evacuation pause |
tonyp@2848 | 855 | * could occur. This is actually safe, since the entering the sync |
tonyp@2848 | 856 | * barrier is one of the last things do_marking_step() does, and it |
tonyp@2848 | 857 | * doesn't manipulate any data structures afterwards. |
tonyp@2848 | 858 | */ |
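// Sketch of the deadlock described above (illustrative): thread A is in
// _first_overflow_barrier_sync.enter(), waiting for thread B to arrive;
// thread B is in a yield, waiting (as part of a safepoint) for every
// STS member -- including A -- to leave the STS. Neither can make
// progress unless A leaves the STS around the barrier sync.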
ysr@777 | 859 | |
ysr@777 | 860 | void ConcurrentMark::enter_first_sync_barrier(int task_num) { |
tonyp@2973 | 861 | if (verbose_low()) { |
ysr@777 | 862 | gclog_or_tty->print_cr("[%d] entering first barrier", task_num); |
tonyp@2973 | 863 | } |
ysr@777 | 864 | |
tonyp@2848 | 865 | if (concurrent()) { |
tonyp@2848 | 866 | ConcurrentGCThread::stsLeave(); |
tonyp@2848 | 867 | } |
ysr@777 | 868 | _first_overflow_barrier_sync.enter(); |
tonyp@2848 | 869 | if (concurrent()) { |
tonyp@2848 | 870 | ConcurrentGCThread::stsJoin(); |
tonyp@2848 | 871 | } |
ysr@777 | 872 | // at this point everyone should have synced up and not be doing any |
ysr@777 | 873 | // more work |
ysr@777 | 874 | |
tonyp@2973 | 875 | if (verbose_low()) { |
ysr@777 | 876 | gclog_or_tty->print_cr("[%d] leaving first barrier", task_num); |
tonyp@2973 | 877 | } |
ysr@777 | 878 | |
ysr@777 | 879 | // let task 0 do this |
ysr@777 | 880 | if (task_num == 0) { |
ysr@777 | 881 | // task 0 is responsible for clearing the global data structures |
tonyp@2848 | 882 | // We should be here because of an overflow. During STW we should |
tonyp@2848 | 883 | // not clear the overflow flag since we rely on it being true when |
tonyp@2848 | 884 | // we exit this method to abort the pause and restart concurrent |
tonyp@2848 | 885 | // marking. |
tonyp@2848 | 886 | clear_marking_state(concurrent() /* clear_overflow */); |
tonyp@2848 | 887 | force_overflow()->update(); |
ysr@777 | 888 | |
ysr@777 | 889 | if (PrintGC) { |
ysr@777 | 890 | gclog_or_tty->date_stamp(PrintGCDateStamps); |
ysr@777 | 891 | gclog_or_tty->stamp(PrintGCTimeStamps); |
ysr@777 | 892 | gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]"); |
ysr@777 | 893 | } |
ysr@777 | 894 | } |
ysr@777 | 895 | |
ysr@777 | 896 | // after this, each task should reset its own data structures and |
ysr@777 | 897 | // then go into the second barrier |
ysr@777 | 898 | } |
ysr@777 | 899 | |
ysr@777 | 900 | void ConcurrentMark::enter_second_sync_barrier(int task_num) { |
tonyp@2973 | 901 | if (verbose_low()) { |
ysr@777 | 902 | gclog_or_tty->print_cr("[%d] entering second barrier", task_num); |
tonyp@2973 | 903 | } |
ysr@777 | 904 | |
tonyp@2848 | 905 | if (concurrent()) { |
tonyp@2848 | 906 | ConcurrentGCThread::stsLeave(); |
tonyp@2848 | 907 | } |
ysr@777 | 908 | _second_overflow_barrier_sync.enter(); |
tonyp@2848 | 909 | if (concurrent()) { |
tonyp@2848 | 910 | ConcurrentGCThread::stsJoin(); |
tonyp@2848 | 911 | } |
ysr@777 | 912 | // at this point everything should be re-initialised and ready to go |
ysr@777 | 913 | |
tonyp@2973 | 914 | if (verbose_low()) { |
ysr@777 | 915 | gclog_or_tty->print_cr("[%d] leaving second barrier", task_num); |
tonyp@2973 | 916 | } |
ysr@777 | 917 | } |
ysr@777 | 918 | |
tonyp@2848 | 919 | #ifndef PRODUCT |
tonyp@2848 | 920 | void ForceOverflowSettings::init() { |
tonyp@2848 | 921 | _num_remaining = G1ConcMarkForceOverflow; |
tonyp@2848 | 922 | _force = false; |
tonyp@2848 | 923 | update(); |
tonyp@2848 | 924 | } |
tonyp@2848 | 925 | |
tonyp@2848 | 926 | void ForceOverflowSettings::update() { |
tonyp@2848 | 927 | if (_num_remaining > 0) { |
tonyp@2848 | 928 | _num_remaining -= 1; |
tonyp@2848 | 929 | _force = true; |
tonyp@2848 | 930 | } else { |
tonyp@2848 | 931 | _force = false; |
tonyp@2848 | 932 | } |
tonyp@2848 | 933 | } |
tonyp@2848 | 934 | |
tonyp@2848 | 935 | bool ForceOverflowSettings::should_force() { |
tonyp@2848 | 936 | if (_force) { |
tonyp@2848 | 937 | _force = false; |
tonyp@2848 | 938 | return true; |
tonyp@2848 | 939 | } else { |
tonyp@2848 | 940 | return false; |
tonyp@2848 | 941 | } |
tonyp@2848 | 942 | } |
tonyp@2848 | 943 | #endif // !PRODUCT |
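// Diagnostic use (descriptive note): in non-product builds, running with
// -XX:G1ConcMarkForceOverflow=<n> seeds _num_remaining above, forcing up
// to <n> artificial overflows per marking phase and exercising the
// restart-for-overflow path; each update() consumes one forced overflow.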
tonyp@2848 | 944 | |
ysr@777 | 945 | void ConcurrentMark::grayRoot(oop p) { |
ysr@777 | 946 | HeapWord* addr = (HeapWord*) p; |
ysr@777 | 947 | // We can't really check against _heap_start and _heap_end, since it |
ysr@777 | 948 | // is possible during an evacuation pause with piggy-backed |
ysr@777 | 949 | // initial-mark that the committed space is expanded during the |
ysr@777 | 950 | // pause without CM observing this change. So the assertion below |
ysr@777 | 951 | // is a bit conservative, but better than nothing. |
tonyp@1458 | 952 | assert(_g1h->g1_committed().contains(addr), |
tonyp@1458 | 953 | "address should be within the heap bounds"); |
ysr@777 | 954 | |
tonyp@2973 | 955 | if (!_nextMarkBitMap->isMarked(addr)) { |
ysr@777 | 956 | _nextMarkBitMap->parMark(addr); |
tonyp@2973 | 957 | } |
ysr@777 | 958 | } |
ysr@777 | 959 | |
ysr@777 | 960 | void ConcurrentMark::grayRegionIfNecessary(MemRegion mr) { |
ysr@777 | 961 | // The objects on the region have already been marked "in bulk" by |
ysr@777 | 962 | // the caller. We only need to decide whether to push the region on |
ysr@777 | 963 | // the region stack or not. |
ysr@777 | 964 | |
tonyp@2973 | 965 | if (!concurrent_marking_in_progress() || !_should_gray_objects) { |
ysr@777 | 966 | // We're done with marking and waiting for remark. We do not need to |
ysr@777 | 967 | // push anything else on the region stack. |
ysr@777 | 968 | return; |
tonyp@2973 | 969 | } |
ysr@777 | 970 | |
ysr@777 | 971 | HeapWord* finger = _finger; |
ysr@777 | 972 | |
tonyp@2973 | 973 | if (verbose_low()) { |
ysr@777 | 974 | gclog_or_tty->print_cr("[global] attempting to push " |
ysr@777 | 975 | "region ["PTR_FORMAT", "PTR_FORMAT"), finger is at " |
ysr@777 | 976 | PTR_FORMAT, mr.start(), mr.end(), finger); |
tonyp@2973 | 977 | } |
ysr@777 | 978 | |
ysr@777 | 979 | if (mr.start() < finger) { |
ysr@777 | 980 | // The finger is always heap region aligned and it is not possible |
ysr@777 | 981 | // for mr to span heap regions. |
tonyp@1458 | 982 | assert(mr.end() <= finger, "invariant"); |
tonyp@1458 | 983 | |
tonyp@1458 | 984 | // Separated the asserts so that we know which one fires. |
tonyp@1458 | 985 | assert(mr.start() <= mr.end(), |
tonyp@1458 | 986 | "region boundaries should fall within the committed space"); |
tonyp@1458 | 987 | assert(_heap_start <= mr.start(), |
tonyp@1458 | 988 | "region boundaries should fall within the committed space"); |
tonyp@1458 | 989 | assert(mr.end() <= _heap_end, |
tonyp@1458 | 990 | "region boundaries should fall within the committed space"); |
tonyp@2973 | 991 | if (verbose_low()) { |
ysr@777 | 992 | gclog_or_tty->print_cr("[global] region ["PTR_FORMAT", "PTR_FORMAT") " |
ysr@777 | 993 | "below the finger, pushing it", |
ysr@777 | 994 | mr.start(), mr.end()); |
tonyp@2973 | 995 | } |
ysr@777 | 996 | |
johnc@2190 | 997 | if (!region_stack_push_lock_free(mr)) { |
tonyp@2973 | 998 | if (verbose_low()) { |
ysr@777 | 999 | gclog_or_tty->print_cr("[global] region stack has overflown."); |
tonyp@2973 | 1000 | } |
ysr@777 | 1001 | } |
ysr@777 | 1002 | } |
ysr@777 | 1003 | } |
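// Rationale for the mr.start() < finger test above (descriptive note):
// the global finger is the claim boundary of the concurrent scan and is
// heap-region aligned. A region wholly below the finger has already been
// passed by the scan, so it must be pushed on the region stack for its
// bulk-marked objects to get scanned; regions at or above the finger
// will be reached by the scan anyway.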
ysr@777 | 1004 | |
ysr@777 | 1005 | void ConcurrentMark::markAndGrayObjectIfNecessary(oop p) { |
ysr@777 | 1006 | // The object is not marked by the caller. We need to at least mark |
ysr@777 | 1007 | // it and maybe push it on the stack. |
ysr@777 | 1008 | |
ysr@777 | 1009 | HeapWord* addr = (HeapWord*)p; |
ysr@777 | 1010 | if (!_nextMarkBitMap->isMarked(addr)) { |
ysr@777 | 1011 | // We definitely need to mark it, irrespective of whether we bail out |
ysr@777 | 1012 | // because we're done with marking. |
ysr@777 | 1013 | if (_nextMarkBitMap->parMark(addr)) { |
tonyp@2973 | 1014 | if (!concurrent_marking_in_progress() || !_should_gray_objects) { |
ysr@777 | 1015 | // If we're done with concurrent marking and we're waiting for |
ysr@777 | 1016 | // remark, then we're not pushing anything on the stack. |
ysr@777 | 1017 | return; |
tonyp@2973 | 1018 | } |
ysr@777 | 1019 | |
ysr@777 | 1020 | // No OrderAccess:store_load() is needed. It is implicit in the |
ysr@777 | 1021 | // CAS done in parMark(addr) above |
ysr@777 | 1022 | HeapWord* finger = _finger; |
ysr@777 | 1023 | |
ysr@777 | 1024 | if (addr < finger) { |
ysr@777 | 1025 | if (!mark_stack_push(oop(addr))) { |
tonyp@2973 | 1026 | if (verbose_low()) { |
ysr@777 | 1027 | gclog_or_tty->print_cr("[global] global stack overflow " |
ysr@777 | 1028 | "during parMark"); |
tonyp@2973 | 1029 | } |
ysr@777 | 1030 | } |
ysr@777 | 1031 | } |
ysr@777 | 1032 | } |
ysr@777 | 1033 | } |
ysr@777 | 1034 | } |
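// Descriptive note: the addr < finger test mirrors the region case above
// at the object level. An object below the finger will not be visited
// again by the concurrent scan, so it has to be pushed on the global
// mark stack to get scanned; objects at or above the finger will be
// visited when the finger reaches them.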
ysr@777 | 1035 | |
ysr@777 | 1036 | class CMConcurrentMarkingTask: public AbstractGangTask { |
ysr@777 | 1037 | private: |
ysr@777 | 1038 | ConcurrentMark* _cm; |
ysr@777 | 1039 | ConcurrentMarkThread* _cmt; |
ysr@777 | 1040 | |
ysr@777 | 1041 | public: |
ysr@777 | 1042 | void work(int worker_i) { |
tonyp@1458 | 1043 | assert(Thread::current()->is_ConcurrentGC_thread(), |
tonyp@1458 | 1044 | "this should only be done by a conc GC thread"); |
johnc@2316 | 1045 | ResourceMark rm; |
ysr@777 | 1046 | |
ysr@777 | 1047 | double start_vtime = os::elapsedVTime(); |
ysr@777 | 1048 | |
ysr@777 | 1049 | ConcurrentGCThread::stsJoin(); |
ysr@777 | 1050 | |
tonyp@1458 | 1051 | assert((size_t) worker_i < _cm->active_tasks(), "invariant"); |
ysr@777 | 1052 | CMTask* the_task = _cm->task(worker_i); |
ysr@777 | 1053 | the_task->record_start_time(); |
ysr@777 | 1054 | if (!_cm->has_aborted()) { |
ysr@777 | 1055 | do { |
ysr@777 | 1056 | double start_vtime_sec = os::elapsedVTime(); |
ysr@777 | 1057 | double start_time_sec = os::elapsedTime(); |
johnc@2494 | 1058 | double mark_step_duration_ms = G1ConcMarkStepDurationMillis; |
johnc@2494 | 1059 | |
johnc@2494 | 1060 | the_task->do_marking_step(mark_step_duration_ms, |
johnc@2494 | 1061 | true /* do_stealing */, |
johnc@2494 | 1062 | true /* do_termination */); |
johnc@2494 | 1063 | |
ysr@777 | 1064 | double end_time_sec = os::elapsedTime(); |
ysr@777 | 1065 | double end_vtime_sec = os::elapsedVTime(); |
ysr@777 | 1066 | double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec; |
ysr@777 | 1067 | double elapsed_time_sec = end_time_sec - start_time_sec; |
ysr@777 | 1068 | _cm->clear_has_overflown(); |
ysr@777 | 1069 | |
ysr@777 | 1070 | bool ret = _cm->do_yield_check(worker_i); |
ysr@777 | 1071 | |
ysr@777 | 1072 | jlong sleep_time_ms; |
ysr@777 | 1073 | if (!_cm->has_aborted() && the_task->has_aborted()) { |
ysr@777 | 1074 | sleep_time_ms = |
ysr@777 | 1075 | (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0); |
ysr@777 | 1076 | ConcurrentGCThread::stsLeave(); |
ysr@777 | 1077 | os::sleep(Thread::current(), sleep_time_ms, false); |
ysr@777 | 1078 | ConcurrentGCThread::stsJoin(); |
ysr@777 | 1079 | } |
ysr@777 | 1080 | double end_time2_sec = os::elapsedTime(); |
ysr@777 | 1081 | double elapsed_time2_sec = end_time2_sec - start_time_sec; |
ysr@777 | 1082 | |
ysr@777 | 1083 | #if 0 |
ysr@777 | 1084 | gclog_or_tty->print_cr("CM: elapsed %1.4lf ms, sleep %1.4lf ms, " |
ysr@777 | 1085 | "overhead %1.4lf", |
ysr@777 | 1086 | elapsed_vtime_sec * 1000.0, (double) sleep_time_ms, |
ysr@777 | 1087 | the_task->conc_overhead(os::elapsedTime()) * 8.0); |
ysr@777 | 1088 | gclog_or_tty->print_cr("elapsed time %1.4lf ms, time 2: %1.4lf ms", |
ysr@777 | 1089 | elapsed_time_sec * 1000.0, elapsed_time2_sec * 1000.0); |
ysr@777 | 1090 | #endif |
ysr@777 | 1091 | } while (!_cm->has_aborted() && the_task->has_aborted()); |
ysr@777 | 1092 | } |
ysr@777 | 1093 | the_task->record_end_time(); |
tonyp@1458 | 1094 | guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant"); |
ysr@777 | 1095 | |
ysr@777 | 1096 | ConcurrentGCThread::stsLeave(); |
ysr@777 | 1097 | |
ysr@777 | 1098 | double end_vtime = os::elapsedVTime(); |
ysr@777 | 1099 | _cm->update_accum_task_vtime(worker_i, end_vtime - start_vtime); |
ysr@777 | 1100 | } |
ysr@777 | 1101 | |
ysr@777 | 1102 | CMConcurrentMarkingTask(ConcurrentMark* cm, |
ysr@777 | 1103 | ConcurrentMarkThread* cmt) : |
ysr@777 | 1104 | AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { } |
ysr@777 | 1105 | |
ysr@777 | 1106 | ~CMConcurrentMarkingTask() { } |
ysr@777 | 1107 | }; |
ysr@777 | 1108 | |
ysr@777 | 1109 | void ConcurrentMark::markFromRoots() { |
ysr@777 | 1110 | // we might be tempted to assert that: |
ysr@777 | 1111 | // assert(asynch == !SafepointSynchronize::is_at_safepoint(), |
ysr@777 | 1112 | // "inconsistent argument?"); |
ysr@777 | 1113 | // However that wouldn't be right, because it's possible that |
ysr@777 | 1114 | // a safepoint is indeed in progress as a younger generation |
ysr@777 | 1115 | // stop-the-world GC happens even as we mark in this generation. |
ysr@777 | 1116 | |
ysr@777 | 1117 | _restart_for_overflow = false; |
ysr@777 | 1118 | |
johnc@2494 | 1119 | size_t active_workers = MAX2((size_t) 1, parallel_marking_threads()); |
tonyp@2848 | 1120 | force_overflow_conc()->init(); |
johnc@2494 | 1121 | set_phase(active_workers, true /* concurrent */); |
ysr@777 | 1122 | |
ysr@777 | 1123 | CMConcurrentMarkingTask markingTask(this, cmThread()); |
tonyp@2973 | 1124 | if (parallel_marking_threads() > 0) { |
ysr@777 | 1125 | _parallel_workers->run_task(&markingTask); |
tonyp@2973 | 1126 | } else { |
ysr@777 | 1127 | markingTask.work(0); |
tonyp@2973 | 1128 | } |
ysr@777 | 1129 | print_stats(); |
ysr@777 | 1130 | } |
ysr@777 | 1131 | |
ysr@777 | 1132 | void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) { |
ysr@777 | 1133 | // world is stopped at this checkpoint |
ysr@777 | 1134 | assert(SafepointSynchronize::is_at_safepoint(), |
ysr@777 | 1135 | "world should be stopped"); |
ysr@777 | 1136 | G1CollectedHeap* g1h = G1CollectedHeap::heap(); |
ysr@777 | 1137 | |
ysr@777 | 1138 | // If a full collection has happened, we shouldn't do this. |
ysr@777 | 1139 | if (has_aborted()) { |
ysr@777 | 1140 | g1h->set_marking_complete(); // So bitmap clearing isn't confused |
ysr@777 | 1141 | return; |
ysr@777 | 1142 | } |
ysr@777 | 1143 | |
kamg@2445 | 1144 | SvcGCMarker sgcm(SvcGCMarker::OTHER); |
kamg@2445 | 1145 | |
ysr@1280 | 1146 | if (VerifyDuringGC) { |
ysr@1280 | 1147 | HandleMark hm; // handle scope |
ysr@1280 | 1148 | gclog_or_tty->print(" VerifyDuringGC:(before)"); |
ysr@1280 | 1149 | Universe::heap()->prepare_for_verify(); |
johnc@2969 | 1150 | Universe::verify(/* allow dirty */ true, |
johnc@2969 | 1151 | /* silent */ false, |
johnc@2969 | 1152 | /* option */ VerifyOption_G1UsePrevMarking); |
ysr@1280 | 1153 | } |
ysr@1280 | 1154 | |
ysr@777 | 1155 | G1CollectorPolicy* g1p = g1h->g1_policy(); |
ysr@777 | 1156 | g1p->record_concurrent_mark_remark_start(); |
ysr@777 | 1157 | |
ysr@777 | 1158 | double start = os::elapsedTime(); |
ysr@777 | 1159 | |
ysr@777 | 1160 | checkpointRootsFinalWork(); |
ysr@777 | 1161 | |
ysr@777 | 1162 | double mark_work_end = os::elapsedTime(); |
ysr@777 | 1163 | |
ysr@777 | 1164 | weakRefsWork(clear_all_soft_refs); |
ysr@777 | 1165 | |
ysr@777 | 1166 | if (has_overflown()) { |
ysr@777 | 1167 | // Oops. We overflowed. Restart concurrent marking. |
ysr@777 | 1168 | _restart_for_overflow = true; |
ysr@777 | 1169 | // Clear the flag. We do not need it any more. |
ysr@777 | 1170 | clear_has_overflown(); |
tonyp@2973 | 1171 | if (G1TraceMarkStackOverflow) { |
ysr@777 | 1172 | gclog_or_tty->print_cr("\nRemark led to restart for overflow."); |
tonyp@2973 | 1173 | } |
ysr@777 | 1174 | } else { |
tonyp@2469 | 1175 | SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); |
ysr@777 | 1176 | // We're done with marking. |
tonyp@1752 | 1177 | // This is the end of the marking cycle; we expect all
tonyp@1752 | 1178 | // threads to have SATB queues with active set to true.
tonyp@2469 | 1179 | satb_mq_set.set_active_all_threads(false, /* new active value */ |
tonyp@2469 | 1180 | true /* expected_active */); |
tonyp@1246 | 1181 | |
tonyp@1246 | 1182 | if (VerifyDuringGC) { |
ysr@1280 | 1183 | HandleMark hm; // handle scope |
ysr@1280 | 1184 | gclog_or_tty->print(" VerifyDuringGC:(after)"); |
ysr@1280 | 1185 | Universe::heap()->prepare_for_verify(); |
johnc@2969 | 1186 | Universe::verify(/* allow dirty */ true, |
johnc@2969 | 1187 | /* silent */ false, |
johnc@2969 | 1188 | /* option */ VerifyOption_G1UseNextMarking); |
tonyp@1246 | 1189 | } |
johnc@2494 | 1190 | assert(!restart_for_overflow(), "sanity"); |
johnc@2494 | 1191 | } |
johnc@2494 | 1192 | |
johnc@2494 | 1193 | // Reset the marking state if marking completed |
johnc@2494 | 1194 | if (!restart_for_overflow()) { |
johnc@2494 | 1195 | set_non_marking_state(); |
ysr@777 | 1196 | } |
ysr@777 | 1197 | |
ysr@777 | 1198 | #if VERIFY_OBJS_PROCESSED |
ysr@777 | 1199 | _scan_obj_cl.objs_processed = 0; |
ysr@777 | 1200 | ThreadLocalObjQueue::objs_enqueued = 0; |
ysr@777 | 1201 | #endif |
ysr@777 | 1202 | |
ysr@777 | 1203 | // Statistics |
ysr@777 | 1204 | double now = os::elapsedTime(); |
ysr@777 | 1205 | _remark_mark_times.add((mark_work_end - start) * 1000.0); |
ysr@777 | 1206 | _remark_weak_ref_times.add((now - mark_work_end) * 1000.0); |
ysr@777 | 1207 | _remark_times.add((now - start) * 1000.0); |
ysr@777 | 1208 | |
ysr@777 | 1209 | g1p->record_concurrent_mark_remark_end(); |
ysr@777 | 1210 | } |
ysr@777 | 1211 | |
ysr@777 | 1212 | #define CARD_BM_TEST_MODE 0 |
ysr@777 | 1213 | |
ysr@777 | 1214 | class CalcLiveObjectsClosure: public HeapRegionClosure { |
ysr@777 | 1215 | |
ysr@777 | 1216 | CMBitMapRO* _bm; |
ysr@777 | 1217 | ConcurrentMark* _cm; |
ysr@777 | 1218 | bool _changed; |
ysr@777 | 1219 | bool _yield; |
ysr@777 | 1220 | size_t _words_done; |
ysr@777 | 1221 | size_t _tot_live; |
ysr@777 | 1222 | size_t _tot_used; |
ysr@777 | 1223 | size_t _regions_done; |
ysr@777 | 1224 | double _start_vtime_sec; |
ysr@777 | 1225 | |
ysr@777 | 1226 | BitMap* _region_bm; |
ysr@777 | 1227 | BitMap* _card_bm; |
ysr@777 | 1228 | intptr_t _bottom_card_num; |
ysr@777 | 1229 | bool _final; |
ysr@777 | 1230 | |
ysr@777 | 1231 | void mark_card_num_range(intptr_t start_card_num, intptr_t last_card_num) { |
ysr@777 | 1232 | for (intptr_t i = start_card_num; i <= last_card_num; i++) { |
ysr@777 | 1233 | #if CARD_BM_TEST_MODE |
tonyp@1458 | 1234 | guarantee(_card_bm->at(i - _bottom_card_num), "Should already be set."); |
ysr@777 | 1235 | #else |
ysr@777 | 1236 | _card_bm->par_at_put(i - _bottom_card_num, 1); |
ysr@777 | 1237 | #endif |
ysr@777 | 1238 | } |
ysr@777 | 1239 | } |
ysr@777 | 1240 | |
ysr@777 | 1241 | public: |
ysr@777 | 1242 | CalcLiveObjectsClosure(bool final, |
ysr@777 | 1243 | CMBitMapRO *bm, ConcurrentMark *cm, |
tonyp@1371 | 1244 | BitMap* region_bm, BitMap* card_bm) : |
ysr@777 | 1245 | _bm(bm), _cm(cm), _changed(false), _yield(true), |
ysr@777 | 1246 | _words_done(0), _tot_live(0), _tot_used(0), |
tonyp@1371 | 1247 | _region_bm(region_bm), _card_bm(card_bm),_final(final), |
ysr@777 | 1248 | _regions_done(0), _start_vtime_sec(0.0) |
ysr@777 | 1249 | { |
ysr@777 | 1250 | _bottom_card_num = |
ysr@777 | 1251 | intptr_t(uintptr_t(G1CollectedHeap::heap()->reserved_region().start()) >> |
ysr@777 | 1252 | CardTableModRefBS::card_shift); |
ysr@777 | 1253 | } |
ysr@777 | 1254 | |
tonyp@1264 | 1255 | // It takes a region that's not empty (i.e., it has at least one
tonyp@1264 | 1256 | // live object in it) and sets its corresponding bit on the region
tonyp@1264 | 1257 | // bitmap to 1. If the region is "starts humongous" it will also set |
tonyp@1264 | 1258 | // to 1 the bits on the region bitmap that correspond to its |
tonyp@1264 | 1259 | // associated "continues humongous" regions. |
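// Illustrative case (numbers hypothetical): a humongous object spanning
// regions 12-15 results in a single call on region 12 (the "starts
// humongous" one), which sets bits [12, 16) with one par_at_put_range()
// call; regions 13-15 are never passed to this method directly.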
tonyp@1264 | 1260 | void set_bit_for_region(HeapRegion* hr) { |
tonyp@1264 | 1261 | assert(!hr->continuesHumongous(), "should have filtered those out"); |
tonyp@1264 | 1262 | |
tonyp@1264 | 1263 | size_t index = hr->hrs_index(); |
tonyp@1264 | 1264 | if (!hr->startsHumongous()) { |
tonyp@1264 | 1265 | // Normal (non-humongous) case: just set the bit. |
tonyp@1264 | 1266 | _region_bm->par_at_put((BitMap::idx_t) index, true); |
tonyp@1264 | 1267 | } else { |
tonyp@1264 | 1268 | // Starts humongous case: calculate how many regions are part of |
tonyp@1264 | 1269 | // this humongous region and then set the bit range. It might |
tonyp@1264 | 1270 | // have been a bit more efficient to look at the object that |
tonyp@1264 | 1271 | // spans these humongous regions to calculate their number from |
tonyp@1264 | 1272 | // the object's size. However, it's a good idea to calculate |
tonyp@1264 | 1273 | // this based on the metadata itself, and not the region |
tonyp@1264 | 1274 | // contents, so that this code is not aware of what goes into |
tonyp@1264 | 1275 | // the humongous regions (in case this changes in the future). |
tonyp@1264 | 1276 | G1CollectedHeap* g1h = G1CollectedHeap::heap(); |
tonyp@1264 | 1277 | size_t end_index = index + 1; |
tonyp@1266 | 1278 | while (end_index < g1h->n_regions()) { |
tonyp@1266 | 1279 | HeapRegion* chr = g1h->region_at(end_index); |
tonyp@2973 | 1280 | if (!chr->continuesHumongous()) break; |
tonyp@1264 | 1281 | end_index += 1; |
tonyp@1264 | 1282 | } |
tonyp@1264 | 1283 | _region_bm->par_at_put_range((BitMap::idx_t) index, |
tonyp@1264 | 1284 | (BitMap::idx_t) end_index, true); |
tonyp@1264 | 1285 | } |
tonyp@1264 | 1286 | } |
tonyp@1264 | 1287 | |
ysr@777 | 1288 | bool doHeapRegion(HeapRegion* hr) { |
tonyp@2973 | 1289 | if (!_final && _regions_done == 0) { |
ysr@777 | 1290 | _start_vtime_sec = os::elapsedVTime(); |
tonyp@2973 | 1291 | } |
ysr@777 | 1292 | |
iveresov@1074 | 1293 | if (hr->continuesHumongous()) { |
tonyp@1264 | 1294 | // We will ignore these here and process them when their |
tonyp@1264 | 1295 | // associated "starts humongous" region is processed (see |
tonyp@1264 | 1296 | // set_bit_for_region()). Note that we cannot rely on their
tonyp@1264 | 1297 | // associated "starts humongous" region to have its bit set to
tonyp@1264 | 1298 | // 1 since, due to the region chunking in the parallel region |
tonyp@1264 | 1299 | // iteration, a "continues humongous" region might be visited |
tonyp@1264 | 1300 | // before its associated "starts humongous". |
iveresov@1074 | 1301 | return false; |
iveresov@1074 | 1302 | } |
ysr@777 | 1303 | |
ysr@777 | 1304 | HeapWord* nextTop = hr->next_top_at_mark_start(); |
ysr@777 | 1305 | HeapWord* start = hr->top_at_conc_mark_count(); |
ysr@777 | 1306 | assert(hr->bottom() <= start && start <= hr->end() && |
ysr@777 | 1307 | hr->bottom() <= nextTop && nextTop <= hr->end() && |
ysr@777 | 1308 | start <= nextTop, |
ysr@777 | 1309 | "Preconditions."); |
ysr@777 | 1310 | // Otherwise, record the number of words we'll examine.
ysr@777 | 1311 | size_t words_done = (nextTop - start); |
ysr@777 | 1312 | // Find the first marked object at or after "start". |
ysr@777 | 1313 | start = _bm->getNextMarkedWordAddress(start, nextTop); |
ysr@777 | 1314 | size_t marked_bytes = 0; |
ysr@777 | 1315 | |
ysr@777 | 1316 | // Below, the term "card num" means the result of shifting an address |
ysr@777 | 1317 | // by the card shift -- address 0 corresponds to card number 0. One |
ysr@777 | 1318 | // must subtract the card num of the bottom of the heap to obtain a |
ysr@777 | 1319 | // card table index. |
ysr@777 | 1320 | // The first card num of the sequence of live cards currently being |
ysr@777 | 1321 | // constructed. -1 ==> no sequence. |
ysr@777 | 1322 | intptr_t start_card_num = -1; |
ysr@777 | 1323 | // The last card num of the sequence of live cards currently being |
ysr@777 | 1324 | // constructed. -1 ==> no sequence. |
ysr@777 | 1325 | intptr_t last_card_num = -1; |
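// Worked example (illustrative only, assuming the usual 512-byte cards,
// i.e. CardTableModRefBS::card_shift == 9): an object at address
// 0x12345678 has card num 0x12345678 >> 9 = 0x91A2B. If the reserved
// region starts at 0x10000000 (bottom card num 0x80000), the object's
// card table index is 0x91A2B - 0x80000 = 0x11A2B.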
ysr@777 | 1326 | |
ysr@777 | 1327 | while (start < nextTop) { |
ysr@777 | 1328 | if (_yield && _cm->do_yield_check()) { |
ysr@777 | 1329 | // We yielded. It might be for a full collection, in which case |
ysr@777 | 1330 | // all bets are off; terminate the traversal. |
ysr@777 | 1331 | if (_cm->has_aborted()) { |
ysr@777 | 1332 | _changed = false; |
ysr@777 | 1333 | return true; |
ysr@777 | 1334 | } else { |
ysr@777 | 1335 | // Otherwise, it might be a collection pause, and the region |
ysr@777 | 1336 | // we're looking at might be in the collection set. We'll |
ysr@777 | 1337 | // abandon this region. |
ysr@777 | 1338 | return false; |
ysr@777 | 1339 | } |
ysr@777 | 1340 | } |
ysr@777 | 1341 | oop obj = oop(start); |
ysr@777 | 1342 | int obj_sz = obj->size(); |
ysr@777 | 1343 | // The card num of the start of the current object. |
ysr@777 | 1344 | intptr_t obj_card_num = |
ysr@777 | 1345 | intptr_t(uintptr_t(start) >> CardTableModRefBS::card_shift); |
ysr@777 | 1346 | |
ysr@777 | 1347 | HeapWord* obj_last = start + obj_sz - 1; |
ysr@777 | 1348 | intptr_t obj_last_card_num = |
ysr@777 | 1349 | intptr_t(uintptr_t(obj_last) >> CardTableModRefBS::card_shift); |
ysr@777 | 1350 | |
ysr@777 | 1351 | if (obj_card_num != last_card_num) { |
ysr@777 | 1352 | if (start_card_num == -1) { |
ysr@777 | 1353 | assert(last_card_num == -1, "Both or neither."); |
ysr@777 | 1354 | start_card_num = obj_card_num; |
ysr@777 | 1355 | } else { |
ysr@777 | 1356 | assert(last_card_num != -1, "Both or neither."); |
ysr@777 | 1357 | assert(obj_card_num >= last_card_num, "Inv"); |
ysr@777 | 1358 | if ((obj_card_num - last_card_num) > 1) { |
ysr@777 | 1359 | // Mark the last run, and start a new one. |
ysr@777 | 1360 | mark_card_num_range(start_card_num, last_card_num); |
ysr@777 | 1361 | start_card_num = obj_card_num; |
ysr@777 | 1362 | } |
ysr@777 | 1363 | } |
ysr@777 | 1364 | #if CARD_BM_TEST_MODE |
ysr@777 | 1365 | /* |
ysr@777 | 1366 | gclog_or_tty->print_cr("Setting bits from %d/%d.", |
ysr@777 | 1367 | obj_card_num - _bottom_card_num, |
ysr@777 | 1368 | obj_last_card_num - _bottom_card_num); |
ysr@777 | 1369 | */ |
ysr@777 | 1370 | for (intptr_t j = obj_card_num; j <= obj_last_card_num; j++) { |
ysr@777 | 1371 | _card_bm->par_at_put(j - _bottom_card_num, 1); |
ysr@777 | 1372 | } |
ysr@777 | 1373 | #endif |
ysr@777 | 1374 | } |
ysr@777 | 1375 | // In any case, we set the last card num. |
ysr@777 | 1376 | last_card_num = obj_last_card_num; |
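// Illustrative trace of the run coalescing above (card numbers are
// hypothetical): an object spanning cards 5-6 followed by an object
// ending on card 7 merely extends the current run to [5,7]; a later
// object on card 10 is more than one card away, so [5,7] is flushed
// via mark_card_num_range() and a new run starts at 10.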
ysr@777 | 1377 | |
apetrusenko@1465 | 1378 | marked_bytes += (size_t)obj_sz * HeapWordSize; |
ysr@777 | 1379 | // Find the next marked object after this one. |
ysr@777 | 1380 | start = _bm->getNextMarkedWordAddress(start + 1, nextTop); |
ysr@777 | 1381 | _changed = true; |
ysr@777 | 1382 | } |
ysr@777 | 1383 | // Handle the last range, if any. |
tonyp@2973 | 1384 | if (start_card_num != -1) { |
ysr@777 | 1385 | mark_card_num_range(start_card_num, last_card_num); |
tonyp@2973 | 1386 | } |
ysr@777 | 1387 | if (_final) { |
ysr@777 | 1388 | // Mark the allocated-since-marking portion... |
ysr@777 | 1389 | HeapWord* tp = hr->top(); |
ysr@777 | 1390 | if (nextTop < tp) { |
ysr@777 | 1391 | start_card_num = |
ysr@777 | 1392 | intptr_t(uintptr_t(nextTop) >> CardTableModRefBS::card_shift); |
ysr@777 | 1393 | last_card_num = |
ysr@777 | 1394 | intptr_t(uintptr_t(tp) >> CardTableModRefBS::card_shift); |
ysr@777 | 1395 | mark_card_num_range(start_card_num, last_card_num); |
ysr@777 | 1396 | // This definitely means the region has live objects. |
tonyp@1264 | 1397 | set_bit_for_region(hr); |
ysr@777 | 1398 | } |
ysr@777 | 1399 | } |
ysr@777 | 1400 | |
ysr@777 | 1401 | hr->add_to_marked_bytes(marked_bytes); |
ysr@777 | 1402 | // Update the live region bitmap. |
ysr@777 | 1403 | if (marked_bytes > 0) { |
tonyp@1264 | 1404 | set_bit_for_region(hr); |
ysr@777 | 1405 | } |
ysr@777 | 1406 | hr->set_top_at_conc_mark_count(nextTop); |
ysr@777 | 1407 | _tot_live += hr->next_live_bytes(); |
ysr@777 | 1408 | _tot_used += hr->used(); |
ysr@777 | 1409 | _words_done = words_done; |
ysr@777 | 1410 | |
ysr@777 | 1411 | if (!_final) { |
ysr@777 | 1412 | ++_regions_done; |
ysr@777 | 1413 | if (_regions_done % 10 == 0) { |
ysr@777 | 1414 | double end_vtime_sec = os::elapsedVTime(); |
ysr@777 | 1415 | double elapsed_vtime_sec = end_vtime_sec - _start_vtime_sec; |
ysr@777 | 1416 | if (elapsed_vtime_sec > (10.0 / 1000.0)) { |
ysr@777 | 1417 | jlong sleep_time_ms = |
ysr@777 | 1418 | (jlong) (elapsed_vtime_sec * _cm->cleanup_sleep_factor() * 1000.0); |
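// Illustrative arithmetic (the factor shown is hypothetical): with a
// cleanup sleep factor of 0.5, 15 ms of accumulated vtime gives
// (jlong)(0.015 * 0.5 * 1000.0) = 7 ms of sleep, i.e. this thread is
// throttled to roughly 15/(15+7), about a 68% duty cycle.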
ysr@777 | 1419 | os::sleep(Thread::current(), sleep_time_ms, false); |
ysr@777 | 1420 | _start_vtime_sec = end_vtime_sec; |
ysr@777 | 1421 | } |
ysr@777 | 1422 | } |
ysr@777 | 1423 | } |
ysr@777 | 1424 | |
ysr@777 | 1425 | return false; |
ysr@777 | 1426 | } |
ysr@777 | 1427 | |
ysr@777 | 1428 | bool changed() { return _changed; } |
ysr@777 | 1429 | void reset() { _changed = false; _words_done = 0; } |
ysr@777 | 1430 | void no_yield() { _yield = false; } |
ysr@777 | 1431 | size_t words_done() { return _words_done; } |
ysr@777 | 1432 | size_t tot_live() { return _tot_live; } |
ysr@777 | 1433 | size_t tot_used() { return _tot_used; } |
ysr@777 | 1434 | }; |
ysr@777 | 1435 | |
ysr@777 | 1436 | |
ysr@777 | 1437 | void ConcurrentMark::calcDesiredRegions() { |
ysr@777 | 1438 | _region_bm.clear(); |
ysr@777 | 1439 | _card_bm.clear(); |
ysr@777 | 1440 | CalcLiveObjectsClosure calccl(false /*final*/, |
ysr@777 | 1441 | nextMarkBitMap(), this, |
tonyp@1371 | 1442 | &_region_bm, &_card_bm); |
ysr@777 | 1443 | G1CollectedHeap *g1h = G1CollectedHeap::heap(); |
ysr@777 | 1444 | g1h->heap_region_iterate(&calccl); |
ysr@777 | 1445 | |
ysr@777 | 1446 | do { |
ysr@777 | 1447 | calccl.reset(); |
ysr@777 | 1448 | g1h->heap_region_iterate(&calccl); |
ysr@777 | 1449 | } while (calccl.changed()); |
ysr@777 | 1450 | } |
ysr@777 | 1451 | |
ysr@777 | 1452 | class G1ParFinalCountTask: public AbstractGangTask { |
ysr@777 | 1453 | protected: |
ysr@777 | 1454 | G1CollectedHeap* _g1h; |
ysr@777 | 1455 | CMBitMap* _bm; |
ysr@777 | 1456 | size_t _n_workers; |
ysr@777 | 1457 | size_t *_live_bytes; |
ysr@777 | 1458 | size_t *_used_bytes; |
ysr@777 | 1459 | BitMap* _region_bm; |
ysr@777 | 1460 | BitMap* _card_bm; |
ysr@777 | 1461 | public: |
ysr@777 | 1462 | G1ParFinalCountTask(G1CollectedHeap* g1h, CMBitMap* bm, |
tonyp@2973 | 1463 | BitMap* region_bm, BitMap* card_bm) |
tonyp@2973 | 1464 | : AbstractGangTask("G1 final counting"), _g1h(g1h), |
tonyp@2973 | 1465 | _bm(bm), _region_bm(region_bm), _card_bm(card_bm) { |
tonyp@2973 | 1466 | if (ParallelGCThreads > 0) { |
ysr@777 | 1467 | _n_workers = _g1h->workers()->total_workers(); |
tonyp@2973 | 1468 | } else { |
ysr@777 | 1469 | _n_workers = 1; |
tonyp@2973 | 1470 | } |
ysr@777 | 1471 | _live_bytes = NEW_C_HEAP_ARRAY(size_t, _n_workers); |
ysr@777 | 1472 | _used_bytes = NEW_C_HEAP_ARRAY(size_t, _n_workers); |
ysr@777 | 1473 | } |
ysr@777 | 1474 | |
ysr@777 | 1475 | ~G1ParFinalCountTask() { |
ysr@777 | 1476 | FREE_C_HEAP_ARRAY(size_t, _live_bytes); |
ysr@777 | 1477 | FREE_C_HEAP_ARRAY(size_t, _used_bytes); |
ysr@777 | 1478 | } |
ysr@777 | 1479 | |
ysr@777 | 1480 | void work(int i) { |
ysr@777 | 1481 | CalcLiveObjectsClosure calccl(true /*final*/, |
ysr@777 | 1482 | _bm, _g1h->concurrent_mark(), |
tonyp@1371 | 1483 | _region_bm, _card_bm); |
ysr@777 | 1484 | calccl.no_yield(); |
jmasa@2188 | 1485 | if (G1CollectedHeap::use_parallel_gc_threads()) { |
tonyp@790 | 1486 | _g1h->heap_region_par_iterate_chunked(&calccl, i, |
tonyp@790 | 1487 | HeapRegion::FinalCountClaimValue); |
ysr@777 | 1488 | } else { |
ysr@777 | 1489 | _g1h->heap_region_iterate(&calccl); |
ysr@777 | 1490 | } |
ysr@777 | 1491 | assert(calccl.complete(), "Shouldn't have yielded!"); |
ysr@777 | 1492 | |
tonyp@1458 | 1493 | assert((size_t) i < _n_workers, "invariant"); |
ysr@777 | 1494 | _live_bytes[i] = calccl.tot_live(); |
ysr@777 | 1495 | _used_bytes[i] = calccl.tot_used(); |
ysr@777 | 1496 | } |
ysr@777 | 1497 | size_t live_bytes() { |
ysr@777 | 1498 | size_t live_bytes = 0; |
ysr@777 | 1499 | for (size_t i = 0; i < _n_workers; ++i) |
ysr@777 | 1500 | live_bytes += _live_bytes[i]; |
ysr@777 | 1501 | return live_bytes; |
ysr@777 | 1502 | } |
ysr@777 | 1503 | size_t used_bytes() { |
ysr@777 | 1504 | size_t used_bytes = 0; |
ysr@777 | 1505 | for (size_t i = 0; i < _n_workers; ++i) |
ysr@777 | 1506 | used_bytes += _used_bytes[i]; |
ysr@777 | 1507 | return used_bytes; |
ysr@777 | 1508 | } |
ysr@777 | 1509 | }; |
ysr@777 | 1510 | |
ysr@777 | 1511 | class G1ParNoteEndTask; |
ysr@777 | 1512 | |
ysr@777 | 1513 | class G1NoteEndOfConcMarkClosure : public HeapRegionClosure { |
ysr@777 | 1514 | G1CollectedHeap* _g1; |
ysr@777 | 1515 | int _worker_num; |
ysr@777 | 1516 | size_t _max_live_bytes; |
ysr@777 | 1517 | size_t _regions_claimed; |
ysr@777 | 1518 | size_t _freed_bytes; |
tonyp@2493 | 1519 | FreeRegionList* _local_cleanup_list; |
tonyp@2493 | 1520 | HumongousRegionSet* _humongous_proxy_set; |
tonyp@2493 | 1521 | HRRSCleanupTask* _hrrs_cleanup_task; |
ysr@777 | 1522 | double _claimed_region_time; |
ysr@777 | 1523 | double _max_region_time; |
ysr@777 | 1524 | |
ysr@777 | 1525 | public: |
ysr@777 | 1526 | G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1, |
tonyp@2493 | 1527 | int worker_num, |
tonyp@2493 | 1528 | FreeRegionList* local_cleanup_list, |
tonyp@2493 | 1529 | HumongousRegionSet* humongous_proxy_set, |
tonyp@2493 | 1530 | HRRSCleanupTask* hrrs_cleanup_task); |
ysr@777 | 1531 | size_t freed_bytes() { return _freed_bytes; } |
ysr@777 | 1532 | |
ysr@777 | 1533 | bool doHeapRegion(HeapRegion *r); |
ysr@777 | 1534 | |
ysr@777 | 1535 | size_t max_live_bytes() { return _max_live_bytes; } |
ysr@777 | 1536 | size_t regions_claimed() { return _regions_claimed; } |
ysr@777 | 1537 | double claimed_region_time_sec() { return _claimed_region_time; } |
ysr@777 | 1538 | double max_region_time_sec() { return _max_region_time; } |
ysr@777 | 1539 | }; |
ysr@777 | 1540 | |
ysr@777 | 1541 | class G1ParNoteEndTask: public AbstractGangTask { |
ysr@777 | 1542 | friend class G1NoteEndOfConcMarkClosure; |
tonyp@2472 | 1543 | |
ysr@777 | 1544 | protected: |
ysr@777 | 1545 | G1CollectedHeap* _g1h; |
ysr@777 | 1546 | size_t _max_live_bytes; |
ysr@777 | 1547 | size_t _freed_bytes; |
tonyp@2472 | 1548 | FreeRegionList* _cleanup_list; |
tonyp@2472 | 1549 | |
ysr@777 | 1550 | public: |
ysr@777 | 1551 | G1ParNoteEndTask(G1CollectedHeap* g1h, |
tonyp@2472 | 1552 | FreeRegionList* cleanup_list) : |
ysr@777 | 1553 | AbstractGangTask("G1 note end"), _g1h(g1h), |
tonyp@2472 | 1554 | _max_live_bytes(0), _freed_bytes(0), _cleanup_list(cleanup_list) { } |
ysr@777 | 1555 | |
ysr@777 | 1556 | void work(int i) { |
ysr@777 | 1557 | double start = os::elapsedTime(); |
tonyp@2493 | 1558 | FreeRegionList local_cleanup_list("Local Cleanup List"); |
tonyp@2493 | 1559 | HumongousRegionSet humongous_proxy_set("Local Cleanup Humongous Proxy Set"); |
tonyp@2493 | 1560 | HRRSCleanupTask hrrs_cleanup_task; |
tonyp@2493 | 1561 | G1NoteEndOfConcMarkClosure g1_note_end(_g1h, i, &local_cleanup_list, |
tonyp@2493 | 1562 | &humongous_proxy_set, |
tonyp@2493 | 1563 | &hrrs_cleanup_task); |
jmasa@2188 | 1564 | if (G1CollectedHeap::use_parallel_gc_threads()) { |
tonyp@790 | 1565 | _g1h->heap_region_par_iterate_chunked(&g1_note_end, i, |
tonyp@790 | 1566 | HeapRegion::NoteEndClaimValue); |
ysr@777 | 1567 | } else { |
ysr@777 | 1568 | _g1h->heap_region_iterate(&g1_note_end); |
ysr@777 | 1569 | } |
ysr@777 | 1570 | assert(g1_note_end.complete(), "Shouldn't have yielded!"); |
ysr@777 | 1571 | |
tonyp@2472 | 1572 | // Now update the lists |
tonyp@2472 | 1573 | _g1h->update_sets_after_freeing_regions(g1_note_end.freed_bytes(), |
tonyp@2472 | 1574 | NULL /* free_list */, |
tonyp@2493 | 1575 | &humongous_proxy_set, |
tonyp@2472 | 1576 | true /* par */); |
ysr@777 | 1577 | { |
ysr@777 | 1578 | MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); |
ysr@777 | 1579 | _max_live_bytes += g1_note_end.max_live_bytes(); |
ysr@777 | 1580 | _freed_bytes += g1_note_end.freed_bytes(); |
tonyp@2472 | 1581 | |
tonyp@2975 | 1582 | // If we iterate over the global cleanup list at the end of |
tonyp@2975 | 1583 | // cleanup to do this printing we will not guarantee to only |
tonyp@2975 | 1584 | // generate output for the newly-reclaimed regions (the list |
tonyp@2975 | 1585 | // might not be empty at the beginning of cleanup; we might |
tonyp@2975 | 1586 | // still be working on its previous contents). So we do the |
tonyp@2975 | 1587 | // printing here, before we append the new regions to the global |
tonyp@2975 | 1588 | // cleanup list. |
tonyp@2975 | 1589 | |
tonyp@2975 | 1590 | G1HRPrinter* hr_printer = _g1h->hr_printer(); |
tonyp@2975 | 1591 | if (hr_printer->is_active()) { |
tonyp@2975 | 1592 | HeapRegionLinkedListIterator iter(&local_cleanup_list); |
tonyp@2975 | 1593 | while (iter.more_available()) { |
tonyp@2975 | 1594 | HeapRegion* hr = iter.get_next(); |
tonyp@2975 | 1595 | hr_printer->cleanup(hr); |
tonyp@2975 | 1596 | } |
tonyp@2975 | 1597 | } |
tonyp@2975 | 1598 | |
tonyp@2493 | 1599 | _cleanup_list->add_as_tail(&local_cleanup_list); |
tonyp@2493 | 1600 | assert(local_cleanup_list.is_empty(), "post-condition"); |
tonyp@2493 | 1601 | |
tonyp@2493 | 1602 | HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task); |
ysr@777 | 1603 | } |
ysr@777 | 1604 | double end = os::elapsedTime(); |
ysr@777 | 1605 | if (G1PrintParCleanupStats) { |
ysr@777 | 1606 | gclog_or_tty->print(" Worker thread %d [%8.3f..%8.3f = %8.3f ms] " |
ysr@777 | 1607 | "claimed %d regions (tot = %8.3f ms, max = %8.3f ms).\n", |
ysr@777 | 1608 | i, start, end, (end-start)*1000.0, |
ysr@777 | 1609 | g1_note_end.regions_claimed(), |
ysr@777 | 1610 | g1_note_end.claimed_region_time_sec()*1000.0, |
ysr@777 | 1611 | g1_note_end.max_region_time_sec()*1000.0); |
ysr@777 | 1612 | } |
ysr@777 | 1613 | } |
ysr@777 | 1614 | size_t max_live_bytes() { return _max_live_bytes; } |
ysr@777 | 1615 | size_t freed_bytes() { return _freed_bytes; } |
ysr@777 | 1616 | }; |
ysr@777 | 1617 | |
ysr@777 | 1618 | class G1ParScrubRemSetTask: public AbstractGangTask { |
ysr@777 | 1619 | protected: |
ysr@777 | 1620 | G1RemSet* _g1rs; |
ysr@777 | 1621 | BitMap* _region_bm; |
ysr@777 | 1622 | BitMap* _card_bm; |
ysr@777 | 1623 | public: |
ysr@777 | 1624 | G1ParScrubRemSetTask(G1CollectedHeap* g1h, |
ysr@777 | 1625 | BitMap* region_bm, BitMap* card_bm) : |
ysr@777 | 1626 | AbstractGangTask("G1 ScrubRS"), _g1rs(g1h->g1_rem_set()), |
ysr@777 | 1627 | _region_bm(region_bm), _card_bm(card_bm) |
ysr@777 | 1628 | {} |
ysr@777 | 1629 | |
ysr@777 | 1630 | void work(int i) { |
jmasa@2188 | 1631 | if (G1CollectedHeap::use_parallel_gc_threads()) { |
tonyp@790 | 1632 | _g1rs->scrub_par(_region_bm, _card_bm, i, |
tonyp@790 | 1633 | HeapRegion::ScrubRemSetClaimValue); |
ysr@777 | 1634 | } else { |
ysr@777 | 1635 | _g1rs->scrub(_region_bm, _card_bm); |
ysr@777 | 1636 | } |
ysr@777 | 1637 | } |
ysr@777 | 1638 | |
ysr@777 | 1639 | }; |
ysr@777 | 1640 | |
ysr@777 | 1641 | G1NoteEndOfConcMarkClosure:: |
ysr@777 | 1642 | G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1, |
tonyp@2493 | 1643 | int worker_num, |
tonyp@2493 | 1644 | FreeRegionList* local_cleanup_list, |
tonyp@2493 | 1645 | HumongousRegionSet* humongous_proxy_set, |
tonyp@2493 | 1646 | HRRSCleanupTask* hrrs_cleanup_task) |
ysr@777 | 1647 | : _g1(g1), _worker_num(worker_num), |
ysr@777 | 1648 | _max_live_bytes(0), _regions_claimed(0), |
tonyp@2472 | 1649 | _freed_bytes(0), |
ysr@777 | 1650 | _claimed_region_time(0.0), _max_region_time(0.0), |
tonyp@2493 | 1651 | _local_cleanup_list(local_cleanup_list), |
tonyp@2493 | 1652 | _humongous_proxy_set(humongous_proxy_set), |
tonyp@2493 | 1653 | _hrrs_cleanup_task(hrrs_cleanup_task) { } |
tonyp@2472 | 1654 | |
tonyp@2472 | 1655 | bool G1NoteEndOfConcMarkClosure::doHeapRegion(HeapRegion *hr) { |
ysr@777 | 1656 | // We use a claim value of zero here because all regions |
ysr@777 | 1657 | // were claimed with value 1 in the FinalCount task. |
tonyp@2472 | 1658 | hr->reset_gc_time_stamp(); |
tonyp@2472 | 1659 | if (!hr->continuesHumongous()) { |
ysr@777 | 1660 | double start = os::elapsedTime(); |
ysr@777 | 1661 | _regions_claimed++; |
tonyp@2472 | 1662 | hr->note_end_of_marking(); |
tonyp@2472 | 1663 | _max_live_bytes += hr->max_live_bytes(); |
tonyp@2493 | 1664 | _g1->free_region_if_empty(hr, |
tonyp@2493 | 1665 | &_freed_bytes, |
tonyp@2493 | 1666 | _local_cleanup_list, |
tonyp@2493 | 1667 | _humongous_proxy_set, |
tonyp@2493 | 1668 | _hrrs_cleanup_task, |
tonyp@2493 | 1669 | true /* par */); |
ysr@777 | 1670 | double region_time = (os::elapsedTime() - start); |
ysr@777 | 1671 | _claimed_region_time += region_time; |
tonyp@2973 | 1672 | if (region_time > _max_region_time) { |
tonyp@2973 | 1673 | _max_region_time = region_time; |
tonyp@2973 | 1674 | } |
ysr@777 | 1675 | } |
ysr@777 | 1676 | return false; |
ysr@777 | 1677 | } |
ysr@777 | 1678 | |
ysr@777 | 1679 | void ConcurrentMark::cleanup() { |
ysr@777 | 1680 | // world is stopped at this checkpoint |
ysr@777 | 1681 | assert(SafepointSynchronize::is_at_safepoint(), |
ysr@777 | 1682 | "world should be stopped"); |
ysr@777 | 1683 | G1CollectedHeap* g1h = G1CollectedHeap::heap(); |
ysr@777 | 1684 | |
ysr@777 | 1685 | // If a full collection has happened, we shouldn't do this. |
ysr@777 | 1686 | if (has_aborted()) { |
ysr@777 | 1687 | g1h->set_marking_complete(); // So bitmap clearing isn't confused |
ysr@777 | 1688 | return; |
ysr@777 | 1689 | } |
ysr@777 | 1690 | |
tonyp@2472 | 1691 | g1h->verify_region_sets_optional(); |
tonyp@2472 | 1692 | |
ysr@1280 | 1693 | if (VerifyDuringGC) { |
ysr@1280 | 1694 | HandleMark hm; // handle scope |
ysr@1280 | 1695 | gclog_or_tty->print(" VerifyDuringGC:(before)"); |
ysr@1280 | 1696 | Universe::heap()->prepare_for_verify(); |
johnc@2969 | 1697 | Universe::verify(/* allow dirty */ true, |
johnc@2969 | 1698 | /* silent */ false, |
johnc@2969 | 1699 | /* option */ VerifyOption_G1UsePrevMarking); |
ysr@1280 | 1700 | } |
ysr@1280 | 1701 | |
ysr@777 | 1702 | G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy(); |
ysr@777 | 1703 | g1p->record_concurrent_mark_cleanup_start(); |
ysr@777 | 1704 | |
ysr@777 | 1705 | double start = os::elapsedTime(); |
ysr@777 | 1706 | |
tonyp@2493 | 1707 | HeapRegionRemSet::reset_for_cleanup_tasks(); |
tonyp@2493 | 1708 | |
ysr@777 | 1709 | // Do counting once more with the world stopped for good measure. |
ysr@777 | 1710 | G1ParFinalCountTask g1_par_count_task(g1h, nextMarkBitMap(), |
ysr@777 | 1711 | &_region_bm, &_card_bm); |
jmasa@2188 | 1712 | if (G1CollectedHeap::use_parallel_gc_threads()) { |
tonyp@790 | 1713 | assert(g1h->check_heap_region_claim_values( |
tonyp@790 | 1714 | HeapRegion::InitialClaimValue), |
tonyp@790 | 1715 | "sanity check"); |
tonyp@790 | 1716 | |
ysr@777 | 1717 | int n_workers = g1h->workers()->total_workers(); |
ysr@777 | 1718 | g1h->set_par_threads(n_workers); |
ysr@777 | 1719 | g1h->workers()->run_task(&g1_par_count_task); |
ysr@777 | 1720 | g1h->set_par_threads(0); |
tonyp@790 | 1721 | |
tonyp@790 | 1722 | assert(g1h->check_heap_region_claim_values( |
tonyp@790 | 1723 | HeapRegion::FinalCountClaimValue), |
tonyp@790 | 1724 | "sanity check"); |
ysr@777 | 1725 | } else { |
ysr@777 | 1726 | g1_par_count_task.work(0); |
ysr@777 | 1727 | } |
ysr@777 | 1728 | |
ysr@777 | 1729 | size_t known_garbage_bytes = |
ysr@777 | 1730 | g1_par_count_task.used_bytes() - g1_par_count_task.live_bytes(); |
ysr@777 | 1731 | g1p->set_known_garbage_bytes(known_garbage_bytes); |
ysr@777 | 1732 | |
ysr@777 | 1733 | size_t start_used_bytes = g1h->used(); |
ysr@777 | 1734 | _at_least_one_mark_complete = true; |
ysr@777 | 1735 | g1h->set_marking_complete(); |
ysr@777 | 1736 | |
tonyp@3114 | 1737 | ergo_verbose4(ErgoConcCycles, |
tonyp@3114 | 1738 | "finish cleanup", |
tonyp@3114 | 1739 | ergo_format_byte("occupancy") |
tonyp@3114 | 1740 | ergo_format_byte("capacity") |
tonyp@3114 | 1741 | ergo_format_byte_perc("known garbage"), |
tonyp@3114 | 1742 | start_used_bytes, g1h->capacity(), |
tonyp@3114 | 1743 | known_garbage_bytes, |
tonyp@3114 | 1744 | ((double) known_garbage_bytes / (double) g1h->capacity()) * 100.0); |
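// Note: judging by the format specifiers above, ergo_format_byte_perc
// consumes two of the four value arguments -- the known_garbage_bytes
// count and the percentage computed on the last line -- which is why
// the 4-argument ergo_verbose4 variant is used here.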
tonyp@3114 | 1745 | |
ysr@777 | 1746 | double count_end = os::elapsedTime(); |
ysr@777 | 1747 | double this_final_counting_time = (count_end - start); |
ysr@777 | 1748 | if (G1PrintParCleanupStats) { |
ysr@777 | 1749 | gclog_or_tty->print_cr("Cleanup:"); |
ysr@777 | 1750 | gclog_or_tty->print_cr(" Finalize counting: %8.3f ms", |
ysr@777 | 1751 | this_final_counting_time*1000.0); |
ysr@777 | 1752 | } |
ysr@777 | 1753 | _total_counting_time += this_final_counting_time; |
ysr@777 | 1754 | |
tonyp@2717 | 1755 | if (G1PrintRegionLivenessInfo) { |
tonyp@2717 | 1756 | G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking"); |
tonyp@2717 | 1757 | _g1h->heap_region_iterate(&cl); |
tonyp@2717 | 1758 | } |
tonyp@2717 | 1759 | |
ysr@777 | 1760 | // Install newly created mark bitMap as "prev". |
ysr@777 | 1761 | swapMarkBitMaps(); |
ysr@777 | 1762 | |
ysr@777 | 1763 | g1h->reset_gc_time_stamp(); |
ysr@777 | 1764 | |
ysr@777 | 1765 | // Note end of marking in all heap regions. |
ysr@777 | 1766 | double note_end_start = os::elapsedTime(); |
tonyp@2472 | 1767 | G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list); |
jmasa@2188 | 1768 | if (G1CollectedHeap::use_parallel_gc_threads()) { |
ysr@777 | 1769 | int n_workers = g1h->workers()->total_workers(); |
ysr@777 | 1770 | g1h->set_par_threads(n_workers); |
ysr@777 | 1771 | g1h->workers()->run_task(&g1_par_note_end_task); |
ysr@777 | 1772 | g1h->set_par_threads(0); |
tonyp@790 | 1773 | |
tonyp@790 | 1774 | assert(g1h->check_heap_region_claim_values(HeapRegion::NoteEndClaimValue), |
tonyp@790 | 1775 | "sanity check"); |
ysr@777 | 1776 | } else { |
ysr@777 | 1777 | g1_par_note_end_task.work(0); |
ysr@777 | 1778 | } |
tonyp@2472 | 1779 | |
tonyp@2472 | 1780 | if (!cleanup_list_is_empty()) { |
tonyp@2472 | 1781 | // The cleanup list is not empty, so we'll have to process it |
tonyp@2472 | 1782 | // concurrently. Notify anyone else that might be wanting free |
tonyp@2472 | 1783 | // regions that there will be more free regions coming soon. |
tonyp@2472 | 1784 | g1h->set_free_regions_coming(); |
tonyp@2472 | 1785 | } |
ysr@777 | 1786 | double note_end_end = os::elapsedTime(); |
ysr@777 | 1787 | if (G1PrintParCleanupStats) { |
ysr@777 | 1788 | gclog_or_tty->print_cr(" note end of marking: %8.3f ms.", |
ysr@777 | 1789 | (note_end_end - note_end_start)*1000.0); |
ysr@777 | 1790 | } |
ysr@777 | 1791 | |
tonyp@790 | 1792 | |
ysr@777 | 1793 | // Scrub the remembered sets before the record_concurrent_mark_cleanup_end()
ysr@777 | 1794 | // call below, since scrubbing affects the metric by which we sort the heap regions.
ysr@777 | 1795 | if (G1ScrubRemSets) { |
ysr@777 | 1796 | double rs_scrub_start = os::elapsedTime(); |
ysr@777 | 1797 | G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm); |
jmasa@2188 | 1798 | if (G1CollectedHeap::use_parallel_gc_threads()) { |
ysr@777 | 1799 | int n_workers = g1h->workers()->total_workers(); |
ysr@777 | 1800 | g1h->set_par_threads(n_workers); |
ysr@777 | 1801 | g1h->workers()->run_task(&g1_par_scrub_rs_task); |
ysr@777 | 1802 | g1h->set_par_threads(0); |
tonyp@790 | 1803 | |
tonyp@790 | 1804 | assert(g1h->check_heap_region_claim_values( |
tonyp@790 | 1805 | HeapRegion::ScrubRemSetClaimValue), |
tonyp@790 | 1806 | "sanity check"); |
ysr@777 | 1807 | } else { |
ysr@777 | 1808 | g1_par_scrub_rs_task.work(0); |
ysr@777 | 1809 | } |
ysr@777 | 1810 | |
ysr@777 | 1811 | double rs_scrub_end = os::elapsedTime(); |
ysr@777 | 1812 | double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start); |
ysr@777 | 1813 | _total_rs_scrub_time += this_rs_scrub_time; |
ysr@777 | 1814 | } |
ysr@777 | 1815 | |
ysr@777 | 1816 | // this will also free any regions totally full of garbage objects, |
ysr@777 | 1817 | // and sort the regions. |
ysr@777 | 1818 | g1h->g1_policy()->record_concurrent_mark_cleanup_end( |
ysr@777 | 1819 | g1_par_note_end_task.freed_bytes(), |
ysr@777 | 1820 | g1_par_note_end_task.max_live_bytes()); |
ysr@777 | 1821 | |
ysr@777 | 1822 | // Statistics. |
ysr@777 | 1823 | double end = os::elapsedTime(); |
ysr@777 | 1824 | _cleanup_times.add((end - start) * 1000.0); |
ysr@777 | 1825 | |
ysr@777 | 1826 | // G1CollectedHeap::heap()->print(); |
ysr@777 | 1827 | // gclog_or_tty->print_cr("HEAP GC TIME STAMP : %d", |
ysr@777 | 1828 | // G1CollectedHeap::heap()->get_gc_time_stamp()); |
ysr@777 | 1829 | |
ysr@777 | 1830 | if (PrintGC || PrintGCDetails) { |
ysr@777 | 1831 | g1h->print_size_transition(gclog_or_tty, |
ysr@777 | 1832 | start_used_bytes, |
ysr@777 | 1833 | g1h->used(), |
ysr@777 | 1834 | g1h->capacity()); |
ysr@777 | 1835 | } |
ysr@777 | 1836 | |
ysr@777 | 1837 | size_t cleaned_up_bytes = start_used_bytes - g1h->used(); |
ysr@777 | 1838 | g1p->decrease_known_garbage_bytes(cleaned_up_bytes); |
ysr@777 | 1839 | |
ysr@777 | 1840 | // We need to make this a "collection" so any collection pause that
ysr@777 | 1841 | // races with it goes around and waits for completeCleanup to finish. |
ysr@777 | 1842 | g1h->increment_total_collections(); |
ysr@777 | 1843 | |
johnc@1186 | 1844 | if (VerifyDuringGC) { |
ysr@1280 | 1845 | HandleMark hm; // handle scope |
ysr@1280 | 1846 | gclog_or_tty->print(" VerifyDuringGC:(after)"); |
ysr@1280 | 1847 | Universe::heap()->prepare_for_verify(); |
johnc@2969 | 1848 | Universe::verify(/* allow dirty */ true, |
johnc@2969 | 1849 | /* silent */ false, |
johnc@2969 | 1850 | /* option */ VerifyOption_G1UsePrevMarking); |
ysr@777 | 1851 | } |
tonyp@2472 | 1852 | |
tonyp@2472 | 1853 | g1h->verify_region_sets_optional(); |
ysr@777 | 1854 | } |
ysr@777 | 1855 | |
ysr@777 | 1856 | void ConcurrentMark::completeCleanup() { |
ysr@777 | 1857 | if (has_aborted()) return; |
ysr@777 | 1858 | |
tonyp@2472 | 1859 | G1CollectedHeap* g1h = G1CollectedHeap::heap(); |
tonyp@2472 | 1860 | |
tonyp@2472 | 1861 | _cleanup_list.verify_optional(); |
tonyp@2643 | 1862 | FreeRegionList tmp_free_list("Tmp Free List"); |
tonyp@2472 | 1863 | |
tonyp@2472 | 1864 | if (G1ConcRegionFreeingVerbose) { |
tonyp@2472 | 1865 | gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : " |
tonyp@2472 | 1866 | "cleanup list has "SIZE_FORMAT" entries", |
tonyp@2472 | 1867 | _cleanup_list.length()); |
tonyp@2472 | 1868 | } |
tonyp@2472 | 1869 | |
tonyp@2472 | 1870 | // No one else should be accessing the _cleanup_list at this point,
tonyp@2472 | 1871 | // so it's not necessary to take any locks.
tonyp@2472 | 1872 | while (!_cleanup_list.is_empty()) { |
tonyp@2472 | 1873 | HeapRegion* hr = _cleanup_list.remove_head(); |
tonyp@2472 | 1874 | assert(hr != NULL, "the list was not empty"); |
tonyp@2849 | 1875 | hr->par_clear(); |
tonyp@2643 | 1876 | tmp_free_list.add_as_tail(hr); |
tonyp@2472 | 1877 | |
tonyp@2472 | 1878 | // Instead of adding one region at a time to the secondary_free_list, |
tonyp@2472 | 1879 | // we accumulate them in the local list and move them a few at a |
tonyp@2472 | 1880 | // time. This also cuts down on the number of notify_all() calls |
tonyp@2472 | 1881 | // we do during this process. We'll also append the local list when |
tonyp@2472 | 1882 | // _cleanup_list is empty (which means we just removed the last |
tonyp@2472 | 1883 | // region from the _cleanup_list). |
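// For example, with an append length of 5 (a plausible value for
// G1SecondaryFreeListAppendLength), we take SecondaryFreeList_lock and
// notify_all() once per five reclaimed regions instead of once per region.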
tonyp@2643 | 1884 | if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) || |
tonyp@2472 | 1885 | _cleanup_list.is_empty()) { |
tonyp@2472 | 1886 | if (G1ConcRegionFreeingVerbose) { |
tonyp@2472 | 1887 | gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : " |
tonyp@2472 | 1888 | "appending "SIZE_FORMAT" entries to the " |
tonyp@2472 | 1889 | "secondary_free_list, clean list still has " |
tonyp@2472 | 1890 | SIZE_FORMAT" entries", |
tonyp@2643 | 1891 | tmp_free_list.length(), |
tonyp@2472 | 1892 | _cleanup_list.length()); |
ysr@777 | 1893 | } |
tonyp@2472 | 1894 | |
tonyp@2472 | 1895 | { |
tonyp@2472 | 1896 | MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag); |
tonyp@2643 | 1897 | g1h->secondary_free_list_add_as_tail(&tmp_free_list); |
tonyp@2472 | 1898 | SecondaryFreeList_lock->notify_all(); |
tonyp@2472 | 1899 | } |
tonyp@2472 | 1900 | |
tonyp@2472 | 1901 | if (G1StressConcRegionFreeing) { |
tonyp@2472 | 1902 | for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) { |
tonyp@2472 | 1903 | os::sleep(Thread::current(), (jlong) 1, false); |
tonyp@2472 | 1904 | } |
tonyp@2472 | 1905 | } |
ysr@777 | 1906 | } |
ysr@777 | 1907 | } |
tonyp@2643 | 1908 | assert(tmp_free_list.is_empty(), "post-condition"); |
ysr@777 | 1909 | } |
ysr@777 | 1910 | |
johnc@2494 | 1911 | // Support closures for reference processing in G1
johnc@2494 | 1912 | |
johnc@2379 | 1913 | bool G1CMIsAliveClosure::do_object_b(oop obj) { |
johnc@2379 | 1914 | HeapWord* addr = (HeapWord*)obj; |
johnc@2379 | 1915 | return addr != NULL && |
johnc@2379 | 1916 | (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj)); |
johnc@2379 | 1917 | } |
ysr@777 | 1918 | |
ysr@777 | 1919 | class G1CMKeepAliveClosure: public OopClosure { |
ysr@777 | 1920 | G1CollectedHeap* _g1; |
ysr@777 | 1921 | ConcurrentMark* _cm; |
ysr@777 | 1922 | CMBitMap* _bitMap; |
ysr@777 | 1923 | public: |
ysr@777 | 1924 | G1CMKeepAliveClosure(G1CollectedHeap* g1, ConcurrentMark* cm, |
ysr@777 | 1925 | CMBitMap* bitMap) : |
ysr@777 | 1926 | _g1(g1), _cm(cm), |
ysr@777 | 1927 | _bitMap(bitMap) {} |
ysr@777 | 1928 | |
ysr@1280 | 1929 | virtual void do_oop(narrowOop* p) { do_oop_work(p); } |
ysr@1280 | 1930 | virtual void do_oop( oop* p) { do_oop_work(p); } |
ysr@1280 | 1931 | |
ysr@1280 | 1932 | template <class T> void do_oop_work(T* p) { |
johnc@2494 | 1933 | oop obj = oopDesc::load_decode_heap_oop(p); |
johnc@2494 | 1934 | HeapWord* addr = (HeapWord*)obj; |
johnc@2494 | 1935 | |
tonyp@2973 | 1936 | if (_cm->verbose_high()) { |
johnc@2494 | 1937 | gclog_or_tty->print_cr("\t[0] we're looking at location " |
tonyp@2973 | 1938 | "*"PTR_FORMAT" = "PTR_FORMAT, |
tonyp@2973 | 1939 | p, (void*) obj); |
tonyp@2973 | 1940 | } |
johnc@2494 | 1941 | |
johnc@2494 | 1942 | if (_g1->is_in_g1_reserved(addr) && _g1->is_obj_ill(obj)) { |
ysr@777 | 1943 | _bitMap->mark(addr); |
johnc@2494 | 1944 | _cm->mark_stack_push(obj); |
ysr@777 | 1945 | } |
ysr@777 | 1946 | } |
ysr@777 | 1947 | }; |
ysr@777 | 1948 | |
ysr@777 | 1949 | class G1CMDrainMarkingStackClosure: public VoidClosure { |
ysr@777 | 1950 | CMMarkStack* _markStack; |
ysr@777 | 1951 | CMBitMap* _bitMap; |
ysr@777 | 1952 | G1CMKeepAliveClosure* _oopClosure; |
ysr@777 | 1953 | public: |
ysr@777 | 1954 | G1CMDrainMarkingStackClosure(CMBitMap* bitMap, CMMarkStack* markStack, |
ysr@777 | 1955 | G1CMKeepAliveClosure* oopClosure) : |
ysr@777 | 1956 | _markStack(markStack),
ysr@777 | 1957 | _bitMap(bitMap),
ysr@777 | 1958 | _oopClosure(oopClosure) |
ysr@777 | 1959 | {} |
ysr@777 | 1960 | |
ysr@777 | 1961 | void do_void() { |
ysr@777 | 1962 | _markStack->drain((OopClosure*)_oopClosure, _bitMap, false); |
ysr@777 | 1963 | } |
ysr@777 | 1964 | }; |
ysr@777 | 1965 | |
johnc@2494 | 1966 | // 'Keep Alive' closure used by parallel reference processing. |
johnc@2494 | 1967 | // An instance of this closure is used in the parallel reference processing |
johnc@2494 | 1968 | // code rather than an instance of G1CMKeepAliveClosure. We could have used |
johnc@2494 | 1969 | // the G1CMKeepAliveClosure as it is MT-safe. Also, reference objects are
johnc@2494 | 1970 | // placed onto discovered ref lists only once, so we can mark and push with
johnc@2494 | 1971 | // no need to check whether the object has already been marked. Using the
johnc@2494 | 1972 | // G1CMKeepAliveClosure would mean, however, having all the worker threads
johnc@2494 | 1973 | // operating on the global mark stack. This means that an individual
johnc@2494 | 1974 | // worker would be doing lock-free pushes while it processes its own
johnc@2494 | 1975 | // discovered ref list, followed by a drain call. If the discovered ref lists
johnc@2494 | 1976 | // are unbalanced then this could cause interference with the other |
johnc@2494 | 1977 | // workers. Using a CMTask (and its embedded local data structures) |
johnc@2494 | 1978 | // avoids that potential interference. |
johnc@2494 | 1979 | class G1CMParKeepAliveAndDrainClosure: public OopClosure { |
johnc@2494 | 1980 | ConcurrentMark* _cm; |
johnc@2494 | 1981 | CMTask* _task; |
johnc@2494 | 1982 | CMBitMap* _bitMap; |
johnc@2494 | 1983 | int _ref_counter_limit; |
johnc@2494 | 1984 | int _ref_counter; |
johnc@2494 | 1985 | public: |
johnc@2494 | 1986 | G1CMParKeepAliveAndDrainClosure(ConcurrentMark* cm, |
johnc@2494 | 1987 | CMTask* task, |
johnc@2494 | 1988 | CMBitMap* bitMap) : |
johnc@2494 | 1989 | _cm(cm), _task(task), _bitMap(bitMap), |
johnc@2494 | 1990 | _ref_counter_limit(G1RefProcDrainInterval) |
johnc@2494 | 1991 | { |
johnc@2494 | 1992 | assert(_ref_counter_limit > 0, "sanity"); |
johnc@2494 | 1993 | _ref_counter = _ref_counter_limit; |
johnc@2494 | 1994 | } |
johnc@2494 | 1995 | |
johnc@2494 | 1996 | virtual void do_oop(narrowOop* p) { do_oop_work(p); } |
johnc@2494 | 1997 | virtual void do_oop( oop* p) { do_oop_work(p); } |
johnc@2494 | 1998 | |
johnc@2494 | 1999 | template <class T> void do_oop_work(T* p) { |
johnc@2494 | 2000 | if (!_cm->has_overflown()) { |
johnc@2494 | 2001 | oop obj = oopDesc::load_decode_heap_oop(p); |
tonyp@2973 | 2002 | if (_cm->verbose_high()) { |
johnc@2494 | 2003 | gclog_or_tty->print_cr("\t[%d] we're looking at location " |
johnc@2494 | 2004 | "*"PTR_FORMAT" = "PTR_FORMAT, |
johnc@2494 | 2005 | _task->task_id(), p, (void*) obj); |
tonyp@2973 | 2006 | } |
johnc@2494 | 2007 | |
johnc@2494 | 2008 | _task->deal_with_reference(obj); |
johnc@2494 | 2009 | _ref_counter--; |
johnc@2494 | 2010 | |
johnc@2494 | 2011 | if (_ref_counter == 0) { |
johnc@2494 | 2012 | // We have dealt with _ref_counter_limit references, pushing them and objects |
johnc@2494 | 2013 | // reachable from them on to the local stack (and possibly the global stack). |
johnc@2494 | 2014 | // Call do_marking_step() to process these entries. We call the routine in a |
johnc@2494 | 2015 | // loop, which we'll exit if there's nothing more to do (i.e. we're done |
johnc@2494 | 2016 | // with the entries that we've pushed as a result of the deal_with_reference |
johnc@2494 | 2017 | // calls above) or we overflow. |
johnc@2494 | 2018 | // Note: CMTask::do_marking_step() can set the CMTask::has_aborted() flag |
johnc@2494 | 2019 | // while there may still be some work to do. (See the comment at the |
johnc@2494 | 2020 | // beginning of CMTask::do_marking_step() for those conditions - one of which |
johnc@2494 | 2021 | // is reaching the specified time target.) It is only when |
johnc@2494 | 2022 | // CMTask::do_marking_step() returns without setting the has_aborted() flag |
johnc@2494 | 2023 | // that the marking has completed. |
johnc@2494 | 2024 | do { |
johnc@2494 | 2025 | double mark_step_duration_ms = G1ConcMarkStepDurationMillis; |
johnc@2494 | 2026 | _task->do_marking_step(mark_step_duration_ms, |
johnc@2494 | 2027 | false /* do_stealing */, |
johnc@2494 | 2028 | false /* do_termination */); |
johnc@2494 | 2029 | } while (_task->has_aborted() && !_cm->has_overflown()); |
johnc@2494 | 2030 | _ref_counter = _ref_counter_limit; |
johnc@2494 | 2031 | } |
johnc@2494 | 2032 | } else { |
tonyp@2973 | 2033 | if (_cm->verbose_high()) { |
johnc@2494 | 2034 | gclog_or_tty->print_cr("\t[%d] CM Overflow", _task->task_id()); |
tonyp@2973 | 2035 | } |
johnc@2494 | 2036 | } |
johnc@2494 | 2037 | } |
johnc@2494 | 2038 | }; |
johnc@2494 | 2039 | |
johnc@2494 | 2040 | class G1CMParDrainMarkingStackClosure: public VoidClosure { |
johnc@2494 | 2041 | ConcurrentMark* _cm; |
johnc@2494 | 2042 | CMTask* _task; |
johnc@2494 | 2043 | public: |
johnc@2494 | 2044 | G1CMParDrainMarkingStackClosure(ConcurrentMark* cm, CMTask* task) : |
johnc@2494 | 2045 | _cm(cm), _task(task) |
johnc@2494 | 2046 | {} |
johnc@2494 | 2047 | |
johnc@2494 | 2048 | void do_void() { |
johnc@2494 | 2049 | do { |
tonyp@2973 | 2050 | if (_cm->verbose_high()) { |
tonyp@2973 | 2051 | gclog_or_tty->print_cr("\t[%d] Drain: Calling do marking_step", |
tonyp@2973 | 2052 | _task->task_id()); |
tonyp@2973 | 2053 | } |
johnc@2494 | 2054 | |
johnc@2494 | 2055 | // We call CMTask::do_marking_step() to completely drain the local and |
johnc@2494 | 2056 | // global marking stacks. The routine is called in a loop, which we'll |
johnc@2494 | 2057 | // exit if there's nothing more to do (i.e. we've completely drained the
johnc@2494 | 2058 | // entries that were pushed as a result of applying the |
johnc@2494 | 2059 | // G1CMParKeepAliveAndDrainClosure to the entries on the discovered ref |
johnc@2494 | 2060 | // lists above) or we overflow the global marking stack. |
johnc@2494 | 2061 | // Note: CMTask::do_marking_step() can set the CMTask::has_aborted() flag |
johnc@2494 | 2062 | // while there may still be some work to do. (See the comment at the |
johnc@2494 | 2063 | // beginning of CMTask::do_marking_step() for those conditions - one of which |
johnc@2494 | 2064 | // is reaching the specified time target.) It is only when |
johnc@2494 | 2065 | // CMTask::do_marking_step() returns without setting the has_aborted() flag |
johnc@2494 | 2066 | // that the marking has completed. |
johnc@2494 | 2067 | |
johnc@2494 | 2068 | _task->do_marking_step(1000000000.0 /* something very large */, |
johnc@2494 | 2069 | true /* do_stealing */, |
johnc@2494 | 2070 | true /* do_termination */); |
johnc@2494 | 2071 | } while (_task->has_aborted() && !_cm->has_overflown()); |
johnc@2494 | 2072 | } |
johnc@2494 | 2073 | }; |
johnc@2494 | 2074 | |
johnc@2494 | 2075 | // Implementation of AbstractRefProcTaskExecutor for G1 |
johnc@2494 | 2076 | class G1RefProcTaskExecutor: public AbstractRefProcTaskExecutor { |
johnc@2494 | 2077 | private: |
johnc@2494 | 2078 | G1CollectedHeap* _g1h; |
johnc@2494 | 2079 | ConcurrentMark* _cm; |
johnc@2494 | 2080 | CMBitMap* _bitmap; |
johnc@2494 | 2081 | WorkGang* _workers; |
johnc@2494 | 2082 | int _active_workers; |
johnc@2494 | 2083 | |
johnc@2494 | 2084 | public: |
johnc@2494 | 2085 | G1RefProcTaskExecutor(G1CollectedHeap* g1h, |
johnc@2494 | 2086 | ConcurrentMark* cm, |
johnc@2494 | 2087 | CMBitMap* bitmap, |
johnc@2494 | 2088 | WorkGang* workers, |
johnc@2494 | 2089 | int n_workers) : |
johnc@2494 | 2090 | _g1h(g1h), _cm(cm), _bitmap(bitmap), |
johnc@2494 | 2091 | _workers(workers), _active_workers(n_workers) |
johnc@2494 | 2092 | { } |
johnc@2494 | 2093 | |
johnc@2494 | 2094 | // Executes the given task using concurrent marking worker threads. |
johnc@2494 | 2095 | virtual void execute(ProcessTask& task); |
johnc@2494 | 2096 | virtual void execute(EnqueueTask& task); |
johnc@2494 | 2097 | }; |
johnc@2494 | 2098 | |
johnc@2494 | 2099 | class G1RefProcTaskProxy: public AbstractGangTask { |
johnc@2494 | 2100 | typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask; |
johnc@2494 | 2101 | ProcessTask& _proc_task; |
johnc@2494 | 2102 | G1CollectedHeap* _g1h; |
johnc@2494 | 2103 | ConcurrentMark* _cm; |
johnc@2494 | 2104 | CMBitMap* _bitmap; |
johnc@2494 | 2105 | |
johnc@2494 | 2106 | public: |
johnc@2494 | 2107 | G1RefProcTaskProxy(ProcessTask& proc_task, |
johnc@2494 | 2108 | G1CollectedHeap* g1h, |
johnc@2494 | 2109 | ConcurrentMark* cm, |
johnc@2494 | 2110 | CMBitMap* bitmap) : |
johnc@2494 | 2111 | AbstractGangTask("Process reference objects in parallel"), |
johnc@2494 | 2112 | _proc_task(proc_task), _g1h(g1h), _cm(cm), _bitmap(bitmap) |
johnc@2494 | 2113 | {} |
johnc@2494 | 2114 | |
johnc@2494 | 2115 | virtual void work(int i) { |
johnc@2494 | 2116 | CMTask* marking_task = _cm->task(i); |
johnc@2494 | 2117 | G1CMIsAliveClosure g1_is_alive(_g1h); |
johnc@2494 | 2118 | G1CMParKeepAliveAndDrainClosure g1_par_keep_alive(_cm, marking_task, _bitmap); |
johnc@2494 | 2119 | G1CMParDrainMarkingStackClosure g1_par_drain(_cm, marking_task); |
johnc@2494 | 2120 | |
johnc@2494 | 2121 | _proc_task.work(i, g1_is_alive, g1_par_keep_alive, g1_par_drain); |
johnc@2494 | 2122 | } |
johnc@2494 | 2123 | }; |
johnc@2494 | 2124 | |
johnc@2494 | 2125 | void G1RefProcTaskExecutor::execute(ProcessTask& proc_task) { |
johnc@2494 | 2126 | assert(_workers != NULL, "Need parallel worker threads."); |
johnc@2494 | 2127 | |
johnc@2494 | 2128 | G1RefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm, _bitmap); |
johnc@2494 | 2129 | |
johnc@2494 | 2130 | // We need to reset the phase for each task execution so that |
johnc@2494 | 2131 | // the termination protocol of CMTask::do_marking_step works. |
johnc@2494 | 2132 | _cm->set_phase(_active_workers, false /* concurrent */); |
johnc@2494 | 2133 | _g1h->set_par_threads(_active_workers); |
johnc@2494 | 2134 | _workers->run_task(&proc_task_proxy); |
johnc@2494 | 2135 | _g1h->set_par_threads(0); |
johnc@2494 | 2136 | } |
johnc@2494 | 2137 | |
johnc@2494 | 2138 | class G1RefEnqueueTaskProxy: public AbstractGangTask { |
johnc@2494 | 2139 | typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask; |
johnc@2494 | 2140 | EnqueueTask& _enq_task; |
johnc@2494 | 2141 | |
johnc@2494 | 2142 | public: |
johnc@2494 | 2143 | G1RefEnqueueTaskProxy(EnqueueTask& enq_task) : |
johnc@2494 | 2144 | AbstractGangTask("Enqueue reference objects in parallel"), |
johnc@2494 | 2145 | _enq_task(enq_task) |
johnc@2494 | 2146 | { } |
johnc@2494 | 2147 | |
johnc@2494 | 2148 | virtual void work(int i) { |
johnc@2494 | 2149 | _enq_task.work(i); |
johnc@2494 | 2150 | } |
johnc@2494 | 2151 | }; |
johnc@2494 | 2152 | |
johnc@2494 | 2153 | void G1RefProcTaskExecutor::execute(EnqueueTask& enq_task) { |
johnc@2494 | 2154 | assert(_workers != NULL, "Need parallel worker threads."); |
johnc@2494 | 2155 | |
johnc@2494 | 2156 | G1RefEnqueueTaskProxy enq_task_proxy(enq_task); |
johnc@2494 | 2157 | |
johnc@2494 | 2158 | _g1h->set_par_threads(_active_workers); |
johnc@2494 | 2159 | _workers->run_task(&enq_task_proxy); |
johnc@2494 | 2160 | _g1h->set_par_threads(0); |
johnc@2494 | 2161 | } |
johnc@2494 | 2162 | |
ysr@777 | 2163 | void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) { |
ysr@777 | 2164 | ResourceMark rm; |
ysr@777 | 2165 | HandleMark hm; |
ysr@888 | 2166 | G1CollectedHeap* g1h = G1CollectedHeap::heap(); |
ysr@888 | 2167 | ReferenceProcessor* rp = g1h->ref_processor(); |
ysr@777 | 2168 | |
johnc@2316 | 2169 | // See the comment in G1CollectedHeap::ref_processing_init() |
johnc@2316 | 2170 | // about how reference processing currently works in G1. |
johnc@2316 | 2171 | |
ysr@777 | 2172 | // Process weak references. |
ysr@892 | 2173 | rp->setup_policy(clear_all_soft_refs); |
ysr@777 | 2174 | assert(_markStack.isEmpty(), "mark stack should be empty"); |
ysr@777 | 2175 | |
johnc@2379 | 2176 | G1CMIsAliveClosure g1_is_alive(g1h); |
johnc@2379 | 2177 | G1CMKeepAliveClosure g1_keep_alive(g1h, this, nextMarkBitMap()); |
ysr@777 | 2178 | G1CMDrainMarkingStackClosure |
johnc@2379 | 2179 | g1_drain_mark_stack(nextMarkBitMap(), &_markStack, &g1_keep_alive); |
johnc@2494 | 2180 | // We use the work gang from the G1CollectedHeap and we utilize all |
johnc@2494 | 2181 | // the worker threads. |
ysr@2651 | 2182 | int active_workers = g1h->workers() ? g1h->workers()->total_workers() : 1; |
ysr@2651 | 2183 | active_workers = MAX2(MIN2(active_workers, (int)_max_task_num), 1); |
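// Illustrative values (hypothetical): a gang of 13 workers with
// _max_task_num == 8 is clamped down to 8; with no work gang, the
// initial value of 1 passes through the MIN2/MAX2 clamp unchanged.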
johnc@2494 | 2184 | |
johnc@2494 | 2185 | G1RefProcTaskExecutor par_task_executor(g1h, this, nextMarkBitMap(), |
johnc@2494 | 2186 | g1h->workers(), active_workers); |
johnc@2494 | 2187 | |
johnc@2494 | 2189 | if (rp->processing_is_mt()) { |
johnc@2494 | 2190 | // Set the degree of MT here. If the discovery is done MT, there |
johnc@2494 | 2191 | // may have been a different number of threads doing the discovery |
johnc@2494 | 2192 | // and a different number of discovered lists may have Ref objects. |
johnc@2494 | 2193 | // That is OK as long as the Reference lists are balanced (see |
johnc@2494 | 2194 | // balance_all_queues() and balance_queues()). |
ysr@2651 | 2195 | rp->set_active_mt_degree(active_workers); |
johnc@2494 | 2196 | |
johnc@2494 | 2197 | rp->process_discovered_references(&g1_is_alive, |
johnc@2494 | 2198 | &g1_keep_alive, |
johnc@2494 | 2199 | &g1_drain_mark_stack, |
johnc@2494 | 2200 | &par_task_executor); |
johnc@2494 | 2201 | |
johnc@2494 | 2202 | // The work routines of the parallel keep_alive and drain_marking_stack |
johnc@2494 | 2203 | // will set the has_overflown flag if we overflow the global marking |
johnc@2494 | 2204 | // stack. |
johnc@2494 | 2205 | } else { |
johnc@2494 | 2206 | rp->process_discovered_references(&g1_is_alive, |
johnc@2494 | 2207 | &g1_keep_alive, |
johnc@2494 | 2208 | &g1_drain_mark_stack, |
johnc@2494 | 2209 | NULL); |
johnc@2494 | 2210 | |
johnc@2494 | 2211 | } |
johnc@2494 | 2212 | |
ysr@777 | 2213 | assert(_markStack.overflow() || _markStack.isEmpty(), |
johnc@2494 | 2214 | "mark stack should be empty (unless it overflowed)"); |
ysr@777 | 2215 | if (_markStack.overflow()) { |
johnc@2494 | 2216 | // Should have been done already when we tried to push an |
johnc@2494 | 2217 | // entry on to the global mark stack. But let's do it again. |
ysr@777 | 2218 | set_has_overflown(); |
ysr@777 | 2219 | } |
ysr@777 | 2220 | |
johnc@2494 | 2221 | if (rp->processing_is_mt()) { |
johnc@2494 | 2222 | assert(rp->num_q() == active_workers, "sanity");
johnc@2494 | 2223 | rp->enqueue_discovered_references(&par_task_executor); |
johnc@2494 | 2224 | } else { |
johnc@2494 | 2225 | rp->enqueue_discovered_references(); |
johnc@2494 | 2226 | } |
johnc@2494 | 2227 | |
ysr@777 | 2228 | rp->verify_no_references_recorded(); |
ysr@777 | 2229 | assert(!rp->discovery_enabled(), "should have been disabled"); |
ysr@777 | 2230 | |
coleenp@2497 | 2231 | // Now clean up stale oops in StringTable |
johnc@2379 | 2232 | StringTable::unlink(&g1_is_alive); |
coleenp@2497 | 2233 | // Clean up unreferenced symbols in symbol table. |
coleenp@2497 | 2234 | SymbolTable::unlink(); |
ysr@777 | 2235 | } |
ysr@777 | 2236 | |
ysr@777 | 2237 | void ConcurrentMark::swapMarkBitMaps() { |
ysr@777 | 2238 | CMBitMapRO* temp = _prevMarkBitMap; |
ysr@777 | 2239 | _prevMarkBitMap = (CMBitMapRO*)_nextMarkBitMap; |
ysr@777 | 2240 | _nextMarkBitMap = (CMBitMap*) temp; |
ysr@777 | 2241 | } |
ysr@777 | 2242 | |
ysr@777 | 2243 | class CMRemarkTask: public AbstractGangTask { |
ysr@777 | 2244 | private: |
ysr@777 | 2245 | ConcurrentMark *_cm; |
ysr@777 | 2246 | |
ysr@777 | 2247 | public: |
ysr@777 | 2248 | void work(int worker_i) { |
ysr@777 | 2249 | // Since all available tasks are actually started, we should |
ysr@777 | 2250 | // only proceed if we're supposed to be active.
ysr@777 | 2251 | if ((size_t)worker_i < _cm->active_tasks()) { |
ysr@777 | 2252 | CMTask* task = _cm->task(worker_i); |
ysr@777 | 2253 | task->record_start_time(); |
ysr@777 | 2254 | do { |
johnc@2494 | 2255 | task->do_marking_step(1000000000.0 /* something very large */, |
johnc@2494 | 2256 | true /* do_stealing */, |
johnc@2494 | 2257 | true /* do_termination */); |
ysr@777 | 2258 | } while (task->has_aborted() && !_cm->has_overflown()); |
ysr@777 | 2259 | // If we overflow, then we do not want to restart. We instead |
ysr@777 | 2260 | // want to abort remark and do concurrent marking again. |
ysr@777 | 2261 | task->record_end_time(); |
ysr@777 | 2262 | } |
ysr@777 | 2263 | } |
ysr@777 | 2264 | |
ysr@777 | 2265 | CMRemarkTask(ConcurrentMark* cm) : |
ysr@777 | 2266 | AbstractGangTask("Par Remark"), _cm(cm) { } |
ysr@777 | 2267 | }; |
ysr@777 | 2268 | |
ysr@777 | 2269 | void ConcurrentMark::checkpointRootsFinalWork() { |
ysr@777 | 2270 | ResourceMark rm; |
ysr@777 | 2271 | HandleMark hm; |
ysr@777 | 2272 | G1CollectedHeap* g1h = G1CollectedHeap::heap(); |
ysr@777 | 2273 | |
ysr@777 | 2274 | g1h->ensure_parsability(false); |
ysr@777 | 2275 | |
jmasa@2188 | 2276 | if (G1CollectedHeap::use_parallel_gc_threads()) { |
jrose@1424 | 2277 | G1CollectedHeap::StrongRootsScope srs(g1h); |
ysr@777 | 2278 | // this is remark, so we'll use up all available threads |
ysr@777 | 2279 | int active_workers = ParallelGCThreads; |
johnc@2494 | 2280 | set_phase(active_workers, false /* concurrent */); |
ysr@777 | 2281 | |
ysr@777 | 2282 | CMRemarkTask remarkTask(this); |
ysr@777 | 2283 | // We will start all available threads, even if we decide that the |
ysr@777 | 2284 | // active_workers will be fewer. The extra ones will just bail out |
ysr@777 | 2285 | // immediately. |
ysr@777 | 2286 | int n_workers = g1h->workers()->total_workers(); |
ysr@777 | 2287 | g1h->set_par_threads(n_workers); |
ysr@777 | 2288 | g1h->workers()->run_task(&remarkTask); |
ysr@777 | 2289 | g1h->set_par_threads(0); |
ysr@777 | 2290 | } else { |
jrose@1424 | 2291 | G1CollectedHeap::StrongRootsScope srs(g1h); |
ysr@777 | 2292 | // this is remark, so we'll use up all available threads |
ysr@777 | 2293 | int active_workers = 1; |
johnc@2494 | 2294 | set_phase(active_workers, false /* concurrent */); |
ysr@777 | 2295 | |
ysr@777 | 2296 | CMRemarkTask remarkTask(this); |
ysr@777 | 2297 | // Run the remark task serially on the calling thread; task 0 is
ysr@777 | 2298 | // the only active task.
ysr@777 | 2300 | remarkTask.work(0); |
ysr@777 | 2301 | } |
tonyp@1458 | 2302 | SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); |
tonyp@1458 | 2303 | guarantee(satb_mq_set.completed_buffers_num() == 0, "invariant"); |
ysr@777 | 2304 | |
ysr@777 | 2305 | print_stats(); |
ysr@777 | 2306 | |
ysr@777 | 2307 | #if VERIFY_OBJS_PROCESSED |
ysr@777 | 2308 | if (_scan_obj_cl.objs_processed != ThreadLocalObjQueue::objs_enqueued) { |
ysr@777 | 2309 | gclog_or_tty->print_cr("Processed = %d, enqueued = %d.", |
ysr@777 | 2310 | _scan_obj_cl.objs_processed, |
ysr@777 | 2311 | ThreadLocalObjQueue::objs_enqueued); |
ysr@777 | 2312 | guarantee(_scan_obj_cl.objs_processed == |
ysr@777 | 2313 | ThreadLocalObjQueue::objs_enqueued, |
ysr@777 | 2314 | "Different number of objs processed and enqueued."); |
ysr@777 | 2315 | } |
ysr@777 | 2316 | #endif |
ysr@777 | 2317 | } |
ysr@777 | 2318 | |
tonyp@1479 | 2319 | #ifndef PRODUCT |
tonyp@1479 | 2320 | |
tonyp@1823 | 2321 | class PrintReachableOopClosure: public OopClosure { |
ysr@777 | 2322 | private: |
ysr@777 | 2323 | G1CollectedHeap* _g1h; |
ysr@777 | 2324 | outputStream* _out; |
johnc@2969 | 2325 | VerifyOption _vo; |
tonyp@1823 | 2326 | bool _all; |
ysr@777 | 2327 | |
ysr@777 | 2328 | public: |
johnc@2969 | 2329 | PrintReachableOopClosure(outputStream* out, |
johnc@2969 | 2330 | VerifyOption vo, |
tonyp@1823 | 2331 | bool all) : |
tonyp@1479 | 2332 | _g1h(G1CollectedHeap::heap()), |
johnc@2969 | 2333 | _out(out), _vo(vo), _all(all) { } |
ysr@777 | 2334 | |
ysr@1280 | 2335 | void do_oop(narrowOop* p) { do_oop_work(p); } |
ysr@1280 | 2336 | void do_oop( oop* p) { do_oop_work(p); } |
ysr@1280 | 2337 | |
ysr@1280 | 2338 | template <class T> void do_oop_work(T* p) { |
ysr@1280 | 2339 | oop obj = oopDesc::load_decode_heap_oop(p); |
ysr@777 | 2340 | const char* str = NULL; |
ysr@777 | 2341 | const char* str2 = ""; |
ysr@777 | 2342 | |
tonyp@1823 | 2343 | if (obj == NULL) { |
tonyp@1823 | 2344 | str = ""; |
tonyp@1823 | 2345 | } else if (!_g1h->is_in_g1_reserved(obj)) { |
tonyp@1823 | 2346 | str = " O"; |
tonyp@1823 | 2347 | } else { |
ysr@777 | 2348 | HeapRegion* hr = _g1h->heap_region_containing(obj); |
tonyp@1458 | 2349 | guarantee(hr != NULL, "invariant"); |
tonyp@1479 | 2350 | bool over_tams = false; |
johnc@2969 | 2351 | bool marked = false; |
johnc@2969 | 2352 | |
johnc@2969 | 2353 | switch (_vo) { |
johnc@2969 | 2354 | case VerifyOption_G1UsePrevMarking: |
johnc@2969 | 2355 | over_tams = hr->obj_allocated_since_prev_marking(obj); |
johnc@2969 | 2356 | marked = _g1h->isMarkedPrev(obj); |
johnc@2969 | 2357 | break; |
johnc@2969 | 2358 | case VerifyOption_G1UseNextMarking: |
johnc@2969 | 2359 | over_tams = hr->obj_allocated_since_next_marking(obj); |
johnc@2969 | 2360 | marked = _g1h->isMarkedNext(obj); |
johnc@2969 | 2361 | break; |
johnc@2969 | 2362 | case VerifyOption_G1UseMarkWord: |
johnc@2969 | 2363 | marked = obj->is_gc_marked(); |
johnc@2969 | 2364 | break; |
johnc@2969 | 2365 | default: |
johnc@2969 | 2366 | ShouldNotReachHere(); |
tonyp@1479 | 2367 | } |
tonyp@1479 | 2368 | |
tonyp@1479 | 2369 | if (over_tams) { |
tonyp@1823 | 2370 | str = " >"; |
tonyp@1823 | 2371 | if (marked) { |
ysr@777 | 2372 | str2 = " AND MARKED"; |
tonyp@1479 | 2373 | } |
tonyp@1823 | 2374 | } else if (marked) { |
tonyp@1823 | 2375 | str = " M"; |
tonyp@1479 | 2376 | } else { |
tonyp@1823 | 2377 | str = " NOT"; |
tonyp@1479 | 2378 | } |
ysr@777 | 2379 | } |
ysr@777 | 2380 | |
tonyp@1823 | 2381 | _out->print_cr(" "PTR_FORMAT": "PTR_FORMAT"%s%s", |
ysr@777 | 2382 | p, (void*) obj, str, str2); |
ysr@777 | 2383 | } |
ysr@777 | 2384 | }; |
ysr@777 | 2385 | |
tonyp@1823 | 2386 | class PrintReachableObjectClosure : public ObjectClosure { |
ysr@777 | 2387 | private: |
johnc@2969 | 2388 | G1CollectedHeap* _g1h; |
johnc@2969 | 2389 | outputStream* _out; |
johnc@2969 | 2390 | VerifyOption _vo; |
johnc@2969 | 2391 | bool _all; |
johnc@2969 | 2392 | HeapRegion* _hr; |
ysr@777 | 2393 | |
ysr@777 | 2394 | public: |
johnc@2969 | 2395 | PrintReachableObjectClosure(outputStream* out, |
johnc@2969 | 2396 | VerifyOption vo, |
tonyp@1823 | 2397 | bool all, |
tonyp@1823 | 2398 | HeapRegion* hr) : |
johnc@2969 | 2399 | _g1h(G1CollectedHeap::heap()), |
johnc@2969 | 2400 | _out(out), _vo(vo), _all(all), _hr(hr) { } |
tonyp@1823 | 2401 | |
tonyp@1823 | 2402 | void do_object(oop o) { |
johnc@2969 | 2403 | bool over_tams = false; |
johnc@2969 | 2404 | bool marked = false; |
johnc@2969 | 2405 | |
johnc@2969 | 2406 | switch (_vo) { |
johnc@2969 | 2407 | case VerifyOption_G1UsePrevMarking: |
johnc@2969 | 2408 | over_tams = _hr->obj_allocated_since_prev_marking(o); |
johnc@2969 | 2409 | marked = _g1h->isMarkedPrev(o); |
johnc@2969 | 2410 | break; |
johnc@2969 | 2411 | case VerifyOption_G1UseNextMarking: |
johnc@2969 | 2412 | over_tams = _hr->obj_allocated_since_next_marking(o); |
johnc@2969 | 2413 | marked = _g1h->isMarkedNext(o); |
johnc@2969 | 2414 | break; |
johnc@2969 | 2415 | case VerifyOption_G1UseMarkWord: |
johnc@2969 | 2416 | marked = o->is_gc_marked(); |
johnc@2969 | 2417 | break; |
johnc@2969 | 2418 | default: |
johnc@2969 | 2419 | ShouldNotReachHere(); |
tonyp@1823 | 2420 | } |
tonyp@1823 | 2421 | bool print_it = _all || over_tams || marked; |
tonyp@1823 | 2422 | |
tonyp@1823 | 2423 | if (print_it) { |
tonyp@1823 | 2424 | _out->print_cr(" "PTR_FORMAT"%s", |
tonyp@1823 | 2425 | o, (over_tams) ? " >" : (marked) ? " M" : ""); |
johnc@2969 | 2426 | PrintReachableOopClosure oopCl(_out, _vo, _all); |
tonyp@1823 | 2427 | o->oop_iterate(&oopCl); |
tonyp@1823 | 2428 | } |
ysr@777 | 2429 | } |
ysr@777 | 2430 | }; |
ysr@777 | 2431 | |
tonyp@1823 | 2432 | class PrintReachableRegionClosure : public HeapRegionClosure { |
ysr@777 | 2433 | private: |
ysr@777 | 2434 | outputStream* _out; |
johnc@2969 | 2435 | VerifyOption _vo; |
tonyp@1823 | 2436 | bool _all; |
ysr@777 | 2437 | |
ysr@777 | 2438 | public: |
ysr@777 | 2439 | bool doHeapRegion(HeapRegion* hr) { |
ysr@777 | 2440 | HeapWord* b = hr->bottom(); |
ysr@777 | 2441 | HeapWord* e = hr->end(); |
ysr@777 | 2442 | HeapWord* t = hr->top(); |
tonyp@1479 | 2443 | HeapWord* p = NULL; |
johnc@2969 | 2444 | |
johnc@2969 | 2445 | switch (_vo) { |
johnc@2969 | 2446 | case VerifyOption_G1UsePrevMarking: |
johnc@2969 | 2447 | p = hr->prev_top_at_mark_start(); |
johnc@2969 | 2448 | break; |
johnc@2969 | 2449 | case VerifyOption_G1UseNextMarking: |
johnc@2969 | 2450 | p = hr->next_top_at_mark_start(); |
johnc@2969 | 2451 | break; |
johnc@2969 | 2452 | case VerifyOption_G1UseMarkWord: |
johnc@2969 | 2453 | // When we are verifying marking using the mark word |
johnc@2969 | 2454 | // TAMS has no relevance. |
johnc@2969 | 2455 | assert(p == NULL, "post-condition"); |
johnc@2969 | 2456 | break; |
johnc@2969 | 2457 | default: |
johnc@2969 | 2458 | ShouldNotReachHere(); |
tonyp@1479 | 2459 | } |
ysr@777 | 2460 | _out->print_cr("** ["PTR_FORMAT", "PTR_FORMAT"] top: "PTR_FORMAT" " |
tonyp@1479 | 2461 | "TAMS: "PTR_FORMAT, b, e, t, p); |
tonyp@1823 | 2462 | _out->cr(); |
tonyp@1823 | 2463 | |
tonyp@1823 | 2464 | HeapWord* from = b; |
tonyp@1823 | 2465 | HeapWord* to = t; |
tonyp@1823 | 2466 | |
tonyp@1823 | 2467 | if (to > from) { |
tonyp@1823 | 2468 | _out->print_cr("Objects in ["PTR_FORMAT", "PTR_FORMAT"]", from, to); |
tonyp@1823 | 2469 | _out->cr(); |
johnc@2969 | 2470 | PrintReachableObjectClosure ocl(_out, _vo, _all, hr); |
tonyp@1823 | 2471 | hr->object_iterate_mem_careful(MemRegion(from, to), &ocl); |
tonyp@1823 | 2472 | _out->cr(); |
tonyp@1823 | 2473 | } |
ysr@777 | 2474 | |
ysr@777 | 2475 | return false; |
ysr@777 | 2476 | } |
ysr@777 | 2477 | |
johnc@2969 | 2478 | PrintReachableRegionClosure(outputStream* out, |
johnc@2969 | 2479 | VerifyOption vo, |
tonyp@1823 | 2480 | bool all) : |
johnc@2969 | 2481 | _out(out), _vo(vo), _all(all) { } |
ysr@777 | 2482 | }; |
ysr@777 | 2483 | |
johnc@2969 | 2484 | static const char* verify_option_to_tams(VerifyOption vo) { |
johnc@2969 | 2485 | switch (vo) { |
johnc@2969 | 2486 | case VerifyOption_G1UsePrevMarking: |
johnc@2969 | 2487 | return "PTAMS"; |
johnc@2969 | 2488 | case VerifyOption_G1UseNextMarking: |
johnc@2969 | 2489 | return "NTAMS"; |
johnc@2969 | 2490 | default: |
johnc@2969 | 2491 | return "NONE"; |
johnc@2969 | 2492 | } |
johnc@2969 | 2493 | } |
johnc@2969 | 2494 | |
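tonyp@1823 |      | // Dumps per-region reachability/marking information to a file named
tonyp@1823 |      | // <G1PrintReachableBaseFile>.<str>. Debug-only (see the surrounding
tonyp@1823 |      | // #ifndef PRODUCT).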
tonyp@1823 | 2495 | void ConcurrentMark::print_reachable(const char* str, |
johnc@2969 | 2496 | VerifyOption vo, |
tonyp@1823 | 2497 | bool all) { |
tonyp@1823 | 2498 | gclog_or_tty->cr(); |
tonyp@1823 | 2499 | gclog_or_tty->print_cr("== Doing heap dump... "); |
tonyp@1479 | 2500 | |
tonyp@1479 | 2501 | if (G1PrintReachableBaseFile == NULL) { |
tonyp@1479 | 2502 | gclog_or_tty->print_cr(" #### error: no base file defined"); |
tonyp@1479 | 2503 | return; |
tonyp@1479 | 2504 | } |
tonyp@1479 | 2505 | |
tonyp@1479 | 2506 | if (strlen(G1PrintReachableBaseFile) + 1 + strlen(str) > |
tonyp@1479 | 2507 | (JVM_MAXPATHLEN - 1)) { |
tonyp@1479 | 2508 | gclog_or_tty->print_cr(" #### error: file name too long"); |
tonyp@1479 | 2509 | return; |
tonyp@1479 | 2510 | } |
tonyp@1479 | 2511 | |
tonyp@1479 | 2512 | char file_name[JVM_MAXPATHLEN]; |
tonyp@1479 | 2513 | sprintf(file_name, "%s.%s", G1PrintReachableBaseFile, str); |
tonyp@1479 | 2514 | gclog_or_tty->print_cr(" dumping to file %s", file_name); |
tonyp@1479 | 2515 | |
tonyp@1479 | 2516 | fileStream fout(file_name); |
tonyp@1479 | 2517 | if (!fout.is_open()) { |
tonyp@1479 | 2518 | gclog_or_tty->print_cr(" #### error: could not open file"); |
tonyp@1479 | 2519 | return; |
tonyp@1479 | 2520 | } |
tonyp@1479 | 2521 | |
tonyp@1479 | 2522 | outputStream* out = &fout; |
johnc@2969 | 2523 | out->print_cr("-- USING %s", verify_option_to_tams(vo)); |
tonyp@1479 | 2524 | out->cr(); |
tonyp@1479 | 2525 | |
tonyp@1823 | 2526 | out->print_cr("--- ITERATING OVER REGIONS"); |
tonyp@1479 | 2527 | out->cr(); |
johnc@2969 | 2528 | PrintReachableRegionClosure rcl(out, vo, all); |
ysr@777 | 2529 | _g1h->heap_region_iterate(&rcl); |
tonyp@1479 | 2530 | out->cr(); |
tonyp@1479 | 2531 | |
tonyp@1479 | 2532 | gclog_or_tty->print_cr(" done"); |
tonyp@1823 | 2533 | gclog_or_tty->flush(); |
ysr@777 | 2534 | } |
ysr@777 | 2535 | |
tonyp@1479 | 2536 | #endif // PRODUCT |
tonyp@1479 | 2537 | |
ysr@777 | 2538 | // This note is for drainAllSATBBuffers and the code in between. |
ysr@777 | 2539 | // In the future we could reuse a task to do this work during an |
ysr@777 | 2540 | // evacuation pause (since tasks are not active during a pause and
ysr@777 | 2541 | // can therefore be claimed). This was a late change to the code and
ysr@777 | 2542 | // is currently not being taken advantage of. |
ysr@777 | 2543 | |
ysr@777 | 2544 | class CMGlobalObjectClosure : public ObjectClosure { |
ysr@777 | 2545 | private: |
ysr@777 | 2546 | ConcurrentMark* _cm; |
ysr@777 | 2547 | |
ysr@777 | 2548 | public: |
ysr@777 | 2549 | void do_object(oop obj) { |
ysr@777 | 2550 | _cm->deal_with_reference(obj); |
ysr@777 | 2551 | } |
ysr@777 | 2552 | |
ysr@777 | 2553 | CMGlobalObjectClosure(ConcurrentMark* cm) : _cm(cm) { } |
ysr@777 | 2554 | }; |
ysr@777 | 2555 | |
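ysr@777 |      | // Grays the given object: if it is in the heap, not yet marked on
ysr@777 |      | // the next bitmap, and not allocated since NTAMS, mark it and, if it
ysr@777 |      | // lies below the global finger, push it on the global mark stack so
ysr@777 |      | // that it will subsequently be scanned.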
ysr@777 | 2556 | void ConcurrentMark::deal_with_reference(oop obj) { |
tonyp@2968 | 2557 | if (verbose_high()) { |
ysr@777 | 2558 | gclog_or_tty->print_cr("[global] we're dealing with reference "PTR_FORMAT, |
ysr@777 | 2559 | (void*) obj); |
tonyp@2968 | 2560 | } |
ysr@777 | 2561 | |
ysr@777 | 2562 | HeapWord* objAddr = (HeapWord*) obj; |
ysr@1280 | 2563 | assert(obj->is_oop_or_null(true /* ignore mark word */), "Error"); |
ysr@777 | 2564 | if (_g1h->is_in_g1_reserved(objAddr)) { |
tonyp@2968 | 2565 | assert(obj != NULL, "null check is implicit"); |
tonyp@2968 | 2566 | if (!_nextMarkBitMap->isMarked(objAddr)) { |
tonyp@2968 | 2567 | // Only get the containing region if the object is not marked on the |
tonyp@2968 | 2568 | // bitmap (otherwise, it's a waste of time since we won't do |
tonyp@2968 | 2569 | // anything with it). |
tonyp@2968 | 2570 | HeapRegion* hr = _g1h->heap_region_containing_raw(obj); |
tonyp@2968 | 2571 | if (!hr->obj_allocated_since_next_marking(obj)) { |
tonyp@2968 | 2572 | if (verbose_high()) { |
tonyp@2968 | 2573 | gclog_or_tty->print_cr("[global] "PTR_FORMAT" is not considered " |
tonyp@2968 | 2574 | "marked", (void*) obj); |
tonyp@2968 | 2575 | } |
tonyp@2968 | 2576 | |
tonyp@2968 | 2577 | // we need to mark it first |
tonyp@2968 | 2578 | if (_nextMarkBitMap->parMark(objAddr)) { |
tonyp@2968 | 2579 | // No OrderAccess:store_load() is needed. It is implicit in the |
tonyp@2968 | 2580 | // CAS done in parMark(objAddr) above |
tonyp@2968 | 2581 | HeapWord* finger = _finger; |
tonyp@2968 | 2582 | if (objAddr < finger) { |
tonyp@2968 | 2583 | if (verbose_high()) { |
tonyp@2968 | 2584 | gclog_or_tty->print_cr("[global] below the global finger " |
tonyp@2968 | 2585 | "("PTR_FORMAT"), pushing it", finger); |
tonyp@2968 | 2586 | } |
tonyp@2968 | 2587 | if (!mark_stack_push(obj)) { |
tonyp@2968 | 2588 | if (verbose_low()) { |
tonyp@2968 | 2589 | gclog_or_tty->print_cr("[global] global stack overflow during " |
tonyp@2968 | 2590 | "deal_with_reference"); |
tonyp@2968 | 2591 | } |
tonyp@2968 | 2592 | } |
ysr@777 | 2593 | } |
ysr@777 | 2594 | } |
ysr@777 | 2595 | } |
ysr@777 | 2596 | } |
ysr@777 | 2597 | } |
ysr@777 | 2598 | } |
ysr@777 | 2599 | |
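ysr@777 |      | // Applies deal_with_reference() to every object recorded in the
ysr@777 |      | // completed SATB buffers, and then to the entries in each thread's
ysr@777 |      | // partially-filled buffer. Only called during an evacuation pause.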
ysr@777 | 2600 | void ConcurrentMark::drainAllSATBBuffers() { |
ysr@777 | 2601 | CMGlobalObjectClosure oc(this); |
ysr@777 | 2602 | SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); |
ysr@777 | 2603 | satb_mq_set.set_closure(&oc); |
ysr@777 | 2604 | |
ysr@777 | 2605 | while (satb_mq_set.apply_closure_to_completed_buffer()) { |
tonyp@2973 | 2606 | if (verbose_medium()) { |
ysr@777 | 2607 | gclog_or_tty->print_cr("[global] processed an SATB buffer"); |
tonyp@2973 | 2608 | } |
ysr@777 | 2609 | } |
ysr@777 | 2610 | |
ysr@777 | 2611 | // no need to check whether we should do this, as this is only |
ysr@777 | 2612 | // called during an evacuation pause |
ysr@777 | 2613 | satb_mq_set.iterate_closure_all_threads(); |
ysr@777 | 2614 | |
ysr@777 | 2615 | satb_mq_set.set_closure(NULL); |
tonyp@1458 | 2616 | assert(satb_mq_set.completed_buffers_num() == 0, "invariant"); |
ysr@777 | 2617 | } |
ysr@777 | 2618 | |
ysr@777 | 2619 | void ConcurrentMark::markPrev(oop p) { |
ysr@777 | 2620 | // Note we are overriding the read-only view of the prev map here, via |
ysr@777 | 2621 | // the cast. |
ysr@777 | 2622 | ((CMBitMap*)_prevMarkBitMap)->mark((HeapWord*)p); |
ysr@777 | 2623 | } |
ysr@777 | 2624 | |
ysr@777 | 2625 | void ConcurrentMark::clear(oop p) { |
ysr@777 | 2626 | assert(p != NULL && p->is_oop(), "expected an oop"); |
ysr@777 | 2627 | HeapWord* addr = (HeapWord*)p; |
ysr@777 | 2628 | assert(addr >= _nextMarkBitMap->startWord() &&
ysr@777 | 2629 |        addr < _nextMarkBitMap->endWord(), "in a region");
ysr@777 | 2630 | |
ysr@777 | 2631 | _nextMarkBitMap->clear(addr); |
ysr@777 | 2632 | } |
ysr@777 | 2633 | |
ysr@777 | 2634 | void ConcurrentMark::clearRangeBothMaps(MemRegion mr) { |
ysr@777 | 2635 | // Note we are overriding the read-only view of the prev map here, via |
ysr@777 | 2636 | // the cast. |
ysr@777 | 2637 | ((CMBitMap*)_prevMarkBitMap)->clearRange(mr); |
ysr@777 | 2638 | _nextMarkBitMap->clearRange(mr); |
ysr@777 | 2639 | } |
ysr@777 | 2640 | |
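ysr@777 |      | // Claims the next region to be scanned by atomically bumping the
ysr@777 |      | // global finger from the region's bottom to its end. A sketch of
ysr@777 |      | // the protocol (task_num is only used for logging):
ysr@777 |      | //
ysr@777 |      | //   finger = _finger;                      // snapshot the finger
ysr@777 |      | //   region = region containing finger;
ysr@777 |      | //   if (CAS(&_finger, finger, region->end()) succeeds)
ysr@777 |      | //     we own the region (return it, or NULL if it is empty);
ysr@777 |      | //   else
ysr@777 |      | //     someone else moved the finger; re-read it and retry.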
ysr@777 | 2641 | HeapRegion* |
ysr@777 | 2642 | ConcurrentMark::claim_region(int task_num) { |
ysr@777 | 2643 | // "checkpoint" the finger |
ysr@777 | 2644 | HeapWord* finger = _finger; |
ysr@777 | 2645 | |
ysr@777 | 2646 | // _heap_end will not change underneath our feet; it only changes at |
ysr@777 | 2647 | // yield points. |
ysr@777 | 2648 | while (finger < _heap_end) { |
tonyp@1458 | 2649 | assert(_g1h->is_in_g1_reserved(finger), "invariant"); |
ysr@777 | 2650 | |
tonyp@2968 | 2651 | // Note on how this code handles humongous regions. In the |
tonyp@2968 | 2652 | // normal case the finger will reach the start of a "starts |
tonyp@2968 | 2653 | // humongous" (SH) region. Its end will either be the end of the |
tonyp@2968 | 2654 | // last "continues humongous" (CH) region in the sequence, or the |
tonyp@2968 | 2655 | // standard end of the SH region (if the SH is the only region in |
tonyp@2968 | 2656 | // the sequence). That way claim_region() will skip over the CH |
tonyp@2968 | 2657 | // regions. However, there is a subtle race between a CM thread |
tonyp@2968 | 2658 | // executing this method and a mutator thread doing a humongous |
tonyp@2968 | 2659 | // object allocation. The two are not mutually exclusive as the CM |
tonyp@2968 | 2660 | // thread does not need to hold the Heap_lock when it gets |
tonyp@2968 | 2661 | // here. So there is a chance that claim_region() will come across |
tonyp@2968 | 2662 | // a free region that's in the process of becoming a SH or a CH
tonyp@2968 | 2663 | // region. In the former case, it will either |
tonyp@2968 | 2664 | // a) Miss the update to the region's end, in which case it will |
tonyp@2968 | 2665 | // visit every subsequent CH region, will find their bitmaps |
tonyp@2968 | 2666 | // empty, and do nothing, or |
tonyp@2968 | 2667 | // b) Observe the update of the region's end (in which case
tonyp@2968 | 2668 | // it will skip the subsequent CH regions). |
tonyp@2968 | 2669 | // If it comes across a region that suddenly becomes CH, the |
tonyp@2968 | 2670 | // scenario will be similar to b). So, the race between |
tonyp@2968 | 2671 | // claim_region() and a humongous object allocation might force us |
tonyp@2968 | 2672 | // to do a bit of unnecessary work (due to some unnecessary bitmap |
tonyp@2968 | 2673 | // iterations) but it should not introduce any correctness issues.
tonyp@2968 | 2674 | HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger); |
ysr@777 | 2675 | HeapWord* bottom = curr_region->bottom(); |
ysr@777 | 2676 | HeapWord* end = curr_region->end(); |
ysr@777 | 2677 | HeapWord* limit = curr_region->next_top_at_mark_start(); |
ysr@777 | 2678 | |
tonyp@2968 | 2679 | if (verbose_low()) { |
ysr@777 | 2680 | gclog_or_tty->print_cr("[%d] curr_region = "PTR_FORMAT" " |
ysr@777 | 2681 | "["PTR_FORMAT", "PTR_FORMAT"), " |
ysr@777 | 2682 | "limit = "PTR_FORMAT, |
ysr@777 | 2683 | task_num, curr_region, bottom, end, limit); |
tonyp@2968 | 2684 | } |
tonyp@2968 | 2685 | |
tonyp@2968 | 2686 | // Is the gap between reading the finger and doing the CAS too long? |
tonyp@2968 | 2687 | HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger); |
ysr@777 | 2688 | if (res == finger) { |
ysr@777 | 2689 | // we succeeded |
ysr@777 | 2690 | |
ysr@777 | 2691 | // notice that _finger == end cannot be guaranteed here since
ysr@777 | 2692 | // someone else might have moved the finger even further |
tonyp@1458 | 2693 | assert(_finger >= end, "the finger should have moved forward"); |
ysr@777 | 2694 | |
tonyp@2973 | 2695 | if (verbose_low()) { |
ysr@777 | 2696 | gclog_or_tty->print_cr("[%d] we were successful with region = " |
ysr@777 | 2697 | PTR_FORMAT, task_num, curr_region); |
tonyp@2973 | 2698 | } |
ysr@777 | 2699 | |
ysr@777 | 2700 | if (limit > bottom) { |
tonyp@2973 | 2701 | if (verbose_low()) { |
ysr@777 | 2702 | gclog_or_tty->print_cr("[%d] region "PTR_FORMAT" is not empty, " |
ysr@777 | 2703 | "returning it ", task_num, curr_region); |
tonyp@2973 | 2704 | } |
ysr@777 | 2705 | return curr_region; |
ysr@777 | 2706 | } else { |
tonyp@1458 | 2707 | assert(limit == bottom, |
tonyp@1458 | 2708 | "the region limit should be at bottom"); |
tonyp@2973 | 2709 | if (verbose_low()) { |
ysr@777 | 2710 | gclog_or_tty->print_cr("[%d] region "PTR_FORMAT" is empty, " |
ysr@777 | 2711 | "returning NULL", task_num, curr_region); |
tonyp@2973 | 2712 | } |
ysr@777 | 2713 | // we return NULL and the caller should try calling |
ysr@777 | 2714 | // claim_region() again. |
ysr@777 | 2715 | return NULL; |
ysr@777 | 2716 | } |
ysr@777 | 2717 | } else { |
tonyp@1458 | 2718 | assert(_finger > finger, "the finger should have moved forward"); |
tonyp@2973 | 2719 | if (verbose_low()) { |
ysr@777 | 2720 | gclog_or_tty->print_cr("[%d] somebody else moved the finger, " |
ysr@777 | 2721 | "global finger = "PTR_FORMAT", " |
ysr@777 | 2722 | "our finger = "PTR_FORMAT, |
ysr@777 | 2723 | task_num, _finger, finger); |
tonyp@2973 | 2724 | } |
ysr@777 | 2725 | |
ysr@777 | 2726 | // read it again |
ysr@777 | 2727 | finger = _finger; |
ysr@777 | 2728 | } |
ysr@777 | 2729 | } |
ysr@777 | 2730 | |
ysr@777 | 2731 | return NULL; |
ysr@777 | 2732 | } |
ysr@777 | 2733 | |
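johnc@2190 |      | // Checks each task's aborted region and, if it points into the
johnc@2190 |      | // collection set, clears it (the objects in it are about to be
johnc@2190 |      | // moved). Returns true if any aborted region was cleared.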
johnc@2190 | 2734 | bool ConcurrentMark::invalidate_aborted_regions_in_cset() { |
johnc@2190 | 2735 | bool result = false; |
johnc@2190 | 2736 | for (int i = 0; i < (int)_max_task_num; ++i) { |
johnc@2190 | 2737 | CMTask* the_task = _tasks[i]; |
johnc@2190 | 2738 | MemRegion mr = the_task->aborted_region(); |
johnc@2190 | 2739 | if (mr.start() != NULL) { |
johnc@2190 | 2740 | assert(mr.end() != NULL, "invariant"); |
johnc@2190 | 2741 | assert(mr.word_size() > 0, "invariant"); |
johnc@2190 | 2742 | HeapRegion* hr = _g1h->heap_region_containing(mr.start()); |
johnc@2190 | 2743 | assert(hr != NULL, "invariant"); |
johnc@2190 | 2744 | if (hr->in_collection_set()) { |
johnc@2190 | 2745 | // The region points into the collection set |
johnc@2190 | 2746 | the_task->set_aborted_region(MemRegion()); |
johnc@2190 | 2747 | result = true; |
johnc@2190 | 2748 | } |
johnc@2190 | 2749 | } |
johnc@2190 | 2750 | } |
johnc@2190 | 2751 | return result; |
johnc@2190 | 2752 | } |
johnc@2190 | 2753 | |
johnc@2190 | 2754 | bool ConcurrentMark::has_aborted_regions() { |
johnc@2190 | 2755 | for (int i = 0; i < (int)_max_task_num; ++i) { |
johnc@2190 | 2756 | CMTask* the_task = _tasks[i]; |
johnc@2190 | 2757 | MemRegion mr = the_task->aborted_region(); |
johnc@2190 | 2758 | if (mr.start() != NULL) { |
johnc@2190 | 2759 | assert(mr.end() != NULL, "invariant"); |
johnc@2190 | 2760 | assert(mr.word_size() > 0, "invariant"); |
johnc@2190 | 2761 | return true; |
johnc@2190 | 2762 | } |
johnc@2190 | 2763 | } |
johnc@2190 | 2764 | return false; |
johnc@2190 | 2765 | } |
johnc@2190 | 2766 | |
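ysr@777 |      | // Applies the closure to every oop recorded in the marking data
ysr@777 |      | // structures (the global mark stack and each task's queue) so that
ysr@777 |      | // entries for objects moved by an evacuation pause can be updated.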
ysr@777 | 2767 | void ConcurrentMark::oops_do(OopClosure* cl) { |
tonyp@2973 | 2768 | if (_markStack.size() > 0 && verbose_low()) { |
ysr@777 | 2769 | gclog_or_tty->print_cr("[global] scanning the global marking stack, " |
ysr@777 | 2770 | "size = %d", _markStack.size()); |
tonyp@2973 | 2771 | } |
ysr@777 | 2772 | // we first iterate over the contents of the mark stack... |
ysr@777 | 2773 | _markStack.oops_do(cl); |
ysr@777 | 2774 | |
ysr@777 | 2775 | for (int i = 0; i < (int)_max_task_num; ++i) { |
ysr@777 | 2776 | OopTaskQueue* queue = _task_queues->queue((int)i); |
ysr@777 | 2777 | |
tonyp@2973 | 2778 | if (queue->size() > 0 && verbose_low()) { |
ysr@777 | 2779 | gclog_or_tty->print_cr("[global] scanning task queue of task %d, " |
ysr@777 | 2780 | "size = %d", i, queue->size()); |
tonyp@2973 | 2781 | } |
ysr@777 | 2782 | |
ysr@777 | 2783 | // ...then over the contents of the all the task queues. |
ysr@777 | 2784 | queue->oops_do(cl); |
ysr@777 | 2785 | } |
ysr@777 | 2786 | |
johnc@2190 | 2787 | // Invalidate any entries in the region stack that point
ysr@777 | 2788 | // into the collection set.
ysr@777 | 2789 | if (_regionStack.invalidate_entries_into_cset()) { |
ysr@777 | 2790 | // otherwise, any gray objects copied during the evacuation pause |
ysr@777 | 2791 | // might not be visited. |
tonyp@1458 | 2792 | assert(_should_gray_objects, "invariant"); |
ysr@777 | 2793 | } |
johnc@2190 | 2794 | |
johnc@2190 | 2795 | // Invalidate any aborted regions, recorded in the individual CM |
johnc@2190 | 2796 | // tasks, that point into the collection set. |
johnc@2190 | 2797 | if (invalidate_aborted_regions_in_cset()) { |
johnc@2190 | 2798 | // otherwise, any gray objects copied during the evacuation pause |
johnc@2190 | 2799 | // might not be visited. |
johnc@2190 | 2800 | assert(_should_gray_objects, "invariant"); |
johnc@2190 | 2801 | } |
johnc@2190 | 2802 | |
ysr@777 | 2803 | } |
ysr@777 | 2804 | |
tonyp@2848 | 2805 | void ConcurrentMark::clear_marking_state(bool clear_overflow) { |
ysr@777 | 2806 | _markStack.setEmpty(); |
ysr@777 | 2807 | _markStack.clear_overflow(); |
ysr@777 | 2808 | _regionStack.setEmpty(); |
ysr@777 | 2809 | _regionStack.clear_overflow(); |
tonyp@2848 | 2810 | if (clear_overflow) { |
tonyp@2848 | 2811 | clear_has_overflown(); |
tonyp@2848 | 2812 | } else { |
tonyp@2848 | 2813 | assert(has_overflown(), "pre-condition"); |
tonyp@2848 | 2814 | } |
ysr@777 | 2815 | _finger = _heap_start; |
ysr@777 | 2816 | |
ysr@777 | 2817 | for (int i = 0; i < (int)_max_task_num; ++i) { |
ysr@777 | 2818 | OopTaskQueue* queue = _task_queues->queue(i); |
ysr@777 | 2819 | queue->set_empty(); |
johnc@2240 | 2820 | // Clear any partial regions from the CMTasks |
johnc@2240 | 2821 | _tasks[i]->clear_aborted_region(); |
ysr@777 | 2822 | } |
ysr@777 | 2823 | } |
ysr@777 | 2824 | |
ysr@777 | 2825 | void ConcurrentMark::print_stats() { |
ysr@777 | 2826 | if (verbose_stats()) { |
ysr@777 | 2827 | gclog_or_tty->print_cr("---------------------------------------------------------------------"); |
ysr@777 | 2828 | for (size_t i = 0; i < _active_tasks; ++i) { |
ysr@777 | 2829 | _tasks[i]->print_stats(); |
ysr@777 | 2830 | gclog_or_tty->print_cr("---------------------------------------------------------------------"); |
ysr@777 | 2831 | } |
ysr@777 | 2832 | } |
ysr@777 | 2833 | } |
ysr@777 | 2834 | |
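ysr@777 |      | // Marks, using a private mark stack, the live objects in the
ysr@777 |      | // collection set that have not been forwarded yet. Object arrays
ysr@777 |      | // are processed in chunks of _array_increment elements, with the
ysr@777 |      | // remainder re-pushed together with the next start index, so that
ysr@777 |      | // a large array cannot monopolize the stack.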
ysr@777 | 2835 | class CSMarkOopClosure: public OopClosure { |
ysr@777 | 2836 | friend class CSMarkBitMapClosure; |
ysr@777 | 2837 | |
ysr@777 | 2838 | G1CollectedHeap* _g1h; |
ysr@777 | 2839 | CMBitMap* _bm; |
ysr@777 | 2840 | ConcurrentMark* _cm; |
ysr@777 | 2841 | oop* _ms; |
ysr@777 | 2842 | jint* _array_ind_stack; |
ysr@777 | 2843 | int _ms_size; |
ysr@777 | 2844 | int _ms_ind; |
ysr@777 | 2845 | int _array_increment; |
ysr@777 | 2846 | |
ysr@777 | 2847 | bool push(oop obj, int arr_ind = 0) { |
ysr@777 | 2848 | if (_ms_ind == _ms_size) { |
ysr@777 | 2849 | gclog_or_tty->print_cr("Mark stack is full."); |
ysr@777 | 2850 | return false; |
ysr@777 | 2851 | } |
ysr@777 | 2852 | _ms[_ms_ind] = obj; |
tonyp@2973 | 2853 | if (obj->is_objArray()) { |
tonyp@2973 | 2854 | _array_ind_stack[_ms_ind] = arr_ind; |
tonyp@2973 | 2855 | } |
ysr@777 | 2856 | _ms_ind++; |
ysr@777 | 2857 | return true; |
ysr@777 | 2858 | } |
ysr@777 | 2859 | |
ysr@777 | 2860 | oop pop() { |
tonyp@2973 | 2861 | if (_ms_ind == 0) { |
tonyp@2973 | 2862 | return NULL; |
tonyp@2973 | 2863 | } else { |
ysr@777 | 2864 | _ms_ind--; |
ysr@777 | 2865 | return _ms[_ms_ind]; |
ysr@777 | 2866 | } |
ysr@777 | 2867 | } |
ysr@777 | 2868 | |
ysr@1280 | 2869 | template <class T> bool drain() { |
ysr@777 | 2870 | while (_ms_ind > 0) { |
ysr@777 | 2871 | oop obj = pop(); |
ysr@777 | 2872 | assert(obj != NULL, "Since index was non-zero."); |
ysr@777 | 2873 | if (obj->is_objArray()) { |
ysr@777 | 2874 | jint arr_ind = _array_ind_stack[_ms_ind]; |
ysr@777 | 2875 | objArrayOop aobj = objArrayOop(obj); |
ysr@777 | 2876 | jint len = aobj->length(); |
ysr@777 | 2877 | jint next_arr_ind = arr_ind + _array_increment; |
ysr@777 | 2878 | if (next_arr_ind < len) { |
ysr@777 | 2879 | push(obj, next_arr_ind); |
ysr@777 | 2880 | } |
ysr@777 | 2881 | // Now process this portion of this one. |
ysr@777 | 2882 | int lim = MIN2(next_arr_ind, len); |
ysr@777 | 2883 | for (int j = arr_ind; j < lim; j++) { |
apetrusenko@1347 | 2884 | do_oop(aobj->objArrayOopDesc::obj_at_addr<T>(j)); |
ysr@777 | 2885 | } |
ysr@777 | 2886 | |
ysr@777 | 2887 | } else { |
ysr@777 | 2888 | obj->oop_iterate(this); |
ysr@777 | 2889 | } |
ysr@777 | 2890 | if (abort()) return false; |
ysr@777 | 2891 | } |
ysr@777 | 2892 | return true; |
ysr@777 | 2893 | } |
ysr@777 | 2894 | |
ysr@777 | 2895 | public: |
ysr@777 | 2896 | CSMarkOopClosure(ConcurrentMark* cm, int ms_size) : |
ysr@777 | 2897 | _g1h(G1CollectedHeap::heap()), |
ysr@777 | 2898 | _cm(cm), |
ysr@777 | 2899 | _bm(cm->nextMarkBitMap()), |
ysr@777 | 2900 | _ms_size(ms_size), _ms_ind(0), |
ysr@777 | 2901 | _ms(NEW_C_HEAP_ARRAY(oop, ms_size)), |
ysr@777 | 2902 | _array_ind_stack(NEW_C_HEAP_ARRAY(jint, ms_size)), |
ysr@777 | 2903 | _array_increment(MAX2(ms_size/8, 16)) |
ysr@777 | 2904 | {} |
ysr@777 | 2905 | |
ysr@777 | 2906 | ~CSMarkOopClosure() { |
ysr@777 | 2907 | FREE_C_HEAP_ARRAY(oop, _ms); |
ysr@777 | 2908 | FREE_C_HEAP_ARRAY(jint, _array_ind_stack); |
ysr@777 | 2909 | } |
ysr@777 | 2910 | |
ysr@1280 | 2911 | virtual void do_oop(narrowOop* p) { do_oop_work(p); } |
ysr@1280 | 2912 | virtual void do_oop( oop* p) { do_oop_work(p); } |
ysr@1280 | 2913 | |
ysr@1280 | 2914 | template <class T> void do_oop_work(T* p) { |
ysr@1280 | 2915 | T heap_oop = oopDesc::load_heap_oop(p); |
ysr@1280 | 2916 | if (oopDesc::is_null(heap_oop)) return; |
ysr@1280 | 2917 | oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); |
ysr@777 | 2918 | if (obj->is_forwarded()) { |
ysr@777 | 2919 | // If the object has already been forwarded, we have to make sure |
ysr@777 | 2920 | // that it's marked. So follow the forwarding pointer. Note that |
ysr@777 | 2921 | // this does the right thing for self-forwarding pointers in the |
ysr@777 | 2922 | // evacuation failure case. |
ysr@777 | 2923 | obj = obj->forwardee(); |
ysr@777 | 2924 | } |
ysr@777 | 2925 | HeapRegion* hr = _g1h->heap_region_containing(obj); |
ysr@777 | 2926 | if (hr != NULL) { |
ysr@777 | 2927 | if (hr->in_collection_set()) { |
ysr@777 | 2928 | if (_g1h->is_obj_ill(obj)) { |
ysr@777 | 2929 | _bm->mark((HeapWord*)obj); |
ysr@777 | 2930 | if (!push(obj)) { |
ysr@777 | 2931 | gclog_or_tty->print_cr("Setting abort in CSMarkOopClosure because push failed."); |
ysr@777 | 2932 | set_abort(); |
ysr@777 | 2933 | } |
ysr@777 | 2934 | } |
ysr@777 | 2935 | } else { |
ysr@777 | 2936 | // Outside the collection set; we need to gray it |
ysr@777 | 2937 | _cm->deal_with_reference(obj); |
ysr@777 | 2938 | } |
ysr@777 | 2939 | } |
ysr@777 | 2940 | } |
ysr@777 | 2941 | }; |
ysr@777 | 2942 | |
ysr@777 | 2943 | class CSMarkBitMapClosure: public BitMapClosure { |
ysr@777 | 2944 | G1CollectedHeap* _g1h; |
ysr@777 | 2945 | CMBitMap* _bitMap; |
ysr@777 | 2946 | ConcurrentMark* _cm; |
ysr@777 | 2947 | CSMarkOopClosure _oop_cl; |
ysr@777 | 2948 | public: |
ysr@777 | 2949 | CSMarkBitMapClosure(ConcurrentMark* cm, int ms_size) : |
ysr@777 | 2950 | _g1h(G1CollectedHeap::heap()), |
ysr@777 | 2951 | _bitMap(cm->nextMarkBitMap()), |
ysr@777 | 2952 | _oop_cl(cm, ms_size) |
ysr@777 | 2953 | {} |
ysr@777 | 2954 | |
ysr@777 | 2955 | ~CSMarkBitMapClosure() {} |
ysr@777 | 2956 | |
ysr@777 | 2957 | bool do_bit(size_t offset) { |
ysr@777 | 2958 | // convert offset into a HeapWord* |
ysr@777 | 2959 | HeapWord* addr = _bitMap->offsetToHeapWord(offset); |
ysr@777 | 2960 | assert(addr >= _bitMap->startWord() && addr < _bitMap->endWord(),
ysr@777 | 2961 | "address out of range"); |
ysr@777 | 2962 | assert(_bitMap->isMarked(addr), "tautology"); |
ysr@777 | 2963 | oop obj = oop(addr); |
ysr@777 | 2964 | if (!obj->is_forwarded()) { |
ysr@777 | 2965 | if (!_oop_cl.push(obj)) return false; |
ysr@1280 | 2966 | if (UseCompressedOops) { |
ysr@1280 | 2967 | if (!_oop_cl.drain<narrowOop>()) return false; |
ysr@1280 | 2968 | } else { |
ysr@1280 | 2969 | if (!_oop_cl.drain<oop>()) return false; |
ysr@1280 | 2970 | } |
ysr@777 | 2971 | } |
ysr@777 | 2972 | // Otherwise... |
ysr@777 | 2973 | return true; |
ysr@777 | 2974 | } |
ysr@777 | 2975 | }; |
ysr@777 | 2976 | |
ysr@777 | 2977 | |
ysr@777 | 2978 | class CompleteMarkingInCSHRClosure: public HeapRegionClosure { |
ysr@777 | 2979 | CMBitMap* _bm; |
ysr@777 | 2980 | CSMarkBitMapClosure _bit_cl; |
ysr@777 | 2981 | enum SomePrivateConstants { |
ysr@777 | 2982 | MSSize = 1000 |
ysr@777 | 2983 | }; |
ysr@777 | 2984 | bool _completed; |
ysr@777 | 2985 | public: |
ysr@777 | 2986 | CompleteMarkingInCSHRClosure(ConcurrentMark* cm) : |
ysr@777 | 2987 | _bm(cm->nextMarkBitMap()), |
ysr@777 | 2988 | _bit_cl(cm, MSSize), |
ysr@777 | 2989 | _completed(true) |
ysr@777 | 2990 | {} |
ysr@777 | 2991 | |
ysr@777 | 2992 | ~CompleteMarkingInCSHRClosure() {} |
ysr@777 | 2993 | |
ysr@777 | 2994 | bool doHeapRegion(HeapRegion* r) { |
ysr@777 | 2995 | if (!r->evacuation_failed()) { |
ysr@777 | 2996 | MemRegion mr = MemRegion(r->bottom(), r->next_top_at_mark_start()); |
ysr@777 | 2997 | if (!mr.is_empty()) { |
ysr@777 | 2998 | if (!_bm->iterate(&_bit_cl, mr)) { |
ysr@777 | 2999 | _completed = false; |
ysr@777 | 3000 | return true; |
ysr@777 | 3001 | } |
ysr@777 | 3002 | } |
ysr@777 | 3003 | } |
ysr@777 | 3004 | return false; |
ysr@777 | 3005 | } |
ysr@777 | 3006 | |
ysr@777 | 3007 | bool completed() { return _completed; } |
ysr@777 | 3008 | }; |
ysr@777 | 3009 | |
ysr@777 | 3010 | class ClearMarksInHRClosure: public HeapRegionClosure { |
ysr@777 | 3011 | CMBitMap* _bm; |
ysr@777 | 3012 | public: |
ysr@777 | 3013 | ClearMarksInHRClosure(CMBitMap* bm): _bm(bm) { } |
ysr@777 | 3014 | |
ysr@777 | 3015 | bool doHeapRegion(HeapRegion* r) { |
ysr@777 | 3016 | if (!r->used_region().is_empty() && !r->evacuation_failed()) { |
ysr@777 | 3017 | MemRegion usedMR = r->used_region();
ysr@777 | 3018 | _bm->clearRange(usedMR);
ysr@777 | 3019 | } |
ysr@777 | 3020 | return false; |
ysr@777 | 3021 | } |
ysr@777 | 3022 | }; |
ysr@777 | 3023 | |
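ysr@777 |      | // Completes the marking of objects in the collection set before
ysr@777 |      | // they are moved. If the fixed-size local mark stack in the closure
ysr@777 |      | // overflows, the whole iteration is simply retried until it
ysr@777 |      | // completes. The marks made here are temporary and are cleared
ysr@777 |      | // from the (successfully evacuated) regions at the end.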
ysr@777 | 3024 | void ConcurrentMark::complete_marking_in_collection_set() { |
ysr@777 | 3025 | G1CollectedHeap* g1h = G1CollectedHeap::heap(); |
ysr@777 | 3026 | |
ysr@777 | 3027 | if (!g1h->mark_in_progress()) { |
ysr@777 | 3028 | g1h->g1_policy()->record_mark_closure_time(0.0); |
ysr@777 | 3029 | return; |
ysr@777 | 3030 | } |
ysr@777 | 3031 | |
ysr@777 | 3033 | double start = os::elapsedTime();
ysr@777 | 3034 | while (true) {
ysr@777 | 3036 | CompleteMarkingInCSHRClosure cmplt(this); |
ysr@777 | 3037 | g1h->collection_set_iterate(&cmplt); |
ysr@777 | 3038 | if (cmplt.completed()) break; |
ysr@777 | 3039 | } |
ysr@777 | 3040 | double end_time = os::elapsedTime(); |
ysr@777 | 3041 | double elapsed_time_ms = (end_time - start) * 1000.0; |
ysr@777 | 3042 | g1h->g1_policy()->record_mark_closure_time(elapsed_time_ms); |
ysr@777 | 3043 | |
ysr@777 | 3044 | ClearMarksInHRClosure clr(nextMarkBitMap()); |
ysr@777 | 3045 | g1h->collection_set_iterate(&clr); |
ysr@777 | 3046 | } |
ysr@777 | 3047 | |
ysr@777 | 3048 | // The next two methods deal with the following optimisation. Some |
ysr@777 | 3049 | // objects are gray by being marked and located above the finger. If |
ysr@777 | 3050 | // they are copied below the finger during an evacuation pause, then
ysr@777 | 3051 | // they need to be pushed on the stack. The observation is that, if
ysr@777 | 3052 | // there are no regions in the collection set located above the |
ysr@777 | 3053 | // finger, then the above cannot happen, hence we do not need to |
ysr@777 | 3054 | // explicitly gray any objects when copying them to below the |
ysr@777 | 3055 | // finger. The global stack will be scanned to ensure that, if it |
ysr@777 | 3056 | // points to objects being copied, it will update their |
ysr@777 | 3057 | // location. There is a tricky situation with the gray objects in |
ysr@777 | 3058 | // the region stack that are being copied, however. See the comment in
ysr@777 | 3059 | // newCSet(). |
ysr@777 | 3060 | |
ysr@777 | 3061 | void ConcurrentMark::newCSet() { |
tonyp@2973 | 3062 | if (!concurrent_marking_in_progress()) { |
ysr@777 | 3063 | // nothing to do if marking is not in progress |
ysr@777 | 3064 | return; |
tonyp@2973 | 3065 | } |
ysr@777 | 3066 | |
ysr@777 | 3067 | // find what the lowest finger is among the global and local fingers |
ysr@777 | 3068 | _min_finger = _finger; |
ysr@777 | 3069 | for (int i = 0; i < (int)_max_task_num; ++i) { |
ysr@777 | 3070 | CMTask* task = _tasks[i]; |
ysr@777 | 3071 | HeapWord* task_finger = task->finger(); |
tonyp@2973 | 3072 | if (task_finger != NULL && task_finger < _min_finger) { |
ysr@777 | 3073 | _min_finger = task_finger; |
tonyp@2973 | 3074 | } |
ysr@777 | 3075 | } |
ysr@777 | 3076 | |
ysr@777 | 3077 | _should_gray_objects = false; |
ysr@777 | 3078 | |
ysr@777 | 3079 | // This fixes a very subtle and frustrating bug. It might be the case
ysr@777 | 3080 | // that, during an evacuation pause, heap regions that contain
ysr@777 | 3081 | // objects that are gray (by being in regions contained in the |
ysr@777 | 3082 | // region stack) are included in the collection set. Since such gray |
ysr@777 | 3083 | // objects will be moved, and because it's not easy to redirect |
ysr@777 | 3084 | // region stack entries to point to a new location (because objects |
ysr@777 | 3085 | // in one region might be scattered to multiple regions after they |
ysr@777 | 3086 | // are copied), one option is to ensure that all marked objects |
ysr@777 | 3087 | // copied during a pause are pushed on the stack. Notice, however, |
ysr@777 | 3088 | // that this problem can only happen when the region stack is not |
ysr@777 | 3089 | // empty during an evacuation pause. So, we make the fix a bit less |
ysr@777 | 3090 | // conservative and ensure that regions are pushed on the stack, |
ysr@777 | 3091 | // irrespective of whether all collection set regions are below the
ysr@777 | 3092 | // finger, if the region stack is not empty. This is expected to be |
ysr@777 | 3093 | // a rare case, so I don't think it's necessary to be smarter about it.
tonyp@2973 | 3094 | if (!region_stack_empty() || has_aborted_regions()) { |
ysr@777 | 3095 | _should_gray_objects = true; |
tonyp@2973 | 3096 | } |
ysr@777 | 3097 | } |
ysr@777 | 3098 | |
ysr@777 | 3099 | void ConcurrentMark::registerCSetRegion(HeapRegion* hr) { |
tonyp@2973 | 3100 | if (!concurrent_marking_in_progress()) return; |
ysr@777 | 3101 | |
ysr@777 | 3102 | HeapWord* region_end = hr->end(); |
tonyp@2973 | 3103 | if (region_end > _min_finger) { |
ysr@777 | 3104 | _should_gray_objects = true; |
tonyp@2973 | 3105 | } |
ysr@777 | 3106 | } |
ysr@777 | 3107 | |
johnc@2910 | 3108 | // Resets the region fields of active CMTasks whose values point |
johnc@2910 | 3109 | // into the collection set. |
johnc@2910 | 3110 | void ConcurrentMark::reset_active_task_region_fields_in_cset() { |
johnc@2910 | 3111 | assert(SafepointSynchronize::is_at_safepoint(), "should be in STW"); |
johnc@2910 | 3112 | assert(parallel_marking_threads() <= _max_task_num, "sanity"); |
johnc@2910 | 3113 | |
johnc@2910 | 3114 | for (int i = 0; i < (int)parallel_marking_threads(); i += 1) { |
johnc@2910 | 3115 | CMTask* task = _tasks[i]; |
johnc@2910 | 3116 | HeapWord* task_finger = task->finger(); |
johnc@2910 | 3117 | if (task_finger != NULL) { |
johnc@2910 | 3118 | assert(_g1h->is_in_g1_reserved(task_finger), "not in heap"); |
johnc@2910 | 3119 | HeapRegion* finger_region = _g1h->heap_region_containing(task_finger); |
johnc@2910 | 3120 | if (finger_region->in_collection_set()) { |
johnc@2910 | 3121 | // The task's current region is in the collection set. |
johnc@2910 | 3122 | // This region will be evacuated in the current GC and |
johnc@2910 | 3123 | // the region fields in the task will be stale. |
johnc@2910 | 3124 | task->giveup_current_region(); |
johnc@2910 | 3125 | } |
johnc@2910 | 3126 | } |
johnc@2910 | 3127 | } |
johnc@2910 | 3128 | } |
johnc@2910 | 3129 | |
ysr@777 | 3130 | // abandon current marking iteration due to a Full GC |
ysr@777 | 3131 | void ConcurrentMark::abort() { |
ysr@777 | 3132 | // Clear all marks to force marking thread to do nothing |
ysr@777 | 3133 | _nextMarkBitMap->clearAll(); |
ysr@777 | 3134 | // Empty mark stack |
ysr@777 | 3135 | clear_marking_state(); |
johnc@2190 | 3136 | for (int i = 0; i < (int)_max_task_num; ++i) { |
ysr@777 | 3137 | _tasks[i]->clear_region_fields(); |
johnc@2190 | 3138 | } |
ysr@777 | 3139 | _has_aborted = true; |
ysr@777 | 3140 | |
ysr@777 | 3141 | SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); |
ysr@777 | 3142 | satb_mq_set.abandon_partial_marking(); |
tonyp@1752 | 3143 | // This can be called either during or outside marking, we'll read |
tonyp@1752 | 3144 | // the expected_active value from the SATB queue set. |
tonyp@1752 | 3145 | satb_mq_set.set_active_all_threads( |
tonyp@1752 | 3146 | false, /* new active value */ |
tonyp@1752 | 3147 | satb_mq_set.is_active() /* expected_active */); |
ysr@777 | 3148 | } |
ysr@777 | 3149 | |
ysr@777 | 3150 | static void print_ms_time_info(const char* prefix, const char* name, |
ysr@777 | 3151 | NumberSeq& ns) { |
ysr@777 | 3152 | gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).", |
ysr@777 | 3153 | prefix, ns.num(), name, ns.sum()/1000.0, ns.avg()); |
ysr@777 | 3154 | if (ns.num() > 0) { |
ysr@777 | 3155 | gclog_or_tty->print_cr("%s [std. dev = %8.2f ms, max = %8.2f ms]", |
ysr@777 | 3156 | prefix, ns.sd(), ns.maximum()); |
ysr@777 | 3157 | } |
ysr@777 | 3158 | } |
ysr@777 | 3159 | |
ysr@777 | 3160 | void ConcurrentMark::print_summary_info() { |
ysr@777 | 3161 | gclog_or_tty->print_cr(" Concurrent marking:"); |
ysr@777 | 3162 | print_ms_time_info(" ", "init marks", _init_times); |
ysr@777 | 3163 | print_ms_time_info(" ", "remarks", _remark_times); |
ysr@777 | 3164 | { |
ysr@777 | 3165 | print_ms_time_info(" ", "final marks", _remark_mark_times); |
ysr@777 | 3166 | print_ms_time_info(" ", "weak refs", _remark_weak_ref_times); |
ysr@777 | 3167 | |
ysr@777 | 3168 | } |
ysr@777 | 3169 | print_ms_time_info(" ", "cleanups", _cleanup_times); |
ysr@777 | 3170 | gclog_or_tty->print_cr(" Final counting total time = %8.2f s (avg = %8.2f ms).", |
ysr@777 | 3171 | _total_counting_time, |
ysr@777 | 3172 | (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 / |
ysr@777 | 3173 | (double)_cleanup_times.num() |
ysr@777 | 3174 | : 0.0)); |
ysr@777 | 3175 | if (G1ScrubRemSets) { |
ysr@777 | 3176 | gclog_or_tty->print_cr(" RS scrub total time = %8.2f s (avg = %8.2f ms).", |
ysr@777 | 3177 | _total_rs_scrub_time, |
ysr@777 | 3178 | (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 / |
ysr@777 | 3179 | (double)_cleanup_times.num() |
ysr@777 | 3180 | : 0.0)); |
ysr@777 | 3181 | } |
ysr@777 | 3182 | gclog_or_tty->print_cr(" Total stop_world time = %8.2f s.", |
ysr@777 | 3183 | (_init_times.sum() + _remark_times.sum() + |
ysr@777 | 3184 | _cleanup_times.sum())/1000.0); |
ysr@777 | 3185 | gclog_or_tty->print_cr(" Total concurrent time = %8.2f s " |
ysr@777 | 3186 | "(%8.2f s marking, %8.2f s counting).", |
ysr@777 | 3187 | cmThread()->vtime_accum(), |
ysr@777 | 3188 | cmThread()->vtime_mark_accum(), |
ysr@777 | 3189 | cmThread()->vtime_count_accum()); |
ysr@777 | 3190 | } |
ysr@777 | 3191 | |
tonyp@1454 | 3192 | void ConcurrentMark::print_worker_threads_on(outputStream* st) const { |
tonyp@1454 | 3193 | _parallel_workers->print_worker_threads_on(st); |
tonyp@1454 | 3194 | } |
tonyp@1454 | 3195 | |
ysr@777 | 3196 | // Closures |
ysr@777 | 3197 | // XXX: there seems to be a lot of code duplication here; |
ysr@777 | 3198 | // should refactor and consolidate the shared code. |
ysr@777 | 3199 | |
ysr@777 | 3203 | // We take a break if someone is trying to stop the world. |
ysr@777 | 3204 | bool ConcurrentMark::do_yield_check(int worker_i) { |
ysr@777 | 3205 | if (should_yield()) { |
tonyp@2973 | 3206 | if (worker_i == 0) { |
ysr@777 | 3207 | _g1h->g1_policy()->record_concurrent_pause(); |
tonyp@2973 | 3208 | } |
ysr@777 | 3209 | cmThread()->yield(); |
tonyp@2973 | 3210 | if (worker_i == 0) { |
ysr@777 | 3211 | _g1h->g1_policy()->record_concurrent_pause_end(); |
tonyp@2973 | 3212 | } |
ysr@777 | 3213 | return true; |
ysr@777 | 3214 | } else { |
ysr@777 | 3215 | return false; |
ysr@777 | 3216 | } |
ysr@777 | 3217 | } |
ysr@777 | 3218 | |
ysr@777 | 3219 | bool ConcurrentMark::should_yield() { |
ysr@777 | 3220 | return cmThread()->should_yield(); |
ysr@777 | 3221 | } |
ysr@777 | 3222 | |
ysr@777 | 3223 | bool ConcurrentMark::containing_card_is_marked(void* p) { |
ysr@777 | 3224 | size_t offset = pointer_delta(p, _g1h->reserved_region().start(), 1); |
ysr@777 | 3225 | return _card_bm.at(offset >> CardTableModRefBS::card_shift); |
ysr@777 | 3226 | } |
ysr@777 | 3227 | |
ysr@777 | 3228 | bool ConcurrentMark::containing_cards_are_marked(void* start, |
ysr@777 | 3229 | void* last) { |
tonyp@2973 | 3230 | return containing_card_is_marked(start) && |
tonyp@2973 | 3231 | containing_card_is_marked(last); |
ysr@777 | 3232 | } |
ysr@777 | 3233 | |
ysr@777 | 3234 | #ifndef PRODUCT |
ysr@777 | 3235 | // for debugging purposes |
ysr@777 | 3236 | void ConcurrentMark::print_finger() { |
ysr@777 | 3237 | gclog_or_tty->print_cr("heap ["PTR_FORMAT", "PTR_FORMAT"), global finger = "PTR_FORMAT, |
ysr@777 | 3238 | _heap_start, _heap_end, _finger); |
ysr@777 | 3239 | for (int i = 0; i < (int) _max_task_num; ++i) { |
ysr@777 | 3240 | gclog_or_tty->print(" %d: "PTR_FORMAT, i, _tasks[i]->finger()); |
ysr@777 | 3241 | } |
ysr@777 | 3242 | gclog_or_tty->print_cr(""); |
ysr@777 | 3243 | } |
ysr@777 | 3244 | #endif |
ysr@777 | 3245 | |
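tonyp@2968 |      | // Scans a single (already marked) object: visits its fields with
tonyp@2968 |      | // the CM oop closure, adds its size to the scanned-words count, and
tonyp@2968 |      | // then checks whether the clock should be called (check_limits()).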
tonyp@2968 | 3246 | void CMTask::scan_object(oop obj) { |
tonyp@2968 | 3247 | assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant"); |
tonyp@2968 | 3248 | |
tonyp@2968 | 3249 | if (_cm->verbose_high()) { |
tonyp@2968 | 3250 | gclog_or_tty->print_cr("[%d] we're scanning object "PTR_FORMAT, |
tonyp@2968 | 3251 | _task_id, (void*) obj); |
tonyp@2968 | 3252 | } |
tonyp@2968 | 3253 | |
tonyp@2968 | 3254 | size_t obj_size = obj->size(); |
tonyp@2968 | 3255 | _words_scanned += obj_size; |
tonyp@2968 | 3256 | |
tonyp@2968 | 3257 | obj->oop_iterate(_cm_oop_closure); |
tonyp@2968 | 3258 | statsOnly( ++_objs_scanned ); |
tonyp@2968 | 3259 | check_limits(); |
tonyp@2968 | 3260 | } |
tonyp@2968 | 3261 | |
ysr@777 | 3262 | // Closure for iteration over bitmaps |
ysr@777 | 3263 | class CMBitMapClosure : public BitMapClosure { |
ysr@777 | 3264 | private: |
ysr@777 | 3265 | // the bitmap that is being iterated over |
ysr@777 | 3266 | CMBitMap* _nextMarkBitMap; |
ysr@777 | 3267 | ConcurrentMark* _cm; |
ysr@777 | 3268 | CMTask* _task; |
ysr@777 | 3269 | // true if we're scanning a heap region claimed by the task (so that |
ysr@777 | 3270 | // we move the finger along), false if we're not, i.e. currently when |
ysr@777 | 3271 | // scanning a heap region popped from the region stack (so that we |
ysr@777 | 3272 | // do not move the task finger along; it'd be a mistake if we did so). |
ysr@777 | 3273 | bool _scanning_heap_region; |
ysr@777 | 3274 | |
ysr@777 | 3275 | public: |
ysr@777 | 3276 | CMBitMapClosure(CMTask *task, |
ysr@777 | 3277 | ConcurrentMark* cm, |
ysr@777 | 3278 | CMBitMap* nextMarkBitMap) |
ysr@777 | 3279 | : _task(task), _cm(cm), _nextMarkBitMap(nextMarkBitMap) { } |
ysr@777 | 3280 | |
ysr@777 | 3281 | void set_scanning_heap_region(bool scanning_heap_region) { |
ysr@777 | 3282 | _scanning_heap_region = scanning_heap_region; |
ysr@777 | 3283 | } |
ysr@777 | 3284 | |
ysr@777 | 3285 | bool do_bit(size_t offset) { |
ysr@777 | 3286 | HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset); |
tonyp@1458 | 3287 | assert(_nextMarkBitMap->isMarked(addr), "invariant"); |
tonyp@1458 | 3288 | assert( addr < _cm->finger(), "invariant"); |
ysr@777 | 3289 | |
ysr@777 | 3290 | if (_scanning_heap_region) { |
ysr@777 | 3291 | statsOnly( _task->increase_objs_found_on_bitmap() ); |
tonyp@1458 | 3292 | assert(addr >= _task->finger(), "invariant"); |
ysr@777 | 3293 | // We move that task's local finger along. |
ysr@777 | 3294 | _task->move_finger_to(addr); |
ysr@777 | 3295 | } else { |
ysr@777 | 3296 | // We move the task's region finger along. |
ysr@777 | 3297 | _task->move_region_finger_to(addr); |
ysr@777 | 3298 | } |
ysr@777 | 3299 | |
ysr@777 | 3300 | _task->scan_object(oop(addr)); |
ysr@777 | 3301 | // we only partially drain the local queue and global stack |
ysr@777 | 3302 | _task->drain_local_queue(true); |
ysr@777 | 3303 | _task->drain_global_stack(true); |
ysr@777 | 3304 | |
ysr@777 | 3305 | // if the has_aborted flag has been raised, we need to bail out of |
ysr@777 | 3306 | // the iteration |
ysr@777 | 3307 | return !_task->has_aborted(); |
ysr@777 | 3308 | } |
ysr@777 | 3309 | }; |
ysr@777 | 3310 | |
ysr@777 | 3311 | // Closure for iterating over objects, currently only used for |
ysr@777 | 3312 | // processing SATB buffers. |
ysr@777 | 3313 | class CMObjectClosure : public ObjectClosure { |
ysr@777 | 3314 | private: |
ysr@777 | 3315 | CMTask* _task; |
ysr@777 | 3316 | |
ysr@777 | 3317 | public: |
ysr@777 | 3318 | void do_object(oop obj) { |
ysr@777 | 3319 | _task->deal_with_reference(obj); |
ysr@777 | 3320 | } |
ysr@777 | 3321 | |
ysr@777 | 3322 | CMObjectClosure(CMTask* task) : _task(task) { } |
ysr@777 | 3323 | }; |
ysr@777 | 3324 | |
tonyp@2968 | 3325 | G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h, |
tonyp@2968 | 3326 | ConcurrentMark* cm, |
tonyp@2968 | 3327 | CMTask* task) |
tonyp@2968 | 3328 | : _g1h(g1h), _cm(cm), _task(task) { |
tonyp@2968 | 3329 | assert(_ref_processor == NULL, "should be initialized to NULL"); |
tonyp@2968 | 3330 | |
tonyp@2968 | 3331 | if (G1UseConcMarkReferenceProcessing) { |
tonyp@2968 | 3332 | _ref_processor = g1h->ref_processor(); |
tonyp@2968 | 3333 | assert(_ref_processor != NULL, "should not be NULL"); |
ysr@777 | 3334 | } |
tonyp@2968 | 3335 | } |
ysr@777 | 3336 | |
ysr@777 | 3337 | void CMTask::setup_for_region(HeapRegion* hr) { |
tonyp@1458 | 3338 | // Separated the asserts so that we know which one fires. |
tonyp@1458 | 3339 | assert(hr != NULL, |
tonyp@1458 | 3340 | "claim_region() should have filtered out continues humongous regions"); |
tonyp@1458 | 3341 | assert(!hr->continuesHumongous(), |
tonyp@1458 | 3342 | "claim_region() should have filtered out continues humongous regions"); |
ysr@777 | 3343 | |
tonyp@2973 | 3344 | if (_cm->verbose_low()) { |
ysr@777 | 3345 | gclog_or_tty->print_cr("[%d] setting up for region "PTR_FORMAT, |
ysr@777 | 3346 | _task_id, hr); |
tonyp@2973 | 3347 | } |
ysr@777 | 3348 | |
ysr@777 | 3349 | _curr_region = hr; |
ysr@777 | 3350 | _finger = hr->bottom(); |
ysr@777 | 3351 | update_region_limit(); |
ysr@777 | 3352 | } |
ysr@777 | 3353 | |
ysr@777 | 3354 | void CMTask::update_region_limit() { |
ysr@777 | 3355 | HeapRegion* hr = _curr_region; |
ysr@777 | 3356 | HeapWord* bottom = hr->bottom(); |
ysr@777 | 3357 | HeapWord* limit = hr->next_top_at_mark_start(); |
ysr@777 | 3358 | |
ysr@777 | 3359 | if (limit == bottom) { |
tonyp@2973 | 3360 | if (_cm->verbose_low()) { |
ysr@777 | 3361 | gclog_or_tty->print_cr("[%d] found an empty region " |
ysr@777 | 3362 | "["PTR_FORMAT", "PTR_FORMAT")", |
ysr@777 | 3363 | _task_id, bottom, limit); |
tonyp@2973 | 3364 | } |
ysr@777 | 3365 | // The region was collected underneath our feet. |
ysr@777 | 3366 | // We set the finger to bottom to ensure that the bitmap |
ysr@777 | 3367 | // iteration that will follow this will not do anything. |
ysr@777 | 3368 | // (this is not a condition that holds when we set the region up, |
ysr@777 | 3369 | // as the region is not supposed to be empty in the first place) |
ysr@777 | 3370 | _finger = bottom; |
ysr@777 | 3371 | } else if (limit >= _region_limit) { |
tonyp@1458 | 3372 | assert(limit >= _finger, "peace of mind"); |
ysr@777 | 3373 | } else { |
tonyp@1458 | 3374 | assert(limit < _region_limit, "only way to get here"); |
ysr@777 | 3375 | // This can happen under some pretty unusual circumstances. An |
ysr@777 | 3376 | // evacuation pause empties the region underneath our feet (NTAMS |
ysr@777 | 3377 | // at bottom). We then do some allocation in the region (NTAMS |
ysr@777 | 3378 | // stays at bottom), followed by the region being used as a GC |
ysr@777 | 3379 | // alloc region (NTAMS will move to top() and the objects |
ysr@777 | 3380 | // originally below it will be grayed). All objects now marked in |
ysr@777 | 3381 | // the region are explicitly grayed, if below the global finger, |
ysr@777 | 3382 | // and in fact we do not need to scan anything else. So, we simply
ysr@777 | 3383 | // set _finger to be limit to ensure that the bitmap iteration |
ysr@777 | 3384 | // doesn't do anything. |
ysr@777 | 3385 | _finger = limit; |
ysr@777 | 3386 | } |
ysr@777 | 3387 | |
ysr@777 | 3388 | _region_limit = limit; |
ysr@777 | 3389 | } |
ysr@777 | 3390 | |
ysr@777 | 3391 | void CMTask::giveup_current_region() { |
tonyp@1458 | 3392 | assert(_curr_region != NULL, "invariant"); |
tonyp@2973 | 3393 | if (_cm->verbose_low()) { |
ysr@777 | 3394 | gclog_or_tty->print_cr("[%d] giving up region "PTR_FORMAT, |
ysr@777 | 3395 | _task_id, _curr_region); |
tonyp@2973 | 3396 | } |
ysr@777 | 3397 | clear_region_fields(); |
ysr@777 | 3398 | } |
ysr@777 | 3399 | |
ysr@777 | 3400 | void CMTask::clear_region_fields() { |
ysr@777 | 3401 | // Values for these three fields that indicate that we're not |
ysr@777 | 3402 | // holding on to a region. |
ysr@777 | 3403 | _curr_region = NULL; |
ysr@777 | 3404 | _finger = NULL; |
ysr@777 | 3405 | _region_limit = NULL; |
ysr@777 | 3406 | |
ysr@777 | 3407 | _region_finger = NULL; |
ysr@777 | 3408 | } |
ysr@777 | 3409 | |
tonyp@2968 | 3410 | void CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) { |
tonyp@2968 | 3411 | if (cm_oop_closure == NULL) { |
tonyp@2968 | 3412 | assert(_cm_oop_closure != NULL, "invariant"); |
tonyp@2968 | 3413 | } else { |
tonyp@2968 | 3414 | assert(_cm_oop_closure == NULL, "invariant"); |
tonyp@2968 | 3415 | } |
tonyp@2968 | 3416 | _cm_oop_closure = cm_oop_closure; |
tonyp@2968 | 3417 | } |
tonyp@2968 | 3418 | |
ysr@777 | 3419 | void CMTask::reset(CMBitMap* nextMarkBitMap) { |
tonyp@1458 | 3420 | guarantee(nextMarkBitMap != NULL, "invariant"); |
ysr@777 | 3421 | |
tonyp@2973 | 3422 | if (_cm->verbose_low()) { |
ysr@777 | 3423 | gclog_or_tty->print_cr("[%d] resetting", _task_id); |
tonyp@2973 | 3424 | } |
ysr@777 | 3425 | |
ysr@777 | 3426 | _nextMarkBitMap = nextMarkBitMap; |
ysr@777 | 3427 | clear_region_fields(); |
johnc@2240 | 3428 | assert(_aborted_region.is_empty(), "should have been cleared"); |
ysr@777 | 3429 | |
ysr@777 | 3430 | _calls = 0; |
ysr@777 | 3431 | _elapsed_time_ms = 0.0; |
ysr@777 | 3432 | _termination_time_ms = 0.0; |
ysr@777 | 3433 | _termination_start_time_ms = 0.0; |
ysr@777 | 3434 | |
ysr@777 | 3435 | #if _MARKING_STATS_ |
ysr@777 | 3436 | _local_pushes = 0; |
ysr@777 | 3437 | _local_pops = 0; |
ysr@777 | 3438 | _local_max_size = 0; |
ysr@777 | 3439 | _objs_scanned = 0; |
ysr@777 | 3440 | _global_pushes = 0; |
ysr@777 | 3441 | _global_pops = 0; |
ysr@777 | 3442 | _global_max_size = 0; |
ysr@777 | 3443 | _global_transfers_to = 0; |
ysr@777 | 3444 | _global_transfers_from = 0; |
ysr@777 | 3445 | _region_stack_pops = 0; |
ysr@777 | 3446 | _regions_claimed = 0; |
ysr@777 | 3447 | _objs_found_on_bitmap = 0; |
ysr@777 | 3448 | _satb_buffers_processed = 0; |
ysr@777 | 3449 | _steal_attempts = 0; |
ysr@777 | 3450 | _steals = 0; |
ysr@777 | 3451 | _aborted = 0; |
ysr@777 | 3452 | _aborted_overflow = 0; |
ysr@777 | 3453 | _aborted_cm_aborted = 0; |
ysr@777 | 3454 | _aborted_yield = 0; |
ysr@777 | 3455 | _aborted_timed_out = 0; |
ysr@777 | 3456 | _aborted_satb = 0; |
ysr@777 | 3457 | _aborted_termination = 0; |
ysr@777 | 3458 | #endif // _MARKING_STATS_ |
ysr@777 | 3459 | } |
ysr@777 | 3460 | |
ysr@777 | 3461 | bool CMTask::should_exit_termination() { |
ysr@777 | 3462 | regular_clock_call(); |
ysr@777 | 3463 | // This is called when we are in the termination protocol. We should |
ysr@777 | 3464 | // quit if, for some reason, this task wants to abort or the global |
ysr@777 | 3465 | // stack is not empty (this means that we can get work from it). |
ysr@777 | 3466 | return !_cm->mark_stack_empty() || has_aborted(); |
ysr@777 | 3467 | } |
ysr@777 | 3468 | |
ysr@777 | 3469 | void CMTask::reached_limit() { |
tonyp@1458 | 3470 | assert(_words_scanned >= _words_scanned_limit || |
tonyp@1458 | 3471 | _refs_reached >= _refs_reached_limit , |
tonyp@1458 | 3472 | "shouldn't have been called otherwise"); |
ysr@777 | 3473 | regular_clock_call(); |
ysr@777 | 3474 | } |
ysr@777 | 3475 | |
ysr@777 | 3476 | void CMTask::regular_clock_call() { |
tonyp@2973 | 3477 | if (has_aborted()) return; |
ysr@777 | 3478 | |
ysr@777 | 3479 | // First, we need to recalculate the words scanned and refs reached |
ysr@777 | 3480 | // limits for the next clock call. |
ysr@777 | 3481 | recalculate_limits(); |
ysr@777 | 3482 | |
ysr@777 | 3483 | // During the regular clock call we do the following |
ysr@777 | 3484 | |
ysr@777 | 3485 | // (1) If an overflow has been flagged, then we abort. |
ysr@777 | 3486 | if (_cm->has_overflown()) { |
ysr@777 | 3487 | set_has_aborted(); |
ysr@777 | 3488 | return; |
ysr@777 | 3489 | } |
ysr@777 | 3490 | |
ysr@777 | 3491 | // If we are not concurrent (i.e. we're doing remark) we don't need |
ysr@777 | 3492 | // to check anything else. The other steps are only needed during |
ysr@777 | 3493 | // the concurrent marking phase. |
tonyp@2973 | 3494 | if (!concurrent()) return; |
ysr@777 | 3495 | |
ysr@777 | 3496 |   // (2) If marking has been aborted for a Full GC, then we also abort.
ysr@777 | 3497 | if (_cm->has_aborted()) { |
ysr@777 | 3498 | set_has_aborted(); |
ysr@777 | 3499 | statsOnly( ++_aborted_cm_aborted ); |
ysr@777 | 3500 | return; |
ysr@777 | 3501 | } |
ysr@777 | 3502 | |
ysr@777 | 3503 | double curr_time_ms = os::elapsedVTime() * 1000.0; |
ysr@777 | 3504 | |
ysr@777 | 3505 | // (3) If marking stats are enabled, then we update the step history. |
ysr@777 | 3506 | #if _MARKING_STATS_ |
tonyp@2973 | 3507 | if (_words_scanned >= _words_scanned_limit) { |
ysr@777 | 3508 | ++_clock_due_to_scanning; |
tonyp@2973 | 3509 | } |
tonyp@2973 | 3510 | if (_refs_reached >= _refs_reached_limit) { |
ysr@777 | 3511 | ++_clock_due_to_marking; |
tonyp@2973 | 3512 | } |
ysr@777 | 3513 | |
ysr@777 | 3514 | double last_interval_ms = curr_time_ms - _interval_start_time_ms; |
ysr@777 | 3515 | _interval_start_time_ms = curr_time_ms; |
ysr@777 | 3516 | _all_clock_intervals_ms.add(last_interval_ms); |
ysr@777 | 3517 | |
ysr@777 | 3518 | if (_cm->verbose_medium()) { |
tonyp@2973 | 3519 | gclog_or_tty->print_cr("[%d] regular clock, interval = %1.2lfms, " |
tonyp@2973 | 3520 | "scanned = %d%s, refs reached = %d%s", |
tonyp@2973 | 3521 | _task_id, last_interval_ms, |
tonyp@2973 | 3522 | _words_scanned, |
tonyp@2973 | 3523 | (_words_scanned >= _words_scanned_limit) ? " (*)" : "", |
tonyp@2973 | 3524 | _refs_reached, |
tonyp@2973 | 3525 | (_refs_reached >= _refs_reached_limit) ? " (*)" : ""); |
ysr@777 | 3526 | } |
ysr@777 | 3527 | #endif // _MARKING_STATS_ |
ysr@777 | 3528 | |
ysr@777 | 3529 | // (4) We check whether we should yield. If we have to, then we abort. |
ysr@777 | 3530 | if (_cm->should_yield()) { |
ysr@777 | 3531 | // We should yield. To do this we abort the task. The caller is |
ysr@777 | 3532 | // responsible for yielding. |
ysr@777 | 3533 | set_has_aborted(); |
ysr@777 | 3534 | statsOnly( ++_aborted_yield ); |
ysr@777 | 3535 | return; |
ysr@777 | 3536 | } |
ysr@777 | 3537 | |
ysr@777 | 3538 | // (5) We check whether we've reached our time quota. If we have, |
ysr@777 | 3539 | // then we abort. |
ysr@777 | 3540 | double elapsed_time_ms = curr_time_ms - _start_time_ms; |
ysr@777 | 3541 | if (elapsed_time_ms > _time_target_ms) { |
ysr@777 | 3542 | set_has_aborted(); |
johnc@2494 | 3543 | _has_timed_out = true; |
ysr@777 | 3544 | statsOnly( ++_aborted_timed_out ); |
ysr@777 | 3545 | return; |
ysr@777 | 3546 | } |
ysr@777 | 3547 | |
ysr@777 | 3548 |   // (6) Finally, we check whether there are enough completed SATB
ysr@777 | 3549 | // buffers available for processing. If there are, we abort. |
ysr@777 | 3550 | SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); |
ysr@777 | 3551 | if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) { |
tonyp@2973 | 3552 | if (_cm->verbose_low()) { |
ysr@777 | 3553 | gclog_or_tty->print_cr("[%d] aborting to deal with pending SATB buffers", |
ysr@777 | 3554 | _task_id); |
tonyp@2973 | 3555 | } |
ysr@777 | 3556 |     // We do need to process SATB buffers, so we'll abort and
ysr@777 | 3557 |     // restart the marking task to do so.
ysr@777 | 3558 | set_has_aborted(); |
ysr@777 | 3559 | statsOnly( ++_aborted_satb ); |
ysr@777 | 3560 | return; |
ysr@777 | 3561 | } |
ysr@777 | 3562 | } |
ysr@777 | 3563 | |
ysr@777 | 3564 | void CMTask::recalculate_limits() { |
ysr@777 | 3565 | _real_words_scanned_limit = _words_scanned + words_scanned_period; |
ysr@777 | 3566 | _words_scanned_limit = _real_words_scanned_limit; |
ysr@777 | 3567 | |
ysr@777 | 3568 | _real_refs_reached_limit = _refs_reached + refs_reached_period; |
ysr@777 | 3569 | _refs_reached_limit = _real_refs_reached_limit; |
ysr@777 | 3570 | } |
ysr@777 | 3571 | |
ysr@777 | 3572 | void CMTask::decrease_limits() { |
ysr@777 | 3573 | // This is called when we believe that we're going to do an infrequent |
ysr@777 | 3574 |   // operation which will increase the per-byte scanning cost (i.e. move
ysr@777 | 3575 | // entries to/from the global stack). It basically tries to decrease the |
ysr@777 | 3576 | // scanning limit so that the clock is called earlier. |
ysr@777 | 3577 | |
tonyp@2973 | 3578 | if (_cm->verbose_medium()) { |
ysr@777 | 3579 | gclog_or_tty->print_cr("[%d] decreasing limits", _task_id); |
tonyp@2973 | 3580 | } |
ysr@777 | 3581 | |
ysr@777 | 3582 | _words_scanned_limit = _real_words_scanned_limit - |
ysr@777 | 3583 | 3 * words_scanned_period / 4; |
ysr@777 | 3584 | _refs_reached_limit = _real_refs_reached_limit - |
ysr@777 | 3585 | 3 * refs_reached_period / 4; |
ysr@777 | 3586 | } |
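
// A worked example of the effect (illustrative only; the actual period
// constants live in CMTask and may differ): if words_scanned_period
// were 12*1024 words and the limits had just been recalculated,
// decrease_limits() would pull _words_scanned_limit back by 3/4 of the
// period, i.e.
//   12288 - 3 * 12288 / 4 = 3072
// words beyond _words_scanned, so the next clock call fires after
// roughly a quarter of the usual amount of scanning work.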
ysr@777 | 3587 | |
ysr@777 | 3588 | void CMTask::move_entries_to_global_stack() { |
ysr@777 | 3589 | // local array where we'll store the entries that will be popped |
ysr@777 | 3590 | // from the local queue |
ysr@777 | 3591 | oop buffer[global_stack_transfer_size]; |
ysr@777 | 3592 | |
ysr@777 | 3593 | int n = 0; |
ysr@777 | 3594 | oop obj; |
ysr@777 | 3595 | while (n < global_stack_transfer_size && _task_queue->pop_local(obj)) { |
ysr@777 | 3596 | buffer[n] = obj; |
ysr@777 | 3597 | ++n; |
ysr@777 | 3598 | } |
ysr@777 | 3599 | |
ysr@777 | 3600 | if (n > 0) { |
ysr@777 | 3601 | // we popped at least one entry from the local queue |
ysr@777 | 3602 | |
ysr@777 | 3603 | statsOnly( ++_global_transfers_to; _local_pops += n ); |
ysr@777 | 3604 | |
ysr@777 | 3605 | if (!_cm->mark_stack_push(buffer, n)) { |
tonyp@2973 | 3606 | if (_cm->verbose_low()) { |
tonyp@2973 | 3607 | gclog_or_tty->print_cr("[%d] aborting due to global stack overflow", |
tonyp@2973 | 3608 | _task_id); |
tonyp@2973 | 3609 | } |
ysr@777 | 3610 | set_has_aborted(); |
ysr@777 | 3611 | } else { |
ysr@777 | 3612 | // the transfer was successful |
ysr@777 | 3613 | |
tonyp@2973 | 3614 | if (_cm->verbose_medium()) { |
ysr@777 | 3615 | gclog_or_tty->print_cr("[%d] pushed %d entries to the global stack", |
ysr@777 | 3616 | _task_id, n); |
tonyp@2973 | 3617 | } |
ysr@777 | 3618 | statsOnly( int tmp_size = _cm->mark_stack_size(); |
tonyp@2973 | 3619 | if (tmp_size > _global_max_size) { |
ysr@777 | 3620 | _global_max_size = tmp_size; |
tonyp@2973 | 3621 | } |
ysr@777 | 3622 | _global_pushes += n ); |
ysr@777 | 3623 | } |
ysr@777 | 3624 | } |
ysr@777 | 3625 | |
ysr@777 | 3626 | // this operation was quite expensive, so decrease the limits |
ysr@777 | 3627 | decrease_limits(); |
ysr@777 | 3628 | } |
ysr@777 | 3629 | |
ysr@777 | 3630 | void CMTask::get_entries_from_global_stack() { |
ysr@777 | 3631 | // local array where we'll store the entries that will be popped |
ysr@777 | 3632 | // from the global stack. |
ysr@777 | 3633 | oop buffer[global_stack_transfer_size]; |
ysr@777 | 3634 | int n; |
ysr@777 | 3635 | _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n); |
tonyp@1458 | 3636 | assert(n <= global_stack_transfer_size, |
tonyp@1458 | 3637 | "we should not pop more than the given limit"); |
ysr@777 | 3638 | if (n > 0) { |
ysr@777 | 3639 | // yes, we did actually pop at least one entry |
ysr@777 | 3640 | |
ysr@777 | 3641 | statsOnly( ++_global_transfers_from; _global_pops += n ); |
tonyp@2973 | 3642 | if (_cm->verbose_medium()) { |
ysr@777 | 3643 | gclog_or_tty->print_cr("[%d] popped %d entries from the global stack", |
ysr@777 | 3644 | _task_id, n); |
tonyp@2973 | 3645 | } |
ysr@777 | 3646 | for (int i = 0; i < n; ++i) { |
ysr@777 | 3647 | bool success = _task_queue->push(buffer[i]); |
ysr@777 | 3648 | // We only call this when the local queue is empty or under a |
ysr@777 | 3649 | // given target limit. So, we do not expect this push to fail. |
tonyp@1458 | 3650 | assert(success, "invariant"); |
ysr@777 | 3651 | } |
ysr@777 | 3652 | |
ysr@777 | 3653 | statsOnly( int tmp_size = _task_queue->size(); |
tonyp@2973 | 3654 | if (tmp_size > _local_max_size) { |
ysr@777 | 3655 | _local_max_size = tmp_size; |
tonyp@2973 | 3656 | } |
ysr@777 | 3657 | _local_pushes += n ); |
ysr@777 | 3658 | } |
ysr@777 | 3659 | |
ysr@777 | 3660 | // this operation was quite expensive, so decrease the limits |
ysr@777 | 3661 | decrease_limits(); |
ysr@777 | 3662 | } |
ysr@777 | 3663 | |
ysr@777 | 3664 | void CMTask::drain_local_queue(bool partially) { |
tonyp@2973 | 3665 | if (has_aborted()) return; |
ysr@777 | 3666 | |
ysr@777 | 3667 |   // Decide what the target size is, depending on whether we're going to
ysr@777 | 3668 | // drain it partially (so that other tasks can steal if they run out |
ysr@777 | 3669 | // of things to do) or totally (at the very end). |
ysr@777 | 3670 | size_t target_size; |
tonyp@2973 | 3671 | if (partially) { |
ysr@777 | 3672 | target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize); |
tonyp@2973 | 3673 | } else { |
ysr@777 | 3674 | target_size = 0; |
tonyp@2973 | 3675 | } |
ysr@777 | 3676 | |
ysr@777 | 3677 | if (_task_queue->size() > target_size) { |
tonyp@2973 | 3678 | if (_cm->verbose_high()) { |
ysr@777 | 3679 |       gclog_or_tty->print_cr("[%d] draining local queue, target size = "
ysr@777 | 3680 |                              SIZE_FORMAT, _task_id, target_size);
tonyp@2973 | 3681 | } |
ysr@777 | 3682 | |
ysr@777 | 3683 | oop obj; |
ysr@777 | 3684 | bool ret = _task_queue->pop_local(obj); |
ysr@777 | 3685 | while (ret) { |
ysr@777 | 3686 | statsOnly( ++_local_pops ); |
ysr@777 | 3687 | |
tonyp@2973 | 3688 | if (_cm->verbose_high()) { |
ysr@777 | 3689 | gclog_or_tty->print_cr("[%d] popped "PTR_FORMAT, _task_id, |
ysr@777 | 3690 | (void*) obj); |
tonyp@2973 | 3691 | } |
ysr@777 | 3692 | |
tonyp@1458 | 3693 | assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" ); |
tonyp@2643 | 3694 | assert(!_g1h->is_on_master_free_list( |
tonyp@2472 | 3695 | _g1h->heap_region_containing((HeapWord*) obj)), "invariant"); |
ysr@777 | 3696 | |
ysr@777 | 3697 | scan_object(obj); |
ysr@777 | 3698 | |
tonyp@2973 | 3699 | if (_task_queue->size() <= target_size || has_aborted()) { |
ysr@777 | 3700 | ret = false; |
tonyp@2973 | 3701 | } else { |
ysr@777 | 3702 | ret = _task_queue->pop_local(obj); |
tonyp@2973 | 3703 | } |
ysr@777 | 3704 | } |
ysr@777 | 3705 | |
tonyp@2973 | 3706 | if (_cm->verbose_high()) { |
ysr@777 | 3707 | gclog_or_tty->print_cr("[%d] drained local queue, size = %d", |
ysr@777 | 3708 | _task_id, _task_queue->size()); |
tonyp@2973 | 3709 | } |
ysr@777 | 3710 | } |
ysr@777 | 3711 | } |
ysr@777 | 3712 | |
ysr@777 | 3713 | void CMTask::drain_global_stack(bool partially) { |
tonyp@2973 | 3714 | if (has_aborted()) return; |
ysr@777 | 3715 | |
ysr@777 | 3716 | // We have a policy to drain the local queue before we attempt to |
ysr@777 | 3717 | // drain the global stack. |
tonyp@1458 | 3718 | assert(partially || _task_queue->size() == 0, "invariant"); |
ysr@777 | 3719 | |
ysr@777 | 3720 |   // Decide what the target size is, depending on whether we're going to
ysr@777 | 3721 | // drain it partially (so that other tasks can steal if they run out |
ysr@777 | 3722 | // of things to do) or totally (at the very end). Notice that, |
ysr@777 | 3723 | // because we move entries from the global stack in chunks or |
ysr@777 | 3724 | // because another task might be doing the same, we might in fact |
ysr@777 | 3725 | // drop below the target. But, this is not a problem. |
ysr@777 | 3726 | size_t target_size; |
tonyp@2973 | 3727 | if (partially) { |
ysr@777 | 3728 | target_size = _cm->partial_mark_stack_size_target(); |
tonyp@2973 | 3729 | } else { |
ysr@777 | 3730 | target_size = 0; |
tonyp@2973 | 3731 | } |
ysr@777 | 3732 | |
ysr@777 | 3733 | if (_cm->mark_stack_size() > target_size) { |
tonyp@2973 | 3734 | if (_cm->verbose_low()) { |
ysr@777 | 3735 |       gclog_or_tty->print_cr("[%d] draining global stack, target size = "
ysr@777 | 3736 |                              SIZE_FORMAT, _task_id, target_size);
tonyp@2973 | 3737 | } |
ysr@777 | 3738 | |
ysr@777 | 3739 | while (!has_aborted() && _cm->mark_stack_size() > target_size) { |
ysr@777 | 3740 | get_entries_from_global_stack(); |
ysr@777 | 3741 | drain_local_queue(partially); |
ysr@777 | 3742 | } |
ysr@777 | 3743 | |
tonyp@2973 | 3744 | if (_cm->verbose_low()) { |
ysr@777 | 3745 | gclog_or_tty->print_cr("[%d] drained global stack, size = %d", |
ysr@777 | 3746 | _task_id, _cm->mark_stack_size()); |
tonyp@2973 | 3747 | } |
ysr@777 | 3748 | } |
ysr@777 | 3749 | } |
ysr@777 | 3750 | |
ysr@777 | 3751 | // The SATB queue code makes several assumptions about whether the par
ysr@777 | 3752 | // or non-par versions of its methods are called. This is why some of
ysr@777 | 3753 | // the code is replicated. We should really get rid of the
ysr@777 | 3754 | // single-threaded version of the code to simplify things.
ysr@777 | 3755 | void CMTask::drain_satb_buffers() { |
tonyp@2973 | 3756 | if (has_aborted()) return; |
ysr@777 | 3757 | |
ysr@777 | 3758 | // We set this so that the regular clock knows that we're in the |
ysr@777 | 3759 | // middle of draining buffers and doesn't set the abort flag when it |
ysr@777 | 3760 | // notices that SATB buffers are available for draining. It'd be |
ysr@777 | 3761 |   // very counterproductive if it did that. :-)
ysr@777 | 3762 | _draining_satb_buffers = true; |
ysr@777 | 3763 | |
ysr@777 | 3764 | CMObjectClosure oc(this); |
ysr@777 | 3765 | SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); |
tonyp@2973 | 3766 | if (G1CollectedHeap::use_parallel_gc_threads()) { |
ysr@777 | 3767 | satb_mq_set.set_par_closure(_task_id, &oc); |
tonyp@2973 | 3768 | } else { |
ysr@777 | 3769 | satb_mq_set.set_closure(&oc); |
tonyp@2973 | 3770 | } |
ysr@777 | 3771 | |
ysr@777 | 3772 | // This keeps claiming and applying the closure to completed buffers |
ysr@777 | 3773 | // until we run out of buffers or we need to abort. |
jmasa@2188 | 3774 | if (G1CollectedHeap::use_parallel_gc_threads()) { |
ysr@777 | 3775 | while (!has_aborted() && |
ysr@777 | 3776 | satb_mq_set.par_apply_closure_to_completed_buffer(_task_id)) { |
tonyp@2973 | 3777 | if (_cm->verbose_medium()) { |
ysr@777 | 3778 | gclog_or_tty->print_cr("[%d] processed an SATB buffer", _task_id); |
tonyp@2973 | 3779 | } |
ysr@777 | 3780 | statsOnly( ++_satb_buffers_processed ); |
ysr@777 | 3781 | regular_clock_call(); |
ysr@777 | 3782 | } |
ysr@777 | 3783 | } else { |
ysr@777 | 3784 | while (!has_aborted() && |
ysr@777 | 3785 | satb_mq_set.apply_closure_to_completed_buffer()) { |
tonyp@2973 | 3786 | if (_cm->verbose_medium()) { |
ysr@777 | 3787 | gclog_or_tty->print_cr("[%d] processed an SATB buffer", _task_id); |
tonyp@2973 | 3788 | } |
ysr@777 | 3789 | statsOnly( ++_satb_buffers_processed ); |
ysr@777 | 3790 | regular_clock_call(); |
ysr@777 | 3791 | } |
ysr@777 | 3792 | } |
ysr@777 | 3793 | |
ysr@777 | 3794 | if (!concurrent() && !has_aborted()) { |
ysr@777 | 3795 | // We should only do this during remark. |
tonyp@2973 | 3796 | if (G1CollectedHeap::use_parallel_gc_threads()) { |
ysr@777 | 3797 | satb_mq_set.par_iterate_closure_all_threads(_task_id); |
tonyp@2973 | 3798 | } else { |
ysr@777 | 3799 | satb_mq_set.iterate_closure_all_threads(); |
tonyp@2973 | 3800 | } |
ysr@777 | 3801 | } |
ysr@777 | 3802 | |
ysr@777 | 3803 | _draining_satb_buffers = false; |
ysr@777 | 3804 | |
tonyp@1458 | 3805 | assert(has_aborted() || |
tonyp@1458 | 3806 | concurrent() || |
tonyp@1458 | 3807 | satb_mq_set.completed_buffers_num() == 0, "invariant"); |
ysr@777 | 3808 | |
tonyp@2973 | 3809 | if (G1CollectedHeap::use_parallel_gc_threads()) { |
ysr@777 | 3810 | satb_mq_set.set_par_closure(_task_id, NULL); |
tonyp@2973 | 3811 | } else { |
ysr@777 | 3812 | satb_mq_set.set_closure(NULL); |
tonyp@2973 | 3813 | } |
ysr@777 | 3814 | |
ysr@777 | 3815 | // again, this was a potentially expensive operation, decrease the |
ysr@777 | 3816 | // limits to get the regular clock call early |
ysr@777 | 3817 | decrease_limits(); |
ysr@777 | 3818 | } |
ysr@777 | 3819 | |
ysr@777 | 3820 | void CMTask::drain_region_stack(BitMapClosure* bc) { |
tonyp@2973 | 3821 | if (has_aborted()) return; |
ysr@777 | 3822 | |
tonyp@1458 | 3823 | assert(_region_finger == NULL, |
tonyp@1458 | 3824 | "it should be NULL when we're not scanning a region"); |
ysr@777 | 3825 | |
johnc@2190 | 3826 | if (!_cm->region_stack_empty() || !_aborted_region.is_empty()) { |
tonyp@2973 | 3827 | if (_cm->verbose_low()) { |
ysr@777 | 3828 | gclog_or_tty->print_cr("[%d] draining region stack, size = %d", |
ysr@777 | 3829 | _task_id, _cm->region_stack_size()); |
tonyp@2973 | 3830 | } |
ysr@777 | 3831 | |
johnc@2190 | 3832 | MemRegion mr; |
johnc@2190 | 3833 | |
johnc@2190 | 3834 | if (!_aborted_region.is_empty()) { |
johnc@2190 | 3835 | mr = _aborted_region; |
johnc@2190 | 3836 | _aborted_region = MemRegion(); |
johnc@2190 | 3837 | |
tonyp@2973 | 3838 | if (_cm->verbose_low()) { |
tonyp@2973 | 3839 | gclog_or_tty->print_cr("[%d] scanning aborted region " |
tonyp@2973 | 3840 | "[ " PTR_FORMAT ", " PTR_FORMAT " )", |
tonyp@2973 | 3841 | _task_id, mr.start(), mr.end()); |
tonyp@2973 | 3842 | } |
johnc@2190 | 3843 | } else { |
johnc@2190 | 3844 | mr = _cm->region_stack_pop_lock_free(); |
johnc@2190 | 3845 | // it returns MemRegion() if the pop fails |
johnc@2190 | 3846 | statsOnly(if (mr.start() != NULL) ++_region_stack_pops ); |
johnc@2190 | 3847 | } |
ysr@777 | 3848 | |
ysr@777 | 3849 | while (mr.start() != NULL) { |
tonyp@2973 | 3850 | if (_cm->verbose_medium()) { |
ysr@777 | 3851 | gclog_or_tty->print_cr("[%d] we are scanning region " |
ysr@777 | 3852 | "["PTR_FORMAT", "PTR_FORMAT")", |
ysr@777 | 3853 | _task_id, mr.start(), mr.end()); |
tonyp@2973 | 3854 | } |
johnc@2190 | 3855 | |
tonyp@1458 | 3856 | assert(mr.end() <= _cm->finger(), |
tonyp@1458 | 3857 | "otherwise the region shouldn't be on the stack"); |
ysr@777 | 3858 | assert(!mr.is_empty(), "Only non-empty regions live on the region stack"); |
ysr@777 | 3859 | if (_nextMarkBitMap->iterate(bc, mr)) { |
tonyp@1458 | 3860 | assert(!has_aborted(), |
tonyp@1458 | 3861 | "cannot abort the task without aborting the bitmap iteration"); |
ysr@777 | 3862 | |
ysr@777 | 3863 | // We finished iterating over the region without aborting. |
ysr@777 | 3864 | regular_clock_call(); |
tonyp@2973 | 3865 | if (has_aborted()) { |
ysr@777 | 3866 | mr = MemRegion(); |
tonyp@2973 | 3867 | } else { |
johnc@2190 | 3868 | mr = _cm->region_stack_pop_lock_free(); |
ysr@777 | 3869 | // it returns MemRegion() if the pop fails |
ysr@777 | 3870 | statsOnly(if (mr.start() != NULL) ++_region_stack_pops ); |
ysr@777 | 3871 | } |
ysr@777 | 3872 | } else { |
tonyp@1458 | 3873 | assert(has_aborted(), "currently the only way to do so"); |
ysr@777 | 3874 | |
ysr@777 | 3875 | // The only way to abort the bitmap iteration is to return |
ysr@777 | 3876 | // false from the do_bit() method. However, inside the |
ysr@777 | 3877 | // do_bit() method we move the _region_finger to point to the |
ysr@777 | 3878 | // object currently being looked at. So, if we bail out, we |
ysr@777 | 3879 | // have definitely set _region_finger to something non-null. |
tonyp@1458 | 3880 | assert(_region_finger != NULL, "invariant"); |
ysr@777 | 3881 | |
johnc@2190 | 3882 | // Make sure that any previously aborted region has been |
johnc@2190 | 3883 | // cleared. |
johnc@2190 | 3884 | assert(_aborted_region.is_empty(), "aborted region not cleared"); |
johnc@2190 | 3885 | |
ysr@777 | 3886 | // The iteration was actually aborted. So now _region_finger |
ysr@777 | 3887 | // points to the address of the object we last scanned. If we |
ysr@777 | 3888 | // leave it there, when we restart this task, we will rescan |
ysr@777 | 3889 | // the object. It is easy to avoid this. We move the finger by |
ysr@777 | 3890 | // enough to point to the next possible object header (the |
ysr@777 | 3891 | // bitmap knows by how much we need to move it as it knows its |
ysr@777 | 3892 | // granularity). |
ysr@777 | 3893 | MemRegion newRegion = |
ysr@777 | 3894 | MemRegion(_nextMarkBitMap->nextWord(_region_finger), mr.end()); |
ysr@777 | 3895 | |
ysr@777 | 3896 | if (!newRegion.is_empty()) { |
ysr@777 | 3897 | if (_cm->verbose_low()) { |
johnc@2190 | 3898 |           gclog_or_tty->print_cr("[%d] recording unscanned region "
johnc@2190 | 3899 | "[" PTR_FORMAT "," PTR_FORMAT ") in CMTask", |
ysr@777 | 3900 | _task_id, |
ysr@777 | 3901 | newRegion.start(), newRegion.end()); |
ysr@777 | 3902 | } |
johnc@2190 | 3903 | // Now record the part of the region we didn't scan to |
johnc@2190 | 3904 | // make sure this task scans it later. |
johnc@2190 | 3905 | _aborted_region = newRegion; |
ysr@777 | 3906 | } |
ysr@777 | 3907 | // break from while |
ysr@777 | 3908 | mr = MemRegion(); |
ysr@777 | 3909 | } |
ysr@777 | 3910 | _region_finger = NULL; |
ysr@777 | 3911 | } |
ysr@777 | 3912 | |
tonyp@2973 | 3913 | if (_cm->verbose_low()) { |
ysr@777 | 3914 | gclog_or_tty->print_cr("[%d] drained region stack, size = %d", |
ysr@777 | 3915 | _task_id, _cm->region_stack_size()); |
tonyp@2973 | 3916 | } |
ysr@777 | 3917 | } |
ysr@777 | 3918 | } |
ysr@777 | 3919 | |
ysr@777 | 3920 | void CMTask::print_stats() { |
ysr@777 | 3921 | gclog_or_tty->print_cr("Marking Stats, task = %d, calls = %d", |
ysr@777 | 3922 | _task_id, _calls); |
ysr@777 | 3923 | gclog_or_tty->print_cr(" Elapsed time = %1.2lfms, Termination time = %1.2lfms", |
ysr@777 | 3924 | _elapsed_time_ms, _termination_time_ms); |
ysr@777 | 3925 | gclog_or_tty->print_cr(" Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms", |
ysr@777 | 3926 | _step_times_ms.num(), _step_times_ms.avg(), |
ysr@777 | 3927 | _step_times_ms.sd()); |
ysr@777 | 3928 | gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms", |
ysr@777 | 3929 | _step_times_ms.maximum(), _step_times_ms.sum()); |
ysr@777 | 3930 | |
ysr@777 | 3931 | #if _MARKING_STATS_ |
ysr@777 | 3932 | gclog_or_tty->print_cr(" Clock Intervals (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms", |
ysr@777 | 3933 | _all_clock_intervals_ms.num(), _all_clock_intervals_ms.avg(), |
ysr@777 | 3934 | _all_clock_intervals_ms.sd()); |
ysr@777 | 3935 | gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms", |
ysr@777 | 3936 | _all_clock_intervals_ms.maximum(), |
ysr@777 | 3937 | _all_clock_intervals_ms.sum()); |
ysr@777 | 3938 | gclog_or_tty->print_cr(" Clock Causes (cum): scanning = %d, marking = %d", |
ysr@777 | 3939 | _clock_due_to_scanning, _clock_due_to_marking); |
ysr@777 | 3940 | gclog_or_tty->print_cr(" Objects: scanned = %d, found on the bitmap = %d", |
ysr@777 | 3941 | _objs_scanned, _objs_found_on_bitmap); |
ysr@777 | 3942 | gclog_or_tty->print_cr(" Local Queue: pushes = %d, pops = %d, max size = %d", |
ysr@777 | 3943 | _local_pushes, _local_pops, _local_max_size); |
ysr@777 | 3944 | gclog_or_tty->print_cr(" Global Stack: pushes = %d, pops = %d, max size = %d", |
ysr@777 | 3945 | _global_pushes, _global_pops, _global_max_size); |
ysr@777 | 3946 | gclog_or_tty->print_cr(" transfers to = %d, transfers from = %d", |
ysr@777 | 3947 | _global_transfers_to,_global_transfers_from); |
ysr@777 | 3948 | gclog_or_tty->print_cr(" Regions: claimed = %d, Region Stack: pops = %d", |
ysr@777 | 3949 | _regions_claimed, _region_stack_pops); |
ysr@777 | 3950 | gclog_or_tty->print_cr(" SATB buffers: processed = %d", _satb_buffers_processed); |
ysr@777 | 3951 | gclog_or_tty->print_cr(" Steals: attempts = %d, successes = %d", |
ysr@777 | 3952 | _steal_attempts, _steals); |
ysr@777 | 3953 | gclog_or_tty->print_cr(" Aborted: %d, due to", _aborted); |
ysr@777 | 3954 | gclog_or_tty->print_cr(" overflow: %d, global abort: %d, yield: %d", |
ysr@777 | 3955 | _aborted_overflow, _aborted_cm_aborted, _aborted_yield); |
ysr@777 | 3956 | gclog_or_tty->print_cr(" time out: %d, SATB: %d, termination: %d", |
ysr@777 | 3957 | _aborted_timed_out, _aborted_satb, _aborted_termination); |
ysr@777 | 3958 | #endif // _MARKING_STATS_ |
ysr@777 | 3959 | } |
ysr@777 | 3960 | |
ysr@777 | 3961 | /***************************************************************************** |
ysr@777 | 3962 | |
ysr@777 | 3963 | The do_marking_step(time_target_ms) method is the building block |
ysr@777 | 3964 | of the parallel marking framework. It can be called in parallel |
ysr@777 | 3965 | with other invocations of do_marking_step() on different tasks |
ysr@777 | 3966 | (but only one per task, obviously) and concurrently with the |
ysr@777 | 3967 | mutator threads, or during remark, hence it eliminates the need |
ysr@777 | 3968 | for two versions of the code. When called during remark, it will |
ysr@777 | 3969 | pick up from where the task left off during the concurrent marking |
ysr@777 | 3970 | phase. Interestingly, tasks are also claimable during evacuation |
ysr@777 | 3971 |    pauses, since do_marking_step() ensures that it aborts before
ysr@777 | 3972 | it needs to yield. |
ysr@777 | 3973 | |
ysr@777 | 3974 |    The data structures that it uses to do marking work are the
ysr@777 | 3975 | following: |
ysr@777 | 3976 | |
ysr@777 | 3977 | (1) Marking Bitmap. If there are gray objects that appear only |
ysr@777 | 3978 | on the bitmap (this happens either when dealing with an overflow |
ysr@777 | 3979 | or when the initial marking phase has simply marked the roots |
ysr@777 | 3980 | and didn't push them on the stack), then tasks claim heap |
ysr@777 | 3981 | regions whose bitmap they then scan to find gray objects. A |
ysr@777 | 3982 | global finger indicates where the end of the last claimed region |
ysr@777 | 3983 | is. A local finger indicates how far into the region a task has |
ysr@777 | 3984 | scanned. The two fingers are used to determine how to gray an |
ysr@777 | 3985 | object (i.e. whether simply marking it is OK, as it will be |
ysr@777 | 3986 | visited by a task in the future, or whether it needs to be also |
ysr@777 | 3987 | pushed on a stack). |
ysr@777 | 3988 | |
ysr@777 | 3989 | (2) Local Queue. The local queue of the task which is accessed |
ysr@777 | 3990 | reasonably efficiently by the task. Other tasks can steal from |
ysr@777 | 3991 | it when they run out of work. Throughout the marking phase, a |
ysr@777 | 3992 | task attempts to keep its local queue short but not totally |
ysr@777 | 3993 | empty, so that entries are available for stealing by other |
ysr@777 | 3994 |    tasks. Only when there is no more work will a task totally
ysr@777 | 3995 | drain its local queue. |
ysr@777 | 3996 | |
ysr@777 | 3997 | (3) Global Mark Stack. This handles local queue overflow. During |
ysr@777 | 3998 | marking only sets of entries are moved between it and the local |
ysr@777 | 3999 |    queues, as access to it requires a mutex and more fine-grained
ysr@777 | 4000 |    interaction with it might cause contention. If it
ysr@777 | 4001 | overflows, then the marking phase should restart and iterate |
ysr@777 | 4002 | over the bitmap to identify gray objects. Throughout the marking |
ysr@777 | 4003 | phase, tasks attempt to keep the global mark stack at a small |
ysr@777 | 4004 | length but not totally empty, so that entries are available for |
ysr@777 | 4005 |    popping by other tasks. Only when there is no more work will
ysr@777 | 4006 |    tasks totally drain the global mark stack.
ysr@777 | 4007 | |
ysr@777 | 4008 | (4) Global Region Stack. Entries on it correspond to areas of |
ysr@777 | 4009 | the bitmap that need to be scanned since they contain gray |
ysr@777 | 4010 | objects. Pushes on the region stack only happen during |
ysr@777 | 4011 | evacuation pauses and typically correspond to areas covered by |
ysr@777 | 4012 |    GC LABs. If it overflows, then the marking phase should restart
ysr@777 | 4013 | and iterate over the bitmap to identify gray objects. Tasks will |
ysr@777 | 4014 | try to totally drain the region stack as soon as possible. |
ysr@777 | 4015 | |
ysr@777 | 4016 | (5) SATB Buffer Queue. This is where completed SATB buffers are |
ysr@777 | 4017 | made available. Buffers are regularly removed from this queue |
ysr@777 | 4018 | and scanned for roots, so that the queue doesn't get too |
ysr@777 | 4019 | long. During remark, all completed buffers are processed, as |
ysr@777 | 4020 |    well as the filled-in parts of any uncompleted buffers.
ysr@777 | 4021 | |
ysr@777 | 4022 | The do_marking_step() method tries to abort when the time target |
ysr@777 | 4023 | has been reached. There are a few other cases when the |
ysr@777 | 4024 | do_marking_step() method also aborts: |
ysr@777 | 4025 | |
ysr@777 | 4026 | (1) When the marking phase has been aborted (after a Full GC). |
ysr@777 | 4027 | |
ysr@777 | 4028 | (2) When a global overflow (either on the global stack or the |
ysr@777 | 4029 | region stack) has been triggered. Before the task aborts, it |
ysr@777 | 4030 | will actually sync up with the other tasks to ensure that all |
ysr@777 | 4031 | the marking data structures (local queues, stacks, fingers etc.) |
ysr@777 | 4032 | are re-initialised so that when do_marking_step() completes, |
ysr@777 | 4033 | the marking phase can immediately restart. |
ysr@777 | 4034 | |
ysr@777 | 4035 | (3) When enough completed SATB buffers are available. The |
ysr@777 | 4036 | do_marking_step() method only tries to drain SATB buffers right |
ysr@777 | 4037 | at the beginning. So, if enough buffers are available, the |
ysr@777 | 4038 | marking step aborts and the SATB buffers are processed at |
ysr@777 | 4039 | the beginning of the next invocation. |
ysr@777 | 4040 | |
ysr@777 | 4041 |    (4) To yield. When we have to yield, we abort and yield
ysr@777 | 4042 |    right at the end of do_marking_step(). This saves us from a lot
ysr@777 | 4043 |    of hassle as, by yielding, we might allow a Full GC. If this
ysr@777 | 4044 | happens then objects will be compacted underneath our feet, the |
ysr@777 | 4045 | heap might shrink, etc. We save checking for this by just |
ysr@777 | 4046 | aborting and doing the yield right at the end. |
ysr@777 | 4047 | |
ysr@777 | 4048 |    From the above it follows that the do_marking_step() method
ysr@777 | 4049 |    should be called in a loop, or at least regularly, until it
ysr@777 | 4049 |    completes (see the sketch that follows this comment block).
ysr@777 | 4050 | |
ysr@777 | 4051 | If a marking step completes without its has_aborted() flag being |
ysr@777 | 4052 | true, it means it has completed the current marking phase (and |
ysr@777 | 4053 | also all other marking tasks have done so and have all synced up). |
ysr@777 | 4054 | |
ysr@777 | 4055 | A method called regular_clock_call() is invoked "regularly" (in |
ysr@777 | 4056 |    sub-ms intervals) throughout marking. It is this clock method that
ysr@777 | 4057 | checks all the abort conditions which were mentioned above and |
ysr@777 | 4058 | decides when the task should abort. A work-based scheme is used to |
ysr@777 | 4059 | trigger this clock method: when the number of object words the |
ysr@777 | 4060 | marking phase has scanned or the number of references the marking |
ysr@777 | 4061 |    phase has visited reach a given limit. Additional invocations of
ysr@777 | 4062 |    the clock method have been planted in a few other strategic places
ysr@777 | 4063 | too. The initial reason for the clock method was to avoid calling |
ysr@777 | 4064 | vtime too regularly, as it is quite expensive. So, once it was in |
ysr@777 | 4065 | place, it was natural to piggy-back all the other conditions on it |
ysr@777 | 4066 | too and not constantly check them throughout the code. |
ysr@777 | 4067 | |
ysr@777 | 4068 | *****************************************************************************/ |
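
// A minimal sketch (illustrative only -- drive_marking_steps is a
// hypothetical helper, not part of this file) of the calling pattern
// described above: invoke do_marking_step() repeatedly until a step
// completes without aborting. The real callers (the concurrent mark
// worker threads and the remark task) also yield, sync up on the
// overflow barriers, or retire the task between iterations.
#if 0
static void drive_marking_steps(CMTask* task, double time_target_ms) {
  do {
    // Each step either finishes the marking phase or aborts due to
    // overflow, yielding, pending SATB buffers, or its time quota.
    task->do_marking_step(time_target_ms,
                          true /* do_stealing */,
                          true /* do_termination */);
  } while (task->has_aborted());
}
#endif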
ysr@777 | 4069 | |
johnc@2494 | 4070 | void CMTask::do_marking_step(double time_target_ms, |
johnc@2494 | 4071 | bool do_stealing, |
johnc@2494 | 4072 | bool do_termination) { |
tonyp@1458 | 4073 | assert(time_target_ms >= 1.0, "minimum granularity is 1ms"); |
tonyp@1458 | 4074 | assert(concurrent() == _cm->concurrent(), "they should be the same"); |
tonyp@1458 | 4075 | |
tonyp@1458 | 4076 | assert(concurrent() || _cm->region_stack_empty(), |
tonyp@1458 | 4077 | "the region stack should have been cleared before remark"); |
johnc@2190 | 4078 | assert(concurrent() || !_cm->has_aborted_regions(), |
johnc@2190 | 4079 | "aborted regions should have been cleared before remark"); |
tonyp@1458 | 4080 | assert(_region_finger == NULL, |
tonyp@1458 | 4081 | "this should be non-null only when a region is being scanned"); |
ysr@777 | 4082 | |
ysr@777 | 4083 | G1CollectorPolicy* g1_policy = _g1h->g1_policy(); |
tonyp@1458 | 4084 | assert(_task_queues != NULL, "invariant"); |
tonyp@1458 | 4085 | assert(_task_queue != NULL, "invariant"); |
tonyp@1458 | 4086 | assert(_task_queues->queue(_task_id) == _task_queue, "invariant"); |
tonyp@1458 | 4087 | |
tonyp@1458 | 4088 | assert(!_claimed, |
tonyp@1458 | 4089 | "only one thread should claim this task at any one time"); |
ysr@777 | 4090 | |
ysr@777 | 4091 |   // OK, this doesn't safeguard against all possible scenarios, as it is
ysr@777 | 4092 | // possible for two threads to set the _claimed flag at the same |
ysr@777 | 4093 | // time. But it is only for debugging purposes anyway and it will |
ysr@777 | 4094 | // catch most problems. |
ysr@777 | 4095 | _claimed = true; |
ysr@777 | 4096 | |
ysr@777 | 4097 | _start_time_ms = os::elapsedVTime() * 1000.0; |
ysr@777 | 4098 | statsOnly( _interval_start_time_ms = _start_time_ms ); |
ysr@777 | 4099 | |
ysr@777 | 4100 | double diff_prediction_ms = |
ysr@777 | 4101 | g1_policy->get_new_prediction(&_marking_step_diffs_ms); |
ysr@777 | 4102 | _time_target_ms = time_target_ms - diff_prediction_ms; |
ysr@777 | 4103 | |
ysr@777 | 4104 | // set up the variables that are used in the work-based scheme to |
ysr@777 | 4105 | // call the regular clock method |
ysr@777 | 4106 | _words_scanned = 0; |
ysr@777 | 4107 | _refs_reached = 0; |
ysr@777 | 4108 | recalculate_limits(); |
ysr@777 | 4109 | |
ysr@777 | 4110 | // clear all flags |
ysr@777 | 4111 | clear_has_aborted(); |
johnc@2494 | 4112 | _has_timed_out = false; |
ysr@777 | 4113 | _draining_satb_buffers = false; |
ysr@777 | 4114 | |
ysr@777 | 4115 | ++_calls; |
ysr@777 | 4116 | |
tonyp@2973 | 4117 | if (_cm->verbose_low()) { |
ysr@777 | 4118 | gclog_or_tty->print_cr("[%d] >>>>>>>>>> START, call = %d, " |
ysr@777 | 4119 | "target = %1.2lfms >>>>>>>>>>", |
ysr@777 | 4120 | _task_id, _calls, _time_target_ms); |
tonyp@2973 | 4121 | } |
ysr@777 | 4122 | |
ysr@777 | 4123 | // Set up the bitmap and oop closures. Anything that uses them is |
ysr@777 | 4124 | // eventually called from this method, so it is OK to allocate these |
ysr@777 | 4125 | // statically. |
ysr@777 | 4126 | CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap); |
tonyp@2968 | 4127 | G1CMOopClosure cm_oop_closure(_g1h, _cm, this); |
tonyp@2968 | 4128 | set_cm_oop_closure(&cm_oop_closure); |
ysr@777 | 4129 | |
ysr@777 | 4130 | if (_cm->has_overflown()) { |
ysr@777 | 4131 | // This can happen if the region stack or the mark stack overflows |
ysr@777 | 4132 | // during a GC pause and this task, after a yield point, |
ysr@777 | 4133 | // restarts. We have to abort as we need to get into the overflow |
ysr@777 | 4134 | // protocol which happens right at the end of this task. |
ysr@777 | 4135 | set_has_aborted(); |
ysr@777 | 4136 | } |
ysr@777 | 4137 | |
ysr@777 | 4138 | // First drain any available SATB buffers. After this, we will not |
ysr@777 | 4139 | // look at SATB buffers before the next invocation of this method. |
ysr@777 | 4140 | // If enough completed SATB buffers are queued up, the regular clock |
ysr@777 | 4141 | // will abort this task so that it restarts. |
ysr@777 | 4142 | drain_satb_buffers(); |
ysr@777 | 4143 | // ...then partially drain the local queue and the global stack |
ysr@777 | 4144 | drain_local_queue(true); |
ysr@777 | 4145 | drain_global_stack(true); |
ysr@777 | 4146 | |
ysr@777 | 4147 | // Then totally drain the region stack. We will not look at |
ysr@777 | 4148 | // it again before the next invocation of this method. Entries on |
ysr@777 | 4149 | // the region stack are only added during evacuation pauses, for |
ysr@777 | 4150 | // which we have to yield. When we do, we abort the task anyway so |
ysr@777 | 4151 | // it will look at the region stack again when it restarts. |
ysr@777 | 4152 | bitmap_closure.set_scanning_heap_region(false); |
ysr@777 | 4153 | drain_region_stack(&bitmap_closure); |
ysr@777 | 4154 | // ...then partially drain the local queue and the global stack |
ysr@777 | 4155 | drain_local_queue(true); |
ysr@777 | 4156 | drain_global_stack(true); |
ysr@777 | 4157 | |
ysr@777 | 4158 | do { |
ysr@777 | 4159 | if (!has_aborted() && _curr_region != NULL) { |
ysr@777 | 4160 | // This means that we're already holding on to a region. |
tonyp@1458 | 4161 | assert(_finger != NULL, "if region is not NULL, then the finger " |
tonyp@1458 | 4162 | "should not be NULL either"); |
ysr@777 | 4163 | |
ysr@777 | 4164 | // We might have restarted this task after an evacuation pause |
ysr@777 | 4165 | // which might have evacuated the region we're holding on to |
ysr@777 | 4166 | // underneath our feet. Let's read its limit again to make sure |
ysr@777 | 4167 | // that we do not iterate over a region of the heap that |
ysr@777 | 4168 | // contains garbage (update_region_limit() will also move |
ysr@777 | 4169 | // _finger to the start of the region if it is found empty). |
ysr@777 | 4170 | update_region_limit(); |
ysr@777 | 4171 | // We will start from _finger not from the start of the region, |
ysr@777 | 4172 | // as we might be restarting this task after aborting half-way |
ysr@777 | 4173 | // through scanning this region. In this case, _finger points to |
ysr@777 | 4174 | // the address where we last found a marked object. If this is a |
ysr@777 | 4175 | // fresh region, _finger points to start(). |
ysr@777 | 4176 | MemRegion mr = MemRegion(_finger, _region_limit); |
ysr@777 | 4177 | |
tonyp@2973 | 4178 | if (_cm->verbose_low()) { |
ysr@777 | 4179 | gclog_or_tty->print_cr("[%d] we're scanning part " |
ysr@777 | 4180 | "["PTR_FORMAT", "PTR_FORMAT") " |
ysr@777 | 4181 | "of region "PTR_FORMAT, |
ysr@777 | 4182 | _task_id, _finger, _region_limit, _curr_region); |
tonyp@2973 | 4183 | } |
ysr@777 | 4184 | |
ysr@777 | 4185 | // Let's iterate over the bitmap of the part of the |
ysr@777 | 4186 | // region that is left. |
ysr@777 | 4187 | bitmap_closure.set_scanning_heap_region(true); |
ysr@777 | 4188 | if (mr.is_empty() || |
ysr@777 | 4189 | _nextMarkBitMap->iterate(&bitmap_closure, mr)) { |
ysr@777 | 4190 | // We successfully completed iterating over the region. Now, |
ysr@777 | 4191 | // let's give up the region. |
ysr@777 | 4192 | giveup_current_region(); |
ysr@777 | 4193 | regular_clock_call(); |
ysr@777 | 4194 | } else { |
tonyp@1458 | 4195 | assert(has_aborted(), "currently the only way to do so"); |
ysr@777 | 4196 | // The only way to abort the bitmap iteration is to return |
ysr@777 | 4197 | // false from the do_bit() method. However, inside the |
ysr@777 | 4198 | // do_bit() method we move the _finger to point to the |
ysr@777 | 4199 | // object currently being looked at. So, if we bail out, we |
ysr@777 | 4200 | // have definitely set _finger to something non-null. |
tonyp@1458 | 4201 | assert(_finger != NULL, "invariant"); |
ysr@777 | 4202 | |
ysr@777 | 4203 | // Region iteration was actually aborted. So now _finger |
ysr@777 | 4204 | // points to the address of the object we last scanned. If we |
ysr@777 | 4205 | // leave it there, when we restart this task, we will rescan |
ysr@777 | 4206 | // the object. It is easy to avoid this. We move the finger by |
ysr@777 | 4207 | // enough to point to the next possible object header (the |
ysr@777 | 4208 | // bitmap knows by how much we need to move it as it knows its |
ysr@777 | 4209 | // granularity). |
apetrusenko@1749 | 4210 | assert(_finger < _region_limit, "invariant"); |
apetrusenko@1749 | 4211 | HeapWord* new_finger = _nextMarkBitMap->nextWord(_finger); |
apetrusenko@1749 | 4212 | // Check if bitmap iteration was aborted while scanning the last object |
apetrusenko@1749 | 4213 | if (new_finger >= _region_limit) { |
apetrusenko@1749 | 4214 | giveup_current_region(); |
apetrusenko@1749 | 4215 | } else { |
apetrusenko@1749 | 4216 | move_finger_to(new_finger); |
apetrusenko@1749 | 4217 | } |
ysr@777 | 4218 | } |
ysr@777 | 4219 | } |
ysr@777 | 4220 | // At this point we have either completed iterating over the |
ysr@777 | 4221 | // region we were holding on to, or we have aborted. |
ysr@777 | 4222 | |
ysr@777 | 4223 | // We then partially drain the local queue and the global stack. |
ysr@777 | 4224 | // (Do we really need this?) |
ysr@777 | 4225 | drain_local_queue(true); |
ysr@777 | 4226 | drain_global_stack(true); |
ysr@777 | 4227 | |
ysr@777 | 4228 | // Read the note on the claim_region() method on why it might |
ysr@777 | 4229 | // return NULL with potentially more regions available for |
ysr@777 | 4230 | // claiming and why we have to check out_of_regions() to determine |
ysr@777 | 4231 | // whether we're done or not. |
ysr@777 | 4232 | while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) { |
ysr@777 | 4233 | // We are going to try to claim a new region. We should have |
ysr@777 | 4234 | // given up on the previous one. |
tonyp@1458 | 4235 | // Separated the asserts so that we know which one fires. |
tonyp@1458 | 4236 | assert(_curr_region == NULL, "invariant"); |
tonyp@1458 | 4237 | assert(_finger == NULL, "invariant"); |
tonyp@1458 | 4238 | assert(_region_limit == NULL, "invariant"); |
tonyp@2973 | 4239 | if (_cm->verbose_low()) { |
ysr@777 | 4240 | gclog_or_tty->print_cr("[%d] trying to claim a new region", _task_id); |
tonyp@2973 | 4241 | } |
ysr@777 | 4242 | HeapRegion* claimed_region = _cm->claim_region(_task_id); |
ysr@777 | 4243 | if (claimed_region != NULL) { |
ysr@777 | 4244 | // Yes, we managed to claim one |
ysr@777 | 4245 | statsOnly( ++_regions_claimed ); |
ysr@777 | 4246 | |
tonyp@2973 | 4247 | if (_cm->verbose_low()) { |
ysr@777 | 4248 | gclog_or_tty->print_cr("[%d] we successfully claimed " |
ysr@777 | 4249 | "region "PTR_FORMAT, |
ysr@777 | 4250 | _task_id, claimed_region); |
tonyp@2973 | 4251 | } |
ysr@777 | 4252 | |
ysr@777 | 4253 | setup_for_region(claimed_region); |
tonyp@1458 | 4254 | assert(_curr_region == claimed_region, "invariant"); |
ysr@777 | 4255 | } |
ysr@777 | 4256 | // It is important to call the regular clock here. It might take |
ysr@777 | 4257 | // a while to claim a region if, for example, we hit a large |
ysr@777 | 4258 | // block of empty regions. So we need to call the regular clock |
ysr@777 | 4259 | // method once round the loop to make sure it's called |
ysr@777 | 4260 | // frequently enough. |
ysr@777 | 4261 | regular_clock_call(); |
ysr@777 | 4262 | } |
ysr@777 | 4263 | |
ysr@777 | 4264 | if (!has_aborted() && _curr_region == NULL) { |
tonyp@1458 | 4265 | assert(_cm->out_of_regions(), |
tonyp@1458 | 4266 | "at this point we should be out of regions"); |
ysr@777 | 4267 | } |
ysr@777 | 4268 | } while ( _curr_region != NULL && !has_aborted()); |
ysr@777 | 4269 | |
ysr@777 | 4270 | if (!has_aborted()) { |
ysr@777 | 4271 | // We cannot check whether the global stack is empty, since other |
iveresov@778 | 4272 | // tasks might be pushing objects to it concurrently. We also cannot |
iveresov@778 | 4273 | // check if the region stack is empty because if a thread is aborting |
iveresov@778 | 4274 | // it can push a partially done region back. |
tonyp@1458 | 4275 | assert(_cm->out_of_regions(), |
tonyp@1458 | 4276 | "at this point we should be out of regions"); |
ysr@777 | 4277 | |
tonyp@2973 | 4278 | if (_cm->verbose_low()) { |
ysr@777 | 4279 | gclog_or_tty->print_cr("[%d] all regions claimed", _task_id); |
tonyp@2973 | 4280 | } |
ysr@777 | 4281 | |
ysr@777 | 4282 | // Try to reduce the number of available SATB buffers so that |
ysr@777 | 4283 | // remark has less work to do. |
ysr@777 | 4284 | drain_satb_buffers(); |
ysr@777 | 4285 | } |
ysr@777 | 4286 | |
ysr@777 | 4287 | // Since we've done everything else, we can now totally drain the |
ysr@777 | 4288 | // local queue and global stack. |
ysr@777 | 4289 | drain_local_queue(false); |
ysr@777 | 4290 | drain_global_stack(false); |
ysr@777 | 4291 | |
ysr@777 | 4292 |   // Attempt to steal work from other tasks' queues.
johnc@2494 | 4293 | if (do_stealing && !has_aborted()) { |
ysr@777 | 4294 | // We have not aborted. This means that we have finished all that |
ysr@777 | 4295 | // we could. Let's try to do some stealing... |
ysr@777 | 4296 | |
ysr@777 | 4297 | // We cannot check whether the global stack is empty, since other |
iveresov@778 | 4298 | // tasks might be pushing objects to it concurrently. We also cannot |
iveresov@778 | 4299 | // check if the region stack is empty because if a thread is aborting |
iveresov@778 | 4300 | // it can push a partially done region back. |
tonyp@1458 | 4301 | assert(_cm->out_of_regions() && _task_queue->size() == 0, |
tonyp@1458 | 4302 | "only way to reach here"); |
ysr@777 | 4303 | |
tonyp@2973 | 4304 | if (_cm->verbose_low()) { |
ysr@777 | 4305 | gclog_or_tty->print_cr("[%d] starting to steal", _task_id); |
tonyp@2973 | 4306 | } |
ysr@777 | 4307 | |
ysr@777 | 4308 | while (!has_aborted()) { |
ysr@777 | 4309 | oop obj; |
ysr@777 | 4310 | statsOnly( ++_steal_attempts ); |
ysr@777 | 4311 | |
ysr@777 | 4312 | if (_cm->try_stealing(_task_id, &_hash_seed, obj)) { |
tonyp@2973 | 4313 | if (_cm->verbose_medium()) { |
ysr@777 | 4314 | gclog_or_tty->print_cr("[%d] stolen "PTR_FORMAT" successfully", |
ysr@777 | 4315 | _task_id, (void*) obj); |
tonyp@2973 | 4316 | } |
ysr@777 | 4317 | |
ysr@777 | 4318 | statsOnly( ++_steals ); |
ysr@777 | 4319 | |
tonyp@1458 | 4320 | assert(_nextMarkBitMap->isMarked((HeapWord*) obj), |
tonyp@1458 | 4321 | "any stolen object should be marked"); |
ysr@777 | 4322 | scan_object(obj); |
ysr@777 | 4323 | |
ysr@777 | 4324 | // And since we're towards the end, let's totally drain the |
ysr@777 | 4325 | // local queue and global stack. |
ysr@777 | 4326 | drain_local_queue(false); |
ysr@777 | 4327 | drain_global_stack(false); |
ysr@777 | 4328 | } else { |
ysr@777 | 4329 | break; |
ysr@777 | 4330 | } |
ysr@777 | 4331 | } |
ysr@777 | 4332 | } |
ysr@777 | 4333 | |
tonyp@2848 | 4334 | // If we are about to wrap up and go into termination, check if we |
tonyp@2848 | 4335 | // should raise the overflow flag. |
tonyp@2848 | 4336 | if (do_termination && !has_aborted()) { |
tonyp@2848 | 4337 | if (_cm->force_overflow()->should_force()) { |
tonyp@2848 | 4338 | _cm->set_has_overflown(); |
tonyp@2848 | 4339 | regular_clock_call(); |
tonyp@2848 | 4340 | } |
tonyp@2848 | 4341 | } |
tonyp@2848 | 4342 | |
ysr@777 | 4343 | // We still haven't aborted. Now, let's try to get into the |
ysr@777 | 4344 | // termination protocol. |
johnc@2494 | 4345 | if (do_termination && !has_aborted()) { |
ysr@777 | 4346 | // We cannot check whether the global stack is empty, since other |
iveresov@778 | 4347 | // tasks might be concurrently pushing objects on it. We also cannot |
iveresov@778 | 4348 | // check if the region stack is empty because if a thread is aborting |
iveresov@778 | 4349 | // it can push a partially done region back. |
tonyp@1458 | 4350 | // Separated the asserts so that we know which one fires. |
tonyp@1458 | 4351 | assert(_cm->out_of_regions(), "only way to reach here"); |
tonyp@1458 | 4352 | assert(_task_queue->size() == 0, "only way to reach here"); |
ysr@777 | 4353 | |
tonyp@2973 | 4354 | if (_cm->verbose_low()) { |
ysr@777 | 4355 | gclog_or_tty->print_cr("[%d] starting termination protocol", _task_id); |
tonyp@2973 | 4356 | } |
ysr@777 | 4357 | |
ysr@777 | 4358 | _termination_start_time_ms = os::elapsedVTime() * 1000.0; |
ysr@777 | 4359 | // The CMTask class also extends the TerminatorTerminator class, |
ysr@777 | 4360 | // hence its should_exit_termination() method will also decide |
ysr@777 | 4361 | // whether to exit the termination protocol or not. |
ysr@777 | 4362 | bool finished = _cm->terminator()->offer_termination(this); |
ysr@777 | 4363 | double termination_end_time_ms = os::elapsedVTime() * 1000.0; |
ysr@777 | 4364 | _termination_time_ms += |
ysr@777 | 4365 | termination_end_time_ms - _termination_start_time_ms; |
ysr@777 | 4366 | |
ysr@777 | 4367 | if (finished) { |
ysr@777 | 4368 | // We're all done. |
ysr@777 | 4369 | |
ysr@777 | 4370 | if (_task_id == 0) { |
ysr@777 | 4371 | // let's allow task 0 to do this |
ysr@777 | 4372 | if (concurrent()) { |
tonyp@1458 | 4373 | assert(_cm->concurrent_marking_in_progress(), "invariant"); |
ysr@777 | 4374 | // we need to set this to false before the next |
ysr@777 | 4375 | // safepoint. This way we ensure that the marking phase |
ysr@777 | 4376 | // doesn't observe any more heap expansions. |
ysr@777 | 4377 | _cm->clear_concurrent_marking_in_progress(); |
ysr@777 | 4378 | } |
ysr@777 | 4379 | } |
ysr@777 | 4380 | |
ysr@777 | 4381 | // We can now guarantee that the global stack is empty, since |
tonyp@1458 | 4382 | // all other tasks have finished. We separated the guarantees so |
tonyp@1458 | 4383 | // that, if a condition is false, we can immediately find out |
tonyp@1458 | 4384 | // which one. |
tonyp@1458 | 4385 | guarantee(_cm->out_of_regions(), "only way to reach here"); |
johnc@2190 | 4386 | guarantee(_aborted_region.is_empty(), "only way to reach here"); |
tonyp@1458 | 4387 | guarantee(_cm->region_stack_empty(), "only way to reach here"); |
tonyp@1458 | 4388 | guarantee(_cm->mark_stack_empty(), "only way to reach here"); |
tonyp@1458 | 4389 | guarantee(_task_queue->size() == 0, "only way to reach here"); |
tonyp@1458 | 4390 | guarantee(!_cm->has_overflown(), "only way to reach here"); |
tonyp@1458 | 4391 | guarantee(!_cm->mark_stack_overflow(), "only way to reach here"); |
tonyp@1458 | 4392 | guarantee(!_cm->region_stack_overflow(), "only way to reach here"); |
ysr@777 | 4393 | |
tonyp@2973 | 4394 | if (_cm->verbose_low()) { |
ysr@777 | 4395 | gclog_or_tty->print_cr("[%d] all tasks terminated", _task_id); |
tonyp@2973 | 4396 | } |
ysr@777 | 4397 | } else { |
ysr@777 | 4398 | // Apparently there's more work to do. Let's abort this task. It |
ysr@777 | 4399 | // will restart it and we can hopefully find more things to do. |
ysr@777 | 4400 | |
tonyp@2973 | 4401 | if (_cm->verbose_low()) { |
tonyp@2973 | 4402 | gclog_or_tty->print_cr("[%d] apparently there is more work to do", |
tonyp@2973 | 4403 | _task_id); |
tonyp@2973 | 4404 | } |
ysr@777 | 4405 | |
ysr@777 | 4406 | set_has_aborted(); |
ysr@777 | 4407 | statsOnly( ++_aborted_termination ); |
ysr@777 | 4408 | } |
ysr@777 | 4409 | } |
ysr@777 | 4410 | |
ysr@777 | 4411 | // Mainly for debugging purposes to make sure that a pointer to the |
ysr@777 | 4412 | // closure which was statically allocated in this frame doesn't |
ysr@777 | 4413 | // escape it by accident. |
tonyp@2968 | 4414 | set_cm_oop_closure(NULL); |
ysr@777 | 4415 | double end_time_ms = os::elapsedVTime() * 1000.0; |
ysr@777 | 4416 | double elapsed_time_ms = end_time_ms - _start_time_ms; |
ysr@777 | 4417 | // Update the step history. |
ysr@777 | 4418 | _step_times_ms.add(elapsed_time_ms); |
ysr@777 | 4419 | |
ysr@777 | 4420 | if (has_aborted()) { |
ysr@777 | 4421 | // The task was aborted for some reason. |
ysr@777 | 4422 | |
ysr@777 | 4423 | statsOnly( ++_aborted ); |
ysr@777 | 4424 | |
johnc@2494 | 4425 | if (_has_timed_out) { |
ysr@777 | 4426 | double diff_ms = elapsed_time_ms - _time_target_ms; |
ysr@777 | 4427 | // Keep statistics of how well we did with respect to hitting |
ysr@777 | 4428 | // our target only if we actually timed out (if we aborted for |
ysr@777 | 4429 | // other reasons, then the results might get skewed). |
ysr@777 | 4430 | _marking_step_diffs_ms.add(diff_ms); |
ysr@777 | 4431 | } |
ysr@777 | 4432 | |
ysr@777 | 4433 | if (_cm->has_overflown()) { |
ysr@777 | 4434 | // This is the interesting one. We aborted because a global |
ysr@777 | 4435 | // overflow was raised. This means we have to restart the |
ysr@777 | 4436 | // marking phase and start iterating over regions. However, in |
ysr@777 | 4437 | // order to do this we have to make sure that all tasks stop |
ysr@777 | 4438 | // what they are doing and re-initialise in a safe manner. We |
ysr@777 | 4439 | // will achieve this with the use of two barrier sync points. |
ysr@777 | 4440 | |
tonyp@2973 | 4441 | if (_cm->verbose_low()) { |
ysr@777 | 4442 | gclog_or_tty->print_cr("[%d] detected overflow", _task_id); |
tonyp@2973 | 4443 | } |
ysr@777 | 4444 | |
ysr@777 | 4445 | _cm->enter_first_sync_barrier(_task_id); |
ysr@777 | 4446 | // When we exit this sync barrier we know that all tasks have |
ysr@777 | 4447 | // stopped doing marking work. So, it's now safe to |
ysr@777 | 4448 | // re-initialise our data structures. At the end of this method, |
ysr@777 | 4449 | // task 0 will clear the global data structures. |
ysr@777 | 4450 | |
ysr@777 | 4451 | statsOnly( ++_aborted_overflow ); |
ysr@777 | 4452 | |
ysr@777 | 4453 | // We clear the local state of this task... |
ysr@777 | 4454 | clear_region_fields(); |
ysr@777 | 4455 | |
ysr@777 | 4456 | // ...and enter the second barrier. |
ysr@777 | 4457 | _cm->enter_second_sync_barrier(_task_id); |
ysr@777 | 4458 |     // At this point everything has been re-initialised and we're
ysr@777 | 4459 | // ready to restart. |
ysr@777 | 4460 | } |
ysr@777 | 4461 | |
ysr@777 | 4462 | if (_cm->verbose_low()) { |
ysr@777 | 4463 | gclog_or_tty->print_cr("[%d] <<<<<<<<<< ABORTING, target = %1.2lfms, " |
ysr@777 | 4464 | "elapsed = %1.2lfms <<<<<<<<<<", |
ysr@777 | 4465 | _task_id, _time_target_ms, elapsed_time_ms); |
tonyp@2973 | 4466 | if (_cm->has_aborted()) { |
ysr@777 | 4467 | gclog_or_tty->print_cr("[%d] ========== MARKING ABORTED ==========", |
ysr@777 | 4468 | _task_id); |
tonyp@2973 | 4469 | } |
ysr@777 | 4470 | } |
ysr@777 | 4471 | } else { |
tonyp@2973 | 4472 | if (_cm->verbose_low()) { |
ysr@777 | 4473 | gclog_or_tty->print_cr("[%d] <<<<<<<<<< FINISHED, target = %1.2lfms, " |
ysr@777 | 4474 | "elapsed = %1.2lfms <<<<<<<<<<", |
ysr@777 | 4475 | _task_id, _time_target_ms, elapsed_time_ms); |
tonyp@2973 | 4476 | } |
ysr@777 | 4477 | } |
ysr@777 | 4478 | |
ysr@777 | 4479 | _claimed = false; |
ysr@777 | 4480 | } |
ysr@777 | 4481 | |
ysr@777 | 4482 | CMTask::CMTask(int task_id, |
ysr@777 | 4483 | ConcurrentMark* cm, |
ysr@777 | 4484 | CMTaskQueue* task_queue, |
ysr@777 | 4485 | CMTaskQueueSet* task_queues) |
ysr@777 | 4486 | : _g1h(G1CollectedHeap::heap()), |
ysr@777 | 4487 | _task_id(task_id), _cm(cm), |
ysr@777 | 4488 | _claimed(false), |
ysr@777 | 4489 | _nextMarkBitMap(NULL), _hash_seed(17), |
ysr@777 | 4490 | _task_queue(task_queue), |
ysr@777 | 4491 | _task_queues(task_queues), |
tonyp@2968 | 4492 | _cm_oop_closure(NULL), |
johnc@2190 | 4493 | _aborted_region(MemRegion()) { |
tonyp@1458 | 4494 | guarantee(task_queue != NULL, "invariant"); |
tonyp@1458 | 4495 | guarantee(task_queues != NULL, "invariant"); |
ysr@777 | 4496 | |
ysr@777 | 4497 | statsOnly( _clock_due_to_scanning = 0; |
ysr@777 | 4498 | _clock_due_to_marking = 0 ); |
ysr@777 | 4499 | |
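  // Seed the step-duration diff predictor with one small sample so
  // that the very first get_new_prediction() call in do_marking_step()
  // has something to work with (presumably 0.5ms was chosen as a
  // conservative starting diff).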
ysr@777 | 4500 | _marking_step_diffs_ms.add(0.5); |
ysr@777 | 4501 | } |
tonyp@2717 | 4502 | |
tonyp@2717 | 4503 | // These are formatting macros that are used below to ensure |
tonyp@2717 | 4504 | // consistent formatting. The *_H_* versions are used to format the |
tonyp@2717 | 4505 | // header for a particular value and they should be kept consistent |
tonyp@2717 | 4506 | // with the corresponding macro. Also note that most of the macros add |
tonyp@2717 | 4507 | // the necessary white space (as a prefix) which makes them a bit |
tonyp@2717 | 4508 | // easier to compose. |
tonyp@2717 | 4509 | |
tonyp@2717 | 4510 | // All the output lines are prefixed with this string so that they can
tonyp@2717 | 4511 | // be identified easily in a large log file.
tonyp@2717 | 4512 | #define G1PPRL_LINE_PREFIX "###" |
tonyp@2717 | 4513 | |
tonyp@2717 | 4514 | #define G1PPRL_ADDR_BASE_FORMAT " "PTR_FORMAT"-"PTR_FORMAT |
tonyp@2717 | 4515 | #ifdef _LP64 |
tonyp@2717 | 4516 | #define G1PPRL_ADDR_BASE_H_FORMAT " %37s" |
tonyp@2717 | 4517 | #else // _LP64 |
tonyp@2717 | 4518 | #define G1PPRL_ADDR_BASE_H_FORMAT " %21s" |
tonyp@2717 | 4519 | #endif // _LP64 |
tonyp@2717 | 4520 | |
tonyp@2717 | 4521 | // For per-region info |
tonyp@2717 | 4522 | #define G1PPRL_TYPE_FORMAT " %-4s" |
tonyp@2717 | 4523 | #define G1PPRL_TYPE_H_FORMAT " %4s" |
tonyp@2717 | 4524 | #define G1PPRL_BYTE_FORMAT " "SIZE_FORMAT_W(9) |
tonyp@2717 | 4525 | #define G1PPRL_BYTE_H_FORMAT " %9s" |
tonyp@2717 | 4526 | #define G1PPRL_DOUBLE_FORMAT " %14.1f" |
tonyp@2717 | 4527 | #define G1PPRL_DOUBLE_H_FORMAT " %14s" |
tonyp@2717 | 4528 | |
tonyp@2717 | 4529 | // For summary info |
tonyp@2717 | 4530 | #define G1PPRL_SUM_ADDR_FORMAT(tag) " "tag":"G1PPRL_ADDR_BASE_FORMAT |
tonyp@2717 | 4531 | #define G1PPRL_SUM_BYTE_FORMAT(tag) " "tag": "SIZE_FORMAT |
tonyp@2717 | 4532 | #define G1PPRL_SUM_MB_FORMAT(tag) " "tag": %1.2f MB" |
tonyp@2717 | 4533 | #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag)" / %1.2f %%" |
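// For illustration only: with the macros above,
//   G1PPRL_SUM_MB_PERC_FORMAT("used")
// expands (via string concatenation) to the format string
//   " used: %1.2f MB / %1.2f %%"
// so printing it with the hypothetical values 128.0 and 50.0 emits
//   " used: 128.00 MB / 50.00 %"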
tonyp@2717 | 4534 | |
tonyp@2717 | 4535 | G1PrintRegionLivenessInfoClosure:: |
tonyp@2717 | 4536 | G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name) |
tonyp@2717 | 4537 | : _out(out), |
tonyp@2717 | 4538 | _total_used_bytes(0), _total_capacity_bytes(0), |
tonyp@2717 | 4539 | _total_prev_live_bytes(0), _total_next_live_bytes(0), |
tonyp@2717 | 4540 | _hum_used_bytes(0), _hum_capacity_bytes(0), |
tonyp@2717 | 4541 | _hum_prev_live_bytes(0), _hum_next_live_bytes(0) { |
tonyp@2717 | 4542 | G1CollectedHeap* g1h = G1CollectedHeap::heap(); |
tonyp@2717 | 4543 | MemRegion g1_committed = g1h->g1_committed(); |
tonyp@2717 | 4544 | MemRegion g1_reserved = g1h->g1_reserved(); |
tonyp@2717 | 4545 | double now = os::elapsedTime(); |
tonyp@2717 | 4546 | |
tonyp@2717 | 4547 | // Print the header of the output. |
tonyp@2717 | 4548 | _out->cr(); |
tonyp@2717 | 4549 | _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now); |
tonyp@2717 | 4550 | _out->print_cr(G1PPRL_LINE_PREFIX" HEAP" |
tonyp@2717 | 4551 | G1PPRL_SUM_ADDR_FORMAT("committed") |
tonyp@2717 | 4552 | G1PPRL_SUM_ADDR_FORMAT("reserved") |
tonyp@2717 | 4553 | G1PPRL_SUM_BYTE_FORMAT("region-size"), |
tonyp@2717 | 4554 | g1_committed.start(), g1_committed.end(), |
tonyp@2717 | 4555 | g1_reserved.start(), g1_reserved.end(), |
tonyp@2717 | 4556 | HeapRegion::GrainBytes); |
tonyp@2717 | 4557 | _out->print_cr(G1PPRL_LINE_PREFIX); |
tonyp@2717 | 4558 | _out->print_cr(G1PPRL_LINE_PREFIX |
tonyp@2717 | 4559 | G1PPRL_TYPE_H_FORMAT |
tonyp@2717 | 4560 | G1PPRL_ADDR_BASE_H_FORMAT |
tonyp@2717 | 4561 | G1PPRL_BYTE_H_FORMAT |
tonyp@2717 | 4562 | G1PPRL_BYTE_H_FORMAT |
tonyp@2717 | 4563 | G1PPRL_BYTE_H_FORMAT |
tonyp@2717 | 4564 | G1PPRL_DOUBLE_H_FORMAT, |
tonyp@2717 | 4565 | "type", "address-range", |
tonyp@2717 | 4566 | "used", "prev-live", "next-live", "gc-eff"); |
tonyp@2717 | 4567 | } |
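// For reference, the header printed by the constructor has this shape
// (phase name, addresses, times, and sizes are made up for illustration):
//
// ### PHASE Post-Marking @ 12.345
// ### HEAP committed: 0xf0000000-0xf8000000 reserved: 0xf0000000-0xf8000000 region-size: 1048576
// ###
// ###  type         address-range      used prev-live next-live    gc-eff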
tonyp@2717 | 4568 | |
tonyp@2717 | 4569 | // It takes as a parameter a reference to one of the _hum_* fields. It
tonyp@2717 | 4570 | // deduces the corresponding value for a region in a humongous region
tonyp@2717 | 4571 | // series (either the region size, or what's left if the _hum_* field
tonyp@2717 | 4572 | // is < the region size), and updates the _hum_* field accordingly.
tonyp@2717 | 4573 | size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) { |
tonyp@2717 | 4574 | size_t bytes = 0; |
tonyp@2717 | 4575 | // The > 0 check is to deal with the prev and next live bytes, which
tonyp@2717 | 4576 | // could be 0.
tonyp@2717 | 4577 | if (*hum_bytes > 0) { |
tonyp@2717 | 4578 | bytes = MIN2((size_t) HeapRegion::GrainBytes, *hum_bytes); |
tonyp@2717 | 4579 | *hum_bytes -= bytes; |
tonyp@2717 | 4580 | } |
tonyp@2717 | 4581 | return bytes; |
tonyp@2717 | 4582 | } |
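// Worked example (numbers illustrative): with a 1 MB region size and
// *hum_bytes starting at 2.5 MB, three successive calls return 1 MB,
// 1 MB, and 0.5 MB, after which *hum_bytes is 0 and any further call
// returns 0.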
tonyp@2717 | 4583 | |
tonyp@2717 | 4584 | // It deduces the values for a region in a humongous region series |
tonyp@2717 | 4585 | // from the _hum_* fields and updates those accordingly. It assumes |
tonyp@2717 | 4586 | // that the _hum_* fields have already been set up from the "starts
tonyp@2717 | 4587 | // humongous" region and that we visit the regions in address order.
tonyp@2717 | 4588 | void G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* used_bytes, |
tonyp@2717 | 4589 | size_t* capacity_bytes, |
tonyp@2717 | 4590 | size_t* prev_live_bytes, |
tonyp@2717 | 4591 | size_t* next_live_bytes) { |
tonyp@2717 | 4592 | assert(_hum_used_bytes > 0 && _hum_capacity_bytes > 0, "pre-condition"); |
tonyp@2717 | 4593 | *used_bytes = get_hum_bytes(&_hum_used_bytes); |
tonyp@2717 | 4594 | *capacity_bytes = get_hum_bytes(&_hum_capacity_bytes); |
tonyp@2717 | 4595 | *prev_live_bytes = get_hum_bytes(&_hum_prev_live_bytes); |
tonyp@2717 | 4596 | *next_live_bytes = get_hum_bytes(&_hum_next_live_bytes); |
tonyp@2717 | 4597 | } |
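// Worked example (sizes illustrative): a humongous object spanning
// three 1 MB regions seeds _hum_capacity_bytes with 3 MB at the
// "starts humongous" region; the three get_hum_bytes() calls made
// while visiting the series then each draw one region's worth,
// draining the field back to 0.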
tonyp@2717 | 4598 | |
tonyp@2717 | 4599 | bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) { |
tonyp@2717 | 4600 | const char* type = ""; |
tonyp@2717 | 4601 | HeapWord* bottom = r->bottom(); |
tonyp@2717 | 4602 | HeapWord* end = r->end(); |
tonyp@2717 | 4603 | size_t capacity_bytes = r->capacity(); |
tonyp@2717 | 4604 | size_t used_bytes = r->used(); |
tonyp@2717 | 4605 | size_t prev_live_bytes = r->live_bytes(); |
tonyp@2717 | 4606 | size_t next_live_bytes = r->next_live_bytes(); |
tonyp@2717 | 4607 | double gc_eff = r->gc_efficiency(); |
tonyp@2717 | 4608 | if (r->used() == 0) { |
tonyp@2717 | 4609 | type = "FREE"; |
tonyp@2717 | 4610 | } else if (r->is_survivor()) { |
tonyp@2717 | 4611 | type = "SURV"; |
tonyp@2717 | 4612 | } else if (r->is_young()) { |
tonyp@2717 | 4613 | type = "EDEN"; |
tonyp@2717 | 4614 | } else if (r->startsHumongous()) { |
tonyp@2717 | 4615 | type = "HUMS"; |
tonyp@2717 | 4616 | |
tonyp@2717 | 4617 | assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 && |
tonyp@2717 | 4618 | _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0, |
tonyp@2717 | 4619 | "they should have been zeroed after the last time we used them"); |
tonyp@2717 | 4620 | // Set up the _hum_* fields. |
tonyp@2717 | 4621 | _hum_capacity_bytes = capacity_bytes; |
tonyp@2717 | 4622 | _hum_used_bytes = used_bytes; |
tonyp@2717 | 4623 | _hum_prev_live_bytes = prev_live_bytes; |
tonyp@2717 | 4624 | _hum_next_live_bytes = next_live_bytes; |
tonyp@2717 | 4625 | get_hum_bytes(&used_bytes, &capacity_bytes, |
tonyp@2717 | 4626 | &prev_live_bytes, &next_live_bytes); |
tonyp@2717 | 4627 | end = bottom + HeapRegion::GrainWords; |
tonyp@2717 | 4628 | } else if (r->continuesHumongous()) { |
tonyp@2717 | 4629 | type = "HUMC"; |
tonyp@2717 | 4630 | get_hum_bytes(&used_bytes, &capacity_bytes, |
tonyp@2717 | 4631 | &prev_live_bytes, &next_live_bytes); |
tonyp@2717 | 4632 | assert(end == bottom + HeapRegion::GrainWords, "invariant"); |
tonyp@2717 | 4633 | } else { |
tonyp@2717 | 4634 | type = "OLD"; |
tonyp@2717 | 4635 | } |
tonyp@2717 | 4636 | |
tonyp@2717 | 4637 | _total_used_bytes += used_bytes; |
tonyp@2717 | 4638 | _total_capacity_bytes += capacity_bytes; |
tonyp@2717 | 4639 | _total_prev_live_bytes += prev_live_bytes; |
tonyp@2717 | 4640 | _total_next_live_bytes += next_live_bytes; |
tonyp@2717 | 4641 | |
tonyp@2717 | 4642 | // Print a line for this particular region. |
tonyp@2717 | 4643 | _out->print_cr(G1PPRL_LINE_PREFIX |
tonyp@2717 | 4644 | G1PPRL_TYPE_FORMAT |
tonyp@2717 | 4645 | G1PPRL_ADDR_BASE_FORMAT |
tonyp@2717 | 4646 | G1PPRL_BYTE_FORMAT |
tonyp@2717 | 4647 | G1PPRL_BYTE_FORMAT |
tonyp@2717 | 4648 | G1PPRL_BYTE_FORMAT |
tonyp@2717 | 4649 | G1PPRL_DOUBLE_FORMAT, |
tonyp@2717 | 4650 | type, bottom, end, |
tonyp@2717 | 4651 | used_bytes, prev_live_bytes, next_live_bytes, gc_eff); |
tonyp@2717 | 4652 | |
tonyp@2717 | 4653 | return false; |
tonyp@2717 | 4654 | } |
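// For illustration, a per-region line printed above has this shape
// (type, addresses, and values are made up; column widths follow the
// G1PPRL_* formats):
//
// ### EDEN 0xf0000000-0xf0100000    524288    491520    491520       0.0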
tonyp@2717 | 4655 | |
tonyp@2717 | 4656 | G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() { |
tonyp@2717 | 4657 | // Print the footer of the output. |
tonyp@2717 | 4658 | _out->print_cr(G1PPRL_LINE_PREFIX); |
tonyp@2717 | 4659 | _out->print_cr(G1PPRL_LINE_PREFIX |
tonyp@2717 | 4660 | " SUMMARY" |
tonyp@2717 | 4661 | G1PPRL_SUM_MB_FORMAT("capacity") |
tonyp@2717 | 4662 | G1PPRL_SUM_MB_PERC_FORMAT("used") |
tonyp@2717 | 4663 | G1PPRL_SUM_MB_PERC_FORMAT("prev-live") |
tonyp@2717 | 4664 | G1PPRL_SUM_MB_PERC_FORMAT("next-live"), |
tonyp@2717 | 4665 | bytes_to_mb(_total_capacity_bytes), |
tonyp@2717 | 4666 | bytes_to_mb(_total_used_bytes), |
tonyp@2717 | 4667 | perc(_total_used_bytes, _total_capacity_bytes), |
tonyp@2717 | 4668 | bytes_to_mb(_total_prev_live_bytes), |
tonyp@2717 | 4669 | perc(_total_prev_live_bytes, _total_capacity_bytes), |
tonyp@2717 | 4670 | bytes_to_mb(_total_next_live_bytes), |
tonyp@2717 | 4671 | perc(_total_next_live_bytes, _total_capacity_bytes)); |
tonyp@2717 | 4672 | _out->cr(); |
tonyp@2717 | 4673 | } |