Tue, 19 May 2015 15:49:27 +0200
8061715: gc/g1/TestShrinkAuxiliaryData15.java fails with java.lang.RuntimeException: heap decommit failed - after > before
Summary: added WhiteBox methods to count regions and exact aux data sizes
Reviewed-by: jwilhelm, brutisso
/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP

#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/g1AllocRegion.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "utilities/taskqueue.hpp"

PLABStats* G1CollectedHeap::alloc_buffer_stats(InCSetState dest) {
  switch (dest.value()) {
    case InCSetState::Young:
      return &_survivor_plab_stats;
    case InCSetState::Old:
      return &_old_plab_stats;
    default:
      ShouldNotReachHere();
      return NULL; // Keep some compilers happy
  }
}

size_t G1CollectedHeap::desired_plab_sz(InCSetState dest) {
  size_t gclab_word_size = alloc_buffer_stats(dest)->desired_plab_sz();
  // Prevent humongous PLAB sizes for two reasons:
  // * PLABs are allocated using similar paths as oops, but should
  //   never be in a humongous region
  // * Allowing humongous PLABs needlessly churns the region free lists
  return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
}
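
// Illustrative arithmetic (an assumption based on G1 defaults, not from the
// original source): with 1 MB regions on a 64-bit VM,
// _humongous_object_threshold_in_words is HeapRegion::GrainWords / 2:
//   GrainWords = 1048576 / 8 = 131072 words
//   threshold  = 131072 / 2 =  65536 words
// so a desired PLAB size of, say, 393216 words is clamped to
// MIN2(65536, 393216) == 65536 words.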

HeapWord* G1CollectedHeap::par_allocate_during_gc(InCSetState dest,
                                                  size_t word_size,
                                                  AllocationContext_t context) {
  switch (dest.value()) {
    case InCSetState::Young:
      return survivor_attempt_allocation(word_size, context);
    case InCSetState::Old:
      return old_attempt_allocation(word_size, context);
    default:
      ShouldNotReachHere();
      return NULL; // Keep some compilers happy
  }
}

// Inline functions for G1CollectedHeap

inline AllocationContextStats& G1CollectedHeap::allocation_context_stats() {
  return _allocation_context_stats;
}

// Return the region with the given index. It assumes the index is valid.
inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm.at(index); }

inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const {
  assert(is_in_reserved(addr),
         err_msg("Cannot calculate region index for address "PTR_FORMAT" that is outside of the heap ["PTR_FORMAT", "PTR_FORMAT")",
                 p2i(addr), p2i(_reserved.start()), p2i(_reserved.end())));
  return (uint)(pointer_delta(addr, _reserved.start(), sizeof(uint8_t)) >> HeapRegion::LogOfHRGrainBytes);
}
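
// Worked example (illustrative, not in the original source): with 1 MB
// regions, HeapRegion::LogOfHRGrainBytes == 20, so an address that lies
// 5 * 1048576 bytes above _reserved.start() maps to
// (5 * 1048576) >> 20 == 5, i.e. region index 5.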

inline HeapWord* G1CollectedHeap::bottom_addr_for_region(uint index) const {
  return _hrm.reserved().start() + index * HeapRegion::GrainWords;
}

template <class T>
inline HeapRegion* G1CollectedHeap::heap_region_containing_raw(const T addr) const {
  assert(addr != NULL, "invariant");
  assert(is_in_g1_reserved((const void*) addr),
         err_msg("Address "PTR_FORMAT" is outside of the heap ranging from ["PTR_FORMAT" to "PTR_FORMAT")",
                 p2i((void*)addr), p2i(g1_reserved().start()), p2i(g1_reserved().end())));
  return _hrm.addr_to_region((HeapWord*) addr);
}

template <class T>
inline HeapRegion* G1CollectedHeap::heap_region_containing(const T addr) const {
  HeapRegion* hr = heap_region_containing_raw(addr);
  if (hr->continuesHumongous()) {
    return hr->humongous_start_region();
  }
  return hr;
}
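
// Illustrative example (not in the original source): for a humongous object
// spanning regions 10..12, heap_region_containing_raw() on an address in
// region 11 returns region 11, whereas heap_region_containing() redirects to
// region 10, the "starts humongous" region that owns the object header.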

inline void G1CollectedHeap::reset_gc_time_stamp() {
  _gc_time_stamp = 0;
  OrderAccess::fence();
  // Clear the cached CSet starting regions and time stamps.
  // Their validity is dependent on the GC timestamp.
  clear_cset_start_regions();
}

inline void G1CollectedHeap::increment_gc_time_stamp() {
  ++_gc_time_stamp;
  OrderAccess::fence();
}

inline void G1CollectedHeap::old_set_remove(HeapRegion* hr) {
  _old_set.remove(hr);
}

inline bool G1CollectedHeap::obj_in_cs(oop obj) {
  HeapRegion* r = _hrm.addr_to_region((HeapWord*) obj);
  return r != NULL && r->in_collection_set();
}

inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
                                                     uint* gc_count_before_ret,
                                                     uint* gclocker_retry_count_ret) {
  assert_heap_not_locked_and_not_at_safepoint();
  assert(!isHumongous(word_size), "attempt_allocation() should not "
         "be called for humongous allocation requests");

  AllocationContext_t context = AllocationContext::current();
  HeapWord* result = _allocator->mutator_alloc_region(context)->attempt_allocation(word_size,
                                                                                   false /* bot_updates */);
  if (result == NULL) {
    result = attempt_allocation_slow(word_size,
                                     context,
                                     gc_count_before_ret,
                                     gclocker_retry_count_ret);
  }
  assert_heap_not_locked();
  if (result != NULL) {
    dirty_young_block(result, word_size);
  }
  return result;
}
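
// Usage note (illustrative, not in the original source): this is the mutator
// fast path. The common case is a lock-free bump-pointer allocation in the
// current mutator alloc region; only when that fails does
// attempt_allocation_slow() take Heap_lock, retry, and possibly schedule a
// collection, reporting the GC count via *gc_count_before_ret.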

inline HeapWord* G1CollectedHeap::survivor_attempt_allocation(size_t word_size,
                                                              AllocationContext_t context) {
  assert(!isHumongous(word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = _allocator->survivor_gc_alloc_region(context)->attempt_allocation(word_size,
                                                                                       false /* bot_updates */);
  if (result == NULL) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = _allocator->survivor_gc_alloc_region(context)->attempt_allocation_locked(word_size,
                                                                                      false /* bot_updates */);
  }
  if (result != NULL) {
    dirty_young_block(result, word_size);
  }
  return result;
}

inline HeapWord* G1CollectedHeap::old_attempt_allocation(size_t word_size,
                                                         AllocationContext_t context) {
  assert(!isHumongous(word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = _allocator->old_gc_alloc_region(context)->attempt_allocation(word_size,
                                                                                  true /* bot_updates */);
  if (result == NULL) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = _allocator->old_gc_alloc_region(context)->attempt_allocation_locked(word_size,
                                                                                 true /* bot_updates */);
  }
  return result;
}

// It dirties the cards that cover the block so that the post
// write barrier never queues anything when updating objects on this
// block. It is assumed (and in fact we assert) that the block
// belongs to a young region.
inline void
G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
  assert_heap_not_locked();

  // Assign the containing region to containing_hr so that we don't
  // have to keep calling heap_region_containing_raw() in the
  // asserts below.
  DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing_raw(start);)
  assert(word_size > 0, "pre-condition");
  assert(containing_hr->is_in(start), "it should contain start");
  assert(containing_hr->is_young(), "it should be young");
  assert(!containing_hr->isHumongous(), "it should not be humongous");

  HeapWord* end = start + word_size;
  assert(containing_hr->is_in(end - 1), "it should also contain end - 1");

  MemRegion mr(start, end);
  g1_barrier_set()->g1_mark_as_young(mr);
}
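
// Illustrative arithmetic (an assumption, not from the original source): with
// the usual 512-byte cards and 8-byte HeapWords, a word_size of 1024 covers
// 8192 bytes, i.e. 8192 / 512 == 16 card entries that g1_mark_as_young()
// tags as young so the post write barrier can skip them.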

inline RefToScanQueue* G1CollectedHeap::task_queue(int i) const {
  return _task_queues->queue(i);
}

inline bool G1CollectedHeap::isMarkedPrev(oop obj) const {
  return _cm->prevMarkBitMap()->isMarked((HeapWord *)obj);
}

inline bool G1CollectedHeap::isMarkedNext(oop obj) const {
  return _cm->nextMarkBitMap()->isMarked((HeapWord *)obj);
}

// This is a fast test on whether a reference points into the
// collection set or not. It assumes that the reference
// points into the heap.
inline bool G1CollectedHeap::is_in_cset(oop obj) {
  bool ret = _in_cset_fast_test.is_in_cset((HeapWord*)obj);
  // let's make sure the result is consistent with what the slower
  // test returns
  assert( ret || !obj_in_cs(obj), "sanity");
  assert(!ret || obj_in_cs(obj), "sanity");
  return ret;
}

bool G1CollectedHeap::is_in_cset_or_humongous(const oop obj) {
  return _in_cset_fast_test.is_in_cset_or_humongous((HeapWord*)obj);
}

InCSetState G1CollectedHeap::in_cset_state(const oop obj) {
  return _in_cset_fast_test.at((HeapWord*)obj);
}

void G1CollectedHeap::register_humongous_region_with_in_cset_fast_test(uint index) {
  _in_cset_fast_test.set_humongous(index);
}
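
// Conceptual sketch (hypothetical code, not the VM's actual types): the fast
// test is backed by a biased per-region byte table, so a query boils down to
// one shift and one load, roughly:
//
//   uint8_t state = _table_base[uintptr_t(addr) >> HeapRegion::LogOfHRGrainBytes];
//   return state != NotInCSet;
//
// which is why it is cheap enough for barrier-like hot paths.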

#ifndef PRODUCT
// Support for G1EvacuationFailureALot

inline bool
G1CollectedHeap::evacuation_failure_alot_for_gc_type(bool gcs_are_young,
                                                     bool during_initial_mark,
                                                     bool during_marking) {
  bool res = false;
  if (during_marking) {
    res |= G1EvacuationFailureALotDuringConcMark;
  }
  if (during_initial_mark) {
    res |= G1EvacuationFailureALotDuringInitialMark;
  }
  if (gcs_are_young) {
    res |= G1EvacuationFailureALotDuringYoungGC;
  } else {
    // GCs are mixed
    res |= G1EvacuationFailureALotDuringMixedGC;
  }
  return res;
}

inline void
G1CollectedHeap::set_evacuation_failure_alot_for_current_gc() {
  if (G1EvacuationFailureALot) {
    // Note we can't assert that _evacuation_failure_alot_for_current_gc
    // is clear here. It may have been set during a previous GC but that GC
    // did not copy enough objects (i.e. G1EvacuationFailureALotCount) to
    // trigger an evacuation failure and clear the flags and counts.

    // Check if we have gone over the interval.
    const size_t gc_num = total_collections();
    const size_t elapsed_gcs = gc_num - _evacuation_failure_alot_gc_number;

    _evacuation_failure_alot_for_current_gc = (elapsed_gcs >= G1EvacuationFailureALotInterval);

    // Now check if G1EvacuationFailureALot is enabled for the current GC type.
    const bool gcs_are_young = g1_policy()->gcs_are_young();
    const bool during_im = g1_policy()->during_initial_mark_pause();
    const bool during_marking = mark_in_progress();

    _evacuation_failure_alot_for_current_gc &=
      evacuation_failure_alot_for_gc_type(gcs_are_young,
                                          during_im,
                                          during_marking);
  }
}
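
// Worked example (the flag value is an assumption, not taken from this file):
// with G1EvacuationFailureALotInterval == 5, if the mechanism was last armed
// at total_collections() == 12, then elapsed_gcs reaches 5 at GC number 17,
// the earliest collection that can force evacuation failures again (subject
// to the per-GC-type flags checked above).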

inline bool G1CollectedHeap::evacuation_should_fail() {
  if (!G1EvacuationFailureALot || !_evacuation_failure_alot_for_current_gc) {
    return false;
  }
  // G1EvacuationFailureALot is in effect for current GC
  // Access to _evacuation_failure_alot_count is not atomic;
  // the value does not have to be exact.
  if (++_evacuation_failure_alot_count < G1EvacuationFailureALotCount) {
    return false;
  }
  _evacuation_failure_alot_count = 0;
  return true;
}
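
// Illustrative trace (assuming G1EvacuationFailureALotCount == 1000): during
// an armed GC, calls 1..999 return false while the (racy, approximate)
// counter climbs; call 1000 returns true and resets the counter, so roughly
// every 1000th copied object is forced down the evacuation-failure path.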

inline void G1CollectedHeap::reset_evacuation_should_fail() {
  if (G1EvacuationFailureALot) {
    _evacuation_failure_alot_gc_number = total_collections();
    _evacuation_failure_alot_count = 0;
    _evacuation_failure_alot_for_current_gc = false;
  }
}
#endif  // #ifndef PRODUCT

inline bool G1CollectedHeap::is_in_young(const oop obj) {
  if (obj == NULL) {
    return false;
  }
  return heap_region_containing(obj)->is_young();
}

// We don't need barriers for initializing stores to objects
// in the young gen: for the SATB pre-barrier, there is no
// pre-value that needs to be remembered; for the remembered-set
// update logging post-barrier, we don't maintain remembered set
// information for young gen objects.
inline bool G1CollectedHeap::can_elide_initializing_store_barrier(oop new_obj) {
  return is_in_young(new_obj);
}
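
// Illustrative consequence (a sketch of how a code generator can use this,
// not a statement about any particular JIT): for a freshly allocated young
// object 'dst', an initializing store such as dst->field = x may be emitted
// as a plain store, eliding both the SATB pre-barrier and the card-dirtying
// post-barrier that a store to an old object would require.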

inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
  if (obj == NULL) {
    return false;
  }
  return is_obj_dead(obj, heap_region_containing(obj));
}

inline bool G1CollectedHeap::is_obj_ill(const oop obj) const {
  if (obj == NULL) {
    return false;
  }
  return is_obj_ill(obj, heap_region_containing(obj));
}

inline void G1CollectedHeap::set_humongous_reclaim_candidate(uint region, bool value) {
  assert(_hrm.at(region)->startsHumongous(), "Must start a humongous object");
  _humongous_reclaim_candidates.set_candidate(region, value);
}

inline bool G1CollectedHeap::is_humongous_reclaim_candidate(uint region) {
  assert(_hrm.at(region)->startsHumongous(), "Must start a humongous object");
  return _humongous_reclaim_candidates.is_candidate(region);
}

inline void G1CollectedHeap::set_humongous_is_live(oop obj) {
  uint region = addr_to_region((HeapWord*)obj);
  // Clear the flag in the humongous_reclaim_candidates table. Also
  // reset the entry in the _in_cset_fast_test table so that subsequent references
  // to the same humongous object do not go into the slow path again.
  // This is racy, as multiple threads may at the same time enter here, but this
  // is benign.
  // During collection we only ever clear the "candidate" flag, and only ever clear the
  // entry in the _in_cset_fast_test table.
  // We only ever evaluate the contents of these tables (in the VM thread) after
  // having synchronized the worker threads with the VM thread, or in the same
  // thread (i.e. within the VM thread).
  if (is_humongous_reclaim_candidate(region)) {
    set_humongous_reclaim_candidate(region, false);
    _in_cset_fast_test.clear_humongous(region);
  }
}

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP