Thu, 28 Mar 2013 10:27:28 +0100
7014552: gc/lock/jni/jnilockXXX works too slow on 1-processor machine
Summary: Keep a counter of how many times we were stalled by the GC locker, add a diagnostic flag which sets the limit.
Reviewed-by: brutisso, ehelin, johnc
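
The summary above is the whole mechanism of the fix: the allocation slow path keeps a per-request count of how many times the thread has been stalled by the GC locker, and once that count passes a limit (exposed as a new diagnostic flag) it gives up and returns instead of stalling again, which is what made the jnilock tests crawl on a 1-processor machine. Below is a minimal standalone sketch of that retry policy; kGCLockerRetryAllocationCount and should_retry_after_gclocker_stall are illustrative stand-ins, not the actual HotSpot identifiers, and in the real change the limit comes from the new diagnostic flag rather than a constant.

    // Simplified model: the caller threads the same counter through every
    // allocation attempt for one request (cf. gclocker_retry_count_ret in the
    // diff below).
    static const int kGCLockerRetryAllocationCount = 2;  // stand-in for the diagnostic flag

    // Decide whether to stall-and-retry after being blocked by the GC locker.
    // Returns false once this request has already been stalled too many times,
    // so the caller can return NULL (and eventually throw OOM) instead of
    // stalling indefinitely.
    static bool should_retry_after_gclocker_stall(int* gclocker_retry_count) {
      if (*gclocker_retry_count > kGCLockerRetryAllocationCount) {
        return false;                  // over the limit: stop retrying
      }
      *gclocker_retry_count += 1;      // record one more GC-locker stall
      return true;
    }
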
ysr@777 | 1 | /* |
johnc@4016 | 2 | * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. |
ysr@777 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
ysr@777 | 4 | * |
ysr@777 | 5 | * This code is free software; you can redistribute it and/or modify it |
ysr@777 | 6 | * under the terms of the GNU General Public License version 2 only, as |
ysr@777 | 7 | * published by the Free Software Foundation. |
ysr@777 | 8 | * |
ysr@777 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
ysr@777 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
ysr@777 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
ysr@777 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
ysr@777 | 13 | * accompanied this code). |
ysr@777 | 14 | * |
ysr@777 | 15 | * You should have received a copy of the GNU General Public License version |
ysr@777 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
ysr@777 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
ysr@777 | 18 | * |
trims@1907 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
trims@1907 | 20 | * or visit www.oracle.com if you need additional information or have any |
trims@1907 | 21 | * questions. |
ysr@777 | 22 | * |
ysr@777 | 23 | */ |
ysr@777 | 24 | |
stefank@2314 | 25 | #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP |
stefank@2314 | 26 | #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP |
stefank@2314 | 27 | |
stefank@2314 | 28 | #include "gc_implementation/g1/concurrentMark.hpp" |
stefank@2314 | 29 | #include "gc_implementation/g1/g1CollectedHeap.hpp" |
tonyp@2715 | 30 | #include "gc_implementation/g1/g1AllocRegion.inline.hpp" |
tonyp@2315 | 31 | #include "gc_implementation/g1/g1CollectorPolicy.hpp" |
tonyp@2469 | 32 | #include "gc_implementation/g1/heapRegionSeq.inline.hpp" |
stefank@2314 | 33 | #include "utilities/taskqueue.hpp" |
stefank@2314 | 34 | |
ysr@777 | 35 | // Inline functions for G1CollectedHeap |
ysr@777 | 36 | |
tonyp@2963 | 37 | template <class T> |
ysr@777 | 38 | inline HeapRegion* |
tonyp@2963 | 39 | G1CollectedHeap::heap_region_containing(const T addr) const { |
tonyp@2963 | 40 | HeapRegion* hr = _hrs.addr_to_region((HeapWord*) addr); |
ysr@777 | 41 | // hr can be NULL if addr is in perm_gen |
ysr@777 | 42 | if (hr != NULL && hr->continuesHumongous()) { |
ysr@777 | 43 | hr = hr->humongous_start_region(); |
ysr@777 | 44 | } |
ysr@777 | 45 | return hr; |
ysr@777 | 46 | } |
ysr@777 | 47 | |
tonyp@2963 | 48 | template <class T> |
ysr@777 | 49 | inline HeapRegion* |
tonyp@2963 | 50 | G1CollectedHeap::heap_region_containing_raw(const T addr) const { |
tonyp@2963 | 51 | assert(_g1_reserved.contains((const void*) addr), "invariant"); |
tonyp@2963 | 52 | HeapRegion* res = _hrs.addr_to_region_unsafe((HeapWord*) addr); |
ysr@777 | 53 | return res; |
ysr@777 | 54 | } |
ysr@777 | 55 | |
ysr@777 | 56 | inline bool G1CollectedHeap::obj_in_cs(oop obj) { |
tonyp@2963 | 57 | HeapRegion* r = _hrs.addr_to_region((HeapWord*) obj); |
ysr@777 | 58 | return r != NULL && r->in_collection_set(); |
ysr@777 | 59 | } |
ysr@777 | 60 | |
tonyp@2315 | 61 | inline HeapWord* |
tonyp@2715 | 62 | G1CollectedHeap::attempt_allocation(size_t word_size, |
mgerdin@4853 | 63 | unsigned int* gc_count_before_ret, |
mgerdin@4853 | 64 | int* gclocker_retry_count_ret) { |
tonyp@2715 | 65 | assert_heap_not_locked_and_not_at_safepoint(); |
tonyp@2715 | 66 | assert(!isHumongous(word_size), "attempt_allocation() should not " |
tonyp@2715 | 67 | "be called for humongous allocation requests"); |
ysr@777 | 68 | |
tonyp@2715 | 69 | HeapWord* result = _mutator_alloc_region.attempt_allocation(word_size, |
tonyp@2715 | 70 | false /* bot_updates */); |
tonyp@2715 | 71 | if (result == NULL) { |
mgerdin@4853 | 72 | result = attempt_allocation_slow(word_size, |
mgerdin@4853 | 73 | gc_count_before_ret, |
mgerdin@4853 | 74 | gclocker_retry_count_ret); |
tonyp@2715 | 75 | } |
tonyp@2715 | 76 | assert_heap_not_locked(); |
tonyp@2315 | 77 | if (result != NULL) { |
tonyp@2315 | 78 | dirty_young_block(result, word_size); |
tonyp@2315 | 79 | } |
tonyp@2715 | 80 | return result; |
tonyp@2454 | 81 | } |
tonyp@2454 | 82 | |
tonyp@3028 | 83 | inline HeapWord* G1CollectedHeap::survivor_attempt_allocation(size_t |
tonyp@3028 | 84 | word_size) { |
tonyp@3028 | 85 | assert(!isHumongous(word_size), |
tonyp@3028 | 86 | "we should not be seeing humongous-size allocations in this path"); |
tonyp@3028 | 87 | |
tonyp@3028 | 88 | HeapWord* result = _survivor_gc_alloc_region.attempt_allocation(word_size, |
tonyp@3028 | 89 | false /* bot_updates */); |
tonyp@3028 | 90 | if (result == NULL) { |
tonyp@3028 | 91 | MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag); |
tonyp@3028 | 92 | result = _survivor_gc_alloc_region.attempt_allocation_locked(word_size, |
tonyp@3028 | 93 | false /* bot_updates */); |
tonyp@3028 | 94 | } |
tonyp@3028 | 95 | if (result != NULL) { |
tonyp@3028 | 96 | dirty_young_block(result, word_size); |
tonyp@3028 | 97 | } |
tonyp@3028 | 98 | return result; |
tonyp@3028 | 99 | } |
tonyp@3028 | 100 | |
tonyp@3028 | 101 | inline HeapWord* G1CollectedHeap::old_attempt_allocation(size_t word_size) { |
tonyp@3028 | 102 | assert(!isHumongous(word_size), |
tonyp@3028 | 103 | "we should not be seeing humongous-size allocations in this path"); |
tonyp@3028 | 104 | |
tonyp@3028 | 105 | HeapWord* result = _old_gc_alloc_region.attempt_allocation(word_size, |
tonyp@3028 | 106 | true /* bot_updates */); |
tonyp@3028 | 107 | if (result == NULL) { |
tonyp@3028 | 108 | MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag); |
tonyp@3028 | 109 | result = _old_gc_alloc_region.attempt_allocation_locked(word_size, |
tonyp@3028 | 110 | true /* bot_updates */); |
tonyp@3028 | 111 | } |
tonyp@3028 | 112 | return result; |
tonyp@3028 | 113 | } |
tonyp@3028 | 114 | |
tonyp@2315 | 115 | // It dirties the cards that cover the block so that the post |
tonyp@2315 | 116 | // write barrier never queues anything when updating objects on this |
tonyp@2315 | 117 | // block. It is assumed (and in fact we assert) that the block |
tonyp@2315 | 118 | // belongs to a young region. |
tonyp@2315 | 119 | inline void |
tonyp@2315 | 120 | G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) { |
tonyp@2315 | 121 | assert_heap_not_locked(); |
tonyp@2315 | 122 | |
tonyp@2315 | 123 | // Assign the containing region to containing_hr so that we don't |
tonyp@2315 | 124 | // have to keep calling heap_region_containing_raw() in the |
tonyp@2315 | 125 | // asserts below. |
tonyp@2315 | 126 | DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing_raw(start);) |
tonyp@2315 | 127 | assert(containing_hr != NULL && start != NULL && word_size > 0, |
tonyp@2315 | 128 | "pre-condition"); |
tonyp@2315 | 129 | assert(containing_hr->is_in(start), "it should contain start"); |
tonyp@2315 | 130 | assert(containing_hr->is_young(), "it should be young"); |
tonyp@2315 | 131 | assert(!containing_hr->isHumongous(), "it should not be humongous"); |
tonyp@2315 | 132 | |
tonyp@2315 | 133 | HeapWord* end = start + word_size; |
tonyp@2315 | 134 | assert(containing_hr->is_in(end - 1), "it should also contain end - 1"); |
tonyp@2315 | 135 | |
tonyp@2315 | 136 | MemRegion mr(start, end); |
tonyp@2315 | 137 | ((CardTableModRefBS*)_g1h->barrier_set())->dirty(mr); |
ysr@777 | 138 | } |
ysr@777 | 139 | |
jcoomes@2064 | 140 | inline RefToScanQueue* G1CollectedHeap::task_queue(int i) const { |
ysr@777 | 141 | return _task_queues->queue(i); |
ysr@777 | 142 | } |
ysr@777 | 143 | |
johnc@4016 | 144 | inline bool G1CollectedHeap::isMarkedPrev(oop obj) const { |
ysr@777 | 145 | return _cm->prevMarkBitMap()->isMarked((HeapWord *)obj); |
ysr@777 | 146 | } |
ysr@777 | 147 | |
ysr@777 | 148 | inline bool G1CollectedHeap::isMarkedNext(oop obj) const { |
ysr@777 | 149 | return _cm->nextMarkBitMap()->isMarked((HeapWord *)obj); |
ysr@777 | 150 | } |
stefank@2314 | 151 | |
johnc@4016 | 152 | #ifndef PRODUCT |
johnc@4016 | 153 | // Support for G1EvacuationFailureALot |
johnc@4016 | 154 | |
johnc@4016 | 155 | inline bool |
johnc@4016 | 156 | G1CollectedHeap::evacuation_failure_alot_for_gc_type(bool gcs_are_young, |
johnc@4016 | 157 | bool during_initial_mark, |
johnc@4016 | 158 | bool during_marking) { |
johnc@4016 | 159 | bool res = false; |
johnc@4016 | 160 | if (during_marking) { |
johnc@4016 | 161 | res |= G1EvacuationFailureALotDuringConcMark; |
johnc@4016 | 162 | } |
johnc@4016 | 163 | if (during_initial_mark) { |
johnc@4016 | 164 | res |= G1EvacuationFailureALotDuringInitialMark; |
johnc@4016 | 165 | } |
johnc@4016 | 166 | if (gcs_are_young) { |
johnc@4016 | 167 | res |= G1EvacuationFailureALotDuringYoungGC; |
johnc@4016 | 168 | } else { |
johnc@4016 | 169 | // GCs are mixed |
johnc@4016 | 170 | res |= G1EvacuationFailureALotDuringMixedGC; |
johnc@4016 | 171 | } |
johnc@4016 | 172 | return res; |
johnc@4016 | 173 | } |
johnc@4016 | 174 | |
johnc@4016 | 175 | inline void |
johnc@4016 | 176 | G1CollectedHeap::set_evacuation_failure_alot_for_current_gc() { |
johnc@4016 | 177 | if (G1EvacuationFailureALot) { |
johnc@4016 | 178 | // Note we can't assert that _evacuation_failure_alot_for_current_gc |
johnc@4016 | 179 | // is clear here. It may have been set during a previous GC but that GC |
johnc@4016 | 180 | // did not copy enough objects (i.e. G1EvacuationFailureALotCount) to |
johnc@4016 | 181 | // trigger an evacuation failure and clear the flags and counts. |
johnc@4016 | 182 | |
johnc@4016 | 183 | // Check if we have gone over the interval. |
johnc@4016 | 184 | const size_t gc_num = total_collections(); |
johnc@4016 | 185 | const size_t elapsed_gcs = gc_num - _evacuation_failure_alot_gc_number; |
johnc@4016 | 186 | |
johnc@4016 | 187 | _evacuation_failure_alot_for_current_gc = (elapsed_gcs >= G1EvacuationFailureALotInterval); |
johnc@4016 | 188 | |
johnc@4016 | 189 | // Now check if G1EvacuationFailureALot is enabled for the current GC type. |
johnc@4016 | 190 | const bool gcs_are_young = g1_policy()->gcs_are_young(); |
johnc@4016 | 191 | const bool during_im = g1_policy()->during_initial_mark_pause(); |
johnc@4016 | 192 | const bool during_marking = mark_in_progress(); |
johnc@4016 | 193 | |
johnc@4016 | 194 | _evacuation_failure_alot_for_current_gc &= |
johnc@4016 | 195 | evacuation_failure_alot_for_gc_type(gcs_are_young, |
johnc@4016 | 196 | during_im, |
johnc@4016 | 197 | during_marking); |
johnc@4016 | 198 | } |
johnc@4016 | 199 | } |
johnc@4016 | 200 | |
johnc@4016 | 201 | inline bool |
johnc@4016 | 202 | G1CollectedHeap::evacuation_should_fail() { |
johnc@4016 | 203 | if (!G1EvacuationFailureALot || !_evacuation_failure_alot_for_current_gc) { |
johnc@4016 | 204 | return false; |
johnc@4016 | 205 | } |
johnc@4016 | 206 | // G1EvacuationFailureALot is in effect for the current GC. |
johnc@4016 | 207 | // Access to _evacuation_failure_alot_count is not atomic; |
johnc@4016 | 208 | // the value does not have to be exact. |
johnc@4016 | 209 | if (++_evacuation_failure_alot_count < G1EvacuationFailureALotCount) { |
johnc@4016 | 210 | return false; |
johnc@4016 | 211 | } |
johnc@4016 | 212 | _evacuation_failure_alot_count = 0; |
johnc@4016 | 213 | return true; |
johnc@4016 | 214 | } |
johnc@4016 | 215 | |
johnc@4016 | 216 | inline void G1CollectedHeap::reset_evacuation_should_fail() { |
johnc@4016 | 217 | if (G1EvacuationFailureALot) { |
johnc@4016 | 218 | _evacuation_failure_alot_gc_number = total_collections(); |
johnc@4016 | 219 | _evacuation_failure_alot_count = 0; |
johnc@4016 | 220 | _evacuation_failure_alot_for_current_gc = false; |
johnc@4016 | 221 | } |
johnc@4016 | 222 | } |
johnc@4016 | 223 | #endif // #ifndef PRODUCT |
johnc@4016 | 224 | |
stefank@2314 | 225 | #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP |