Tue, 07 Dec 2010 16:47:42 -0500
6994056: G1: when GC locker is active, extend the Eden instead of allocating into the old gen
Summary: Allow the eden to be expanded up to a point when the GC locker is active.
Reviewed-by: jwilhelm, johnc, ysr, jcoomes
ysr@777 | 1 | /* |
stefank@2314 | 2 | * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. |
ysr@777 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
ysr@777 | 4 | * |
ysr@777 | 5 | * This code is free software; you can redistribute it and/or modify it |
ysr@777 | 6 | * under the terms of the GNU General Public License version 2 only, as |
ysr@777 | 7 | * published by the Free Software Foundation. |
ysr@777 | 8 | * |
ysr@777 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
ysr@777 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
ysr@777 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
ysr@777 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
ysr@777 | 13 | * accompanied this code). |
ysr@777 | 14 | * |
ysr@777 | 15 | * You should have received a copy of the GNU General Public License version |
ysr@777 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
ysr@777 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
ysr@777 | 18 | * |
trims@1907 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
trims@1907 | 20 | * or visit www.oracle.com if you need additional information or have any |
trims@1907 | 21 | * questions. |
ysr@777 | 22 | * |
ysr@777 | 23 | */ |
ysr@777 | 24 | |
stefank@2314 | 25 | #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP |
stefank@2314 | 26 | #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP |
stefank@2314 | 27 | |
stefank@2314 | 28 | #include "gc_implementation/g1/concurrentMark.hpp" |
stefank@2314 | 29 | #include "gc_implementation/g1/g1CollectedHeap.hpp" |
tonyp@2315 | 30 | #include "gc_implementation/g1/g1CollectorPolicy.hpp" |
stefank@2314 | 31 | #include "gc_implementation/g1/heapRegionSeq.hpp" |
stefank@2314 | 32 | #include "utilities/taskqueue.hpp" |
stefank@2314 | 33 | |
ysr@777 | 34 | // Inline functions for G1CollectedHeap |
ysr@777 | 35 | |
ysr@777 | 36 | inline HeapRegion* |
ysr@777 | 37 | G1CollectedHeap::heap_region_containing(const void* addr) const { |
ysr@777 | 38 | HeapRegion* hr = _hrs->addr_to_region(addr); |
ysr@777 | 39 | // hr can be null if addr in perm_gen |
ysr@777 | 40 | if (hr != NULL && hr->continuesHumongous()) { |
ysr@777 | 41 | hr = hr->humongous_start_region(); |
ysr@777 | 42 | } |
ysr@777 | 43 | return hr; |
ysr@777 | 44 | } |
ysr@777 | 45 | |
ysr@777 | 46 | inline HeapRegion* |
ysr@777 | 47 | G1CollectedHeap::heap_region_containing_raw(const void* addr) const { |
tonyp@961 | 48 | assert(_g1_reserved.contains(addr), "invariant"); |
johnc@1187 | 49 | size_t index = pointer_delta(addr, _g1_reserved.start(), 1) |
johnc@1187 | 50 | >> HeapRegion::LogOfHRGrainBytes; |
johnc@1187 | 51 | |
tonyp@961 | 52 | HeapRegion* res = _hrs->at(index); |
tonyp@961 | 53 | assert(res == _hrs->addr_to_region(addr), "sanity"); |
ysr@777 | 54 | return res; |
ysr@777 | 55 | } |
ysr@777 | 56 | |
ysr@777 | 57 | inline bool G1CollectedHeap::obj_in_cs(oop obj) { |
ysr@777 | 58 | HeapRegion* r = _hrs->addr_to_region(obj); |
ysr@777 | 59 | return r != NULL && r->in_collection_set(); |
ysr@777 | 60 | } |
ysr@777 | 61 | |
// See the comment in the .hpp file about the locking protocol and
// assumptions of this method (and other related ones).
//
// Tries to bump-allocate word_size words out of the given current
// (young) alloc region. On success the Heap_lock is RELEASED, the new
// block's cards are dirtied, and its address is returned; on failure
// the Heap_lock stays held and NULL is returned so the caller can
// retire the region and retry.
inline HeapWord*
G1CollectedHeap::allocate_from_cur_alloc_region(HeapRegion* cur_alloc_region,
                                                size_t word_size) {
  assert_heap_locked_and_not_at_safepoint();
  assert(cur_alloc_region != NULL, "pre-condition of the method");
  assert(cur_alloc_region == _cur_alloc_region, "pre-condition of the method");
  assert(cur_alloc_region->is_young(),
         "we only support young current alloc regions");
  assert(!isHumongous(word_size), "allocate_from_cur_alloc_region() "
         "should not be used for humongous allocations");
  assert(!cur_alloc_region->isHumongous(), "Catch a regression of this bug.");

  assert(!cur_alloc_region->is_empty(),
         err_msg("region ["PTR_FORMAT","PTR_FORMAT"] should not be empty",
                 cur_alloc_region->bottom(), cur_alloc_region->end()));
  // This allocate method does BOT updates and we don't need them in
  // the young generation. This will be fixed in the near future by
  // CR 6994297.
  HeapWord* result = cur_alloc_region->allocate(word_size);
  if (result != NULL) {
    assert(is_in(result), "result should be in the heap");
    Heap_lock->unlock();

    // Do the dirtying after we release the Heap_lock.
    dirty_young_block(result, word_size);
    return result;
  }

  // Allocation failed: we keep holding the Heap_lock so the caller can
  // safely retire this region.
  assert_heap_locked();
  return NULL;
}
tonyp@2315 | 95 | |
// See the comment in the .hpp file about the locking protocol and
// assumptions of this method (and other related ones).
//
// First-level slow path for non-humongous allocations: try the current
// alloc region; if that fails, retire it and try to install a fresh
// region to allocate from. Entered with the Heap_lock held; the lock is
// released on success and still held on a NULL return.
inline HeapWord*
G1CollectedHeap::attempt_allocation(size_t word_size) {
  assert_heap_locked_and_not_at_safepoint();
  assert(!isHumongous(word_size), "attempt_allocation() should not be called "
         "for humongous allocation requests");

  HeapRegion* cur_alloc_region = _cur_alloc_region;
  if (cur_alloc_region != NULL) {
    HeapWord* result = allocate_from_cur_alloc_region(cur_alloc_region,
                                                      word_size);
    if (result != NULL) {
      // allocate_from_cur_alloc_region() released the Heap_lock for us.
      assert_heap_not_locked();
      return result;
    }

    assert_heap_locked();

    // Since we couldn't successfully allocate into it, retire the
    // current alloc region.
    retire_cur_alloc_region(cur_alloc_region);
  }

  // Try to get a new region and allocate out of it
  HeapWord* result = replace_cur_alloc_region_and_allocate(word_size,
                                                           false, /* at_safepoint */
                                                           true,  /* do_dirtying */
                                                           false  /* can_expand */);
  if (result != NULL) {
    assert_heap_not_locked();
    return result;
  }

  assert_heap_locked();
  return NULL;
}
tonyp@2315 | 133 | |
// Common bookkeeping for retiring the current (young) alloc region:
// hands the region to the incremental collection set, accounts its used
// bytes into the heap-wide total, and clears _cur_alloc_region. Must be
// called with the Heap_lock held or at a safepoint.
inline void
G1CollectedHeap::retire_cur_alloc_region_common(HeapRegion* cur_alloc_region) {
  assert_heap_locked_or_at_safepoint();
  assert(cur_alloc_region != NULL && cur_alloc_region == _cur_alloc_region,
         "pre-condition of the call");
  assert(cur_alloc_region->is_young(),
         "we only support young current alloc regions");

  // The region is guaranteed to be young
  g1_policy()->add_region_to_incremental_cset_lhs(cur_alloc_region);
  _summary_bytes_used += cur_alloc_region->used();
  _cur_alloc_region = NULL;
}
tonyp@2315 | 147 | |
// It dirties the cards that cover the block so that the post
// write barrier never queues anything when updating objects on this
// block. It is assumed (and in fact we assert) that the block
// belongs to a young region.
inline void
G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
  // Caller must have already dropped the Heap_lock (see
  // allocate_from_cur_alloc_region()).
  assert_heap_not_locked();

  // Assign the containing region to containing_hr so that we don't
  // have to keep calling heap_region_containing_raw() in the
  // asserts below.
  DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing_raw(start);)
  assert(containing_hr != NULL && start != NULL && word_size > 0,
         "pre-condition");
  assert(containing_hr->is_in(start), "it should contain start");
  assert(containing_hr->is_young(), "it should be young");
  assert(!containing_hr->isHumongous(), "it should not be humongous");

  HeapWord* end = start + word_size;
  assert(containing_hr->is_in(end - 1), "it should also contain end - 1");

  // Mark every card spanning [start, end) dirty so the post write
  // barrier's card-is-dirty filter skips updates within this block.
  MemRegion mr(start, end);
  ((CardTableModRefBS*)_g1h->barrier_set())->dirty(mr);
}
ysr@777 | 172 | |
jcoomes@2064 | 173 | inline RefToScanQueue* G1CollectedHeap::task_queue(int i) const { |
ysr@777 | 174 | return _task_queues->queue(i); |
ysr@777 | 175 | } |
ysr@777 | 176 | |
ysr@777 | 177 | inline bool G1CollectedHeap::isMarkedPrev(oop obj) const { |
ysr@777 | 178 | return _cm->prevMarkBitMap()->isMarked((HeapWord *)obj); |
ysr@777 | 179 | } |
ysr@777 | 180 | |
ysr@777 | 181 | inline bool G1CollectedHeap::isMarkedNext(oop obj) const { |
ysr@777 | 182 | return _cm->nextMarkBitMap()->isMarked((HeapWord *)obj); |
ysr@777 | 183 | } |
stefank@2314 | 184 | |
stefank@2314 | 185 | #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP |