src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp


author      tonyp
date        Wed, 19 Jan 2011 19:30:42 -0500
changeset   2472:0fa27f37d4d4
parent      2469:7e37af9d69ef
child       2715:abdfc822206f
permissions -rw-r--r--

6977804: G1: remove the zero-filling thread
Summary: This changeset removes the zero-filling thread from G1 and collapses the two free region lists we had before (the "free" and "unclean" lists) into one. The new free list uses the new heap region set / list abstractions that we'll ultimately use to keep track of all regions in the heap. A heap region set was also introduced for the humongous regions. Finally, this change increases the concurrency between the thread that completes freeing regions (after a cleanup pause) and the rest of the system (previously we had to wait for said thread to complete before allocating a new region). The changeset also includes a lot of refactoring and code simplification.
Reviewed-by: jcoomes, johnc
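
The allocation fast path in this file (attempt_allocation() and allocate_from_cur_alloc_region() below) first tries a lock-free bump-pointer allocation in the current alloc region, and only takes the Heap_lock to re-read the region, retry, retire the full region, and install a new one. The following is a minimal, self-contained C++ sketch of that pattern; SimpleRegion, BumpAllocator, and replace_region() are hypothetical illustrations, not HotSpot's actual classes, and std::atomic/std::mutex stand in for HotSpot's own synchronization primitives.

// Minimal sketch of the lock-free-then-locked allocation pattern
// (hypothetical names; not the HotSpot classes used below).
#include <atomic>
#include <cstddef>
#include <mutex>

struct SimpleRegion {
  char*              _end;  // exclusive limit of the region
  std::atomic<char*> _top;  // next free byte, bumped with a CAS

  // Lock-free bump-pointer allocation; returns nullptr when full.
  void* par_allocate(std::size_t bytes) {
    char* old_top = _top.load(std::memory_order_relaxed);
    do {
      if (bytes > static_cast<std::size_t>(_end - old_top)) {
        return nullptr;  // region is full
      }
      // On failure, compare_exchange_weak refreshes old_top and we
      // re-check the remaining capacity before trying again.
    } while (!_top.compare_exchange_weak(old_top, old_top + bytes));
    return old_top;
  }
};

class BumpAllocator {
  std::atomic<SimpleRegion*> _cur_region{nullptr};
  std::mutex                 _heap_lock;  // stands in for Heap_lock

  // Retire the current region and install a fresh one. Grossly
  // simplified: a real collector takes regions from a free list,
  // and this sketch leaks the retired region.
  SimpleRegion* replace_region() {
    static const std::size_t region_bytes = 1u << 20;
    char* base = new char[region_bytes];
    SimpleRegion* r = new SimpleRegion{base + region_bytes, {base}};
    _cur_region.store(r);
    return r;
  }

 public:
  void* allocate(std::size_t bytes) {
    // Fast path: no lock, just a CAS on the region's top pointer.
    SimpleRegion* r = _cur_region.load();
    if (r != nullptr) {
      if (void* p = r->par_allocate(bytes)) return p;
    }
    // Slow path: take the lock, re-read the current region (another
    // thread may have replaced it while we waited) and retry, then
    // retire it and allocate out of a fresh region.
    std::lock_guard<std::mutex> guard(_heap_lock);
    r = _cur_region.load();
    if (r != nullptr) {
      if (void* p = r->par_allocate(bytes)) return p;
    }
    r = replace_region();
    return r != nullptr ? r->par_allocate(bytes) : nullptr;
  }
};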

ysr@777 1 /*
tonyp@2454 2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
stefank@2314 25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP
stefank@2314 26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP
stefank@2314 27
stefank@2314 28 #include "gc_implementation/g1/concurrentMark.hpp"
stefank@2314 29 #include "gc_implementation/g1/g1CollectedHeap.hpp"
tonyp@2315 30 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
tonyp@2469 31 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
stefank@2314 32 #include "utilities/taskqueue.hpp"
stefank@2314 33
ysr@777 34 // Inline functions for G1CollectedHeap
ysr@777 35
ysr@777 36 inline HeapRegion*
ysr@777 37 G1CollectedHeap::heap_region_containing(const void* addr) const {
ysr@777 38 HeapRegion* hr = _hrs->addr_to_region(addr);
ysr@777 39 // hr can be null if addr in perm_gen
ysr@777 40 if (hr != NULL && hr->continuesHumongous()) {
ysr@777 41 hr = hr->humongous_start_region();
ysr@777 42 }
ysr@777 43 return hr;
ysr@777 44 }
ysr@777 45
ysr@777 46 inline HeapRegion*
ysr@777 47 G1CollectedHeap::heap_region_containing_raw(const void* addr) const {
tonyp@961 48 assert(_g1_reserved.contains(addr), "invariant");
johnc@1187 49 size_t index = pointer_delta(addr, _g1_reserved.start(), 1)
johnc@1187 50 >> HeapRegion::LogOfHRGrainBytes;
johnc@1187 51
tonyp@961 52 HeapRegion* res = _hrs->at(index);
tonyp@961 53 assert(res == _hrs->addr_to_region(addr), "sanity");
ysr@777 54 return res;
ysr@777 55 }
ysr@777 56
ysr@777 57 inline bool G1CollectedHeap::obj_in_cs(oop obj) {
ysr@777 58 HeapRegion* r = _hrs->addr_to_region(obj);
ysr@777 59 return r != NULL && r->in_collection_set();
ysr@777 60 }
ysr@777 61
tonyp@2315 62 // See the comment in the .hpp file about the locking protocol and
tonyp@2315 63 // assumptions of this method (and other related ones).
tonyp@2315 64 inline HeapWord*
tonyp@2315 65 G1CollectedHeap::allocate_from_cur_alloc_region(HeapRegion* cur_alloc_region,
tonyp@2454 66 size_t word_size,
tonyp@2454 67 bool with_heap_lock) {
tonyp@2454 68 assert_not_at_safepoint();
tonyp@2454 69 assert(with_heap_lock == Heap_lock->owned_by_self(),
tonyp@2454 70 "with_heap_lock and Heap_lock->owned_by_self() should be a tautology");
tonyp@2315 71 assert(cur_alloc_region != NULL, "pre-condition of the method");
tonyp@2315 72 assert(cur_alloc_region->is_young(),
tonyp@2315 73 "we only support young current alloc regions");
tonyp@2315 74 assert(!isHumongous(word_size), "allocate_from_cur_alloc_region() "
tonyp@2315 75 "should not be used for humongous allocations");
tonyp@2315 76 assert(!cur_alloc_region->isHumongous(), "Catch a regression of this bug.");
ysr@777 77
tonyp@2315 78 assert(!cur_alloc_region->is_empty(),
tonyp@2315 79 err_msg("region ["PTR_FORMAT","PTR_FORMAT"] should not be empty",
tonyp@2315 80 cur_alloc_region->bottom(), cur_alloc_region->end()));
tonyp@2454 81 HeapWord* result = cur_alloc_region->par_allocate_no_bot_updates(word_size);
tonyp@2315 82 if (result != NULL) {
tonyp@2315 83 assert(is_in(result), "result should be in the heap");
ysr@777 84
tonyp@2454 85 if (with_heap_lock) {
tonyp@2454 86 Heap_lock->unlock();
tonyp@2454 87 }
tonyp@2454 88 assert_heap_not_locked();
tonyp@2315 89 // Do the dirtying after we release the Heap_lock.
tonyp@2315 90 dirty_young_block(result, word_size);
tonyp@2315 91 return result;
tonyp@2315 92 }
ysr@777 93
tonyp@2454 94 if (with_heap_lock) {
tonyp@2454 95 assert_heap_locked();
tonyp@2454 96 } else {
tonyp@2454 97 assert_heap_not_locked();
tonyp@2454 98 }
tonyp@2315 99 return NULL;
tonyp@2315 100 }
tonyp@2315 101
tonyp@2315 102 // See the comment in the .hpp file about the locking protocol and
tonyp@2315 103 // assumptions of this method (and other related ones).
tonyp@2315 104 inline HeapWord*
tonyp@2315 105 G1CollectedHeap::attempt_allocation(size_t word_size) {
tonyp@2454 106 assert_heap_not_locked_and_not_at_safepoint();
tonyp@2315 107 assert(!isHumongous(word_size), "attempt_allocation() should not be called "
tonyp@2315 108 "for humongous allocation requests");
tonyp@2315 109
tonyp@2315 110 HeapRegion* cur_alloc_region = _cur_alloc_region;
tonyp@2315 111 if (cur_alloc_region != NULL) {
tonyp@2315 112 HeapWord* result = allocate_from_cur_alloc_region(cur_alloc_region,
tonyp@2454 113 word_size,
tonyp@2454 114 false /* with_heap_lock */);
tonyp@2454 115 assert_heap_not_locked();
tonyp@2315 116 if (result != NULL) {
tonyp@2315 117 return result;
ysr@777 118 }
ysr@777 119 }
tonyp@2315 120
tonyp@2454 121 // Our attempt to allocate lock-free failed as the current
tonyp@2454 122 // allocation region is either NULL or full. So, we'll now take the
tonyp@2454 123 // Heap_lock and retry.
tonyp@2454 124 Heap_lock->lock();
tonyp@2454 125
tonyp@2454 126 HeapWord* result = attempt_allocation_locked(word_size);
tonyp@2315 127 if (result != NULL) {
tonyp@2315 128 assert_heap_not_locked();
tonyp@2315 129 return result;
tonyp@2315 130 }
tonyp@2315 131
tonyp@2315 132 assert_heap_locked();
tonyp@2315 133 return NULL;
tonyp@2315 134 }
tonyp@2315 135
tonyp@2315 136 inline void
tonyp@2315 137 G1CollectedHeap::retire_cur_alloc_region_common(HeapRegion* cur_alloc_region) {
tonyp@2472 138 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
tonyp@2315 139 assert(cur_alloc_region != NULL && cur_alloc_region == _cur_alloc_region,
tonyp@2315 140 "pre-condition of the call");
tonyp@2315 141 assert(cur_alloc_region->is_young(),
tonyp@2315 142 "we only support young current alloc regions");
tonyp@2315 143
tonyp@2315 144 // The region is guaranteed to be young
tonyp@2315 145 g1_policy()->add_region_to_incremental_cset_lhs(cur_alloc_region);
tonyp@2315 146 _summary_bytes_used += cur_alloc_region->used();
tonyp@2315 147 _cur_alloc_region = NULL;
tonyp@2315 148 }
tonyp@2315 149
tonyp@2454 150 inline HeapWord*
tonyp@2454 151 G1CollectedHeap::attempt_allocation_locked(size_t word_size) {
tonyp@2454 152 assert_heap_locked_and_not_at_safepoint();
tonyp@2454 153 assert(!isHumongous(word_size), "attempt_allocation_locked() "
tonyp@2454 154 "should not be called for humongous allocation requests");
tonyp@2454 155
tonyp@2454 156 // First, reread the current alloc region and retry the allocation
tonyp@2454 157 // in case somebody replaced it while we were waiting to get the
tonyp@2454 158 // Heap_lock.
tonyp@2454 159 HeapRegion* cur_alloc_region = _cur_alloc_region;
tonyp@2454 160 if (cur_alloc_region != NULL) {
tonyp@2454 161 HeapWord* result = allocate_from_cur_alloc_region(
tonyp@2454 162 cur_alloc_region, word_size,
tonyp@2454 163 true /* with_heap_lock */);
tonyp@2454 164 if (result != NULL) {
tonyp@2454 165 assert_heap_not_locked();
tonyp@2454 166 return result;
tonyp@2454 167 }
tonyp@2454 168
tonyp@2454 169 // We failed to allocate out of the current alloc region, so let's
tonyp@2454 170 // retire it before getting a new one.
tonyp@2454 171 retire_cur_alloc_region(cur_alloc_region);
tonyp@2454 172 }
tonyp@2454 173
tonyp@2454 174 assert_heap_locked();
tonyp@2454 175 // Try to get a new region and allocate out of it
tonyp@2454 176 HeapWord* result = replace_cur_alloc_region_and_allocate(word_size,
tonyp@2454 177 false, /* at_safepoint */
tonyp@2454 178 true, /* do_dirtying */
tonyp@2454 179 false /* can_expand */);
tonyp@2454 180 if (result != NULL) {
tonyp@2454 181 assert_heap_not_locked();
tonyp@2454 182 return result;
tonyp@2454 183 }
tonyp@2454 184
tonyp@2454 185 assert_heap_locked();
tonyp@2454 186 return NULL;
tonyp@2454 187 }
tonyp@2454 188
tonyp@2315 189 // It dirties the cards that cover the block so that the post
tonyp@2315 190 // write barrier never queues anything when updating objects on this
tonyp@2315 191 // block. It is assumed (and in fact we assert) that the block
tonyp@2315 192 // belongs to a young region.
tonyp@2315 193 inline void
tonyp@2315 194 G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
tonyp@2315 195 assert_heap_not_locked();
tonyp@2315 196
tonyp@2315 197 // Assign the containing region to containing_hr so that we don't
tonyp@2315 198 // have to keep calling heap_region_containing_raw() in the
tonyp@2315 199 // asserts below.
tonyp@2315 200 DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing_raw(start);)
tonyp@2315 201 assert(containing_hr != NULL && start != NULL && word_size > 0,
tonyp@2315 202 "pre-condition");
tonyp@2315 203 assert(containing_hr->is_in(start), "it should contain start");
tonyp@2315 204 assert(containing_hr->is_young(), "it should be young");
tonyp@2315 205 assert(!containing_hr->isHumongous(), "it should not be humongous");
tonyp@2315 206
tonyp@2315 207 HeapWord* end = start + word_size;
tonyp@2315 208 assert(containing_hr->is_in(end - 1), "it should also contain end - 1");
tonyp@2315 209
tonyp@2315 210 MemRegion mr(start, end);
tonyp@2315 211 ((CardTableModRefBS*)_g1h->barrier_set())->dirty(mr);
ysr@777 212 }
ysr@777 213
jcoomes@2064 214 inline RefToScanQueue* G1CollectedHeap::task_queue(int i) const {
ysr@777 215 return _task_queues->queue(i);
ysr@777 216 }
ysr@777 217
ysr@777 218 inline bool G1CollectedHeap::isMarkedPrev(oop obj) const {
ysr@777 219 return _cm->prevMarkBitMap()->isMarked((HeapWord *)obj);
ysr@777 220 }
ysr@777 221
ysr@777 222 inline bool G1CollectedHeap::isMarkedNext(oop obj) const {
ysr@777 223 return _cm->nextMarkBitMap()->isMarked((HeapWord *)obj);
ysr@777 224 }
stefank@2314 225
stefank@2314 226 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP
