src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp

author:      tonyp
date:        Tue, 24 Aug 2010 17:24:33 -0400
changeset:   2315:631f79e71e90
parent:      2314:f95d63e2154a
child:       2333:016a3628c885
permissions: -rw-r--r--

6974966: G1: unnecessary direct-to-old allocations
Summary: This change revamps the slow allocation path of G1. Improvements include the following:
a) Allocations directly to old regions are now totally banned. G1 now only allows allocations out of young regions (the only exception being humongous regions).
b) The thread that allocates a new region (which is now guaranteed to be young) does not dirty all its cards. Each thread that successfully allocates out of a young region is now responsible for dirtying the cards corresponding to the "block" that just got allocated.
c) allocate_new_tlab() and mem_allocate() are now implemented differently and TLAB allocations are only done by allocate_new_tlab().
d) If a thread schedules an evacuation pause in order to satisfy an allocation request, it will perform the allocation at the end of the safepoint so that the thread that initiated the GC also gets "first pick" of any space made available by the GC.
e) If a thread is unable to allocate a humongous object, it will schedule an evacuation pause in case it reclaims enough regions so that the humongous allocation can be satisfied afterwards.
f) The G1 policy is more careful to set the young list target length to be the survivor number + 1.
g) Lots of code tidying up, removal, and refactoring to make future changes easier.
Reviewed-by: johnc, ysr
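
In outline, the revamped slow path as it appears in this file (a simplified sketch for orientation only, using the helper names defined below; the full slow path, including the evacuation-pause fallback, lives in g1CollectedHeap.cpp):

  attempt_allocation(word_size)                  // caller holds the Heap_lock
    -> allocate_from_cur_alloc_region()          // bump-pointer alloc in the current
                                                 //   young region; on success this
                                                 //   releases the lock and dirties
                                                 //   the allocated block's cards
    -> retire_cur_alloc_region()                 // current region is full
    -> replace_cur_alloc_region_and_allocate()   // install a fresh young region
  // If all of the above fail, the caller still holds the Heap_lock and may
  // schedule an evacuation pause, retrying the allocation at the end of the
  // safepoint (point d above).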

/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP

#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/heapRegionSeq.hpp"
#include "utilities/taskqueue.hpp"

// Inline functions for G1CollectedHeap

inline HeapRegion*
G1CollectedHeap::heap_region_containing(const void* addr) const {
  HeapRegion* hr = _hrs->addr_to_region(addr);
  // hr can be null if addr in perm_gen
  if (hr != NULL && hr->continuesHumongous()) {
    hr = hr->humongous_start_region();
  }
  return hr;
}
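
// Note: for an address inside a "continues humongous" region, the
// method above resolves to the corresponding "starts humongous"
// region, i.e. the region containing the object's header.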

inline HeapRegion*
G1CollectedHeap::heap_region_containing_raw(const void* addr) const {
  assert(_g1_reserved.contains(addr), "invariant");
  size_t index = pointer_delta(addr, _g1_reserved.start(), 1)
                                        >> HeapRegion::LogOfHRGrainBytes;

  HeapRegion* res = _hrs->at(index);
  assert(res == _hrs->addr_to_region(addr), "sanity");
  return res;
}
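
// For example (illustrative numbers, not from this code): with 1 MB
// regions LogOfHRGrainBytes is 20, so an address 5 MB past
// _g1_reserved.start() maps to region index (5 * 1M) >> 20 = 5.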

inline bool G1CollectedHeap::obj_in_cs(oop obj) {
  HeapRegion* r = _hrs->addr_to_region(obj);
  return r != NULL && r->in_collection_set();
}

// See the comment in the .hpp file about the locking protocol and
// assumptions of this method (and other related ones).
inline HeapWord*
G1CollectedHeap::allocate_from_cur_alloc_region(HeapRegion* cur_alloc_region,
                                                size_t word_size) {
  assert_heap_locked_and_not_at_safepoint();
  assert(cur_alloc_region != NULL, "pre-condition of the method");
  assert(cur_alloc_region == _cur_alloc_region, "pre-condition of the method");
  assert(cur_alloc_region->is_young(),
         "we only support young current alloc regions");
  assert(!isHumongous(word_size), "allocate_from_cur_alloc_region() "
         "should not be used for humongous allocations");
  assert(!cur_alloc_region->isHumongous(), "Catch a regression of this bug.");

  assert(!cur_alloc_region->is_empty(),
         err_msg("region ["PTR_FORMAT","PTR_FORMAT"] should not be empty",
                 cur_alloc_region->bottom(), cur_alloc_region->end()));
  // This allocate method does BOT updates and we don't need them in
  // the young generation. This will be fixed in the near future by
  // CR 6994297.
  HeapWord* result = cur_alloc_region->allocate(word_size);
  if (result != NULL) {
    assert(is_in(result), "result should be in the heap");
    Heap_lock->unlock();

    // Do the dirtying after we release the Heap_lock.
    dirty_young_block(result, word_size);
    return result;
  }

  assert_heap_locked();
  return NULL;
}
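
// Note the locking protocol encoded in the asserts above: on success
// allocate_from_cur_alloc_region() returns with the Heap_lock released
// (after dirtying the newly allocated block), while on failure the
// caller still holds the lock and is expected to retire and replace
// the current alloc region.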

// See the comment in the .hpp file about the locking protocol and
// assumptions of this method (and other related ones).
inline HeapWord*
G1CollectedHeap::attempt_allocation(size_t word_size) {
  assert_heap_locked_and_not_at_safepoint();
  assert(!isHumongous(word_size), "attempt_allocation() should not be called "
         "for humongous allocation requests");

  HeapRegion* cur_alloc_region = _cur_alloc_region;
  if (cur_alloc_region != NULL) {
    HeapWord* result = allocate_from_cur_alloc_region(cur_alloc_region,
                                                      word_size);
    if (result != NULL) {
      assert_heap_not_locked();
      return result;
    }

    assert_heap_locked();

    // Since we couldn't successfully allocate into it, retire the
    // current alloc region.
    retire_cur_alloc_region(cur_alloc_region);
  }

  // Try to get a new region and allocate out of it
  HeapWord* result = replace_cur_alloc_region_and_allocate(word_size,
                                                           false, /* at safepoint */
                                                           true /* do_dirtying */);
  if (result != NULL) {
    assert_heap_not_locked();
    return result;
  }

  assert_heap_locked();
  return NULL;
}
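
// If attempt_allocation() fails it returns NULL with the Heap_lock
// still held; the caller falls back to the slow path in the .cpp file,
// which may schedule an evacuation pause and retry the allocation at
// the end of the safepoint (see the CR summary above).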

inline void
G1CollectedHeap::retire_cur_alloc_region_common(HeapRegion* cur_alloc_region) {
  assert_heap_locked_or_at_safepoint();
  assert(cur_alloc_region != NULL && cur_alloc_region == _cur_alloc_region,
         "pre-condition of the call");
  assert(cur_alloc_region->is_young(),
         "we only support young current alloc regions");

  // The region is guaranteed to be young
  g1_policy()->add_region_to_incremental_cset_lhs(cur_alloc_region);
  _summary_bytes_used += cur_alloc_region->used();
  _cur_alloc_region = NULL;
}
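
// Retiring the current alloc region adds it to the left-hand side of
// the incremental collection set (it is always young, so it will be
// evacuated during the next pause) and folds its used bytes into the
// heap's running total.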

// It dirties the cards that cover the block so that the post
// write barrier never queues anything when updating objects on this
// block. It is assumed (and in fact we assert) that the block
// belongs to a young region.
inline void
G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
  assert_heap_not_locked();

  // Assign the containing region to containing_hr so that we don't
  // have to keep calling heap_region_containing_raw() in the
  // asserts below.
  DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing_raw(start);)
  assert(containing_hr != NULL && start != NULL && word_size > 0,
         "pre-condition");
  assert(containing_hr->is_in(start), "it should contain start");
  assert(containing_hr->is_young(), "it should be young");
  assert(!containing_hr->isHumongous(), "it should not be humongous");

  HeapWord* end = start + word_size;
  assert(containing_hr->is_in(end - 1), "it should also contain end - 1");

  MemRegion mr(start, end);
  ((CardTableModRefBS*)_g1h->barrier_set())->dirty(mr);
}
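
// For illustration (assuming the usual 512-byte cards of
// CardTableModRefBS): the dirty(mr) call above marks every 512-byte
// card that the block [start, end) touches, so a later store into the
// block finds its card already dirty and the post write barrier has
// nothing left to enqueue, which is exactly point (b) of the CR
// summary.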

inline RefToScanQueue* G1CollectedHeap::task_queue(int i) const {
  return _task_queues->queue(i);
}

inline bool G1CollectedHeap::isMarkedPrev(oop obj) const {
  return _cm->prevMarkBitMap()->isMarked((HeapWord *)obj);
}

inline bool G1CollectedHeap::isMarkedNext(oop obj) const {
  return _cm->nextMarkBitMap()->isMarked((HeapWord *)obj);
}

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP
