Tue, 01 Mar 2011 14:56:48 -0800
6627983: G1: Bad oop dereference during marking
Summary: Bulk zeroing reduction didn't work with G1 because arraycopy would call pre-barriers on uninitialized oops. The fix is to provide versions of the arraycopy stubs that do not perform pre-barriers. Also refactored arraycopy stub generation on SPARC to be more readable, and reduced the number of stubs needed in some cases.
Reviewed-by: jrose, kvn, never
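
For context, a minimal sketch of the idea behind the new stubs (all names below are hypothetical stand-ins, not the actual HotSpot stub code): G1's SATB pre-barrier reads and enqueues the value a store is about to overwrite, which is only safe when that value is a valid oop or NULL. When bulk zeroing of a freshly allocated destination array is elided, its slots hold garbage, so the copy must skip the pre-barrier and apply only the post-barrier.

#include <cstddef>

typedef void* oop;  // stand-in for HotSpot's oop type

// SATB pre-barrier: records the value about to be overwritten so the
// concurrent marker can still trace it. Only safe if old_value is a
// valid oop or NULL.
static void g1_pre_barrier(oop old_value) {
  (void)old_value;  // real code would enqueue old_value on the SATB queue
}

// Post-barrier: records the updated location for remembered-set refinement.
static void g1_post_barrier(oop* slot) {
  (void)slot;       // real code would dirty the card covering slot
}

// Regular oop arraycopy: destination slots already hold valid oops or
// NULL, so applying the pre-barrier to the old values is safe.
void copy_oops(oop* src, oop* dst, size_t count) {
  for (size_t i = 0; i < count; i++) {
    g1_pre_barrier(dst[i]);
    dst[i] = src[i];
    g1_post_barrier(&dst[i]);
  }
}

// Uninitialized variant (what the new stubs provide): the destination
// was just allocated and, with bulk zeroing elided, its slots contain
// garbage. Feeding that garbage to the pre-barrier would hand concurrent
// marking a bad oop, so the pre-barrier is skipped entirely.
void copy_oops_uninit(oop* src, oop* dst, size_t count) {
  for (size_t i = 0; i < count; i++) {
    dst[i] = src[i];
    g1_post_barrier(&dst[i]);
  }
}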

/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP

#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "utilities/taskqueue.hpp"

// Inline functions for G1CollectedHeap

inline HeapRegion*
G1CollectedHeap::heap_region_containing(const void* addr) const {
  HeapRegion* hr = _hrs->addr_to_region(addr);
  // hr can be NULL if addr is in the perm gen
  if (hr != NULL && hr->continuesHumongous()) {
    hr = hr->humongous_start_region();
  }
  return hr;
}
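
// "Raw" variant of the above: it assumes addr is within the reserved
// G1 heap (asserted below) and computes the region index directly from
// the address, without the perm gen check or humongous adjustment.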
inline HeapRegion*
G1CollectedHeap::heap_region_containing_raw(const void* addr) const {
  assert(_g1_reserved.contains(addr), "invariant");
  size_t index = pointer_delta(addr, _g1_reserved.start(), 1)
                                        >> HeapRegion::LogOfHRGrainBytes;

  HeapRegion* res = _hrs->at(index);
  assert(res == _hrs->addr_to_region(addr), "sanity");
  return res;
}
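
// Returns true iff obj is in a region that is currently in the
// collection set.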
inline bool G1CollectedHeap::obj_in_cs(oop obj) {
  HeapRegion* r = _hrs->addr_to_region(obj);
  return r != NULL && r->in_collection_set();
}

// See the comment in the .hpp file about the locking protocol and
// assumptions of this method (and other related ones).
inline HeapWord*
G1CollectedHeap::allocate_from_cur_alloc_region(HeapRegion* cur_alloc_region,
                                                size_t word_size,
                                                bool with_heap_lock) {
  assert_not_at_safepoint();
  assert(with_heap_lock == Heap_lock->owned_by_self(),
         "with_heap_lock and Heap_lock->owned_by_self() should be a tautology");
  assert(cur_alloc_region != NULL, "pre-condition of the method");
  assert(cur_alloc_region->is_young(),
         "we only support young current alloc regions");
  assert(!isHumongous(word_size), "allocate_from_cur_alloc_region() "
         "should not be used for humongous allocations");
  assert(!cur_alloc_region->isHumongous(), "Catch a regression of this bug.");

  assert(!cur_alloc_region->is_empty(),
         err_msg("region ["PTR_FORMAT","PTR_FORMAT"] should not be empty",
                 cur_alloc_region->bottom(), cur_alloc_region->end()));
  HeapWord* result = cur_alloc_region->par_allocate_no_bot_updates(word_size);
  if (result != NULL) {
    assert(is_in(result), "result should be in the heap");

    if (with_heap_lock) {
      Heap_lock->unlock();
    }
    assert_heap_not_locked();
    // Do the dirtying after we release the Heap_lock.
    dirty_young_block(result, word_size);
    return result;
  }

  if (with_heap_lock) {
    assert_heap_locked();
  } else {
    assert_heap_not_locked();
  }
  return NULL;
}
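
// Fast-path allocation: first try lock-free out of the current alloc
// region, then fall back to the locked slow path.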
// See the comment in the .hpp file about the locking protocol and
// assumptions of this method (and other related ones).
inline HeapWord*
G1CollectedHeap::attempt_allocation(size_t word_size) {
  assert_heap_not_locked_and_not_at_safepoint();
  assert(!isHumongous(word_size), "attempt_allocation() should not be called "
         "for humongous allocation requests");

  HeapRegion* cur_alloc_region = _cur_alloc_region;
  if (cur_alloc_region != NULL) {
    HeapWord* result = allocate_from_cur_alloc_region(cur_alloc_region,
                                                      word_size,
                                                      false /* with_heap_lock */);
    assert_heap_not_locked();
    if (result != NULL) {
      return result;
    }
  }

  // Our attempt to allocate lock-free failed as the current
  // allocation region is either NULL or full. So, we'll now take the
  // Heap_lock and retry.
  Heap_lock->lock();

  HeapWord* result = attempt_allocation_locked(word_size);
  if (result != NULL) {
    assert_heap_not_locked();
    return result;
  }

  assert_heap_locked();
  return NULL;
}
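
// Retires the current alloc region: adds it to the left-hand side of
// the incremental collection set, accounts for its used bytes, and
// clears _cur_alloc_region.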
inline void
G1CollectedHeap::retire_cur_alloc_region_common(HeapRegion* cur_alloc_region) {
  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
  assert(cur_alloc_region != NULL && cur_alloc_region == _cur_alloc_region,
         "pre-condition of the call");
  assert(cur_alloc_region->is_young(),
         "we only support young current alloc regions");

  // The region is guaranteed to be young
  g1_policy()->add_region_to_incremental_cset_lhs(cur_alloc_region);
  _summary_bytes_used += cur_alloc_region->used();
  _cur_alloc_region = NULL;
}

// See the comment in the .hpp file about the locking protocol and
// assumptions of this method (and other related ones).
inline HeapWord*
G1CollectedHeap::attempt_allocation_locked(size_t word_size) {
  assert_heap_locked_and_not_at_safepoint();
  assert(!isHumongous(word_size), "attempt_allocation_locked() "
         "should not be called for humongous allocation requests");

  // First, reread the current alloc region and retry the allocation
  // in case somebody replaced it while we were waiting to get the
  // Heap_lock.
  HeapRegion* cur_alloc_region = _cur_alloc_region;
  if (cur_alloc_region != NULL) {
    HeapWord* result = allocate_from_cur_alloc_region(
                                                  cur_alloc_region, word_size,
                                                  true /* with_heap_lock */);
    if (result != NULL) {
      assert_heap_not_locked();
      return result;
    }

    // We failed to allocate out of the current alloc region, so let's
    // retire it before getting a new one.
    retire_cur_alloc_region(cur_alloc_region);
  }

  assert_heap_locked();
  // Try to get a new region and allocate out of it
  HeapWord* result = replace_cur_alloc_region_and_allocate(word_size,
                                                           false, /* at_safepoint */
                                                           true,  /* do_dirtying */
                                                           false  /* can_expand */);
  if (result != NULL) {
    assert_heap_not_locked();
    return result;
  }

  assert_heap_locked();
  return NULL;
}

// It dirties the cards that cover the block so that the post
// write barrier never queues anything when updating objects on this
// block. It is assumed (and in fact we assert) that the block
// belongs to a young region.
inline void
G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
  assert_heap_not_locked();

  // Assign the containing region to containing_hr so that we don't
  // have to keep calling heap_region_containing_raw() in the
  // asserts below.
  DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing_raw(start);)
  assert(containing_hr != NULL && start != NULL && word_size > 0,
         "pre-condition");
  assert(containing_hr->is_in(start), "it should contain start");
  assert(containing_hr->is_young(), "it should be young");
  assert(!containing_hr->isHumongous(), "it should not be humongous");

  HeapWord* end = start + word_size;
  assert(containing_hr->is_in(end - 1), "it should also contain end - 1");

  MemRegion mr(start, end);
  ((CardTableModRefBS*)_g1h->barrier_set())->dirty(mr);
}
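
// Returns the reference-to-scan task queue of worker i.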
inline RefToScanQueue* G1CollectedHeap::task_queue(int i) const {
  return _task_queues->queue(i);
}
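
// The following two queries test whether obj is marked in the previous
// and next marking bitmaps, respectively.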
inline bool G1CollectedHeap::isMarkedPrev(oop obj) const {
  return _cm->prevMarkBitMap()->isMarked((HeapWord *)obj);
}

inline bool G1CollectedHeap::isMarkedNext(oop obj) const {
  return _cm->nextMarkBitMap()->isMarked((HeapWord *)obj);
}

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP