Tue, 16 Sep 2014 14:27:40 +0200
8057768: Make heap region region type in G1 HeapRegion explicit
Reviewed-by: brutisso, tschatzl
/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP

#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/g1AllocRegion.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "utilities/taskqueue.hpp"
// Inline functions for G1CollectedHeap

inline AllocationContextStats& G1CollectedHeap::allocation_context_stats() {
  return _allocation_context_stats;
}
// Return the region with the given index. It assumes the index is valid.
inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm.at(index); }

inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const {
  assert(is_in_reserved(addr),
         err_msg("Cannot calculate region index for address "PTR_FORMAT" that is outside of the heap ["PTR_FORMAT", "PTR_FORMAT")",
                 p2i(addr), p2i(_reserved.start()), p2i(_reserved.end())));
  return (uint)(pointer_delta(addr, _reserved.start(), sizeof(uint8_t)) >> HeapRegion::LogOfHRGrainBytes);
}

inline HeapWord* G1CollectedHeap::bottom_addr_for_region(uint index) const {
  return _hrm.reserved().start() + index * HeapRegion::GrainWords;
}
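
// Illustrative sketch, not part of the upstream file: addr_to_region() and
// bottom_addr_for_region() are inverses over region-aligned addresses, since
// regions form a contiguous power-of-two grid over the reserved heap. The
// helper below is hypothetical (and assumes both accessors are reachable
// from here); it exists only to make that invariant concrete.
inline bool example_region_index_round_trip(const G1CollectedHeap* g1h, uint index) {
  HeapWord* bottom = g1h->bottom_addr_for_region(index);
  // bottom is region-aligned, so shifting its byte offset from the heap base
  // by LogOfHRGrainBytes recovers exactly the index we started from.
  return g1h->addr_to_region(bottom) == index;
}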
template <class T>
inline HeapRegion* G1CollectedHeap::heap_region_containing_raw(const T addr) const {
  assert(addr != NULL, "invariant");
  assert(is_in_g1_reserved((const void*) addr),
         err_msg("Address "PTR_FORMAT" is outside of the heap ranging from ["PTR_FORMAT" to "PTR_FORMAT")",
                 p2i((void*)addr), p2i(g1_reserved().start()), p2i(g1_reserved().end())));
  return _hrm.addr_to_region((HeapWord*) addr);
}

template <class T>
inline HeapRegion* G1CollectedHeap::heap_region_containing(const T addr) const {
  HeapRegion* hr = heap_region_containing_raw(addr);
  if (hr->continuesHumongous()) {
    return hr->humongous_start_region();
  }
  return hr;
}
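
// Illustrative usage sketch (hypothetical helper, not in the upstream file):
// heap_region_containing_raw() returns the region that physically covers an
// address, which inside the tail of a humongous object is a "continues
// humongous" region with no object start of its own. Callers that want the
// region owning the object's header should go through the non-raw variant:
inline HeapRegion* example_start_region_for(G1CollectedHeap* g1h, oop obj) {
  // Resolves tail regions of humongous objects to their start region.
  return g1h->heap_region_containing((HeapWord*) obj);
}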
inline void G1CollectedHeap::reset_gc_time_stamp() {
  _gc_time_stamp = 0;
  OrderAccess::fence();
  // Clear the cached CSet starting regions and time stamps.
  // Their validity is dependent on the GC timestamp.
  clear_cset_start_regions();
}

inline void G1CollectedHeap::increment_gc_time_stamp() {
  ++_gc_time_stamp;
  OrderAccess::fence();
}

inline void G1CollectedHeap::old_set_remove(HeapRegion* hr) {
  _old_set.remove(hr);
}

inline bool G1CollectedHeap::obj_in_cs(oop obj) {
  HeapRegion* r = _hrm.addr_to_region((HeapWord*) obj);
  return r != NULL && r->in_collection_set();
}
inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
                                                     unsigned int* gc_count_before_ret,
                                                     int* gclocker_retry_count_ret) {
  assert_heap_not_locked_and_not_at_safepoint();
  assert(!isHumongous(word_size), "attempt_allocation() should not "
         "be called for humongous allocation requests");

  AllocationContext_t context = AllocationContext::current();
  HeapWord* result = _allocator->mutator_alloc_region(context)->attempt_allocation(word_size,
                                                                                   false /* bot_updates */);
  if (result == NULL) {
    result = attempt_allocation_slow(word_size,
                                     context,
                                     gc_count_before_ret,
                                     gclocker_retry_count_ret);
  }
  assert_heap_not_locked();
  if (result != NULL) {
    dirty_young_block(result, word_size);
  }
  return result;
}
inline HeapWord* G1CollectedHeap::survivor_attempt_allocation(size_t word_size,
                                                              AllocationContext_t context) {
  assert(!isHumongous(word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = _allocator->survivor_gc_alloc_region(context)->attempt_allocation(word_size,
                                                                                       false /* bot_updates */);
  if (result == NULL) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = _allocator->survivor_gc_alloc_region(context)->attempt_allocation_locked(word_size,
                                                                                      false /* bot_updates */);
  }
  if (result != NULL) {
    dirty_young_block(result, word_size);
  }
  return result;
}

inline HeapWord* G1CollectedHeap::old_attempt_allocation(size_t word_size,
                                                         AllocationContext_t context) {
  assert(!isHumongous(word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = _allocator->old_gc_alloc_region(context)->attempt_allocation(word_size,
                                                                                  true /* bot_updates */);
  if (result == NULL) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = _allocator->old_gc_alloc_region(context)->attempt_allocation_locked(word_size,
                                                                                 true /* bot_updates */);
  }
  return result;
}
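
// Illustrative sketch, not part of the upstream file: the three allocation
// helpers above share one shape: a lock-free attempt in the current
// allocation region first, then a retry under FreeList_lock that may also
// install a fresh region. The class below is a hypothetical, standalone
// model of the fast path only; it uses broadly the same CAS-bump technique
// as the real alloc regions (Atomic::cmpxchg_ptr is this era's HotSpot CAS).
class ExampleBumpAllocator {
  HeapWord* volatile _top; // Next free word in the current region.
  HeapWord* _end;          // First word past the current region.
 public:
  HeapWord* attempt_allocation(size_t word_size) {
    while (true) {
      HeapWord* old_top = _top;
      HeapWord* new_top = old_top + word_size;
      if (new_top > _end) {
        return NULL; // Exhausted: caller falls back to the locked slow path.
      }
      // Publish the new top; if another thread won the race, retry.
      HeapWord* result = (HeapWord*) Atomic::cmpxchg_ptr(new_top, &_top, old_top);
      if (result == old_top) {
        return old_top;
      }
    }
  }
};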
// It dirties the cards that cover the block so that the post
// write barrier never queues anything when updating objects on this
// block. It is assumed (and in fact we assert) that the block
// belongs to a young region.
inline void
G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
  assert_heap_not_locked();

  // Assign the containing region to containing_hr so that we don't
  // have to keep calling heap_region_containing_raw() in the
  // asserts below.
  DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing_raw(start);)
  assert(word_size > 0, "pre-condition");
  assert(containing_hr->is_in(start), "it should contain start");
  assert(containing_hr->is_young(), "it should be young");
  assert(!containing_hr->isHumongous(), "it should not be humongous");

  HeapWord* end = start + word_size;
  assert(containing_hr->is_in(end - 1), "it should also contain end - 1");

  MemRegion mr(start, end);
  g1_barrier_set()->g1_mark_as_young(mr);
}
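
// Illustrative sketch, not part of the upstream file: g1_mark_as_young()
// ultimately tags every card spanned by the block so the post write barrier
// can filter stores into it with a single card-value check. A hypothetical,
// simplified version of that card loop (the real logic lives in
// g1SATBCardTableModRefBS):
inline void example_mark_cards_young(jbyte* first_card, jbyte* last_card,
                                     jbyte young_card_val) {
  // Every card covering [start, end) gets the "young" value, so the
  // barrier's card-equals-young test skips remembered set logging for them.
  for (jbyte* cur = first_card; cur <= last_card; cur++) {
    *cur = young_card_val;
  }
}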
inline RefToScanQueue* G1CollectedHeap::task_queue(int i) const {
  return _task_queues->queue(i);
}

inline bool G1CollectedHeap::isMarkedPrev(oop obj) const {
  return _cm->prevMarkBitMap()->isMarked((HeapWord*) obj);
}

inline bool G1CollectedHeap::isMarkedNext(oop obj) const {
  return _cm->nextMarkBitMap()->isMarked((HeapWord*) obj);
}
// This is a fast test on whether a reference points into the
// collection set or not. It assumes that the reference
// points into the heap.
inline bool G1CollectedHeap::is_in_cset(oop obj) {
  bool ret = _in_cset_fast_test.is_in_cset((HeapWord*)obj);
  // let's make sure the result is consistent with what the slower
  // test returns
  assert( ret || !obj_in_cs(obj), "sanity");
  assert(!ret || obj_in_cs(obj), "sanity");
  return ret;
}

bool G1CollectedHeap::is_in_cset_or_humongous(const oop obj) {
  return _in_cset_fast_test.is_in_cset_or_humongous((HeapWord*)obj);
}

G1CollectedHeap::in_cset_state_t G1CollectedHeap::in_cset_state(const oop obj) {
  return _in_cset_fast_test.at((HeapWord*)obj);
}

void G1CollectedHeap::register_humongous_region_with_in_cset_fast_test(uint index) {
  _in_cset_fast_test.set_humongous(index);
}
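
// Illustrative sketch, not part of the upstream file: _in_cset_fast_test is
// conceptually a per-region state array indexed by the same shift used in
// addr_to_region(), so the hot is_in_cset() check is a single load instead
// of a HeapRegion lookup. A hypothetical, minimal shape of such a table:
class ExampleInCSetStateTable {
  enum State { NotInCSet = 0, Humongous = 1, InCSet = 2 };
  char*     _states;       // One entry per region.
  HeapWord* _heap_base;    // Bottom of the reserved heap.
  int       _region_shift; // log2 of the region size in bytes.
 public:
  bool is_in_cset(HeapWord* addr) const {
    size_t index = (size_t) ((char*) addr - (char*) _heap_base) >> _region_shift;
    return _states[index] == InCSet; // One load, no HeapRegion dereference.
  }
};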
#ifndef PRODUCT
// Support for G1EvacuationFailureALot

inline bool
G1CollectedHeap::evacuation_failure_alot_for_gc_type(bool gcs_are_young,
                                                     bool during_initial_mark,
                                                     bool during_marking) {
  bool res = false;
  if (during_marking) {
    res |= G1EvacuationFailureALotDuringConcMark;
  }
  if (during_initial_mark) {
    res |= G1EvacuationFailureALotDuringInitialMark;
  }
  if (gcs_are_young) {
    res |= G1EvacuationFailureALotDuringYoungGC;
  } else {
    // GCs are mixed
    res |= G1EvacuationFailureALotDuringMixedGC;
  }
  return res;
}
inline void
G1CollectedHeap::set_evacuation_failure_alot_for_current_gc() {
  if (G1EvacuationFailureALot) {
    // Note we can't assert that _evacuation_failure_alot_for_current_gc
    // is clear here. It may have been set during a previous GC but that GC
    // did not copy enough objects (i.e. G1EvacuationFailureALotCount) to
    // trigger an evacuation failure and clear the flags and counts.

    // Check if we have gone over the interval.
    const size_t gc_num = total_collections();
    const size_t elapsed_gcs = gc_num - _evacuation_failure_alot_gc_number;

    _evacuation_failure_alot_for_current_gc = (elapsed_gcs >= G1EvacuationFailureALotInterval);

    // Now check if G1EvacuationFailureALot is enabled for the current GC type.
    const bool gcs_are_young = g1_policy()->gcs_are_young();
    const bool during_im = g1_policy()->during_initial_mark_pause();
    const bool during_marking = mark_in_progress();

    _evacuation_failure_alot_for_current_gc &=
      evacuation_failure_alot_for_gc_type(gcs_are_young,
                                          during_im,
                                          during_marking);
  }
}
inline bool G1CollectedHeap::evacuation_should_fail() {
  if (!G1EvacuationFailureALot || !_evacuation_failure_alot_for_current_gc) {
    return false;
  }
  // G1EvacuationFailureALot is in effect for the current GC.
  // Access to _evacuation_failure_alot_count is not atomic;
  // the value does not have to be exact.
  if (++_evacuation_failure_alot_count < G1EvacuationFailureALotCount) {
    return false;
  }
  _evacuation_failure_alot_count = 0;
  return true;
}

inline void G1CollectedHeap::reset_evacuation_should_fail() {
  if (G1EvacuationFailureALot) {
    _evacuation_failure_alot_gc_number = total_collections();
    _evacuation_failure_alot_count = 0;
    _evacuation_failure_alot_for_current_gc = false;
  }
}
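
// Illustrative sketch, not part of the upstream file: the machinery above is
// a fault-injection pattern. It arms once G1EvacuationFailureALotInterval
// GCs have elapsed (and the GC type matches), then fires on every
// G1EvacuationFailureALotCount-th copy attempt while armed. A hypothetical,
// generic version of the same two counters:
struct ExampleFaultInjector {
  size_t _gc_interval;     // Arm after this many GCs have elapsed.
  size_t _count_threshold; // Fire on every this-many-th operation when armed.
  size_t _last_reset_gc;   // GC number recorded at the last reset.
  size_t _op_count;        // Operations seen since the last injected failure.
  bool   _armed;

  void maybe_arm(size_t current_gc) {
    _armed = (current_gc - _last_reset_gc) >= _gc_interval;
  }
  bool should_fail() {
    if (!_armed) return false;
    if (++_op_count < _count_threshold) return false;
    _op_count = 0; // Fire, then restart the per-operation counter.
    return true;
  }
};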
#endif // #ifndef PRODUCT

inline bool G1CollectedHeap::is_in_young(const oop obj) {
  if (obj == NULL) {
    return false;
  }
  return heap_region_containing(obj)->is_young();
}
// We don't need barriers for initializing stores to objects
// in the young gen: for the SATB pre-barrier, there is no
// pre-value that needs to be remembered; for the remembered-set
// update logging post-barrier, we don't maintain remembered set
// information for young gen objects.
inline bool G1CollectedHeap::can_elide_initializing_store_barrier(oop new_obj) {
  return is_in_young(new_obj);
}
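
// Illustrative sketch, not part of the upstream file: how a runtime store
// path might consult the predicate above. The helper is hypothetical; the
// real decision is made by the compilers and the barrier set interface.
inline void example_initializing_store(G1CollectedHeap* g1h,
                                       oop new_obj, oop* field, oop value) {
  if (g1h->can_elide_initializing_store_barrier(new_obj)) {
    *field = value; // Young-gen initializing store: no barrier work needed.
  } else {
    *field = value; // Otherwise the card-marking post-barrier would follow.
  }
}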
inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
  if (obj == NULL) {
    return false;
  }
  return is_obj_dead(obj, heap_region_containing(obj));
}

inline bool G1CollectedHeap::is_obj_ill(const oop obj) const {
  if (obj == NULL) {
    return false;
  }
  return is_obj_ill(obj, heap_region_containing(obj));
}
inline void G1CollectedHeap::set_humongous_is_live(oop obj) {
  uint region = addr_to_region((HeapWord*)obj);
  // We not only set the "live" flag in the humongous_is_live table, but also
  // reset the entry in the _in_cset_fast_test table so that subsequent references
  // to the same humongous object do not go into the slow path again.
  // This is racy, as multiple threads may enter here at the same time, but the
  // race is benign.
  // During collection we only ever set the "live" flag, and only ever clear the
  // entry in the _in_cset_fast_test table.
  // We only ever evaluate the contents of these tables (in the VM thread) after
  // having synchronized the worker threads with the VM thread, or in the same
  // thread (i.e. within the VM thread).
  if (!_humongous_is_live.is_live(region)) {
    _humongous_is_live.set_live(region);
    _in_cset_fast_test.clear_humongous(region);
  }
}
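
// Illustrative sketch, not part of the upstream file: the race above is
// benign because both tables move in one direction only during a collection
// (live flags are only set, fast-test entries only cleared), and readers
// synchronize with the workers before evaluating them. A hypothetical,
// minimal shape of that monotonic-flag pattern:
struct ExampleMonotonicFlags {
  volatile bool* _live; // One flag per region; only ever flips false -> true.

  void mark_live(uint region) {
    if (!_live[region]) {   // Unsynchronized check: threads may race here...
      _live[region] = true; // ...but every racer writes the same value.
    }
  }
};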
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP