Tue, 09 Sep 2014 00:05:25 +0200
8057658: Enable G1 FullGC extensions
Summary: Refactored the G1 FullGC code to enable it to be extended.
Reviewed-by: mgerdin, brutisso
/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP

#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/g1AllocRegion.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "utilities/taskqueue.hpp"

// Inline functions for G1CollectedHeap

// Return the region with the given index. It assumes the index is valid.
inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm.at(index); }
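
// Calculate the index of the region containing the given address: the
// address's byte offset from the start of the reserved heap, shifted
// right by the log of the region size.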
inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const {
  assert(is_in_reserved(addr),
         err_msg("Cannot calculate region index for address "PTR_FORMAT" that is outside of the heap ["PTR_FORMAT", "PTR_FORMAT")",
                 p2i(addr), p2i(_reserved.start()), p2i(_reserved.end())));
  return (uint)(pointer_delta(addr, _reserved.start(), sizeof(uint8_t)) >> HeapRegion::LogOfHRGrainBytes);
}
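
// Return the bottom address of the region with the given index.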
inline HeapWord* G1CollectedHeap::bottom_addr_for_region(uint index) const {
  return _hrm.reserved().start() + index * HeapRegion::GrainWords;
}
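
// Return the region containing the given address. For an address inside
// a humongous object this may be a "continues humongous" region rather
// than the region the object starts in.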
template <class T>
inline HeapRegion* G1CollectedHeap::heap_region_containing_raw(const T addr) const {
  assert(addr != NULL, "invariant");
  assert(is_in_g1_reserved((const void*) addr),
         err_msg("Address "PTR_FORMAT" is outside of the heap ranging from ["PTR_FORMAT" to "PTR_FORMAT")",
                 p2i((void*)addr), p2i(g1_reserved().start()), p2i(g1_reserved().end())));
  return _hrm.addr_to_region((HeapWord*) addr);
}
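
// As above, but resolve "continues humongous" regions to the
// corresponding humongous start region.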
template <class T>
inline HeapRegion* G1CollectedHeap::heap_region_containing(const T addr) const {
  HeapRegion* hr = heap_region_containing_raw(addr);
  if (hr->continuesHumongous()) {
    return hr->humongous_start_region();
  }
  return hr;
}

inline void G1CollectedHeap::reset_gc_time_stamp() {
  _gc_time_stamp = 0;
  OrderAccess::fence();
  // Clear the cached CSet starting regions and time stamps.
  // Their validity is dependent on the GC timestamp.
  clear_cset_start_regions();
}

inline void G1CollectedHeap::increment_gc_time_stamp() {
  ++_gc_time_stamp;
  OrderAccess::fence();
}

inline void G1CollectedHeap::old_set_remove(HeapRegion* hr) {
  _old_set.remove(hr);
}
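
// Slow-path test on whether obj is in the collection set: look up the
// containing region and query its flag. is_in_cset() below is the fast
// version.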
inline bool G1CollectedHeap::obj_in_cs(oop obj) {
  HeapRegion* r = _hrm.addr_to_region((HeapWord*) obj);
  return r != NULL && r->in_collection_set();
}
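
// Mutator allocation fast path: try to allocate inline from the current
// mutator alloc region; if that fails, fall back to the slow path, which
// may take a lock or trigger a collection.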
inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
                                                     unsigned int* gc_count_before_ret,
                                                     int* gclocker_retry_count_ret) {
  assert_heap_not_locked_and_not_at_safepoint();
  assert(!isHumongous(word_size), "attempt_allocation() should not "
         "be called for humongous allocation requests");

  AllocationContext_t context = AllocationContext::current();
  HeapWord* result = _allocator->mutator_alloc_region(context)->attempt_allocation(word_size,
                                                                                   false /* bot_updates */);
  if (result == NULL) {
    result = attempt_allocation_slow(word_size,
                                     context,
                                     gc_count_before_ret,
                                     gclocker_retry_count_ret);
  }
  assert_heap_not_locked();
  if (result != NULL) {
    dirty_young_block(result, word_size);
  }
  return result;
}
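
// Allocation into a survivor region during evacuation: retry under the
// FreeList_lock if the initial lock-free attempt fails.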
inline HeapWord* G1CollectedHeap::survivor_attempt_allocation(size_t word_size,
                                                              AllocationContext_t context) {
  assert(!isHumongous(word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = _allocator->survivor_gc_alloc_region(context)->attempt_allocation(word_size,
                                                                                       false /* bot_updates */);
  if (result == NULL) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = _allocator->survivor_gc_alloc_region(context)->attempt_allocation_locked(word_size,
                                                                                      false /* bot_updates */);
  }
  if (result != NULL) {
    dirty_young_block(result, word_size);
  }
  return result;
}
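
// Allocation into an old region during evacuation: same locking protocol
// as above, but with BOT updates enabled and no card dirtying.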
inline HeapWord* G1CollectedHeap::old_attempt_allocation(size_t word_size,
                                                         AllocationContext_t context) {
  assert(!isHumongous(word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = _allocator->old_gc_alloc_region(context)->attempt_allocation(word_size,
                                                                                  true /* bot_updates */);
  if (result == NULL) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = _allocator->old_gc_alloc_region(context)->attempt_allocation_locked(word_size,
                                                                                 true /* bot_updates */);
  }
  return result;
}

// It dirties the cards that cover the block so that the post
// write barrier never queues anything when updating objects on this
// block. It is assumed (and in fact we assert) that the block
// belongs to a young region.
inline void
G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
  assert_heap_not_locked();

  // Assign the containing region to containing_hr so that we don't
  // have to keep calling heap_region_containing_raw() in the
  // asserts below.
  DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing_raw(start);)
  assert(word_size > 0, "pre-condition");
  assert(containing_hr->is_in(start), "it should contain start");
  assert(containing_hr->is_young(), "it should be young");
  assert(!containing_hr->isHumongous(), "it should not be humongous");

  HeapWord* end = start + word_size;
  assert(containing_hr->is_in(end - 1), "it should also contain end - 1");

  MemRegion mr(start, end);
  g1_barrier_set()->g1_mark_as_young(mr);
}

inline RefToScanQueue* G1CollectedHeap::task_queue(int i) const {
  return _task_queues->queue(i);
}
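
// Query the marking bitmaps: "prev" refers to the last completed
// marking, "next" to the marking currently in progress.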
inline bool G1CollectedHeap::isMarkedPrev(oop obj) const {
  return _cm->prevMarkBitMap()->isMarked((HeapWord *)obj);
}

inline bool G1CollectedHeap::isMarkedNext(oop obj) const {
  return _cm->nextMarkBitMap()->isMarked((HeapWord *)obj);
}

// This is a fast test on whether a reference points into the
// collection set or not. It assumes that the reference
// points into the heap.
inline bool G1CollectedHeap::is_in_cset(oop obj) {
  bool ret = _in_cset_fast_test.is_in_cset((HeapWord*)obj);
  // let's make sure the result is consistent with what the slower
  // test returns
  assert( ret || !obj_in_cs(obj), "sanity");
  assert(!ret || obj_in_cs(obj), "sanity");
  return ret;
}
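
// Fast test on whether a reference points into the collection set or
// refers to a humongous object.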
bool G1CollectedHeap::is_in_cset_or_humongous(const oop obj) {
  return _in_cset_fast_test.is_in_cset_or_humongous((HeapWord*)obj);
}
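
// Return the in-cset state recorded for the region containing obj.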
G1CollectedHeap::in_cset_state_t G1CollectedHeap::in_cset_state(const oop obj) {
  return _in_cset_fast_test.at((HeapWord*)obj);
}
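
// Mark the region with the given index as humongous in the fast-test table.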
void G1CollectedHeap::register_humongous_region_with_in_cset_fast_test(uint index) {
  _in_cset_fast_test.set_humongous(index);
}

#ifndef PRODUCT
// Support for G1EvacuationFailureALot

inline bool
G1CollectedHeap::evacuation_failure_alot_for_gc_type(bool gcs_are_young,
                                                     bool during_initial_mark,
                                                     bool during_marking) {
  bool res = false;
  if (during_marking) {
    res |= G1EvacuationFailureALotDuringConcMark;
  }
  if (during_initial_mark) {
    res |= G1EvacuationFailureALotDuringInitialMark;
  }
  if (gcs_are_young) {
    res |= G1EvacuationFailureALotDuringYoungGC;
  } else {
    // GCs are mixed
    res |= G1EvacuationFailureALotDuringMixedGC;
  }
  return res;
}

inline void
G1CollectedHeap::set_evacuation_failure_alot_for_current_gc() {
  if (G1EvacuationFailureALot) {
    // Note we can't assert that _evacuation_failure_alot_for_current_gc
    // is clear here. It may have been set during a previous GC but that GC
    // did not copy enough objects (i.e. G1EvacuationFailureALotCount) to
    // trigger an evacuation failure and clear the flags and counts.

    // Check if we have gone over the interval.
    const size_t gc_num = total_collections();
    const size_t elapsed_gcs = gc_num - _evacuation_failure_alot_gc_number;

    _evacuation_failure_alot_for_current_gc = (elapsed_gcs >= G1EvacuationFailureALotInterval);

    // Now check if G1EvacuationFailureALot is enabled for the current GC type.
    const bool gcs_are_young = g1_policy()->gcs_are_young();
    const bool during_im = g1_policy()->during_initial_mark_pause();
    const bool during_marking = mark_in_progress();

    _evacuation_failure_alot_for_current_gc &=
      evacuation_failure_alot_for_gc_type(gcs_are_young,
                                          during_im,
                                          during_marking);
  }
}

inline bool G1CollectedHeap::evacuation_should_fail() {
  if (!G1EvacuationFailureALot || !_evacuation_failure_alot_for_current_gc) {
    return false;
  }
  // G1EvacuationFailureALot is in effect for current GC
  // Access to _evacuation_failure_alot_count is not atomic;
  // the value does not have to be exact.
  if (++_evacuation_failure_alot_count < G1EvacuationFailureALotCount) {
    return false;
  }
  _evacuation_failure_alot_count = 0;
  return true;
}

inline void G1CollectedHeap::reset_evacuation_should_fail() {
  if (G1EvacuationFailureALot) {
    _evacuation_failure_alot_gc_number = total_collections();
    _evacuation_failure_alot_count = 0;
    _evacuation_failure_alot_for_current_gc = false;
  }
}
#endif // #ifndef PRODUCT

inline bool G1CollectedHeap::is_in_young(const oop obj) {
  if (obj == NULL) {
    return false;
  }
  return heap_region_containing(obj)->is_young();
}

// We don't need barriers for initializing stores to objects
// in the young gen: for the SATB pre-barrier, there is no
// pre-value that needs to be remembered; for the remembered-set
// update logging post-barrier, we don't maintain remembered set
// information for young gen objects.
inline bool G1CollectedHeap::can_elide_initializing_store_barrier(oop new_obj) {
  return is_in_young(new_obj);
}
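
// NULL-safe wrappers around the two-argument versions: a NULL reference
// is trivially neither dead nor ill.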
inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
  if (obj == NULL) {
    return false;
  }
  return is_obj_dead(obj, heap_region_containing(obj));
}

inline bool G1CollectedHeap::is_obj_ill(const oop obj) const {
  if (obj == NULL) {
    return false;
  }
  return is_obj_ill(obj, heap_region_containing(obj));
}

inline void G1CollectedHeap::set_humongous_is_live(oop obj) {
  uint region = addr_to_region((HeapWord*)obj);
  // We not only set the "live" flag in the humongous_is_live table, but also
  // reset the entry in the _in_cset_fast_test table so that subsequent references
  // to the same humongous object do not go into the slow path again.
  // This is racy, as multiple threads may at the same time enter here, but this
  // is benign.
  // During collection we only ever set the "live" flag, and only ever clear the
  // entry in the _in_cset_fast_test table.
  // We only ever evaluate the contents of these tables (in the VM thread) after
  // having synchronized the worker threads with the VM thread, or in the same
  // thread (i.e. within the VM thread).
  if (!_humongous_is_live.is_live(region)) {
    _humongous_is_live.set_live(region);
    _in_cset_fast_test.clear_humongous(region);
  }
}

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP