Mon, 21 Jul 2014 09:40:19 +0200
8027553: Change the in_cset_fast_test functionality to use the G1BiasedArray abstraction
Summary: Instead of using a manually managed array for the in_cset_fast_test array, use a G1BiasedArray instance.
Reviewed-by: brutisso, mgerdin
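For background, G1BiasedArray (gc_implementation/g1/g1BiasedArray.hpp) replaces the
by-hand base-pointer arithmetic the old in_cset_fast_test array needed: the array
keeps one entry per heap region, and its base pointer is pre-biased by the start of
the reserved heap, so a lookup by address is a single shift and load. A minimal
sketch of the idea (illustrative names, not the actual G1BiasedArray API, except
for get_by_address, which the real class does provide):

    #include <cstdint> // for uintptr_t; HotSpot gets it via globalDefinitions.hpp

    // Sketch: an address-indexed array whose base pointer is pre-biased so
    // that no subtraction of the heap base is needed on the lookup path.
    template <class T>
    class BiasedArraySketch {
      T*     _biased_base; // == _base - ((uintptr_t)reserved_start >> _shift)
      size_t _shift;       // log2 of the number of bytes each entry covers
    public:
      void initialize(T* base, const char* reserved_start, size_t shift) {
        _shift = shift;
        _biased_base = base - ((uintptr_t)reserved_start >> shift);
      }
      T get_by_address(const void* addr) const {
        return _biased_base[(uintptr_t)addr >> _shift];
      }
    };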
/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP

#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/g1AllocRegion.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1RemSet.inline.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "utilities/taskqueue.hpp"

// Inline functions for G1CollectedHeap

// Return the region with the given index. It assumes the index is valid.
inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrs.at(index); }

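// Return the region containing the given address, or NULL if the address is
// not backed by a committed region. Addresses inside a "continues humongous"
// region are mapped back to the corresponding humongous start region.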
template <class T>
inline HeapRegion*
G1CollectedHeap::heap_region_containing(const T addr) const {
  HeapRegion* hr = _hrs.addr_to_region((HeapWord*) addr);
  // hr can be null if addr is not in the heap
  if (hr != NULL && hr->continuesHumongous()) {
    hr = hr->humongous_start_region();
  }
  return hr;
}

template <class T>
inline HeapRegion*
G1CollectedHeap::heap_region_containing_raw(const T addr) const {
  assert(_g1_reserved.contains((const void*) addr), "invariant");
  HeapRegion* res = _hrs.addr_to_region_unsafe((HeapWord*) addr);
  return res;
}

inline void G1CollectedHeap::reset_gc_time_stamp() {
  _gc_time_stamp = 0;
  OrderAccess::fence();
  // Clear the cached CSet starting regions and time stamps.
  // Their validity is dependent on the GC timestamp.
  clear_cset_start_regions();
}

inline void G1CollectedHeap::increment_gc_time_stamp() {
  ++_gc_time_stamp;
  OrderAccess::fence();
}

inline void G1CollectedHeap::old_set_remove(HeapRegion* hr) {
  _old_set.remove(hr);
}

inline bool G1CollectedHeap::obj_in_cs(oop obj) {
  HeapRegion* r = _hrs.addr_to_region((HeapWord*) obj);
  return r != NULL && r->in_collection_set();
}

inline HeapWord*
G1CollectedHeap::attempt_allocation(size_t word_size,
                                    unsigned int* gc_count_before_ret,
                                    int* gclocker_retry_count_ret) {
  assert_heap_not_locked_and_not_at_safepoint();
  assert(!isHumongous(word_size), "attempt_allocation() should not "
         "be called for humongous allocation requests");

  HeapWord* result = _mutator_alloc_region.attempt_allocation(word_size,
                                                              false /* bot_updates */);
  if (result == NULL) {
    result = attempt_allocation_slow(word_size,
                                     gc_count_before_ret,
                                     gclocker_retry_count_ret);
  }
  assert_heap_not_locked();
  if (result != NULL) {
    dirty_young_block(result, word_size);
  }
  return result;
}

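// GC-time allocation into the survivor alloc region. Several GC workers may
// race on the lock-free fast path; on failure we retry under the
// FreeList_lock, which also covers retiring a full region and installing a
// new one.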
inline HeapWord* G1CollectedHeap::survivor_attempt_allocation(size_t word_size) {
  assert(!isHumongous(word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = _survivor_gc_alloc_region.attempt_allocation(word_size,
                                                                  false /* bot_updates */);
  if (result == NULL) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = _survivor_gc_alloc_region.attempt_allocation_locked(word_size,
                                                                 false /* bot_updates */);
  }
  if (result != NULL) {
    dirty_young_block(result, word_size);
  }
  return result;
}

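// Same pattern for the old gc alloc region, but with BOT updates enabled:
// unlike the young regions above, old regions must keep their block offset
// table up to date so that they stay parsable card by card.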
inline HeapWord* G1CollectedHeap::old_attempt_allocation(size_t word_size) {
  assert(!isHumongous(word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = _old_gc_alloc_region.attempt_allocation(word_size,
                                                             true /* bot_updates */);
  if (result == NULL) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = _old_gc_alloc_region.attempt_allocation_locked(word_size,
                                                            true /* bot_updates */);
  }
  return result;
}

// It dirties the cards that cover the block so that the post
// write barrier never queues anything when updating objects on this
// block. It is assumed (and in fact we assert) that the block
// belongs to a young region.
inline void
G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
  assert_heap_not_locked();

  // Assign the containing region to containing_hr so that we don't
  // have to keep calling heap_region_containing_raw() in the
  // asserts below.
  DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing_raw(start);)
  assert(containing_hr != NULL && start != NULL && word_size > 0,
         "pre-condition");
  assert(containing_hr->is_in(start), "it should contain start");
  assert(containing_hr->is_young(), "it should be young");
  assert(!containing_hr->isHumongous(), "it should not be humongous");

  HeapWord* end = start + word_size;
  assert(containing_hr->is_in(end - 1), "it should also contain end - 1");

  MemRegion mr(start, end);
  g1_barrier_set()->g1_mark_as_young(mr);
}

inline RefToScanQueue* G1CollectedHeap::task_queue(int i) const {
  return _task_queues->queue(i);
}

inline bool G1CollectedHeap::isMarkedPrev(oop obj) const {
  return _cm->prevMarkBitMap()->isMarked((HeapWord *)obj);
}

inline bool G1CollectedHeap::isMarkedNext(oop obj) const {
  return _cm->nextMarkBitMap()->isMarked((HeapWord *)obj);
}

// This is a fast test on whether a reference points into the
// collection set or not. Assume that the reference
// points into the heap.
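// The backing _in_cset_fast_test G1BiasedArray keeps one entry per heap
// region, so the lookup below is just a shift and a load.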
inline bool G1CollectedHeap::in_cset_fast_test(oop obj) {
  bool ret = _in_cset_fast_test.get_by_address((HeapWord*)obj);
  // let's make sure the result is consistent with what the slower
  // test returns
  assert(ret || !obj_in_cs(obj), "sanity");
  assert(!ret || obj_in_cs(obj), "sanity");
  return ret;
}

#ifndef PRODUCT
// Support for G1EvacuationFailureALot
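// These develop-time flags inject artificial evacuation failures so the
// failure-handling paths can be exercised: roughly every
// G1EvacuationFailureALotInterval collections (of an enabled GC type), every
// G1EvacuationFailureALotCount-th copy attempt is forced to fail.
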
inline bool
G1CollectedHeap::evacuation_failure_alot_for_gc_type(bool gcs_are_young,
                                                     bool during_initial_mark,
                                                     bool during_marking) {
  bool res = false;
  if (during_marking) {
    res |= G1EvacuationFailureALotDuringConcMark;
  }
  if (during_initial_mark) {
    res |= G1EvacuationFailureALotDuringInitialMark;
  }
  if (gcs_are_young) {
    res |= G1EvacuationFailureALotDuringYoungGC;
  } else {
    // GCs are mixed
    res |= G1EvacuationFailureALotDuringMixedGC;
  }
  return res;
}

inline void
G1CollectedHeap::set_evacuation_failure_alot_for_current_gc() {
  if (G1EvacuationFailureALot) {
    // Note we can't assert that _evacuation_failure_alot_for_current_gc
    // is clear here. It may have been set during a previous GC but that GC
    // did not copy enough objects (i.e. G1EvacuationFailureALotCount) to
    // trigger an evacuation failure and clear the flags and counts.

    // Check if we have gone over the interval.
    const size_t gc_num = total_collections();
    const size_t elapsed_gcs = gc_num - _evacuation_failure_alot_gc_number;

    _evacuation_failure_alot_for_current_gc = (elapsed_gcs >= G1EvacuationFailureALotInterval);

    // Now check if G1EvacuationFailureALot is enabled for the current GC type.
    const bool gcs_are_young = g1_policy()->gcs_are_young();
    const bool during_im = g1_policy()->during_initial_mark_pause();
    const bool during_marking = mark_in_progress();

    _evacuation_failure_alot_for_current_gc &=
      evacuation_failure_alot_for_gc_type(gcs_are_young,
                                          during_im,
                                          during_marking);
  }
}

inline bool
G1CollectedHeap::evacuation_should_fail() {
  if (!G1EvacuationFailureALot || !_evacuation_failure_alot_for_current_gc) {
    return false;
  }
  // G1EvacuationFailureALot is in effect for the current GC.
  // Access to _evacuation_failure_alot_count is not atomic;
  // the value does not have to be exact.
  if (++_evacuation_failure_alot_count < G1EvacuationFailureALotCount) {
    return false;
  }
  _evacuation_failure_alot_count = 0;
  return true;
}

inline void G1CollectedHeap::reset_evacuation_should_fail() {
  if (G1EvacuationFailureALot) {
    _evacuation_failure_alot_gc_number = total_collections();
    _evacuation_failure_alot_count = 0;
    _evacuation_failure_alot_for_current_gc = false;
  }
}
#endif // #ifndef PRODUCT

inline bool G1CollectedHeap::is_in_young(const oop obj) {
  HeapRegion* hr = heap_region_containing(obj);
  return hr != NULL && hr->is_young();
}

// We don't need barriers for initializing stores to objects
// in the young gen: for the SATB pre-barrier, there is no
// pre-value that needs to be remembered; for the remembered-set
// update logging post-barrier, we don't maintain remembered set
// information for young gen objects.
inline bool G1CollectedHeap::can_elide_initializing_store_barrier(oop new_obj) {
  return is_in_young(new_obj);
}

inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
  const HeapRegion* hr = heap_region_containing(obj);
  if (hr == NULL) {
    if (obj == NULL) return false;
    else return true;
  }
  else return is_obj_dead(obj, hr);
}

inline bool G1CollectedHeap::is_obj_ill(const oop obj) const {
  const HeapRegion* hr = heap_region_containing(obj);
  if (hr == NULL) {
    if (obj == NULL) return false;
    else return true;
  }
  else return is_obj_ill(obj, hr);
}

template <class T> inline void G1ParScanThreadState::immediate_rs_update(HeapRegion* from, T* p, int tid) {
  if (!from->is_survivor()) {
    _g1_rem->par_write_ref(from, p, tid);
  }
}

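// With G1DeferredRSUpdate (the default), cards covering references that may
// need remembered set updates are enqueued on a dirty card queue and
// processed after evacuation, instead of updating the remembered set
// immediately via par_write_ref().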
template <class T> void G1ParScanThreadState::update_rs(HeapRegion* from, T* p, int tid) {
  if (G1DeferredRSUpdate) {
    deferred_rs_update(from, p, tid);
  } else {
    immediate_rs_update(from, p, tid);
  }
}

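// Large object arrays are scanned in chunks. A "partial array" task is the
// from-space oop* with a tag bit set (see has_partial_array_mask()); the
// to-space copy's length field temporarily records the next chunk's start
// index until the final chunk restores the real length.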
inline void G1ParScanThreadState::do_oop_partial_array(oop* p) {
  assert(has_partial_array_mask(p), "invariant");
  oop from_obj = clear_partial_array_mask(p);

  assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
  assert(from_obj->is_objArray(), "must be obj array");
  objArrayOop from_obj_array = objArrayOop(from_obj);
  // The from-space object contains the real length.
  int length = from_obj_array->length();

  assert(from_obj->is_forwarded(), "must be forwarded");
  oop to_obj = from_obj->forwardee();
  assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
  objArrayOop to_obj_array = objArrayOop(to_obj);
  // We keep track of the next start index in the length field of the
  // to-space object.
  int next_index = to_obj_array->length();
  assert(0 <= next_index && next_index < length,
         err_msg("invariant, next index: %d, length: %d", next_index, length));

  int start = next_index;
  int end = length;
  int remainder = end - start;
  // We'll try not to push a range that's smaller than ParGCArrayScanChunk.
  if (remainder > 2 * ParGCArrayScanChunk) {
    end = start + ParGCArrayScanChunk;
    to_obj_array->set_length(end);
    // Push the remainder before we process the range in case another
    // worker has run out of things to do and can steal it.
    oop* from_obj_p = set_partial_array_mask(from_obj);
    push_on_queue(from_obj_p);
  } else {
    assert(length == end, "sanity");
    // We'll process the final range for this object. Restore the length
    // so that the heap remains parsable in case of evacuation failure.
    to_obj_array->set_length(end);
  }
  _scanner.set_region(_g1h->heap_region_containing_raw(to_obj));
  // Process indexes [start,end). It will also process the header
  // along with the first chunk (i.e., the chunk with start == 0).
  // Note that at this point the length field of to_obj_array is not
  // correct given that we are using it to keep track of the next
  // start index. oop_iterate_range() (thankfully!) ignores the length
  // field and only relies on the start / end parameters. It does
  // however return the size of the object which will be incorrect. So
  // we have to ignore it even if we wanted to use it.
  to_obj_array->oop_iterate_range(&_scanner, start, end);
}

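// Example: with the default ParGCArrayScanChunk of 50, a 10000-element array
// is scanned as follows: each pop of the partial-array task re-pushes the
// remainder (so other workers can steal it) and scans one 50-element chunk;
// the pop that finds fewer than 100 elements remaining scans the rest and
// restores the true length.
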
template <class T> inline void G1ParScanThreadState::deal_with_reference(T* ref_to_scan) {
  if (!has_partial_array_mask(ref_to_scan)) {
    // Note: we can use "raw" versions of "region_containing" because
    // "ref_to_scan" is definitely in the heap, and is not in a
    // humongous region.
    HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
    do_oop_evac(ref_to_scan, r);
  } else {
    do_oop_partial_array((oop*)ref_to_scan);
  }
}

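// StarTask (see utilities/taskqueue.hpp) packs either an oop* or a
// narrowOop* into one word, tagging narrow oop locations so that
// is_narrow() can recover the pointer type here.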
inline void G1ParScanThreadState::deal_with_reference(StarTask ref) {
  assert(verify_task(ref), "sanity");
  if (ref.is_narrow()) {
    deal_with_reference((narrowOop*)ref);
  } else {
    deal_with_reference((oop*)ref);
  }
}

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP