#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/g1AllocRegion.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1RemSet.inline.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "utilities/taskqueue.hpp"

// Inline functions for G1CollectedHeap

// Return the region with the given index. It assumes the index is valid.
inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrs.at(index); }

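// Map an address (an oop, or any pointer into the heap) to the
// HeapRegion that covers it, via the region table in _hrs.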
template <class T>
inline HeapRegion*
G1CollectedHeap::heap_region_containing(const T addr) const {
  HeapRegion* hr = _hrs.addr_to_region((HeapWord*) addr);
  // hr is NULL if addr is outside the committed heap. Objects in a
  // "continues humongous" region are owned by the corresponding
  // humongous start region, so resolve to that.
  if (hr != NULL && hr->continuesHumongous()) {
    hr = hr->humongous_start_region();
  }
  return hr;
}

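// Has obj been marked by the marking cycle that is currently in
// progress, i.e. is its bit set in the "next" marking bitmap?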
inline bool G1CollectedHeap::isMarkedNext(oop obj) const {
  return _cm->nextMarkBitMap()->isMarked((HeapWord *)obj);
}

// This is a fast test on whether a reference points into the
// collection set or not. Assume that the reference
// points into the heap.
inline bool G1CollectedHeap::in_cset_fast_test(oop obj) {
  assert(_in_cset_fast_test != NULL, "sanity");
  assert(_g1_committed.contains((HeapWord*) obj),
         err_msg("Given reference outside of heap, is "PTR_FORMAT, (HeapWord*)obj));
  // no need to subtract the bottom of the heap from obj,
  // _in_cset_fast_test is biased
  uintx index = cast_from_oop<uintx>(obj) >> HeapRegion::LogOfHRGrainBytes;
  bool ret = _in_cset_fast_test[index];
  // let's make sure the result is consistent with what the
  // slower test returns
  assert( ret || !obj_in_cs(obj), "sanity");
  assert(!ret || obj_in_cs(obj), "sanity");
  return ret;
}

#ifndef PRODUCT
// Support for G1EvacuationFailureALot

inline bool
G1CollectedHeap::evacuation_failure_alot_for_gc_type(bool gcs_are_young,
                                                     bool during_initial_mark,
                                                     bool during_marking) {
  bool res = false;
  if (during_marking) {
    res |= G1EvacuationFailureALotDuringConcMark;
  }
  if (during_initial_mark) {
    res |= G1EvacuationFailureALotDuringInitialMark;
  }
  if (gcs_are_young) {
    res |= G1EvacuationFailureALotDuringYoungGC;
  } else {
    // GCs are mixed
    res |= G1EvacuationFailureALotDuringMixedGC;
  }
  return res;
}

// Reset the G1EvacuationFailureALot counters. Should be called at
// the end of an evacuation pause in which an evacuation failure occurred.
inline void G1CollectedHeap::reset_evacuation_should_fail() {
  if (G1EvacuationFailureALot) {
    _evacuation_failure_alot_gc_number = total_collections();
    _evacuation_failure_alot_count = 0;
    _evacuation_failure_alot_for_current_gc = false;
  }
}
#endif // #ifndef PRODUCT

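// Is obj in a region that is currently allocated to the young
// generation? An address with no covering region is never young.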
inline bool G1CollectedHeap::is_in_young(const oop obj) {
  HeapRegion* hr = heap_region_containing(obj);
  return hr != NULL && hr->is_young();
}

// We don't need barriers for initializing stores to objects
// in the young gen: for the SATB pre-barrier, there is no
// pre-value that needs to be remembered; for the remembered-set
// update logging post-barrier, we don't maintain remembered set
// information for young gen objects.
inline bool G1CollectedHeap::can_elide_initializing_store_barrier(oop new_obj) {
  return is_in_young(new_obj);
}

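// Liveness queries. An object whose address maps to no heap region is
// treated as dead unless it is the NULL oop; otherwise the
// region-specific test decides.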
inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
  const HeapRegion* hr = heap_region_containing(obj);
  if (hr == NULL) {
    if (obj == NULL) return false;
    else return true;
  }
  else return is_obj_dead(obj, hr);
}

inline bool G1CollectedHeap::is_obj_ill(const oop obj) const {
  const HeapRegion* hr = heap_region_containing(obj);
  if (hr == NULL) {
    if (obj == NULL) return false;
    else return true;
  }
  else return is_obj_ill(obj, hr);
}

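// No remembered-set entry is needed for a reference that lives in a
// survivor region: survivor regions are young and will be scanned in
// full during the next evacuation pause anyway.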
template <class T> inline void G1ParScanThreadState::immediate_rs_update(HeapRegion* from, T* p, int tid) {
  if (!from->is_survivor()) {
    _g1_rem->par_write_ref(from, p, tid);
  }
}

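// Record a remembered-set update either immediately or via the
// deferred-update buffers, depending on the G1DeferredRSUpdate flag.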
template <class T> void G1ParScanThreadState::update_rs(HeapRegion* from, T* p, int tid) {
  if (G1DeferredRSUpdate) {
    deferred_rs_update(from, p, tid);
  } else {
    immediate_rs_update(from, p, tid);
  }
}

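// Scan one chunk of a partially-scanned object array. Large arrays are
// processed in chunks of ParGCArrayScanChunk elements so that other
// workers can steal the unscanned remainder; the to-space copy's length
// field temporarily records where the next chunk starts.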
inline void G1ParScanThreadState::do_oop_partial_array(oop* p) {
  assert(has_partial_array_mask(p), "invariant");
  oop from_obj = clear_partial_array_mask(p);

  assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
  assert(from_obj->is_objArray(), "must be obj array");
  objArrayOop from_obj_array = objArrayOop(from_obj);
  // The from-space object contains the real length.
  int length = from_obj_array->length();

  assert(from_obj->is_forwarded(), "must be forwarded");
  oop to_obj = from_obj->forwardee();
  assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
  objArrayOop to_obj_array = objArrayOop(to_obj);
  // We keep track of the next start index in the length field of the
  // to-space object.
  int next_index = to_obj_array->length();
  assert(0 <= next_index && next_index < length,
         err_msg("invariant, next index: %d, length: %d", next_index, length));

  int start = next_index;
  int end = length;
  int remainder = end - start;
  // We'll try not to push a range that's smaller than ParGCArrayScanChunk.
  if (remainder > 2 * ParGCArrayScanChunk) {
    end = start + ParGCArrayScanChunk;
    to_obj_array->set_length(end);
    // Push the remainder before we process the range in case another
    // worker has run out of things to do and can steal it.
    oop* from_obj_p = set_partial_array_mask(from_obj);
    push_on_queue(from_obj_p);
  } else {
    assert(length == end, "sanity");
    // We'll process the final range for this object. Restore the length
    // so that the heap remains parsable in case of evacuation failure.
    to_obj_array->set_length(end);
  }
  _scanner.set_region(_g1h->heap_region_containing_raw(to_obj));
  // Process indexes [start,end). It will also process the header
  // along with the first chunk (i.e., the chunk with start == 0).
  // Note that at this point the length field of to_obj_array is not
  // correct given that we are using it to keep track of the next
  // start index. oop_iterate_range() (thankfully!) ignores the length
  // field and only relies on the start / end parameters. It does
  // however return the size of the object which will be incorrect. So
  // we have to ignore it even if we wanted to use it.
  to_obj_array->oop_iterate_range(&_scanner, start, end);
}

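// A queue entry is either the address of a (narrow) oop to evacuate or
// a from-space array oop tagged with the partial-array mask; the mask
// selects the processing path.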
template <class T> inline void G1ParScanThreadState::deal_with_reference(T* ref_to_scan) {
  if (!has_partial_array_mask(ref_to_scan)) {
    // Note: we can use "raw" versions of "region_containing" because
    // "obj_to_scan" is definitely in the heap, and is not in a
    // humongous region.
    HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
    do_oop_evac(ref_to_scan, r);
  } else {
    do_oop_partial_array((oop*)ref_to_scan);
  }
}

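// A StarTask holds either an oop* or a narrowOop* in a single queue
// entry; decode the width tag and dispatch to the typed overload above.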
inline void G1ParScanThreadState::deal_with_reference(StarTask ref) {
  assert(verify_task(ref), "sanity");
  if (ref.is_narrow()) {
    deal_with_reference((narrowOop*)ref);
  } else {
    deal_with_reference((oop*)ref);
  }
}

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP