--- a/src/share/vm/gc_implementation/g1/g1ParScanThreadState.inline.hpp	Mon Jul 21 09:41:04 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/g1ParScanThreadState.inline.hpp	Mon Jul 21 09:41:06 2014 +0200
@@ -43,6 +43,30 @@
   }
 }
 
+template <class T> void G1ParScanThreadState::do_oop_evac(T* p, HeapRegion* from) {
+  assert(!oopDesc::is_null(oopDesc::load_decode_heap_oop(p)),
+         "Reference should not be NULL here as such are never pushed to the task queue.");
+  oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+
+  // Although we never intentionally push references outside of the collection
+  // set, due to (benign) races in the claim mechanism during RSet scanning more
+  // than one thread might claim the same card. So the same card may be
+  // processed multiple times. So redo this check.
+  if (_g1h->in_cset_fast_test(obj)) {
+    oop forwardee;
+    if (obj->is_forwarded()) {
+      forwardee = obj->forwardee();
+    } else {
+      forwardee = copy_to_survivor_space(obj);
+    }
+    assert(forwardee != NULL, "forwardee should not be NULL");
+    oopDesc::encode_store_heap_oop(p, forwardee);
+  }
+
+  assert(obj != NULL, "Must be");
+  update_rs(from, p, queue_num());
+}
+
 inline void G1ParScanThreadState::do_oop_partial_array(oop* p) {
   assert(has_partial_array_mask(p), "invariant");
   oop from_obj = clear_partial_array_mask(p);
@@ -104,7 +128,7 @@
   }
 }
 
-inline void G1ParScanThreadState::deal_with_reference(StarTask ref) {
+inline void G1ParScanThreadState::dispatch_reference(StarTask ref) {
   assert(verify_task(ref), "sanity");
   if (ref.is_narrow()) {
     deal_with_reference((narrowOop*)ref);
@@ -113,5 +137,18 @@
   }
 }
 
+void G1ParScanThreadState::steal_and_trim_queue(RefToScanQueueSet *task_queues) {
+  StarTask stolen_task;
+  while (task_queues->steal(queue_num(), hash_seed(), stolen_task)) {
+    assert(verify_task(stolen_task), "sanity");
+    dispatch_reference(stolen_task);
+
+    // We've just processed a reference and we might have made
+    // available new entries on the queues. So we have to make sure
+    // we drain the queues as necessary.
+    trim_queue();
+  }
+}
+
 #endif /* SHARE_VM_GC_IMPLEMENTATION_G1_G1PARSCANTHREADSTATE_INLINE_HPP */