/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
83 if (!oopDesc::is_null(heap_oop) && !_from->is_survivor()) { |
83 if (!oopDesc::is_null(heap_oop) && !_from->is_survivor()) { |
84 _g1_rem_set->par_write_ref(_from, p, 0); |
84 _g1_rem_set->par_write_ref(_from, p, 0); |
85 } |
85 } |
86 } |
86 } |
87 |
87 |
88 template <class T> |
|
89 inline void UpdateRSOrPushRefOopClosure::do_oop_work(T* p) { |
|
90 oop obj = oopDesc::load_decode_heap_oop(p); |
|
91 #ifdef ASSERT |
|
92 // can't do because of races |
|
93 // assert(obj == NULL || obj->is_oop(), "expected an oop"); |
|
94 |
|
95 // Do the safe subset of is_oop |
|
96 if (obj != NULL) { |
|
97 #ifdef CHECK_UNHANDLED_OOPS |
|
98 oopDesc* o = obj.obj(); |
|
99 #else |
|
100 oopDesc* o = obj; |
|
101 #endif // CHECK_UNHANDLED_OOPS |
|
102 assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned"); |
|
103 assert(Universe::heap()->is_in_reserved(obj), "must be in heap"); |
|
104 } |
|
105 #endif // ASSERT |
|
106 |
|
107 assert(_from != NULL, "from region must be non-NULL"); |
|
108 |
|
109 HeapRegion* to = _g1->heap_region_containing(obj); |
|
110 if (to != NULL && _from != to) { |
|
111 // The _record_refs_into_cset flag is true during the RSet |
|
112 // updating part of an evacuation pause. It is false at all |
|
113 // other times: |
|
114 // * rebuilding the rembered sets after a full GC |
|
115 // * during concurrent refinement. |
|
116 // * updating the remembered sets of regions in the collection |
|
117 // set in the event of an evacuation failure (when deferred |
|
118 // updates are enabled). |
|
119 |
|
120 if (_record_refs_into_cset && to->in_collection_set()) { |
|
121 // We are recording references that point into the collection |
|
122 // set and this particular reference does exactly that... |
|
123 // If the referenced object has already been forwarded |
|
124 // to itself, we are handling an evacuation failure and |
|
125 // we have already visited/tried to copy this object |
|
126 // there is no need to retry. |
|
127 if (!self_forwarded(obj)) { |
|
128 assert(_push_ref_cl != NULL, "should not be null"); |
|
129 // Push the reference in the refs queue of the G1ParScanThreadState |
|
130 // instance for this worker thread. |
|
131 _push_ref_cl->do_oop(p); |
|
132 } |
|
133 |
|
134 // Deferred updates to the CSet are either discarded (in the normal case), |
|
135 // or processed (if an evacuation failure occurs) at the end |
|
136 // of the collection. |
|
137 // See G1RemSet::cleanup_after_oops_into_collection_set_do(). |
|
138 } else { |
|
139 // We either don't care about pushing references that point into the |
|
140 // collection set (i.e. we're not during an evacuation pause) _or_ |
|
141 // the reference doesn't point into the collection set. Either way |
|
142 // we add the reference directly to the RSet of the region containing |
|
143 // the referenced object. |
|
144 _g1_rem_set->par_write_ref(_from, p, _worker_i); |
|
145 } |
|
146 } |
|
147 } |
|
148 |
|
149 |
|
150 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSET_INLINE_HPP |
88 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSET_INLINE_HPP |