      _cm->grayRoot(obj, obj->size(), _worker_id, hr);
    }
  }
}

template <class T>
inline void G1Mux2Closure::do_oop_nv(T* p) {
  // Apply first closure; then apply the second.
  _c1->do_oop(p);
  _c2->do_oop(p);
}

template <class T>
inline void G1TriggerClosure::do_oop_nv(T* p) {
  // Record that this closure was actually applied (triggered).
  _triggered = true;
}
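
// Apply the wrapped oop closure to *p only if the associated trigger
// closure has not been applied (triggered).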
template <class T>
inline void G1InvokeIfNotTriggeredClosure::do_oop_nv(T* p) {
  if (!_trigger_cl->triggered()) {
    _oop_cl->do_oop(p);
  }
}
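
// Applied to the reference fields of objects in region _from. During the
// RSet updating phase of an evacuation pause, references that point into
// the collection set are pushed onto this worker's refs queue (unless the
// referenced object is self-forwarded); all other cross-region references
// are added directly to the remembered set of the region containing the
// referenced object. See the comments in the body below.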
template <class T>
inline void G1UpdateRSOrPushRefOopClosure::do_oop_nv(T* p) {
  oop obj = oopDesc::load_decode_heap_oop(p);
#ifdef ASSERT
  // can't do because of races
  // assert(obj == NULL || obj->is_oop(), "expected an oop");

  // Do the safe subset of is_oop
  if (obj != NULL) {
#ifdef CHECK_UNHANDLED_OOPS
    oopDesc* o = obj.obj();
#else
    oopDesc* o = obj;
#endif // CHECK_UNHANDLED_OOPS
    assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned");
    assert(Universe::heap()->is_in_reserved(obj), "must be in heap");
  }
#endif // ASSERT

  assert(_from != NULL, "from region must be non-NULL");

  HeapRegion* to = _g1->heap_region_containing(obj);
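  // References are only of interest here if they point out of _from and
  // into another heap region; otherwise there is nothing to do.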
  if (to != NULL && _from != to) {
    // The _record_refs_into_cset flag is true during the RSet
    // updating part of an evacuation pause. It is false at all
    // other times:
    //  * rebuilding the remembered sets after a full GC
    //  * during concurrent refinement.
    //  * updating the remembered sets of regions in the collection
    //    set in the event of an evacuation failure (when deferred
    //    updates are enabled).

    if (_record_refs_into_cset && to->in_collection_set()) {
      // We are recording references that point into the collection
      // set and this particular reference does exactly that...
      // If the referenced object has already been forwarded
      // to itself, we are handling an evacuation failure and
      // we have already visited/tried to copy this object,
      // so there is no need to retry.
      if (!self_forwarded(obj)) {
        assert(_push_ref_cl != NULL, "should not be null");
        // Push the reference onto the refs queue of the G1ParScanThreadState
        // instance for this worker thread.
        _push_ref_cl->do_oop(p);
      }

      // Deferred updates to the CSet are either discarded (in the normal case),
      // or processed (if an evacuation failure occurs) at the end
      // of the collection.
      // See G1RemSet::cleanup_after_oops_into_collection_set_do().
    } else {
      // We either don't care about pushing references that point into the
      // collection set (i.e. we're not during an evacuation pause) _or_
      // the reference doesn't point into the collection set. Either way
      // we add the reference directly to the RSet of the region containing
      // the referenced object.
      _g1_rem_set->par_write_ref(_from, p, _worker_i);
    }
  }
}

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_INLINE_HPP