Thu, 22 Sep 2011 10:57:37 -0700
6484982: G1: process references during evacuation pauses
Summary: G1 now uses two reference processors - one is used by concurrent marking and the other is used by STW GCs (both full and incremental evacuation pauses). In an evacuation pause, the reference processor is embedded into the closures used to scan objects. Doing so causes reference objects to be 'discovered' by the reference processor. At the end of the evacuation pause, these discovered reference objects are processed - preserving (and copying) referent objects (and their reachable graphs) as appropriate.
Reviewed-by: ysr, jwilhelm, brutisso, stefank, tonyp
1 /*
2 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSET_INLINE_HPP
26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSET_INLINE_HPP
28 #include "gc_implementation/g1/g1RemSet.hpp"
29 #include "gc_implementation/g1/heapRegionRemSet.hpp"
30 #include "oops/oop.inline.hpp"
32 inline size_t G1RemSet::n_workers() {
33 if (_g1->workers() != NULL) {
34 return _g1->workers()->total_workers();
35 } else {
36 return 1;
37 }
38 }
40 template <class T>
41 inline void G1RemSet::write_ref(HeapRegion* from, T* p) {
42 par_write_ref(from, p, 0);
43 }
// Record, on behalf of worker 'tid', that the location p (in region
// 'from') holds a reference into another region: the reference is added
// to the remembered set of the region containing the referenced object.
// References within a single region need no remembered-set entry and
// are skipped.
template <class T>
inline void G1RemSet::par_write_ref(HeapRegion* from, T* p, int tid) {
  oop obj = oopDesc::load_decode_heap_oop(p);
#ifdef ASSERT
  // can't do because of races
  // assert(obj == NULL || obj->is_oop(), "expected an oop");

  // Do the safe subset of is_oop: the full check can read fields of a
  // concurrently-mutated object, so only validate alignment and that
  // the address lies within the reserved heap.
  if (obj != NULL) {
#ifdef CHECK_UNHANDLED_OOPS
    oopDesc* o = obj.obj();
#else
    oopDesc* o = obj;
#endif // CHECK_UNHANDLED_OOPS
    assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned");
    assert(Universe::heap()->is_in_reserved(obj), "must be in heap");
  }
#endif // ASSERT

  assert(from == NULL || from->is_in_reserved(p), "p is not in from");

  HeapRegion* to = _g1->heap_region_containing(obj);
  // Only cross-region references need a remembered-set entry.
  // NOTE(review): 'to' is presumably NULL when obj is NULL or outside
  // the heap — confirm heap_region_containing's contract.
  if (to != NULL && from != to) {
    assert(to->rem_set() != NULL, "Need per-region 'into' remsets.");
    to->rem_set()->add_reference(p, tid);
  }
}
73 template <class T>
74 inline void UpdateRSOopClosure::do_oop_work(T* p) {
75 assert(_from != NULL, "from region must be non-NULL");
76 _rs->par_write_ref(_from, p, _worker_i);
77 }
79 template <class T>
80 inline void UpdateRSetImmediate::do_oop_work(T* p) {
81 assert(_from->is_in_reserved(p), "paranoia");
82 T heap_oop = oopDesc::load_heap_oop(p);
83 if (!oopDesc::is_null(heap_oop) && !_from->is_survivor()) {
84 _g1_rem_set->par_write_ref(_from, p, 0);
85 }
86 }
// For the reference at p (in region _from): either push it onto the
// worker's scan queue (when it points into the collection set during
// RSet updating in an evacuation pause) or record it directly in the
// target region's remembered set.
template <class T>
inline void UpdateRSOrPushRefOopClosure::do_oop_work(T* p) {
  oop obj = oopDesc::load_decode_heap_oop(p);
#ifdef ASSERT
  // can't do because of races
  // assert(obj == NULL || obj->is_oop(), "expected an oop");

  // Do the safe subset of is_oop: only alignment and heap-bounds
  // checks, since the object may be concurrently mutated.
  if (obj != NULL) {
#ifdef CHECK_UNHANDLED_OOPS
    oopDesc* o = obj.obj();
#else
    oopDesc* o = obj;
#endif // CHECK_UNHANDLED_OOPS
    assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned");
    assert(Universe::heap()->is_in_reserved(obj), "must be in heap");
  }
#endif // ASSERT

  assert(_from != NULL, "from region must be non-NULL");

  HeapRegion* to = _g1->heap_region_containing(obj);
  if (to != NULL && _from != to) {
    // The _record_refs_into_cset flag is true during the RSet
    // updating part of an evacuation pause. It is false at all
    // other times:
    // * rebuilding the remembered sets after a full GC
    // * during concurrent refinement.
    // * updating the remembered sets of regions in the collection
    //   set in the event of an evacuation failure (when deferred
    //   updates are enabled).

    if (_record_refs_into_cset && to->in_collection_set()) {
      // We are recording references that point into the collection
      // set and this particular reference does exactly that...
      // If the referenced object has already been forwarded
      // to itself, we are handling an evacuation failure and
      // we have already visited/tried to copy this object
      // there is no need to retry.
      if (!self_forwarded(obj)) {
        assert(_push_ref_cl != NULL, "should not be null");
        // Push the reference in the refs queue of the G1ParScanThreadState
        // instance for this worker thread.
        _push_ref_cl->do_oop(p);
      }

      // Deferred updates to the CSet are either discarded (in the normal case),
      // or processed (if an evacuation failure occurs) at the end
      // of the collection.
      // See G1RemSet::cleanup_after_oops_into_collection_set_do().
    } else {
      // We either don't care about pushing references that point into the
      // collection set (i.e. we're not during an evacuation pause) _or_
      // the reference doesn't point into the collection set. Either way
      // we add the reference directly to the RSet of the region containing
      // the referenced object.
      _g1_rem_set->par_write_ref(_from, p, _worker_i);
    }
  }
}
150 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSET_INLINE_HPP