Mon, 24 Mar 2014 15:30:14 +0100
8035406: Improve data structure for Code Cache remembered sets
Summary: Change the code cache remembered sets data structure from a GrowableArray to a chunked list of nmethods. This makes the data structure more amenable to parallelization and decreases freeing time.
Reviewed-by: mgerdin, brutisso
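In broad strokes, the new structure stores a region's code roots in fixed-size chunks of nmethod pointers linked into a list: growth prepends a chunk in constant time, freeing is a walk of the chain, and chunk boundaries give natural work units for parallel iteration. A minimal standalone sketch of the chunked-list idea follows; the names (NmethodChunk, ChunkedNmethodList) and the chunk size are illustrative, not the classes introduced by this change.

#include <cstddef>

class nmethod; // opaque stand-in for HotSpot's compiled-method type

// Illustrative sketch only. A fixed-size chunk of nmethod pointers; full
// chunks are chained into a singly linked list, so the set never has to
// reallocate and copy one large array the way a GrowableArray does.
class NmethodChunk {
public:
  static const size_t Capacity = 256;    // illustrative chunk size
private:
  nmethod*      _data[Capacity];
  size_t        _top;                    // number of used slots
  NmethodChunk* _next;
public:
  NmethodChunk(NmethodChunk* next) : _top(0), _next(next) { }
  bool is_full() const        { return _top == Capacity; }
  void add(nmethod* nm)       { _data[_top++] = nm; }  // caller checks is_full()
  NmethodChunk* next() const  { return _next; }
  size_t length() const       { return _top; }
  nmethod* at(size_t i) const { return _data[i]; }
};

class ChunkedNmethodList {
  NmethodChunk* _head;
public:
  ChunkedNmethodList() : _head(NULL) { }
  ~ChunkedNmethodList() {
    while (_head != NULL) {              // freeing walks the chain, chunk by chunk
      NmethodChunk* c = _head;
      _head = c->next();
      delete c;
    }
  }
  void add(nmethod* nm) {
    if (_head == NULL || _head->is_full()) {
      _head = new NmethodChunk(_head);   // O(1) growth, no copying
    }
    _head->add(nm);
  }
  // Chunk boundaries are natural work units if iteration is parallelized.
  template <class Closure>
  void nmethods_do(Closure* cl) const {
    for (NmethodChunk* c = _head; c != NULL; c = c->next()) {
      for (size_t i = 0; i < c->length(); i++) {
        cl->do_nmethod(c->at(i));
      }
    }
  }
};
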
/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_HPP

class HeapRegion;
class G1CollectedHeap;
class G1RemSet;
class ConcurrentMark;
class DirtyCardToOopClosure;
class CMBitMap;
class CMMarkStack;
class G1ParScanThreadState;
class CMTask;
class ReferenceProcessor;

// A class that scans oops in a given heap region (much as OopsInGenClosure
// scans oops in a generation).
class OopsInHeapRegionClosure: public ExtendedOopClosure {
protected:
  HeapRegion* _from;
public:
  void set_region(HeapRegion* from) { _from = from; }
};

class G1ParClosureSuper : public OopsInHeapRegionClosure {
protected:
  G1CollectedHeap* _g1;
  G1ParScanThreadState* _par_scan_state;
  uint _worker_id;
public:
  G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state);
  bool apply_to_weak_ref_discovered_field() { return true; }
};

class G1ParPushHeapRSClosure : public G1ParClosureSuper {
public:
  G1ParPushHeapRSClosure(G1CollectedHeap* g1,
                         G1ParScanThreadState* par_scan_state) :
    G1ParClosureSuper(g1, par_scan_state) { }

  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p) { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};

class G1ParScanClosure : public G1ParClosureSuper {
public:
  G1ParScanClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state, ReferenceProcessor* rp) :
    G1ParClosureSuper(g1, par_scan_state)
  {
    assert(_ref_processor == NULL, "sanity");
    _ref_processor = rp;
  }

  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p) { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};

#define G1_PARTIAL_ARRAY_MASK 0x2

inline bool has_partial_array_mask(oop* ref) {
  return ((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) == G1_PARTIAL_ARRAY_MASK;
}

// We never encode partial array oops as narrowOop*, so return false immediately.
// This allows the compiler to create optimized code when popping references from
// the work queue.
inline bool has_partial_array_mask(narrowOop* ref) {
  assert(((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) != G1_PARTIAL_ARRAY_MASK, "Partial array oop reference encoded as narrowOop*");
  return false;
}

// Only implement set_partial_array_mask() for regular oops, not for narrowOops.
// We always encode partial arrays as regular oops, to allow the
// specialization of has_partial_array_mask() for narrowOops above.
// This means that unintentional use of this method with narrowOops is caught
// by the compiler.
inline oop* set_partial_array_mask(oop obj) {
  assert(((uintptr_t)(void *)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!");
  return (oop*) ((uintptr_t)(void *)obj | G1_PARTIAL_ARRAY_MASK);
}

template <class T> inline oop clear_partial_array_mask(T* ref) {
  return cast_to_oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
}

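The three helpers above are ordinary low-bit pointer tagging: heap objects are at least word aligned, so bit 0x2 of a real oop address is always zero and can carry a "this entry is a partial-array marker" flag while the reference sits in a worker's task queue. A standalone sketch of the same round trip, with plain uintptr_t standing in for oop:

#include <assert.h>
#include <stdint.h>

static const uintptr_t MASK = 0x2;  // plays the role of G1_PARTIAL_ARRAY_MASK

inline uintptr_t set_mask(uintptr_t ref) {
  assert((ref & MASK) == 0);        // alignment guarantees the bit is free
  return ref | MASK;                // tag the queue entry
}
inline bool has_mask(uintptr_t ref)    { return (ref & MASK) != 0; }
inline uintptr_t clear_mask(uintptr_t ref) { return ref & ~MASK; }

// Round trip: tag on push, test and untag on pop.
//   uintptr_t obj    = 0x7f3a1c2d4f00;  // aligned address, bit 0x2 clear
//   uintptr_t tagged = set_mask(obj);   // 0x7f3a1c2d4f02
//   assert(has_mask(tagged) && clear_mask(tagged) == obj);

On pop, a worker first checks has_partial_array_mask(); if the bit is set, it clears the mask to recover the array oop and scans a further slice of that array, which is what G1ParScanPartialArrayClosure below is used for.
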
class G1ParScanPartialArrayClosure : public G1ParClosureSuper {
  G1ParScanClosure _scanner;

public:
  G1ParScanPartialArrayClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state, ReferenceProcessor* rp) :
    G1ParClosureSuper(g1, par_scan_state), _scanner(g1, par_scan_state, rp)
  {
    assert(_ref_processor == NULL, "sanity");
  }

  G1ParScanClosure* scanner() {
    return &_scanner;
  }

  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p) { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};

// Add back base class for metadata
class G1ParCopyHelper : public G1ParClosureSuper {
protected:
  Klass* _scanned_klass;
  ConcurrentMark* _cm;

  // Mark the object if it's not already marked. This is used to mark
  // objects pointed to by roots that are guaranteed not to move
  // during the GC (i.e., non-CSet objects). It is MT-safe.
  void mark_object(oop obj);

  // Mark the object if it's not already marked. This is used to mark
  // objects pointed to by roots that have been forwarded during a
  // GC. It is MT-safe.
  void mark_forwarded_object(oop from_obj, oop to_obj);
public:
  G1ParCopyHelper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state);

  void set_scanned_klass(Klass* k) { _scanned_klass = k; }
  template <class T> void do_klass_barrier(T* p, oop new_obj);
};

template <G1Barrier barrier, bool do_mark_object>
class G1ParCopyClosure : public G1ParCopyHelper {
private:
  template <class T> void do_oop_work(T* p);

public:
  G1ParCopyClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state,
                   ReferenceProcessor* rp) :
      G1ParCopyHelper(g1, par_scan_state) {
    assert(_ref_processor == NULL, "sanity");
  }

  template <class T> void do_oop_nv(T* p) { do_oop_work(p); }
  virtual void do_oop(oop* p) { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};

typedef G1ParCopyClosure<G1BarrierNone,  false> G1ParScanExtRootClosure;
typedef G1ParCopyClosure<G1BarrierKlass, false> G1ParScanMetadataClosure;

typedef G1ParCopyClosure<G1BarrierNone,  true> G1ParScanAndMarkExtRootClosure;
typedef G1ParCopyClosure<G1BarrierKlass, true> G1ParScanAndMarkMetadataClosure;

// The following closure type is defined in g1_specialized_oop_closures.hpp:
//
//   typedef G1ParCopyClosure<G1BarrierEvac, false> G1ParScanHeapEvacClosure;

// We use a separate closure to handle references during evacuation
// failure processing; we could have reused G1ParScanHeapEvacClosure
// itself, since that closure no longer assumes that the references it
// handles point into the collection set.
typedef G1ParCopyClosure<G1BarrierEvac, false> G1ParScanHeapEvacFailureClosure;

class FilterIntoCSClosure: public ExtendedOopClosure {
  G1CollectedHeap* _g1;
  OopClosure* _oc;
  DirtyCardToOopClosure* _dcto_cl;
public:
  FilterIntoCSClosure(DirtyCardToOopClosure* dcto_cl,
                      G1CollectedHeap* g1,
                      OopClosure* oc) :
    _g1(g1), _oc(oc), _dcto_cl(dcto_cl) { }

  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p) { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
  bool apply_to_weak_ref_discovered_field() { return true; }
};

class FilterOutOfRegionClosure: public ExtendedOopClosure {
  HeapWord* _r_bottom;
  HeapWord* _r_end;
  OopClosure* _oc;
public:
  FilterOutOfRegionClosure(HeapRegion* r, OopClosure* oc);
  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p) { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
  bool apply_to_weak_ref_discovered_field() { return true; }
};

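Both filter closures have the same shape: decode the reference, test it against the filter's condition (points into the collection set, or points outside the region), and only then forward the location to the wrapped OopClosure. Their do_oop_nv bodies live in g1OopClosures.inline.hpp; a sketch of the out-of-region case, written only in terms of the members declared above (illustrative, not a verbatim quote):

template <class T>
inline void FilterOutOfRegionClosure::do_oop_nv(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    HeapWord* obj_hw = (HeapWord*)oopDesc::decode_heap_oop_not_null(heap_oop);
    // Forward only references that point outside [_r_bottom, _r_end).
    if (obj_hw < _r_bottom || obj_hw >= _r_end) {
      _oc->do_oop(p);
    }
  }
}
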
// Closure for iterating over object fields during concurrent marking
class G1CMOopClosure : public ExtendedOopClosure {
private:
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CMTask* _task;
public:
  G1CMOopClosure(G1CollectedHeap* g1h, ConcurrentMark* cm, CMTask* task);
  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p) { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};

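The marking closure itself stays thin: it decodes the reference and hands it to the owning CMTask, which does the actual marking work and pushes the object on the task's local mark stack when needed. A sketch of the expected do_oop_nv body (the definition lives in g1OopClosures.inline.hpp; treat the exact shape as an assumption):

template <class T>
inline void G1CMOopClosure::do_oop_nv(T* p) {
  oop obj = oopDesc::load_decode_heap_oop(p);
  // Delegate all marking decisions to the task that owns this closure.
  _task->deal_with_reference(obj);
}
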
// Closure to scan the root regions during concurrent marking
class G1RootRegionScanClosure : public ExtendedOopClosure {
private:
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  uint _worker_id;
public:
  G1RootRegionScanClosure(G1CollectedHeap* g1h, ConcurrentMark* cm,
                          uint worker_id) :
    _g1h(g1h), _cm(cm), _worker_id(worker_id) { }
  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p) { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};

// Closure that applies the given two closures in sequence. Used by the
// RSet refinement code (when updating RSets during an evacuation pause)
// to record cards containing pointers into the collection set.
class G1Mux2Closure : public ExtendedOopClosure {
  OopClosure* _c1;
  OopClosure* _c2;
public:
  G1Mux2Closure(OopClosure* c1, OopClosure* c2);
  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p) { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};

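Everything G1Mux2Closure needs is declared above, so its do_oop_nv reduces to applying the two wrapped closures to the same location, one after the other. A sketch of the corresponding definition in g1OopClosures.inline.hpp:

template <class T>
inline void G1Mux2Closure::do_oop_nv(T* p) {
  // Apply the first closure, then the second, to the same reference location.
  _c1->do_oop(p);
  _c2->do_oop(p);
}
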
// A closure that records whether it was actually applied to a reference;
// query the result via triggered().
class G1TriggerClosure : public ExtendedOopClosure {
  bool _triggered;
public:
  G1TriggerClosure();
  bool triggered() const { return _triggered; }
  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p) { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};

// A closure that uses a triggering closure to determine
// whether to apply an oop closure.
class G1InvokeIfNotTriggeredClosure: public ExtendedOopClosure {
  G1TriggerClosure* _trigger_cl;
  OopClosure* _oop_cl;
public:
  G1InvokeIfNotTriggeredClosure(G1TriggerClosure* t, OopClosure* oc);
  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p) { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};

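The two closures above are designed as a pair: the trigger records that a reference was seen at all, and the guard short-circuits its wrapped closure once that has happened. Sketches of both bodies, plus an illustrative pairing (the composition shown is an example, not quoted from the refinement code):

template <class T>
inline void G1TriggerClosure::do_oop_nv(T* p) {
  _triggered = true;                 // remember that we were applied at all
}

template <class T>
inline void G1InvokeIfNotTriggeredClosure::do_oop_nv(T* p) {
  if (!_trigger_cl->triggered()) {   // stop forwarding once the trigger fired
    _oop_cl->do_oop(p);
  }
}

// Example pairing: stop applying an expensive closure as soon as the first
// reference into the collection set has been found.
//   G1TriggerClosure trigger;
//   FilterIntoCSClosure filter(NULL, g1h, &trigger);
//   G1InvokeIfNotTriggeredClosure guard(&trigger, &filter);
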
class G1UpdateRSOrPushRefOopClosure: public ExtendedOopClosure {
  G1CollectedHeap* _g1;
  G1RemSet* _g1_rem_set;
  HeapRegion* _from;
  OopsInHeapRegionClosure* _push_ref_cl;
  bool _record_refs_into_cset;
  int _worker_i;

public:
  G1UpdateRSOrPushRefOopClosure(G1CollectedHeap* g1h,
                                G1RemSet* rs,
                                OopsInHeapRegionClosure* push_ref_cl,
                                bool record_refs_into_cset,
                                int worker_i = 0);

  void set_from(HeapRegion* from) {
    assert(from != NULL, "from region must be non-NULL");
    _from = from;
  }

  bool self_forwarded(oop obj) {
    // An object is self-forwarded when evacuation failed and its forwarding
    // pointer was installed to point back at the object itself.
    return obj->is_forwarded() && (obj->forwardee() == obj);
  }

  bool apply_to_weak_ref_discovered_field() { return true; }

  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
  virtual void do_oop(oop* p) { do_oop_nv(p); }
};

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_HPP