Tue, 24 Jun 2014 16:20:15 +0200
8046670: Make CMS metadata aware closures applicable for other collectors
Reviewed-by: ehelin, mgerdin
1 /*
2 * Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSOOPCLOSURES_HPP
26 #define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSOOPCLOSURES_HPP
28 #include "memory/genOopClosures.hpp"
29 #include "memory/iterator.hpp"
/////////////////////////////////////////////////////////////////
// Closures used by ConcurrentMarkSweepGeneration's collector
/////////////////////////////////////////////////////////////////

// Forward declarations: the closure classes below only hold pointers
// to these types, so their full headers are not needed here.
class ConcurrentMarkSweepGeneration;
class CMSBitMap;                 // marking bit map
class CMSMarkStack;              // mark stack for grey objects (non-parallel)
class CMSCollector;
class MarkFromRootsClosure;      // parent closure of PushOrMarkClosure
class Par_MarkFromRootsClosure;  // parallel (MT) counterpart of the above
// Decode the oop and call do_oop on it.
//
// Expanded inside the closure classes below.  It declares the class's
// do_oop(oop) (defined out of line in the .cpp) and supplies the shared
// do_oop_work(T*) template: load the (possibly narrow) oop at *p and,
// if non-NULL, decode it and hand it to do_oop(oop).
// NOTE: no '//' comments may appear on the continued lines — they would
// comment out the trailing backslash.
#define DO_OOP_WORK_DEFN \
  void do_oop(oop obj); \
  template <class T> inline void do_oop_work(T* p) { \
    T heap_oop = oopDesc::load_heap_oop(p); \
    if (!oopDesc::is_null(heap_oop)) { \
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); \
      do_oop(obj); \
    } \
  }
// TODO: This duplication of the MetadataAwareOopClosure class is only needed
// because some CMS OopClosures derive from OopsInGenClosure. It would be
// good to get rid of them completely.
//
// An OopsInGenClosure that is also metadata-aware: do_metadata() answers
// true, so object iteration will also invoke do_klass() /
// do_class_loader_data() for the metadata reachable from scanned objects.
class MetadataAwareOopsInGenClosure: public OopsInGenClosure {
  // Adapter that applies *this to the oops embedded in a Klass.
  KlassToOopClosure _klass_closure;
 public:
  MetadataAwareOopsInGenClosure() {
    // Route klass scanning back through this closure.
    _klass_closure.initialize(this);
  }

  virtual bool do_metadata()    { return do_metadata_nv(); }
  // Non-virtual variant used by specialized (devirtualized) iterators.
  inline  bool do_metadata_nv() { return true; }

  virtual void do_klass(Klass* k);
  void do_klass_nv(Klass* k);   // non-virtual variant of do_klass

  virtual void do_class_loader_data(ClassLoaderData* cld);
};
// Marks referenced objects in the CMS bit map.  Non-parallel version;
// the implementation lives in the .cpp.
class MarkRefsIntoClosure: public MetadataAwareOopsInGenClosure {
 private:
  const MemRegion _span;    // region of interest — presumably bounds which
                            // oops are marked; confirm against the .cpp
  CMSBitMap*      _bitMap;  // marking bit map to record objects in
 protected:
  DO_OOP_WORK_DEFN
 public:
  MarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);

  // Hint to oop iterators: prefetch for reading while scanning.
  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
};
// Parallel (MT) counterpart of MarkRefsIntoClosure; same fields and
// interface, with a do_oop implementation suitable for concurrent use.
class Par_MarkRefsIntoClosure: public MetadataAwareOopsInGenClosure {
 private:
  const MemRegion _span;    // region of interest
  CMSBitMap*      _bitMap;  // shared marking bit map
 protected:
  DO_OOP_WORK_DEFN
 public:
  Par_MarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);

  // Hint to oop iterators: prefetch for reading while scanning.
  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
};
// A variant of the above used in certain kinds of CMS
// marking verification.  Carries two bit maps: the verification
// bit map being built and the collector's own bit map to check against.
class MarkRefsIntoVerifyClosure: public MetadataAwareOopsInGenClosure {
 private:
  const MemRegion _span;             // region of interest
  CMSBitMap*      _verification_bm;  // bit map built during verification
  CMSBitMap*      _cms_bm;           // the collector's marking bit map
 protected:
  DO_OOP_WORK_DEFN
 public:
  MarkRefsIntoVerifyClosure(MemRegion span, CMSBitMap* verification_bm,
                            CMSBitMap* cms_bm);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);

  // Hint to oop iterators: prefetch for reading while scanning.
  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
};
// The non-parallel version (the parallel version appears further below).
// Marks an object and pushes it for further scanning; used as the inner
// closure of MarkRefsIntoAndScanClosure.
class PushAndMarkClosure: public MetadataAwareOopClosure {
 private:
  CMSCollector* _collector;
  MemRegion     _span;                   // region of interest
  CMSBitMap*    _bit_map;                // marking bit map
  CMSBitMap*    _mod_union_table;        // mod-union table (card-level dirty info)
  CMSMarkStack* _mark_stack;             // stack of grey objects
  bool          _concurrent_precleaning; // true when used during concurrent precleaning
 protected:
  DO_OOP_WORK_DEFN
 public:
  PushAndMarkClosure(CMSCollector* collector,
                     MemRegion span,
                     ReferenceProcessor* rp,
                     CMSBitMap* bit_map,
                     CMSBitMap* mod_union_table,
                     CMSMarkStack* mark_stack,
                     bool concurrent_precleaning);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual entry points for specialized (devirtualized) iterators.
  inline void do_oop_nv(oop* p)       { PushAndMarkClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }

  // Hint to oop iterators: prefetch for reading while scanning.
  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
};
// In the parallel case, the bit map and the
// reference processor are currently all shared. Access to
// these shared mutable structures must use appropriate
// synchronization (for instance, via CAS). The marking stack
// used in the non-parallel case above is here replaced with
// an OopTaskQueue structure to allow efficient work stealing.
class Par_PushAndMarkClosure: public MetadataAwareOopClosure {
 private:
  CMSCollector* _collector;
  MemRegion     _span;        // region of interest
  CMSBitMap*    _bit_map;     // shared marking bit map
  OopTaskQueue* _work_queue;  // per-worker queue of grey objects (work stealing)
 protected:
  DO_OOP_WORK_DEFN
 public:
  Par_PushAndMarkClosure(CMSCollector* collector,
                         MemRegion span,
                         ReferenceProcessor* rp,
                         CMSBitMap* bit_map,
                         OopTaskQueue* work_queue);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual entry points for specialized (devirtualized) iterators.
  inline void do_oop_nv(oop* p)       { Par_PushAndMarkClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }

  // Hint to oop iterators: prefetch for reading while scanning.
  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
};
// The non-parallel version (the parallel version appears further below).
// Marks referenced objects and transitively scans them, using the embedded
// PushAndMarkClosure for the recursive step; supports yielding and
// draining the collector's overflow list.
class MarkRefsIntoAndScanClosure: public MetadataAwareOopsInGenClosure {
 private:
  MemRegion          _span;                // region of interest
  CMSBitMap*         _bit_map;             // marking bit map
  CMSMarkStack*      _mark_stack;          // stack of grey objects
  PushAndMarkClosure _pushAndMarkClosure;  // inner closure for transitive marking
  CMSCollector*      _collector;
  Mutex*             _freelistLock;        // set via set_freelistLock(); used by yield logic
  bool               _yield;               // whether this closure may yield
  // Whether closure is being used for concurrent precleaning
  bool               _concurrent_precleaning;
 protected:
  DO_OOP_WORK_DEFN
 public:
  MarkRefsIntoAndScanClosure(MemRegion span,
                             ReferenceProcessor* rp,
                             CMSBitMap* bit_map,
                             CMSBitMap* mod_union_table,
                             CMSMarkStack* mark_stack,
                             CMSCollector* collector,
                             bool should_yield,
                             bool concurrent_precleaning);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual entry points for specialized (devirtualized) iterators.
  inline void do_oop_nv(oop* p)       { MarkRefsIntoAndScanClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }

  // Hint to oop iterators: prefetch for reading while scanning.
  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
  void set_freelistLock(Mutex* m) {
    _freelistLock = m;
  }

 private:
  inline void do_yield_check();
  void do_yield_work();
  bool take_from_overflow_list();
};
// In this, the parallel avatar of MarkRefsIntoAndScanClosure, the revisit
// stack and the bitMap are shared, so access needs to be suitably
// synchronized. An OopTaskQueue structure, supporting efficient
// work stealing, replaces a CMSMarkStack for storing grey objects.
class Par_MarkRefsIntoAndScanClosure: public MetadataAwareOopsInGenClosure {
 private:
  MemRegion              _span;            // region of interest
  CMSBitMap*             _bit_map;         // shared marking bit map
  OopTaskQueue*          _work_queue;      // per-worker queue of grey objects
  const uint             _low_water_mark;  // queue level at which trim_queue kicks in
  Par_PushAndMarkClosure _par_pushAndMarkClosure;  // inner closure for transitive marking
 protected:
  DO_OOP_WORK_DEFN
 public:
  Par_MarkRefsIntoAndScanClosure(CMSCollector* collector,
                                 MemRegion span,
                                 ReferenceProcessor* rp,
                                 CMSBitMap* bit_map,
                                 OopTaskQueue* work_queue);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual entry points for specialized (devirtualized) iterators.
  inline void do_oop_nv(oop* p)       { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }

  // Hint to oop iterators: prefetch for reading while scanning.
  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
  // Process entries from the work queue down to the given size.
  void trim_queue(uint size);
};
// This closure is used during the concurrent marking phase
// following the first checkpoint. Its use is buried in
// the closure MarkFromRootsClosure.
class PushOrMarkClosure: public MetadataAwareOopClosure {
 private:
  CMSCollector*   _collector;
  MemRegion       _span;       // region of interest
  CMSBitMap*      _bitMap;     // marking bit map
  CMSMarkStack*   _markStack;  // stack of grey objects
  HeapWord* const _finger;     // marking finger — presumably the scan frontier;
                               // confirm against MarkFromRootsClosure in the .cpp
  MarkFromRootsClosure* const
                  _parent;     // enclosing closure, notified on stack overflow
 protected:
  DO_OOP_WORK_DEFN
 public:
  PushOrMarkClosure(CMSCollector* cms_collector,
                    MemRegion span,
                    CMSBitMap* bitMap,
                    CMSMarkStack* markStack,
                    HeapWord* finger,
                    MarkFromRootsClosure* parent);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual entry points for specialized (devirtualized) iterators.
  inline void do_oop_nv(oop* p)       { PushOrMarkClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }

  // Deal with a stack overflow condition
  void handle_stack_overflow(HeapWord* lost);
 private:
  inline void do_yield_check();
};
// A parallel (MT) version of the above.
// This closure is used during the concurrent marking phase
// following the first checkpoint. Its use is buried in
// the closure Par_MarkFromRootsClosure.
class Par_PushOrMarkClosure: public MetadataAwareOopClosure {
 private:
  CMSCollector*    _collector;
  MemRegion        _whole_span;          // the full region being collected
  MemRegion        _span;                // local chunk
  CMSBitMap*       _bit_map;             // shared marking bit map
  OopTaskQueue*    _work_queue;          // per-worker queue of grey objects
  CMSMarkStack*    _overflow_stack;      // shared overflow stack for queue spill
  HeapWord* const  _finger;              // local marking finger
  HeapWord** const _global_finger_addr;  // address of the shared global finger
  Par_MarkFromRootsClosure* const
                   _parent;              // enclosing closure, notified on stack overflow
 protected:
  DO_OOP_WORK_DEFN
 public:
  Par_PushOrMarkClosure(CMSCollector* cms_collector,
                        MemRegion span,
                        CMSBitMap* bit_map,
                        OopTaskQueue* work_queue,
                        CMSMarkStack* mark_stack,
                        HeapWord* finger,
                        HeapWord** global_finger_addr,
                        Par_MarkFromRootsClosure* parent);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual entry points for specialized (devirtualized) iterators.
  inline void do_oop_nv(oop* p)       { Par_PushOrMarkClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }

  // Deal with a stack overflow condition
  void handle_stack_overflow(HeapWord* lost);
 private:
  inline void do_yield_check();
};
// For objects in CMS generation, this closure marks
// given objects (transitively) as being reachable/live.
// This is currently used during the (weak) reference object
// processing phase of the CMS final checkpoint step, as
// well as during the concurrent precleaning of the discovered
// reference lists.
class CMSKeepAliveClosure: public MetadataAwareOopClosure {
 private:
  CMSCollector*   _collector;
  const MemRegion _span;        // region of interest
  CMSMarkStack*   _mark_stack;  // stack of grey objects
  CMSBitMap*      _bit_map;     // marking bit map
  bool            _concurrent_precleaning;  // true when used during precleaning
 protected:
  DO_OOP_WORK_DEFN
 public:
  CMSKeepAliveClosure(CMSCollector* collector, MemRegion span,
                      CMSBitMap* bit_map, CMSMarkStack* mark_stack,
                      bool cpc);
  bool concurrent_precleaning() const { return _concurrent_precleaning; }
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual entry points for specialized (devirtualized) iterators.
  inline void do_oop_nv(oop* p)       { CMSKeepAliveClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
};
// Inner closure used by CMSParKeepAliveClosure below: marks an object
// and pushes it on the work queue for transitive processing.
class CMSInnerParMarkAndPushClosure: public MetadataAwareOopClosure {
 private:
  CMSCollector* _collector;
  MemRegion     _span;        // region of interest
  OopTaskQueue* _work_queue;  // per-worker queue of grey objects
  CMSBitMap*    _bit_map;     // shared marking bit map
 protected:
  DO_OOP_WORK_DEFN
 public:
  CMSInnerParMarkAndPushClosure(CMSCollector* collector,
                                MemRegion span, CMSBitMap* bit_map,
                                OopTaskQueue* work_queue);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual entry points for specialized (devirtualized) iterators.
  inline void do_oop_nv(oop* p)       { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
};
// A parallel (MT) version of the above, used when
// reference processing is parallel; the only difference
// is in the do_oop method.
class CMSParKeepAliveClosure: public MetadataAwareOopClosure {
 private:
  MemRegion     _span;        // region of interest
  OopTaskQueue* _work_queue;  // per-worker queue of grey objects
  CMSBitMap*    _bit_map;     // shared marking bit map
  CMSInnerParMarkAndPushClosure
                _mark_and_push;    // inner closure for transitive marking
  const uint    _low_water_mark;   // queue level at which trim_queue kicks in
  // Process entries from the work queue down to the given size.
  void trim_queue(uint max);
 protected:
  DO_OOP_WORK_DEFN
 public:
  CMSParKeepAliveClosure(CMSCollector* collector, MemRegion span,
                         CMSBitMap* bit_map, OopTaskQueue* work_queue);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
};
388 #endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSOOPCLOSURES_HPP