Tue, 27 Nov 2012 07:57:57 -0800
8003879: Duplicate definitions in vmStructs
Summary: Removed duplicate entries
Reviewed-by: dholmes, sspitsyn
1 /*
2 * Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSOOPCLOSURES_HPP
26 #define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSOOPCLOSURES_HPP
28 #include "memory/genOopClosures.hpp"
/////////////////////////////////////////////////////////////////
// Closures used by ConcurrentMarkSweepGeneration's collector
/////////////////////////////////////////////////////////////////

// Forward declarations of CMS collector types referenced by the
// closure declarations below; their definitions live elsewhere.
class ConcurrentMarkSweepGeneration;
class CMSBitMap;
class CMSMarkStack;
class CMSCollector;
class MarkFromRootsClosure;
class Par_MarkFromRootsClosure;
// Decode the oop and call do_oop on it.
//
// Expanded inside each closure body below; it declares the
// closure-specific do_oop(oop) worker and a templated do_oop_work(T*)
// that loads a (possibly narrow) heap oop from *p, skips NULLs, decodes
// it to a full oop, and dispatches to do_oop(oop).
// NOTE: do not place // comments inside the macro itself — line
// splicing of the trailing backslashes happens before comment removal.
#define DO_OOP_WORK_DEFN \
  void do_oop(oop obj); \
  template <class T> inline void do_oop_work(T* p) { \
    T heap_oop = oopDesc::load_heap_oop(p); \
    if (!oopDesc::is_null(heap_oop)) { \
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); \
      do_oop(obj); \
    } \
  }
// Applies the given oop closure to all oops in all klasses visited.
class CMKlassClosure : public KlassClosure {
  // The two CMS closure base classes call initialize() after
  // construction, hence the friendship.
  friend class CMSOopClosure;
  friend class CMSOopsInGenClosure;

  OopClosure* _oop_closure;  // Applied to the oops of each visited klass.

  // Used when _oop_closure couldn't be set in an initialization list.
  // May only be called once, and only while _oop_closure is still NULL.
  void initialize(OopClosure* oop_closure) {
    assert(_oop_closure == NULL, "Should only be called once");
    _oop_closure = oop_closure;
  }
 public:
  CMKlassClosure(OopClosure* oop_closure = NULL) : _oop_closure(oop_closure) { }

  void do_klass(Klass* k);
};
// The base class for all CMS marking closures.
// It's used to proxy through the metadata to the oops defined in them.
class CMSOopClosure: public ExtendedOopClosure {
  // Walks klass metadata; initialized to feed discovered oops back
  // into this closure (see the constructors).
  CMKlassClosure _klass_closure;
 public:
  CMSOopClosure() : ExtendedOopClosure() {
    _klass_closure.initialize(this);
  }
  // Variant that registers a reference processor with the superclass.
  CMSOopClosure(ReferenceProcessor* rp) : ExtendedOopClosure(rp) {
    _klass_closure.initialize(this);
  }

  // CMS marking closures always follow metadata; do_metadata_nv() is
  // the non-virtual version used by specialized (template) dispatch.
  virtual bool do_metadata() { return do_metadata_nv(); }
  inline bool do_metadata_nv() { return true; }

  virtual void do_klass(Klass* k);
  void do_klass_nv(Klass* k);

  virtual void do_class_loader_data(ClassLoaderData* cld);
};
// TODO: This duplication of the CMSOopClosure class is only needed because
// some CMS OopClosures derive from OopsInGenClosure. It would be good
// to get rid of them completely.
class CMSOopsInGenClosure: public OopsInGenClosure {
  // Walks klass metadata; initialized to feed discovered oops back
  // into this closure (see the constructor).
  CMKlassClosure _klass_closure;
 public:
  CMSOopsInGenClosure() {
    _klass_closure.initialize(this);
  }

  // Mirrors CMSOopClosure: metadata is always followed, and the _nv
  // variants serve the specialized (non-virtual) dispatch path.
  virtual bool do_metadata() { return do_metadata_nv(); }
  inline bool do_metadata_nv() { return true; }

  virtual void do_klass(Klass* k);
  void do_klass_nv(Klass* k);

  virtual void do_class_loader_data(ClassLoaderData* cld);
};
// Marks referenced objects into a CMS bit map; applied to oops
// found within _span. (Presumably the initial mark's root-scanning
// closure — confirm against the .cpp file.)
class MarkRefsIntoClosure: public CMSOopsInGenClosure {
 private:
  const MemRegion _span;   // Only oops within this region are marked.
  CMSBitMap*      _bitMap; // Destination mark bit map.
 protected:
  DO_OOP_WORK_DEFN
 public:
  MarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);

  // Hint to callers that iterate with prefetching.
  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
};
// A variant of the above used in certain kinds of CMS
// marking verification.
class MarkRefsIntoVerifyClosure: public CMSOopsInGenClosure {
 private:
  const MemRegion _span;            // Only oops within this region are processed.
  CMSBitMap*      _verification_bm; // Bit map built during verification.
  CMSBitMap*      _cms_bm;          // The collector's own mark bit map, for comparison.
 protected:
  DO_OOP_WORK_DEFN
 public:
  MarkRefsIntoVerifyClosure(MemRegion span, CMSBitMap* verification_bm,
                            CMSBitMap* cms_bm);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);

  // Hint to callers that iterate with prefetching.
  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
};
// The non-parallel version (the parallel version appears further below).
class PushAndMarkClosure: public CMSOopClosure {
 private:
  CMSCollector* _collector;
  MemRegion     _span;              // Region of interest for marking.
  CMSBitMap*    _bit_map;           // Mark bit map.
  CMSBitMap*    _mod_union_table;   // Mod-union table (card-level dirty info).
  CMSMarkStack* _mark_stack;        // Stack of grey objects (non-parallel case).
  bool          _concurrent_precleaning; // True when used during concurrent precleaning.
 protected:
  DO_OOP_WORK_DEFN
 public:
  PushAndMarkClosure(CMSCollector* collector,
                     MemRegion span,
                     ReferenceProcessor* rp,
                     CMSBitMap* bit_map,
                     CMSBitMap* mod_union_table,
                     CMSMarkStack* mark_stack,
                     bool concurrent_precleaning);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual entry points used by specialized oop iteration.
  inline void do_oop_nv(oop* p)       { PushAndMarkClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }

  // Hint to callers that iterate with prefetching.
  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
};
// In the parallel case, the bit map and the
// reference processor are currently all shared. Access to
// these shared mutable structures must use appropriate
// synchronization (for instance, via CAS). The marking stack
// used in the non-parallel case above is here replaced with
// an OopTaskQueue structure to allow efficient work stealing.
class Par_PushAndMarkClosure: public CMSOopClosure {
 private:
  CMSCollector* _collector;
  MemRegion     _span;       // Region of interest for marking.
  CMSBitMap*    _bit_map;    // Shared mark bit map (see synchronization note above).
  OopTaskQueue* _work_queue; // Per-worker queue of grey objects, supports stealing.
 protected:
  DO_OOP_WORK_DEFN
 public:
  Par_PushAndMarkClosure(CMSCollector* collector,
                         MemRegion span,
                         ReferenceProcessor* rp,
                         CMSBitMap* bit_map,
                         OopTaskQueue* work_queue);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual entry points used by specialized oop iteration.
  inline void do_oop_nv(oop* p)       { Par_PushAndMarkClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }

  // Hint to callers that iterate with prefetching.
  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
};
// The non-parallel version (the parallel version appears further below).
class MarkRefsIntoAndScanClosure: public CMSOopsInGenClosure {
 private:
  MemRegion          _span;      // Region of interest for marking.
  CMSBitMap*         _bit_map;   // Mark bit map.
  CMSMarkStack*      _mark_stack; // Stack of grey objects.
  // Inner closure used to transitively push/mark; the mod_union_table
  // constructor argument is presumably forwarded to it — confirm in the
  // .cpp file.
  PushAndMarkClosure _pushAndMarkClosure;
  CMSCollector*      _collector;
  Mutex*             _freelistLock; // Settable after construction; see set_freelistLock().
  bool               _yield;        // Whether to yield periodically (see do_yield_check()).
  // Whether closure is being used for concurrent precleaning
  bool               _concurrent_precleaning;
 protected:
  DO_OOP_WORK_DEFN
 public:
  MarkRefsIntoAndScanClosure(MemRegion span,
                             ReferenceProcessor* rp,
                             CMSBitMap* bit_map,
                             CMSBitMap* mod_union_table,
                             CMSMarkStack* mark_stack,
                             CMSCollector* collector,
                             bool should_yield,
                             bool concurrent_precleaning);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual entry points used by specialized oop iteration.
  inline void do_oop_nv(oop* p)       { MarkRefsIntoAndScanClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }

  // Hint to callers that iterate with prefetching.
  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
  void set_freelistLock(Mutex* m) {
    _freelistLock = m;
  }

 private:
  inline void do_yield_check();
  void do_yield_work();
  bool take_from_overflow_list();
};
// In this, the parallel avatar of MarkRefsIntoAndScanClosure, the revisit
// stack and the bitMap are shared, so access needs to be suitably
// synchronized. An OopTaskQueue structure, supporting efficient
// work stealing, replaces a CMSMarkStack for storing grey objects.
class Par_MarkRefsIntoAndScanClosure: public CMSOopsInGenClosure {
 private:
  MemRegion     _span;       // Region of interest for marking.
  CMSBitMap*    _bit_map;    // Shared mark bit map.
  OopTaskQueue* _work_queue; // Per-worker queue of grey objects, supports stealing.
  const uint    _low_water_mark; // Queue threshold; presumably used by trim_queue() — confirm in .cpp.
  // Inner closure used to transitively push/mark; the collector
  // constructor argument is presumably forwarded to it — confirm in .cpp.
  Par_PushAndMarkClosure _par_pushAndMarkClosure;
 protected:
  DO_OOP_WORK_DEFN
 public:
  Par_MarkRefsIntoAndScanClosure(CMSCollector* collector,
                                 MemRegion span,
                                 ReferenceProcessor* rp,
                                 CMSBitMap* bit_map,
                                 OopTaskQueue* work_queue);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual entry points used by specialized oop iteration.
  inline void do_oop_nv(oop* p)       { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }

  // Hint to callers that iterate with prefetching.
  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
  // Drain the work queue down to (at most) the given size.
  void trim_queue(uint size);
};
// This closure is used during the concurrent marking phase
// following the first checkpoint. Its use is buried in
// the closure MarkFromRootsClosure.
class PushOrMarkClosure: public CMSOopClosure {
 private:
  CMSCollector* _collector;
  MemRegion     _span;      // Region of interest for marking.
  CMSBitMap*    _bitMap;    // Mark bit map.
  CMSMarkStack* _markStack; // Stack of grey objects.
  HeapWord* const _finger;  // Current scan finger (fixed for this closure's lifetime).
  MarkFromRootsClosure* const
                _parent;    // Enclosing closure; notified on overflow, presumably.
 protected:
  DO_OOP_WORK_DEFN
 public:
  PushOrMarkClosure(CMSCollector* cms_collector,
                    MemRegion span,
                    CMSBitMap* bitMap,
                    CMSMarkStack* markStack,
                    HeapWord* finger,
                    MarkFromRootsClosure* parent);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual entry points used by specialized oop iteration.
  inline void do_oop_nv(oop* p)       { PushOrMarkClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }

  // Deal with a stack overflow condition
  void handle_stack_overflow(HeapWord* lost);
 private:
  inline void do_yield_check();
};
// A parallel (MT) version of the above.
// This closure is used during the concurrent marking phase
// following the first checkpoint. Its use is buried in
// the closure Par_MarkFromRootsClosure.
class Par_PushOrMarkClosure: public CMSOopClosure {
 private:
  CMSCollector* _collector;
  MemRegion     _whole_span;     // The full region being marked.
  MemRegion     _span;           // local chunk
  CMSBitMap*    _bit_map;        // Shared mark bit map.
  OopTaskQueue* _work_queue;     // Per-worker queue of grey objects.
  CMSMarkStack* _overflow_stack; // Shared stack for work-queue overflow.
  HeapWord* const  _finger;             // This worker's scan finger.
  HeapWord** const _global_finger_addr; // Address of the shared global finger.
  Par_MarkFromRootsClosure* const
                _parent;         // Enclosing closure; notified on overflow, presumably.
 protected:
  DO_OOP_WORK_DEFN
 public:
  Par_PushOrMarkClosure(CMSCollector* cms_collector,
                        MemRegion span,
                        CMSBitMap* bit_map,
                        OopTaskQueue* work_queue,
                        CMSMarkStack* mark_stack,
                        HeapWord* finger,
                        HeapWord** global_finger_addr,
                        Par_MarkFromRootsClosure* parent);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual entry points used by specialized oop iteration.
  inline void do_oop_nv(oop* p)       { Par_PushOrMarkClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }

  // Deal with a stack overflow condition
  void handle_stack_overflow(HeapWord* lost);
 private:
  inline void do_yield_check();
};
// For objects in CMS generation, this closure marks
// given objects (transitively) as being reachable/live.
// This is currently used during the (weak) reference object
// processing phase of the CMS final checkpoint step, as
// well as during the concurrent precleaning of the discovered
// reference lists.
class CMSKeepAliveClosure: public CMSOopClosure {
 private:
  CMSCollector*   _collector;
  const MemRegion _span;       // Region of interest for marking.
  CMSMarkStack*   _mark_stack; // Stack of grey objects.
  CMSBitMap*      _bit_map;    // Mark bit map.
  bool            _concurrent_precleaning; // True when used during concurrent precleaning.
 protected:
  DO_OOP_WORK_DEFN
 public:
  CMSKeepAliveClosure(CMSCollector* collector, MemRegion span,
                      CMSBitMap* bit_map, CMSMarkStack* mark_stack,
                      bool cpc);
  bool concurrent_precleaning() const { return _concurrent_precleaning; }
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual entry points used by specialized oop iteration.
  inline void do_oop_nv(oop* p)       { CMSKeepAliveClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
};
// Inner closure used by CMSParKeepAliveClosure (below) to mark objects
// and push them on a work queue during parallel reference processing.
class CMSInnerParMarkAndPushClosure: public CMSOopClosure {
 private:
  CMSCollector* _collector;
  MemRegion     _span;       // Region of interest for marking.
  OopTaskQueue* _work_queue; // Per-worker queue of grey objects.
  CMSBitMap*    _bit_map;    // Shared mark bit map.
 protected:
  DO_OOP_WORK_DEFN
 public:
  CMSInnerParMarkAndPushClosure(CMSCollector* collector,
                                MemRegion span, CMSBitMap* bit_map,
                                OopTaskQueue* work_queue);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual entry points used by specialized oop iteration.
  inline void do_oop_nv(oop* p)       { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
};
// A parallel (MT) version of the above, used when
// reference processing is parallel; the only difference
// is in the do_oop method.
class CMSParKeepAliveClosure: public CMSOopClosure {
 private:
  MemRegion     _span;       // Region of interest for marking.
  OopTaskQueue* _work_queue; // Per-worker queue of grey objects.
  CMSBitMap*    _bit_map;    // Shared mark bit map.
  CMSInnerParMarkAndPushClosure
                _mark_and_push; // Inner closure used while draining the queue.
  const uint    _low_water_mark; // Queue threshold; presumably used by trim_queue() — confirm in .cpp.
  void trim_queue(uint max);     // Drain the work queue down to (at most) max entries.
 protected:
  DO_OOP_WORK_DEFN
 public:
  CMSParKeepAliveClosure(CMSCollector* collector, MemRegion span,
                         CMSBitMap* bit_map, OopTaskQueue* work_queue);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
};
410 #endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSOOPCLOSURES_HPP