Tue, 15 Sep 2009 21:53:47 -0700
6863023: need non-perm oops in code cache for JSR 292
Summary: Make a special root-list for those few nmethods which might contain non-perm oops.
Reviewed-by: twisti, kvn, never, jmasa, ysr
1 /*
2 * Copyright (c) 2007 Sun Microsystems, Inc. All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
/////////////////////////////////////////////////////////////////
// Closures used by ConcurrentMarkSweepGeneration's collector
/////////////////////////////////////////////////////////////////
// Forward declarations: these types are defined elsewhere in the
// CMS sources; only pointers/references to them appear below.
class ConcurrentMarkSweepGeneration;
class CMSBitMap;
class CMSMarkStack;
class CMSCollector;
class MarkFromRootsClosure;
class Par_MarkFromRootsClosure;
// Decode the oop and call do_oop on it.
// Boilerplate shared by the closures below: declares the internal
// do_oop(oop) worker and defines do_oop_work(T*), which loads the
// (possibly narrow) heap oop at p and, if non-null, decodes it and
// forwards the resulting oop to do_oop(oop).
// NOTE: no '//' comments may be placed inside the macro body -- they
// would swallow the line-continuation backslashes.
#define DO_OOP_WORK_DEFN \
  void do_oop(oop obj); \
  template <class T> inline void do_oop_work(T* p) { \
    T heap_oop = oopDesc::load_heap_oop(p); \
    if (!oopDesc::is_null(heap_oop)) { \
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); \
      do_oop(obj); \
    } \
  }
// Closure applied to (root) oops during the initial marking phase;
// presumably marks the referenced objects in the CMS bit map when
// they lie in _span (definitions live in the .cpp file).
class MarkRefsIntoClosure: public OopsInGenClosure {
 private:
  const MemRegion _span;    // region of interest for marking
  CMSBitMap* _bitMap;       // CMS marking bit map
  const bool _should_do_nmethods; // whether code-cache (nmethod) roots are scanned
 protected:
  DO_OOP_WORK_DEFN
 public:
  MarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap,
                      bool should_do_nmethods);
  bool should_do_nmethods() { return _should_do_nmethods; }
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual variants used by the specialized oop-iteration machinery.
  inline void do_oop_nv(oop* p)       { MarkRefsIntoClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
  bool do_header() { return true; }
  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
};
// A variant of the above used in certain kinds of CMS
// marking verification.
// Carries two bit maps: _verification_bm is written during the
// verification pass, while _cms_bm is the collector's bit map being
// checked against -- TODO confirm against .cpp definitions.
class MarkRefsIntoVerifyClosure: public OopsInGenClosure {
 private:
  const MemRegion _span;       // region of interest for marking
  CMSBitMap* _verification_bm; // bit map used by the verification pass
  CMSBitMap* _cms_bm;          // the collector's own bit map
  const bool _should_do_nmethods; // whether code-cache (nmethod) roots are scanned
 protected:
  DO_OOP_WORK_DEFN
 public:
  MarkRefsIntoVerifyClosure(MemRegion span, CMSBitMap* verification_bm,
                            CMSBitMap* cms_bm, bool should_do_nmethods);
  bool should_do_nmethods() { return _should_do_nmethods; }
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual variants used by the specialized oop-iteration machinery.
  inline void do_oop_nv(oop* p)       { MarkRefsIntoVerifyClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
  bool do_header() { return true; }
  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
};
// The non-parallel version (the parallel version appears further below).
// Used to mark an object and push it for later scanning; also
// participates in concurrent precleaning via the mod union table
// (exact semantics are in the .cpp definitions).
class PushAndMarkClosure: public OopClosure {
 private:
  CMSCollector* _collector;
  MemRegion     _span;            // region of interest
  CMSBitMap*    _bit_map;         // CMS marking bit map
  CMSBitMap*    _mod_union_table; // presumably tracks mutations seen while precleaning -- verify in .cpp
  CMSMarkStack* _mark_stack;      // stack of grey (marked, unscanned) objects
  CMSMarkStack* _revisit_stack;   // klasses to be revisited (see remember_klass)
  bool          _concurrent_precleaning; // true when used during concurrent precleaning
  bool const    _should_remember_klasses;
 protected:
  DO_OOP_WORK_DEFN
 public:
  PushAndMarkClosure(CMSCollector* collector,
                     MemRegion span,
                     ReferenceProcessor* rp,
                     CMSBitMap* bit_map,
                     CMSBitMap* mod_union_table,
                     CMSMarkStack* mark_stack,
                     CMSMarkStack* revisit_stack,
                     bool concurrent_precleaning);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual variants used by the specialized oop-iteration machinery.
  inline void do_oop_nv(oop* p)       { PushAndMarkClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
  bool do_header() { return true; }
  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
  virtual const bool should_remember_klasses() const {
    return _should_remember_klasses;
  }
  virtual void remember_klass(Klass* k);
};
// In the parallel case, the revisit stack, the bit map and the
// reference processor are currently all shared. Access to
// these shared mutable structures must use appropriate
// synchronization (for instance, via CAS). The marking stack
// used in the non-parallel case above is here replaced with
// an OopTaskQueue structure to allow efficient work stealing.
class Par_PushAndMarkClosure: public OopClosure {
 private:
  CMSCollector* _collector;
  MemRegion     _span;          // region of interest
  CMSBitMap*    _bit_map;       // shared CMS marking bit map
  OopTaskQueue* _work_queue;    // per-worker queue of grey objects (supports stealing)
  CMSMarkStack* _revisit_stack; // shared stack of klasses to be revisited
  bool const    _should_remember_klasses;
 protected:
  DO_OOP_WORK_DEFN
 public:
  Par_PushAndMarkClosure(CMSCollector* collector,
                         MemRegion span,
                         ReferenceProcessor* rp,
                         CMSBitMap* bit_map,
                         OopTaskQueue* work_queue,
                         CMSMarkStack* revisit_stack);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual variants used by the specialized oop-iteration machinery.
  inline void do_oop_nv(oop* p)       { Par_PushAndMarkClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
  bool do_header() { return true; }
  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
  virtual const bool should_remember_klasses() const {
    return _should_remember_klasses;
  }
  virtual void remember_klass(Klass* k);
};
// The non-parallel version (the parallel version appears further below).
// Combines marking with transitive scanning: objects reached are
// handed to the embedded PushAndMarkClosure. Can periodically yield
// (see do_yield_check) and may drain an overflow list.
class MarkRefsIntoAndScanClosure: public OopsInGenClosure {
 private:
  MemRegion          _span;       // region of interest
  CMSBitMap*         _bit_map;    // CMS marking bit map
  CMSMarkStack*      _mark_stack; // stack of grey objects
  PushAndMarkClosure _pushAndMarkClosure; // does the per-oop mark-and-push work
  CMSCollector*      _collector;
  Mutex*             _freelistLock; // set via set_freelistLock(); presumably released around yields -- verify in .cpp
  bool               _yield;        // whether the closure should yield periodically
  // Whether closure is being used for concurrent precleaning
  bool               _concurrent_precleaning;
 protected:
  DO_OOP_WORK_DEFN
 public:
  MarkRefsIntoAndScanClosure(MemRegion span,
                             ReferenceProcessor* rp,
                             CMSBitMap* bit_map,
                             CMSBitMap* mod_union_table,
                             CMSMarkStack* mark_stack,
                             CMSMarkStack* revisit_stack,
                             CMSCollector* collector,
                             bool should_yield,
                             bool concurrent_precleaning);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual variants used by the specialized oop-iteration machinery.
  inline void do_oop_nv(oop* p)       { MarkRefsIntoAndScanClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
  bool do_header() { return true; }
  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
  void set_freelistLock(Mutex* m) {
    _freelistLock = m;
  }

 private:
  inline void do_yield_check();
  void do_yield_work();
  bool take_from_overflow_list();
};
// In this, the parallel avatar of MarkRefsIntoAndScanClosure, the revisit
// stack and the bitMap are shared, so access needs to be suitably
// synchronized. An OopTaskQueue structure, supporting efficient
// workstealing, replaces a CMSMarkStack for storing grey objects.
class Par_MarkRefsIntoAndScanClosure: public OopsInGenClosure {
 private:
  MemRegion     _span;       // region of interest
  CMSBitMap*    _bit_map;    // shared CMS marking bit map
  OopTaskQueue* _work_queue; // per-worker queue of grey objects
  const uint    _low_water_mark; // queue threshold; presumably governs trim_queue -- verify in .cpp
  Par_PushAndMarkClosure _par_pushAndMarkClosure; // does the per-oop mark-and-push work
 protected:
  DO_OOP_WORK_DEFN
 public:
  Par_MarkRefsIntoAndScanClosure(CMSCollector* collector,
                                 MemRegion span,
                                 ReferenceProcessor* rp,
                                 CMSBitMap* bit_map,
                                 OopTaskQueue* work_queue,
                                 CMSMarkStack* revisit_stack);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual variants used by the specialized oop-iteration machinery.
  inline void do_oop_nv(oop* p)       { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
  bool do_header() { return true; }
  virtual const bool do_nmethods() const { return true; }
  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
  // Drain the work queue down to (approximately) the given size.
  void trim_queue(uint size);
};
// This closure is used during the concurrent marking phase
// following the first checkpoint. Its use is buried in
// the closure MarkFromRootsClosure.
// The _finger / _parent pair suggests the usual CMS scheme of
// pushing objects beyond the scan finger and marking those behind
// it -- confirm against the .cpp definitions.
class PushOrMarkClosure: public OopClosure {
 private:
  CMSCollector* _collector;
  MemRegion     _span;         // region of interest
  CMSBitMap*    _bitMap;       // CMS marking bit map
  CMSMarkStack* _markStack;    // stack of grey objects
  CMSMarkStack* _revisitStack; // klasses to be revisited
  HeapWord* const _finger;     // scan finger at construction time
  MarkFromRootsClosure* const
                _parent;       // enclosing MarkFromRootsClosure
  bool const    _should_remember_klasses;
 protected:
  DO_OOP_WORK_DEFN
 public:
  PushOrMarkClosure(CMSCollector* cms_collector,
                    MemRegion span,
                    CMSBitMap* bitMap,
                    CMSMarkStack* markStack,
                    CMSMarkStack* revisitStack,
                    HeapWord* finger,
                    MarkFromRootsClosure* parent);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual variants used by the specialized oop-iteration machinery.
  inline void do_oop_nv(oop* p)       { PushOrMarkClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
  virtual const bool should_remember_klasses() const {
    return _should_remember_klasses;
  }
  virtual void remember_klass(Klass* k);
  // Deal with a stack overflow condition
  void handle_stack_overflow(HeapWord* lost);
 private:
  inline void do_yield_check();
};
// A parallel (MT) version of the above.
// This closure is used during the concurrent marking phase
// following the first checkpoint. Its use is buried in
// the closure Par_MarkFromRootsClosure.
class Par_PushOrMarkClosure: public OopClosure {
 private:
  CMSCollector* _collector;
  MemRegion     _whole_span;     // the entire span being marked
  MemRegion     _span;           // local chunk
  CMSBitMap*    _bit_map;        // shared CMS marking bit map
  OopTaskQueue* _work_queue;     // per-worker queue of grey objects
  CMSMarkStack* _overflow_stack; // shared overflow area when the queue fills up -- verify in .cpp
  CMSMarkStack* _revisit_stack;  // shared stack of klasses to be revisited
  HeapWord* const  _finger;      // this worker's scan finger
  HeapWord** const _global_finger_addr; // address of the shared global finger
  Par_MarkFromRootsClosure* const
                _parent;         // enclosing Par_MarkFromRootsClosure
  bool const    _should_remember_klasses;
 protected:
  DO_OOP_WORK_DEFN
 public:
  Par_PushOrMarkClosure(CMSCollector* cms_collector,
                        MemRegion span,
                        CMSBitMap* bit_map,
                        OopTaskQueue* work_queue,
                        CMSMarkStack* mark_stack,
                        CMSMarkStack* revisit_stack,
                        HeapWord* finger,
                        HeapWord** global_finger_addr,
                        Par_MarkFromRootsClosure* parent);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual variants used by the specialized oop-iteration machinery.
  inline void do_oop_nv(oop* p)       { Par_PushOrMarkClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
  virtual const bool should_remember_klasses() const {
    return _should_remember_klasses;
  }
  virtual void remember_klass(Klass* k);
  // Deal with a stack overflow condition
  void handle_stack_overflow(HeapWord* lost);
 private:
  inline void do_yield_check();
};
320 // For objects in CMS generation, this closure marks
321 // given objects (transitively) as being reachable/live.
322 // This is currently used during the (weak) reference object
323 // processing phase of the CMS final checkpoint step, as
324 // well as during the concurrent precleaning of the discovered
325 // reference lists.
326 class CMSKeepAliveClosure: public OopClosure {
327 private:
328 CMSCollector* _collector;
329 const MemRegion _span;
330 CMSMarkStack* _mark_stack;
331 CMSBitMap* _bit_map;
332 bool _concurrent_precleaning;
333 protected:
334 DO_OOP_WORK_DEFN
335 public:
336 CMSKeepAliveClosure(CMSCollector* collector, MemRegion span,
337 CMSBitMap* bit_map, CMSMarkStack* mark_stack,
338 bool cpc):
339 _collector(collector),
340 _span(span),
341 _bit_map(bit_map),
342 _mark_stack(mark_stack),
343 _concurrent_precleaning(cpc) {
344 assert(!_span.is_empty(), "Empty span could spell trouble");
345 }
346 bool concurrent_precleaning() const { return _concurrent_precleaning; }
347 virtual void do_oop(oop* p);
348 virtual void do_oop(narrowOop* p);
349 inline void do_oop_nv(oop* p) { CMSKeepAliveClosure::do_oop_work(p); }
350 inline void do_oop_nv(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
351 };
353 class CMSInnerParMarkAndPushClosure: public OopClosure {
354 private:
355 CMSCollector* _collector;
356 MemRegion _span;
357 OopTaskQueue* _work_queue;
358 CMSBitMap* _bit_map;
359 protected:
360 DO_OOP_WORK_DEFN
361 public:
362 CMSInnerParMarkAndPushClosure(CMSCollector* collector,
363 MemRegion span, CMSBitMap* bit_map,
364 OopTaskQueue* work_queue):
365 _collector(collector),
366 _span(span),
367 _bit_map(bit_map),
368 _work_queue(work_queue) { }
369 virtual void do_oop(oop* p);
370 virtual void do_oop(narrowOop* p);
371 inline void do_oop_nv(oop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
372 inline void do_oop_nv(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
373 };
// A parallel (MT) version of the above, used when
// reference processing is parallel; the only difference
// is in the do_oop method.
class CMSParKeepAliveClosure: public OopClosure {
 private:
  CMSCollector* _collector;
  MemRegion     _span;       // region of interest
  OopTaskQueue* _work_queue; // per-worker queue of grey objects
  CMSBitMap*    _bit_map;    // shared CMS marking bit map
  CMSInnerParMarkAndPushClosure
                _mark_and_push;   // does the per-oop mark-and-push work
  const uint    _low_water_mark;  // queue threshold; presumably governs trim_queue -- verify in .cpp
  // Drain the work queue down to (approximately) the given size.
  void trim_queue(uint max);
 protected:
  DO_OOP_WORK_DEFN
 public:
  CMSParKeepAliveClosure(CMSCollector* collector, MemRegion span,
                         CMSBitMap* bit_map, OopTaskQueue* work_queue);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual variants used by the specialized oop-iteration machinery.
  inline void do_oop_nv(oop* p)       { CMSParKeepAliveClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
};