Thu, 06 Jan 2011 23:50:02 -0800
7008136: CMS: assert((HeapWord*)nextChunk <= _limit) failed: sweep invariant
Summary: The recorded _sweep_limit may not necessarily remain a block boundary as the old generation expands during a concurrent cycle. Terminal actions inside the sweep closure need to be aware of this as they cross over the limit.
Reviewed-by: johnc, minqi
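As a rough, hypothetical illustration of the invariant described in the summary (not the actual HotSpot sweep code), the failing assert amounts to assuming that stepping block by block always lands on or before the recorded limit; once the generation can expand mid-cycle, the final step may straddle the limit and has to be treated as the terminal case instead:

    #include <cassert>
    #include <cstddef>
    #include <vector>

    int main() {
      // Blocks laid out end to end; the sweep limit was recorded at sweep start.
      std::vector<size_t> block_sizes = {4, 8, 3, 16, 5};
      const size_t limit = 18;   // no longer a block boundary after "expansion"
      size_t addr = 0;
      for (size_t sz : block_sizes) {
        size_t next = addr + sz;
        // assert(next <= limit) would fire on the block straddling the limit;
        // crossing the limit is instead the signal to run terminal actions.
        if (next >= limit) {
          break;                 // terminal actions for this sweep pass go here
        }
        addr = next;
      }
      assert(addr <= limit);
      return 0;
    }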
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSOOPCLOSURES_HPP
#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSOOPCLOSURES_HPP

#include "memory/genOopClosures.hpp"

/////////////////////////////////////////////////////////////////
// Closures used by ConcurrentMarkSweepGeneration's collector
/////////////////////////////////////////////////////////////////
class ConcurrentMarkSweepGeneration;
class CMSBitMap;
class CMSMarkStack;
class CMSCollector;
class MarkFromRootsClosure;
class Par_MarkFromRootsClosure;

// Decode the oop and call do_oop on it.
#define DO_OOP_WORK_DEFN                                      \
  void do_oop(oop obj);                                       \
  template <class T> inline void do_oop_work(T* p) {          \
    T heap_oop = oopDesc::load_heap_oop(p);                   \
    if (!oopDesc::is_null(heap_oop)) {                        \
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);  \
      do_oop(obj);                                            \
    }                                                         \
  }

class MarkRefsIntoClosure: public OopsInGenClosure {
 private:
  const MemRegion _span;
  CMSBitMap*      _bitMap;
 protected:
  DO_OOP_WORK_DEFN
 public:
  MarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { MarkRefsIntoClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
  bool do_header() { return true; }
  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
};

// A variant of the above used in certain kinds of CMS
// marking verification.
class MarkRefsIntoVerifyClosure: public OopsInGenClosure {
 private:
  const MemRegion _span;
  CMSBitMap*      _verification_bm;
  CMSBitMap*      _cms_bm;
 protected:
  DO_OOP_WORK_DEFN
 public:
  MarkRefsIntoVerifyClosure(MemRegion span, CMSBitMap* verification_bm,
                            CMSBitMap* cms_bm);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { MarkRefsIntoVerifyClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
  bool do_header() { return true; }
  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
};

// KlassRememberingOopClosure is used when marking of the permanent generation
// is being done. It adds fields to support revisiting of klasses
// for class unloading. _should_remember_klasses should be set to
// indicate if klasses should be remembered. Currently that is whenever
// CMS class unloading is turned on. The _revisit_stack is used
// to save the klasses for later processing.
class KlassRememberingOopClosure : public OopClosure {
 protected:
  CMSCollector* _collector;
  CMSMarkStack* _revisit_stack;
  bool const    _should_remember_klasses;
 public:
  void check_remember_klasses() const PRODUCT_RETURN;
  virtual const bool should_remember_klasses() const {
    check_remember_klasses();
    return _should_remember_klasses;
  }
  virtual void remember_klass(Klass* k);

  KlassRememberingOopClosure(CMSCollector* collector,
                             ReferenceProcessor* rp,
                             CMSMarkStack* revisit_stack);
};
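
// Illustrative sketch (not part of this file): under the assumption of a
// hypothetical subclass, a do_oop_work implementation would consult
// should_remember_klasses() and push the scanned object's klass onto the
// revisit stack so it can be processed later for class unloading.
#if 0
template <class T> void ExampleRememberingClosure::do_oop_work(T* p) {
  oop obj = oopDesc::load_decode_heap_oop(p);
  if (obj != NULL) {
    // mark/push obj as usual, then remember its klass if requested
    if (should_remember_klasses()) {
      remember_klass(obj->klass());   // saved on _revisit_stack
    }
  }
}
#endif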

// Similar to KlassRememberingOopClosure for use when multiple
// GC threads will execute the closure.

class Par_KlassRememberingOopClosure : public KlassRememberingOopClosure {
 public:
  Par_KlassRememberingOopClosure(CMSCollector* collector,
                                 ReferenceProcessor* rp,
                                 CMSMarkStack* revisit_stack):
    KlassRememberingOopClosure(collector, rp, revisit_stack) {}
  virtual void remember_klass(Klass* k);
};

// The non-parallel version (the parallel version appears further below).
class PushAndMarkClosure: public KlassRememberingOopClosure {
 private:
  MemRegion     _span;
  CMSBitMap*    _bit_map;
  CMSBitMap*    _mod_union_table;
  CMSMarkStack* _mark_stack;
  bool          _concurrent_precleaning;
 protected:
  DO_OOP_WORK_DEFN
 public:
  PushAndMarkClosure(CMSCollector* collector,
                     MemRegion span,
                     ReferenceProcessor* rp,
                     CMSBitMap* bit_map,
                     CMSBitMap* mod_union_table,
                     CMSMarkStack* mark_stack,
                     CMSMarkStack* revisit_stack,
                     bool concurrent_precleaning);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { PushAndMarkClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
  bool do_header() { return true; }
  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
  // In support of class unloading
  virtual const bool should_remember_mdo() const {
    return false;
    // return _should_remember_klasses;
  }
  virtual void remember_mdo(DataLayout* v);
};

// In the parallel case, the revisit stack, the bit map and the
// reference processor are currently all shared. Access to
// these shared mutable structures must use appropriate
// synchronization (for instance, via CAS). The marking stack
// used in the non-parallel case above is here replaced with
// an OopTaskQueue structure to allow efficient work stealing.
class Par_PushAndMarkClosure: public Par_KlassRememberingOopClosure {
 private:
  MemRegion     _span;
  CMSBitMap*    _bit_map;
  OopTaskQueue* _work_queue;
 protected:
  DO_OOP_WORK_DEFN
 public:
  Par_PushAndMarkClosure(CMSCollector* collector,
                         MemRegion span,
                         ReferenceProcessor* rp,
                         CMSBitMap* bit_map,
                         OopTaskQueue* work_queue,
                         CMSMarkStack* revisit_stack);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { Par_PushAndMarkClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
  bool do_header() { return true; }
  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
  // In support of class unloading
  virtual const bool should_remember_mdo() const {
    return false;
    // return _should_remember_klasses;
  }
  virtual void remember_mdo(DataLayout* v);
};
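
// Illustrative sketch (not part of this file): the "appropriate
// synchronization (for instance, via CAS)" mentioned above amounts to the
// shared bit map only ever being mutated with an atomic test-and-set, so
// that two GC threads racing to mark the same object cannot lose a mark.
// The names below are hypothetical, simplified stand-ins.
#if 0
bool ExampleSharedBitMap::par_mark(HeapWord* addr) {
  volatile intptr_t* word_addr = word_for(addr);     // word holding the bit
  intptr_t           mask      = bit_mask_for(addr); // single-bit mask
  for (;;) {
    intptr_t old_val = *word_addr;
    if ((old_val & mask) != 0) {
      return false;                                  // already marked by another thread
    }
    intptr_t new_val = old_val | mask;
    // CAS retries on contention; exactly one thread wins the mark.
    if (Atomic::cmpxchg_ptr(new_val, word_addr, old_val) == old_val) {
      return true;
    }
  }
}
#endif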

// The non-parallel version (the parallel version appears further below).
class MarkRefsIntoAndScanClosure: public OopsInGenClosure {
 private:
  MemRegion          _span;
  CMSBitMap*         _bit_map;
  CMSMarkStack*      _mark_stack;
  PushAndMarkClosure _pushAndMarkClosure;
  CMSCollector*      _collector;
  Mutex*             _freelistLock;
  bool               _yield;
  // Whether closure is being used for concurrent precleaning
  bool               _concurrent_precleaning;
 protected:
  DO_OOP_WORK_DEFN
 public:
  MarkRefsIntoAndScanClosure(MemRegion span,
                             ReferenceProcessor* rp,
                             CMSBitMap* bit_map,
                             CMSBitMap* mod_union_table,
                             CMSMarkStack* mark_stack,
                             CMSMarkStack* revisit_stack,
                             CMSCollector* collector,
                             bool should_yield,
                             bool concurrent_precleaning);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { MarkRefsIntoAndScanClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
  bool do_header() { return true; }
  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
  void set_freelistLock(Mutex* m) {
    _freelistLock = m;
  }
  virtual const bool should_remember_klasses() const {
    return _pushAndMarkClosure.should_remember_klasses();
  }
  virtual void remember_klass(Klass* k) {
    _pushAndMarkClosure.remember_klass(k);
  }

 private:
  inline void do_yield_check();
  void do_yield_work();
  bool take_from_overflow_list();
};

// In this, the parallel avatar of MarkRefsIntoAndScanClosure, the revisit
// stack and the bitMap are shared, so access needs to be suitably
// synchronized. An OopTaskQueue structure, supporting efficient
// work stealing, replaces a CMSMarkStack for storing grey objects.
class Par_MarkRefsIntoAndScanClosure: public OopsInGenClosure {
 private:
  MemRegion              _span;
  CMSBitMap*             _bit_map;
  OopTaskQueue*          _work_queue;
  const uint             _low_water_mark;
  Par_PushAndMarkClosure _par_pushAndMarkClosure;
 protected:
  DO_OOP_WORK_DEFN
 public:
  Par_MarkRefsIntoAndScanClosure(CMSCollector* collector,
                                 MemRegion span,
                                 ReferenceProcessor* rp,
                                 CMSBitMap* bit_map,
                                 OopTaskQueue* work_queue,
                                 CMSMarkStack* revisit_stack);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
  bool do_header() { return true; }
  // When ScanMarkedObjectsAgainClosure is used,
  // it passes [Par_]MarkRefsIntoAndScanClosure to oop_oop_iterate(),
  // and this delegation is used.
  virtual const bool should_remember_klasses() const {
    return _par_pushAndMarkClosure.should_remember_klasses();
  }
  // See comment on should_remember_klasses() above.
  virtual void remember_klass(Klass* k) {
    _par_pushAndMarkClosure.remember_klass(k);
  }
  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
  void trim_queue(uint size);
};
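
// Illustrative sketch (not part of this file): the reason an OopTaskQueue
// replaces the CMSMarkStack in the parallel closures is the usual
// drain-local-then-steal pattern. The loop below is a simplified,
// hypothetical rendering of that pattern, not the actual task code.
#if 0
void ExampleMarkingTask::do_work(int worker_id) {
  oop obj;
  while (true) {
    // First drain our own queue of grey objects...
    while (_work_queue->pop_local(obj)) {
      obj->oop_iterate(&_mark_and_push_closure);  // scan, pushing new grey oops
    }
    // ...then try to steal from another worker's queue before giving up.
    if (_task_queues->steal(worker_id, &_seed, obj)) {
      obj->oop_iterate(&_mark_and_push_closure);
    } else {
      break;  // queues look empty; a termination protocol would follow here
    }
  }
}
#endif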

// This closure is used during the concurrent marking phase
// following the first checkpoint. Its use is buried in
// the closure MarkFromRootsClosure.
class PushOrMarkClosure: public KlassRememberingOopClosure {
 private:
  MemRegion     _span;
  CMSBitMap*    _bitMap;
  CMSMarkStack* _markStack;
  HeapWord*     const _finger;
  MarkFromRootsClosure* const _parent;
 protected:
  DO_OOP_WORK_DEFN
 public:
  PushOrMarkClosure(CMSCollector* cms_collector,
                    MemRegion span,
                    CMSBitMap* bitMap,
                    CMSMarkStack* markStack,
                    CMSMarkStack* revisitStack,
                    HeapWord* finger,
                    MarkFromRootsClosure* parent);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { PushOrMarkClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
  // In support of class unloading
  virtual const bool should_remember_mdo() const {
    return false;
    // return _should_remember_klasses;
  }
  virtual void remember_mdo(DataLayout* v);

  // Deal with a stack overflow condition
  void handle_stack_overflow(HeapWord* lost);
 private:
  inline void do_yield_check();
};
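
// Illustrative sketch (not part of this file): "push or mark" refers to the
// decision made for each newly discovered grey object relative to the scan
// finger. A simplified, hypothetical rendering of that decision:
#if 0
void ExamplePushOrMark::do_oop(oop obj) {
  HeapWord* addr = (HeapWord*)obj;
  if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
    _bitMap->mark(addr);                 // object is now grey
    if (addr < _finger) {
      // The bit map scan has already passed this address, so the object
      // would otherwise be missed: push it so its fields get scanned.
      if (!_markStack->push(obj)) {
        handle_stack_overflow(addr);     // stack full; fall back and restart scan
      }
    }
    // Objects at or beyond the finger need no push: the ongoing bit map
    // iteration will reach their mark bit and scan them in due course.
  }
}
#endif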

// A parallel (MT) version of the above.
// This closure is used during the concurrent marking phase
// following the first checkpoint. Its use is buried in
// the closure Par_MarkFromRootsClosure.
class Par_PushOrMarkClosure: public Par_KlassRememberingOopClosure {
 private:
  MemRegion     _whole_span;
  MemRegion     _span;           // local chunk
  CMSBitMap*    _bit_map;
  OopTaskQueue* _work_queue;
  CMSMarkStack* _overflow_stack;
  HeapWord*     const _finger;
  HeapWord**    const _global_finger_addr;
  Par_MarkFromRootsClosure* const _parent;
 protected:
  DO_OOP_WORK_DEFN
 public:
  Par_PushOrMarkClosure(CMSCollector* cms_collector,
                        MemRegion span,
                        CMSBitMap* bit_map,
                        OopTaskQueue* work_queue,
                        CMSMarkStack* mark_stack,
                        CMSMarkStack* revisit_stack,
                        HeapWord* finger,
                        HeapWord** global_finger_addr,
                        Par_MarkFromRootsClosure* parent);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { Par_PushOrMarkClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
  // In support of class unloading
  virtual const bool should_remember_mdo() const {
    return false;
    // return _should_remember_klasses;
  }
  virtual void remember_mdo(DataLayout* v);

  // Deal with a stack overflow condition
  void handle_stack_overflow(HeapWord* lost);
 private:
  inline void do_yield_check();
};

// For objects in CMS generation, this closure marks
// given objects (transitively) as being reachable/live.
// This is currently used during the (weak) reference object
// processing phase of the CMS final checkpoint step, as
// well as during the concurrent precleaning of the discovered
// reference lists.
class CMSKeepAliveClosure: public KlassRememberingOopClosure {
 private:
  const MemRegion _span;
  CMSMarkStack*   _mark_stack;
  CMSBitMap*      _bit_map;
  bool            _concurrent_precleaning;
 protected:
  DO_OOP_WORK_DEFN
 public:
  CMSKeepAliveClosure(CMSCollector* collector, MemRegion span,
                      CMSBitMap* bit_map, CMSMarkStack* mark_stack,
                      CMSMarkStack* revisit_stack, bool cpc);
  bool concurrent_precleaning() const { return _concurrent_precleaning; }
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { CMSKeepAliveClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
};

class CMSInnerParMarkAndPushClosure: public Par_KlassRememberingOopClosure {
 private:
  MemRegion     _span;
  OopTaskQueue* _work_queue;
  CMSBitMap*    _bit_map;
 protected:
  DO_OOP_WORK_DEFN
 public:
  CMSInnerParMarkAndPushClosure(CMSCollector* collector,
                                MemRegion span, CMSBitMap* bit_map,
                                CMSMarkStack* revisit_stack,
                                OopTaskQueue* work_queue);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
};

// A parallel (MT) version of the above, used when
// reference processing is parallel; the only difference
// is in the do_oop method.
class CMSParKeepAliveClosure: public Par_KlassRememberingOopClosure {
 private:
  MemRegion     _span;
  OopTaskQueue* _work_queue;
  CMSBitMap*    _bit_map;
  CMSInnerParMarkAndPushClosure
                _mark_and_push;
  const uint    _low_water_mark;
  void trim_queue(uint max);
 protected:
  DO_OOP_WORK_DEFN
 public:
  CMSParKeepAliveClosure(CMSCollector* collector, MemRegion span,
                         CMSBitMap* bit_map, CMSMarkStack* revisit_stack,
                         OopTaskQueue* work_queue);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { CMSParKeepAliveClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
};

#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSOOPCLOSURES_HPP