src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp

Thu, 20 Nov 2008 16:56:09 -0800

author
ysr
date
Thu, 20 Nov 2008 16:56:09 -0800
changeset 888
c96030fff130
parent 887
00b023ae2d78
child 1370
05f89f00a864
child 1424
148e5441d916
permissions
-rw-r--r--

6684579: SoftReference processing can be made more efficient
Summary: For current soft-ref clearing policies, we can decide at marking time if a soft-reference will definitely not be cleared, postponing the decision of whether it will definitely be cleared to the final reference processing phase. This can be especially beneficial in the case of concurrent collectors where the marking is usually concurrent but reference processing is usually not.
Reviewed-by: jmasa

duke@435 1 /*
duke@435 2 * Copyright (c) 2007 Sun Microsystems, Inc. All Rights Reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
duke@435 19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
duke@435 20 * CA 95054 USA or visit www.sun.com if you need additional information or
duke@435 21 * have any questions.
duke@435 22 *
duke@435 23 */
duke@435 24
/////////////////////////////////////////////////////////////////
// Closures used by ConcurrentMarkSweepGeneration's collector
/////////////////////////////////////////////////////////////////

// Forward declarations: the closure classes below only hold pointers to
// these types, so their full definitions are not needed in this header.
class ConcurrentMarkSweepGeneration;
class CMSBitMap;
class CMSMarkStack;
class CMSCollector;
class MarkFromRootsClosure;
class Par_MarkFromRootsClosure;
duke@435 34
// Decode the oop and call do_oop on it.
//
// Boilerplate shared by every closure in this file.  It declares the
// single-oop hook do_oop(oop), and defines the templated helper
// do_oop_work(T* p) -- with T being oop* or narrowOop* -- which loads
// the (possibly compressed) heap oop at p and, if non-null, decodes it
// and forwards the resulting oop to do_oop.  Kept as a macro (rather
// than a comment-laden base class) so each closure gets its own
// non-virtual do_oop_work for the specialized iterators to inline.
#define DO_OOP_WORK_DEFN \
  void do_oop(oop obj); \
  template <class T> inline void do_oop_work(T* p) { \
    T heap_oop = oopDesc::load_heap_oop(p); \
    if (!oopDesc::is_null(heap_oop)) { \
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); \
      do_oop(obj); \
    } \
  }
coleenp@548 45
// Records references that point into _span in the marking bit map.
// (do_oop is implemented out of line; the exact marking action lives
// in the .cpp/.inline.hpp files.)
class MarkRefsIntoClosure: public OopsInGenClosure {
 private:
  const MemRegion _span;                // only oops within this region are of interest
  CMSBitMap*      _bitMap;              // CMS marking bit map
  const bool      _should_do_nmethods;  // returned by do_nmethods() below
 protected:
  DO_OOP_WORK_DEFN
 public:
  MarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap,
                      bool should_do_nmethods);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual ("nv") variants used by the specialized oop iterators.
  inline void do_oop_nv(oop* p)       { MarkRefsIntoClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
  bool do_header() { return true; }     // process the object header as well
  virtual const bool do_nmethods() const {
    return _should_do_nmethods;
  }
  Prefetch::style prefetch_style() {
    return Prefetch::do_read;           // read-prefetch ahead of the scan
  }
};
duke@435 68
// A variant of the above used in certain kinds of CMS
// marking verification.  Carries two bit maps: _verification_bm is
// populated during verification and _cms_bm is the collector's own
// marking bit map to check against (see out-of-line do_oop).
class MarkRefsIntoVerifyClosure: public OopsInGenClosure {
 private:
  const MemRegion _span;                // region of interest
  CMSBitMap*      _verification_bm;     // bit map built for verification
  CMSBitMap*      _cms_bm;              // the collector's marking bit map
  const bool      _should_do_nmethods;  // returned by do_nmethods() below
 protected:
  DO_OOP_WORK_DEFN
 public:
  MarkRefsIntoVerifyClosure(MemRegion span, CMSBitMap* verification_bm,
                            CMSBitMap* cms_bm, bool should_do_nmethods);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual ("nv") variants used by the specialized oop iterators.
  inline void do_oop_nv(oop* p)       { MarkRefsIntoVerifyClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
  bool do_header() { return true; }     // process the object header as well
  virtual const bool do_nmethods() const {
    return _should_do_nmethods;
  }
  Prefetch::style prefetch_style() {
    return Prefetch::do_read;           // read-prefetch ahead of the scan
  }
};
duke@435 94
// The non-parallel version (the parallel version appears further below).
class PushAndMarkClosure: public OopClosure {
 private:
  CMSCollector* _collector;
  MemRegion     _span;              // region of interest
  CMSBitMap*    _bit_map;           // CMS marking bit map
  CMSBitMap*    _mod_union_table;   // mod-union table used by the collector
  CMSMarkStack* _mark_stack;        // stack of objects still to be scanned
  CMSMarkStack* _revisit_stack;     // klasses to revisit (see remember_klass)
  bool          _concurrent_precleaning;  // true when used during concurrent precleaning
  bool const    _should_remember_klasses; // returned by should_remember_klasses()
 protected:
  DO_OOP_WORK_DEFN
 public:
  PushAndMarkClosure(CMSCollector* collector,
                     MemRegion span,
                     ReferenceProcessor* rp,
                     CMSBitMap* bit_map,
                     CMSBitMap* mod_union_table,
                     CMSMarkStack* mark_stack,
                     CMSMarkStack* revisit_stack,
                     bool concurrent_precleaning);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual ("nv") variants used by the specialized oop iterators.
  inline void do_oop_nv(oop* p)       { PushAndMarkClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
  bool do_header() { return true; }     // process the object header as well
  Prefetch::style prefetch_style() {
    return Prefetch::do_read;           // read-prefetch ahead of the scan
  }
  virtual const bool should_remember_klasses() const {
    return _should_remember_klasses;
  }
  virtual void remember_klass(Klass* k);  // defined out of line
};
duke@435 130
// In the parallel case, the revisit stack, the bit map and the
// reference processor are currently all shared. Access to
// these shared mutable structures must use appropriate
// synchronization (for instance, via CAS). The marking stack
// used in the non-parallel case above is here replaced with
// an OopTaskQueue structure to allow efficient work stealing.
class Par_PushAndMarkClosure: public OopClosure {
 private:
  CMSCollector* _collector;
  MemRegion     _span;            // region of interest
  CMSBitMap*    _bit_map;         // shared marking bit map
  OopTaskQueue* _work_queue;      // per-worker queue of objects to be scanned
  CMSMarkStack* _revisit_stack;   // shared stack of klasses to revisit
  bool const    _should_remember_klasses;  // returned by should_remember_klasses()
 protected:
  DO_OOP_WORK_DEFN
 public:
  Par_PushAndMarkClosure(CMSCollector* collector,
                         MemRegion span,
                         ReferenceProcessor* rp,
                         CMSBitMap* bit_map,
                         OopTaskQueue* work_queue,
                         CMSMarkStack* revisit_stack);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual ("nv") variants used by the specialized oop iterators.
  inline void do_oop_nv(oop* p)       { Par_PushAndMarkClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
  bool do_header() { return true; }     // process the object header as well
  Prefetch::style prefetch_style() {
    return Prefetch::do_read;           // read-prefetch ahead of the scan
  }
  virtual const bool should_remember_klasses() const {
    return _should_remember_klasses;
  }
  virtual void remember_klass(Klass* k);  // defined out of line
};
duke@435 167
// The non-parallel version (the parallel version appears further below).
// Combines marking-into-the-bit-map with a transitive scan via the
// embedded PushAndMarkClosure; can yield cooperatively (see _yield and
// do_yield_check) when used concurrently.
class MarkRefsIntoAndScanClosure: public OopsInGenClosure {
 private:
  MemRegion          _span;            // region of interest
  CMSBitMap*         _bit_map;         // CMS marking bit map
  CMSMarkStack*      _mark_stack;      // stack of objects still to be scanned
  PushAndMarkClosure _pushAndMarkClosure;  // does the per-reference work
  CMSCollector*      _collector;
  Mutex*             _freelistLock;    // settable after construction, see below
  bool               _yield;           // whether to yield during operation
  // Whether closure is being used for concurrent precleaning
  bool               _concurrent_precleaning;
 protected:
  DO_OOP_WORK_DEFN
 public:
  MarkRefsIntoAndScanClosure(MemRegion span,
                             ReferenceProcessor* rp,
                             CMSBitMap* bit_map,
                             CMSBitMap* mod_union_table,
                             CMSMarkStack* mark_stack,
                             CMSMarkStack* revisit_stack,
                             CMSCollector* collector,
                             bool should_yield,
                             bool concurrent_precleaning);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual ("nv") variants used by the specialized oop iterators.
  inline void do_oop_nv(oop* p)       { MarkRefsIntoAndScanClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
  bool do_header() { return true; }     // process the object header as well
  virtual const bool do_nmethods() const { return true; }
  Prefetch::style prefetch_style() {
    return Prefetch::do_read;           // read-prefetch ahead of the scan
  }
  // The free-list lock is injected after construction by the caller.
  void set_freelistLock(Mutex* m) {
    _freelistLock = m;
  }

 private:
  inline void do_yield_check();         // cheap check; calls do_yield_work if needed
  void do_yield_work();
  bool take_from_overflow_list();       // refill from the collector's overflow list
};
duke@435 210
// In this, the parallel avatar of MarkRefsIntoAndScanClosure, the revisit
// stack and the bitMap are shared, so access needs to be suitably
// synchronized. An OopTaskQueue structure, supporting efficient
// workstealing, replaces a CMSMarkStack for storing grey objects.
class Par_MarkRefsIntoAndScanClosure: public OopsInGenClosure {
 private:
  MemRegion     _span;            // region of interest
  CMSBitMap*    _bit_map;         // shared marking bit map
  OopTaskQueue* _work_queue;      // this worker's queue of grey objects
  const uint    _low_water_mark;  // queue level at/below which trim_queue stops draining
  Par_PushAndMarkClosure _par_pushAndMarkClosure;  // does the per-reference work
 protected:
  DO_OOP_WORK_DEFN
 public:
  Par_MarkRefsIntoAndScanClosure(CMSCollector* collector,
                                 MemRegion span,
                                 ReferenceProcessor* rp,
                                 CMSBitMap* bit_map,
                                 OopTaskQueue* work_queue,
                                 CMSMarkStack* revisit_stack);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual ("nv") variants used by the specialized oop iterators.
  inline void do_oop_nv(oop* p)       { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
  bool do_header() { return true; }     // process the object header as well
  virtual const bool do_nmethods() const { return true; }
  Prefetch::style prefetch_style() {
    return Prefetch::do_read;           // read-prefetch ahead of the scan
  }
  void trim_queue(uint size);           // drain _work_queue down to "size" entries
};
duke@435 242
// This closure is used during the concurrent marking phase
// following the first checkpoint. Its use is buried in
// the closure MarkFromRootsClosure.
class PushOrMarkClosure: public OopClosure {
 private:
  CMSCollector* _collector;
  MemRegion     _span;           // region of interest
  CMSBitMap*    _bitMap;         // CMS marking bit map
  CMSMarkStack* _markStack;      // stack of grey objects
  CMSMarkStack* _revisitStack;   // klasses to revisit (see remember_klass)
  HeapWord* const _finger;       // marking finger captured at construction
  MarkFromRootsClosure* const
                _parent;         // the enclosing closure that created us
  bool const    _should_remember_klasses;  // returned by should_remember_klasses()
 protected:
  DO_OOP_WORK_DEFN
 public:
  PushOrMarkClosure(CMSCollector* cms_collector,
                    MemRegion span,
                    CMSBitMap* bitMap,
                    CMSMarkStack* markStack,
                    CMSMarkStack* revisitStack,
                    HeapWord* finger,
                    MarkFromRootsClosure* parent);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual ("nv") variants used by the specialized oop iterators.
  inline void do_oop_nv(oop* p)       { PushOrMarkClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
  virtual const bool should_remember_klasses() const {
    return _should_remember_klasses;
  }
  virtual void remember_klass(Klass* k);  // defined out of line
  // Deal with a stack overflow condition
  void handle_stack_overflow(HeapWord* lost);
 private:
  inline void do_yield_check();
};
duke@435 280
// A parallel (MT) version of the above.
// This closure is used during the concurrent marking phase
// following the first checkpoint. Its use is buried in
// the closure Par_MarkFromRootsClosure.
class Par_PushOrMarkClosure: public OopClosure {
 private:
  CMSCollector* _collector;
  MemRegion     _whole_span;       // the full span being marked
  MemRegion     _span;             // local chunk
  CMSBitMap*    _bit_map;          // shared marking bit map
  OopTaskQueue* _work_queue;       // this worker's queue of grey objects
  CMSMarkStack* _overflow_stack;   // shared stack for work-queue overflow
  CMSMarkStack* _revisit_stack;    // shared stack of klasses to revisit
  HeapWord* const  _finger;        // local marking finger at construction
  HeapWord** const _global_finger_addr;  // address of the shared global finger
  Par_MarkFromRootsClosure* const
                _parent;           // the enclosing closure that created us
  bool const    _should_remember_klasses;  // returned by should_remember_klasses()
 protected:
  DO_OOP_WORK_DEFN
 public:
  Par_PushOrMarkClosure(CMSCollector* cms_collector,
                        MemRegion span,
                        CMSBitMap* bit_map,
                        OopTaskQueue* work_queue,
                        CMSMarkStack* mark_stack,
                        CMSMarkStack* revisit_stack,
                        HeapWord* finger,
                        HeapWord** global_finger_addr,
                        Par_MarkFromRootsClosure* parent);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual ("nv") variants used by the specialized oop iterators.
  inline void do_oop_nv(oop* p)       { Par_PushOrMarkClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
  virtual const bool should_remember_klasses() const {
    return _should_remember_klasses;
  }
  virtual void remember_klass(Klass* k);  // defined out of line
  // Deal with a stack overflow condition
  void handle_stack_overflow(HeapWord* lost);
 private:
  inline void do_yield_check();
};
duke@435 324
duke@435 325 // For objects in CMS generation, this closure marks
duke@435 326 // given objects (transitively) as being reachable/live.
duke@435 327 // This is currently used during the (weak) reference object
ysr@887 328 // processing phase of the CMS final checkpoint step, as
ysr@887 329 // well as during the concurrent precleaning of the discovered
ysr@887 330 // reference lists.
duke@435 331 class CMSKeepAliveClosure: public OopClosure {
coleenp@548 332 private:
duke@435 333 CMSCollector* _collector;
ysr@578 334 const MemRegion _span;
duke@435 335 CMSMarkStack* _mark_stack;
duke@435 336 CMSBitMap* _bit_map;
ysr@887 337 bool _concurrent_precleaning;
coleenp@548 338 protected:
coleenp@548 339 DO_OOP_WORK_DEFN
duke@435 340 public:
duke@435 341 CMSKeepAliveClosure(CMSCollector* collector, MemRegion span,
ysr@887 342 CMSBitMap* bit_map, CMSMarkStack* mark_stack,
ysr@887 343 bool cpc):
duke@435 344 _collector(collector),
duke@435 345 _span(span),
duke@435 346 _bit_map(bit_map),
ysr@887 347 _mark_stack(mark_stack),
ysr@887 348 _concurrent_precleaning(cpc) {
ysr@578 349 assert(!_span.is_empty(), "Empty span could spell trouble");
ysr@578 350 }
ysr@887 351 bool concurrent_precleaning() const { return _concurrent_precleaning; }
coleenp@548 352 virtual void do_oop(oop* p);
coleenp@548 353 virtual void do_oop(narrowOop* p);
coleenp@548 354 inline void do_oop_nv(oop* p) { CMSKeepAliveClosure::do_oop_work(p); }
coleenp@548 355 inline void do_oop_nv(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
duke@435 356 };
duke@435 357
duke@435 358 class CMSInnerParMarkAndPushClosure: public OopClosure {
coleenp@548 359 private:
duke@435 360 CMSCollector* _collector;
duke@435 361 MemRegion _span;
duke@435 362 OopTaskQueue* _work_queue;
duke@435 363 CMSBitMap* _bit_map;
coleenp@548 364 protected:
coleenp@548 365 DO_OOP_WORK_DEFN
duke@435 366 public:
duke@435 367 CMSInnerParMarkAndPushClosure(CMSCollector* collector,
duke@435 368 MemRegion span, CMSBitMap* bit_map,
duke@435 369 OopTaskQueue* work_queue):
duke@435 370 _collector(collector),
duke@435 371 _span(span),
duke@435 372 _bit_map(bit_map),
duke@435 373 _work_queue(work_queue) { }
coleenp@548 374 virtual void do_oop(oop* p);
coleenp@548 375 virtual void do_oop(narrowOop* p);
coleenp@548 376 inline void do_oop_nv(oop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
coleenp@548 377 inline void do_oop_nv(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
duke@435 378 };
duke@435 379
// A parallel (MT) version of the above, used when
// reference processing is parallel; the only difference
// is in the do_oop method.
class CMSParKeepAliveClosure: public OopClosure {
 private:
  CMSCollector* _collector;
  MemRegion     _span;          // region of interest
  OopTaskQueue* _work_queue;    // this worker's queue of objects to scan
  CMSBitMap*    _bit_map;       // shared marking bit map
  CMSInnerParMarkAndPushClosure
                _mark_and_push; // does the per-reference mark-and-push work
  const uint    _low_water_mark;  // queue level at/below which trim_queue stops draining
  void trim_queue(uint max);    // drain _work_queue down to "max" entries
 protected:
  DO_OOP_WORK_DEFN
 public:
  CMSParKeepAliveClosure(CMSCollector* collector, MemRegion span,
                         CMSBitMap* bit_map, OopTaskQueue* work_queue);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual ("nv") variants used by the specialized oop iterators.
  inline void do_oop_nv(oop* p)       { CMSParKeepAliveClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
};

mercurial