src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp

author       ysr
date         Tue, 06 May 2008 15:37:36 -0700
changeset    578:b5489bb705c9
parent       548:ba764ed4b6f2
child        887:00b023ae2d78
permissions  -rw-r--r--

6662086: 6u4+, 7b11+: CMS never clears referents when -XX:+ParallelRefProcEnabled
Summary: Construct the relevant CMSIsAliveClosure used by CMS during parallel reference processing with the correct span. It had incorrectly been constructed with an empty span, a regression introduced in 6417901.
Reviewed-by: jcoomes

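The fix itself is not in this header (the is-alive closure is defined alongside the collector), but the failure mode described in the summary can be modeled in isolation. The snippet below is a standalone sketch, not HotSpot code, and every name in it is illustrative: an is-alive predicate that treats anything outside its span as live will, when handed an empty span, report every referent as live, so reference clearing never happens.

// Standalone sketch only -- illustrative types, not the actual CMS code.
#include <cassert>
#include <cstdint>

struct MemRegionSketch {
  uintptr_t _start, _end;                 // empty when _start == _end
  bool contains(uintptr_t addr) const { return addr >= _start && addr < _end; }
};

struct IsAliveSketch {
  MemRegionSketch _span;
  // Objects outside the span are conservatively treated as live; inside
  // the span, liveness would be decided by the marking bit map (omitted).
  bool is_alive(uintptr_t addr) const { return !_span.contains(addr); }
};

int main() {
  IsAliveSketch empty_span  = { { 0x0,    0x0    } };  // the regression: empty span
  IsAliveSketch proper_span = { { 0x1000, 0x9000 } };  // span of the CMS generation
  uintptr_t referent = 0x2000;             // an otherwise unreachable referent
  assert(empty_span.is_alive(referent));   // looks live -> referent never cleared
  assert(!proper_span.is_alive(referent)); // correctly subject to clearing
  return 0;
}

With the correct span, referents inside the CMS generation are answered by the marking state and can be cleared when dead, which is the behavior the changeset restores.
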
/*
 * Copyright (c) 2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

/////////////////////////////////////////////////////////////////
// Closures used by ConcurrentMarkSweepGeneration's collector
/////////////////////////////////////////////////////////////////
class ConcurrentMarkSweepGeneration;
class CMSBitMap;
class CMSMarkStack;
class CMSCollector;
class MarkFromRootsClosure;
class Par_MarkFromRootsClosure;

// Decode the oop and call do_oop on it.
#define DO_OOP_WORK_DEFN \
  void do_oop(oop obj);                                   \
  template <class T> inline void do_oop_work(T* p) {      \
    T heap_oop = oopDesc::load_heap_oop(p);               \
    if (!oopDesc::is_null(heap_oop)) {                    \
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);       \
      do_oop(obj);                                        \
    }                                                     \
  }
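
// Each closure below pulls in DO_OOP_WORK_DEFN to obtain a declaration of
// do_oop(oop) plus the templated do_oop_work(T*) above, which handles both
// oop* and narrowOop* slots: the slot is loaded, null-checked, decoded from
// its compressed form if necessary, and the resulting oop is handed to
// do_oop(oop), which each closure defines out of line.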

class MarkRefsIntoClosure: public OopsInGenClosure {
 private:
  const MemRegion _span;
  CMSBitMap*      _bitMap;
  const bool      _should_do_nmethods;
 protected:
  DO_OOP_WORK_DEFN
 public:
  MarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap,
                      bool should_do_nmethods);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { MarkRefsIntoClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
  bool do_header() { return true; }
  virtual const bool do_nmethods() const {
    return _should_do_nmethods;
  }
  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
};

// A variant of the above used in certain kinds of CMS
// marking verification.
class MarkRefsIntoVerifyClosure: public OopsInGenClosure {
 private:
  const MemRegion _span;
  CMSBitMap*      _verification_bm;
  CMSBitMap*      _cms_bm;
  const bool      _should_do_nmethods;
 protected:
  DO_OOP_WORK_DEFN
 public:
  MarkRefsIntoVerifyClosure(MemRegion span, CMSBitMap* verification_bm,
                            CMSBitMap* cms_bm, bool should_do_nmethods);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { MarkRefsIntoVerifyClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
  bool do_header() { return true; }
  virtual const bool do_nmethods() const {
    return _should_do_nmethods;
  }
  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
};

// The non-parallel version (the parallel version appears further below).
class PushAndMarkClosure: public OopClosure {
 private:
  CMSCollector* _collector;
  MemRegion     _span;
  CMSBitMap*    _bit_map;
  CMSBitMap*    _mod_union_table;
  CMSMarkStack* _mark_stack;
  CMSMarkStack* _revisit_stack;
  bool          _concurrent_precleaning;
  bool const    _should_remember_klasses;
 protected:
  DO_OOP_WORK_DEFN
 public:
  PushAndMarkClosure(CMSCollector* collector,
                     MemRegion span,
                     ReferenceProcessor* rp,
                     CMSBitMap* bit_map,
                     CMSBitMap* mod_union_table,
                     CMSMarkStack* mark_stack,
                     CMSMarkStack* revisit_stack,
                     bool concurrent_precleaning);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { PushAndMarkClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
  bool do_header() { return true; }
  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
  virtual const bool should_remember_klasses() const {
    return _should_remember_klasses;
  }
  virtual void remember_klass(Klass* k);
};

// In the parallel case, the revisit stack, the bit map and the
// reference processor are currently all shared. Access to
// these shared mutable structures must use appropriate
// synchronization (for instance, via CAS). The marking stack
// used in the non-parallel case above is here replaced with
// an OopTaskQueue structure to allow efficient work stealing.
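// Note: because these structures are shared among worker threads, the
// out-of-line do_oop implementation is expected to mark bits atomically
// (a CAS-based par_mark-style operation) and to push grey objects onto
// the lock-free OopTaskQueue, rather than onto the single-threaded
// CMSMarkStack used by PushAndMarkClosure above.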
class Par_PushAndMarkClosure: public OopClosure {
 private:
  CMSCollector* _collector;
  MemRegion     _span;
  CMSBitMap*    _bit_map;
  OopTaskQueue* _work_queue;
  CMSMarkStack* _revisit_stack;
  bool const    _should_remember_klasses;
 protected:
  DO_OOP_WORK_DEFN
 public:
  Par_PushAndMarkClosure(CMSCollector* collector,
                         MemRegion span,
                         ReferenceProcessor* rp,
                         CMSBitMap* bit_map,
                         OopTaskQueue* work_queue,
                         CMSMarkStack* revisit_stack);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { Par_PushAndMarkClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
  bool do_header() { return true; }
  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
  virtual const bool should_remember_klasses() const {
    return _should_remember_klasses;
  }
  virtual void remember_klass(Klass* k);
};

// The non-parallel version (the parallel version appears further below).
class MarkRefsIntoAndScanClosure: public OopsInGenClosure {
 private:
  MemRegion          _span;
  CMSBitMap*         _bit_map;
  CMSMarkStack*      _mark_stack;
  PushAndMarkClosure _pushAndMarkClosure;
  CMSCollector*      _collector;
  Mutex*             _freelistLock;
  bool               _yield;
  // Whether closure is being used for concurrent precleaning
  bool               _concurrent_precleaning;
 protected:
  DO_OOP_WORK_DEFN
 public:
  MarkRefsIntoAndScanClosure(MemRegion span,
                             ReferenceProcessor* rp,
                             CMSBitMap* bit_map,
                             CMSBitMap* mod_union_table,
                             CMSMarkStack* mark_stack,
                             CMSMarkStack* revisit_stack,
                             CMSCollector* collector,
                             bool should_yield,
                             bool concurrent_precleaning);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { MarkRefsIntoAndScanClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
  bool do_header() { return true; }
  virtual const bool do_nmethods() const { return true; }
  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
  void set_freelistLock(Mutex* m) {
    _freelistLock = m;
  }

 private:
  inline void do_yield_check();
  void do_yield_work();
  bool take_from_overflow_list();
};

// In this, the parallel avatar of MarkRefsIntoAndScanClosure, the revisit
// stack and the bitMap are shared, so access needs to be suitably
// synchronized. An OopTaskQueue structure, supporting efficient
// work stealing, replaces a CMSMarkStack for storing grey objects.
class Par_MarkRefsIntoAndScanClosure: public OopsInGenClosure {
 private:
  MemRegion              _span;
  CMSBitMap*             _bit_map;
  OopTaskQueue*          _work_queue;
  const uint             _low_water_mark;
  Par_PushAndMarkClosure _par_pushAndMarkClosure;
 protected:
  DO_OOP_WORK_DEFN
 public:
  Par_MarkRefsIntoAndScanClosure(CMSCollector* collector,
                                 MemRegion span,
                                 ReferenceProcessor* rp,
                                 CMSBitMap* bit_map,
                                 OopTaskQueue* work_queue,
                                 CMSMarkStack*  revisit_stack);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
  bool do_header() { return true; }
  virtual const bool do_nmethods() const { return true; }
  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
  void trim_queue(uint size);
};

// This closure is used during the concurrent marking phase
// following the first checkpoint. Its use is buried in
// the closure MarkFromRootsClosure.
class PushOrMarkClosure: public OopClosure {
 private:
  CMSCollector*   _collector;
  MemRegion       _span;
  CMSBitMap*      _bitMap;
  CMSMarkStack*   _markStack;
  CMSMarkStack*   _revisitStack;
  HeapWord* const _finger;
  MarkFromRootsClosure* const
                  _parent;
  bool const      _should_remember_klasses;
 protected:
  DO_OOP_WORK_DEFN
 public:
  PushOrMarkClosure(CMSCollector* cms_collector,
                    MemRegion span,
                    CMSBitMap* bitMap,
                    CMSMarkStack* markStack,
                    CMSMarkStack* revisitStack,
                    HeapWord* finger,
                    MarkFromRootsClosure* parent);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { PushOrMarkClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
  virtual const bool should_remember_klasses() const {
    return _should_remember_klasses;
  }
  virtual void remember_klass(Klass* k);
  // Deal with a stack overflow condition
  void handle_stack_overflow(HeapWord* lost);
 private:
  inline void do_yield_check();
};

// A parallel (MT) version of the above.
// This closure is used during the concurrent marking phase
// following the first checkpoint. Its use is buried in
// the closure Par_MarkFromRootsClosure.
class Par_PushOrMarkClosure: public OopClosure {
 private:
  CMSCollector*    _collector;
  MemRegion        _whole_span;
  MemRegion        _span;        // local chunk
  CMSBitMap*       _bit_map;
  OopTaskQueue*    _work_queue;
  CMSMarkStack*    _overflow_stack;
  CMSMarkStack*    _revisit_stack;
  HeapWord*  const _finger;
  HeapWord** const _global_finger_addr;
  Par_MarkFromRootsClosure* const
                   _parent;
  bool const       _should_remember_klasses;
 protected:
  DO_OOP_WORK_DEFN
 public:
  Par_PushOrMarkClosure(CMSCollector* cms_collector,
                        MemRegion span,
                        CMSBitMap* bit_map,
                        OopTaskQueue* work_queue,
                        CMSMarkStack* mark_stack,
                        CMSMarkStack* revisit_stack,
                        HeapWord* finger,
                        HeapWord** global_finger_addr,
                        Par_MarkFromRootsClosure* parent);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { Par_PushOrMarkClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
  virtual const bool should_remember_klasses() const {
    return _should_remember_klasses;
  }
  virtual void remember_klass(Klass* k);
  // Deal with a stack overflow condition
  void handle_stack_overflow(HeapWord* lost);
 private:
  inline void do_yield_check();
};

// For objects in CMS generation, this closure marks
// given objects (transitively) as being reachable/live.
// This is currently used during the (weak) reference object
// processing phase of the CMS final checkpoint step.
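// (Reference processing drives three cooperating closures: an is-alive
// predicate, a keep-alive closure such as this one, and a drain/complete-gc
// closure. The keep-alive and is-alive closures must be constructed over
// the same CMS span -- cf. the changeset summary above, where the is-alive
// closure had been built with an empty span.)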
class CMSKeepAliveClosure: public OopClosure {
 private:
  CMSCollector* _collector;
  const MemRegion _span;
  CMSMarkStack* _mark_stack;
  CMSBitMap*    _bit_map;
 protected:
  DO_OOP_WORK_DEFN
 public:
  CMSKeepAliveClosure(CMSCollector* collector, MemRegion span,
                      CMSBitMap* bit_map, CMSMarkStack* mark_stack):
    _collector(collector),
    _span(span),
    _bit_map(bit_map),
    _mark_stack(mark_stack) {
    assert(!_span.is_empty(), "Empty span could spell trouble");
  }
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { CMSKeepAliveClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
};

class CMSInnerParMarkAndPushClosure: public OopClosure {
 private:
  CMSCollector* _collector;
  MemRegion     _span;
  OopTaskQueue* _work_queue;
  CMSBitMap*    _bit_map;
 protected:
  DO_OOP_WORK_DEFN
 public:
  CMSInnerParMarkAndPushClosure(CMSCollector* collector,
                                MemRegion span, CMSBitMap* bit_map,
                                OopTaskQueue* work_queue):
    _collector(collector),
    _span(span),
    _bit_map(bit_map),
    _work_queue(work_queue) { }
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
};

// A parallel (MT) version of the above, used when
// reference processing is parallel; the only difference
// is in the do_oop method.
class CMSParKeepAliveClosure: public OopClosure {
 private:
  CMSCollector* _collector;
  MemRegion     _span;
  OopTaskQueue* _work_queue;
  CMSBitMap*    _bit_map;
  CMSInnerParMarkAndPushClosure
                _mark_and_push;
  const uint    _low_water_mark;
  void trim_queue(uint max);
 protected:
  DO_OOP_WORK_DEFN
 public:
  CMSParKeepAliveClosure(CMSCollector* collector, MemRegion span,
                         CMSBitMap* bit_map, OopTaskQueue* work_queue);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { CMSParKeepAliveClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
};
