6798898: CMS: bugs related to class unloading

Mon, 24 Aug 2009 10:36:31 -0700

author
jmasa
date
Mon, 24 Aug 2009 10:36:31 -0700
changeset 1370
05f89f00a864
parent 1347
308762b2bf14
child 1372
ead53f6b615d

6798898: CMS: bugs related to class unloading
Summary: Override should_remember_klasses() and remember_klass() as needed.
Reviewed-by: ysr, jcoomes

src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/includeDB_gc_concurrentMarkSweep file | annotate | diff | comparison | revisions
src/share/vm/memory/iterator.cpp file | annotate | diff | comparison | revisions
src/share/vm/memory/iterator.hpp file | annotate | diff | comparison | revisions
src/share/vm/memory/referenceProcessor.cpp file | annotate | diff | comparison | revisions
     1.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp	Fri Aug 14 13:44:15 2009 -0700
     1.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp	Mon Aug 24 10:36:31 2009 -0700
     1.3 @@ -92,17 +92,50 @@
     1.4    }
     1.5  };
     1.6  
     1.7 +// KlassRememberingOopClosure is used when marking of the permanent generation
     1.8 +// is being done.  It adds fields to support revisiting of klasses
     1.9 +// for class unloading.  _should_remember_klasses should be set to
    1.10 +// indicate if klasses should be remembered.  Currently that is whenever
    1.11 +// CMS class unloading is turned on.  The _revisit_stack is used
    1.12 +// to save the klasses for later processing.
    1.13 +class KlassRememberingOopClosure : public OopClosure {
    1.14 + protected:
    1.15 +  CMSCollector* _collector;
    1.16 +  CMSMarkStack* _revisit_stack;
    1.17 +  bool const    _should_remember_klasses;
    1.18 + public:
    1.19 +  void check_remember_klasses() const PRODUCT_RETURN;
    1.20 +  virtual const bool should_remember_klasses() const {
    1.21 +    check_remember_klasses();
    1.22 +    return _should_remember_klasses;
    1.23 +  }
    1.24 +  virtual void remember_klass(Klass* k);
    1.25 +
    1.26 +  KlassRememberingOopClosure(CMSCollector* collector,
    1.27 +                             ReferenceProcessor* rp,
    1.28 +                             CMSMarkStack* revisit_stack);
    1.29 +};
    1.30 +
    1.31 +// Similar to KlassRememberingOopClosure for use when multiple
    1.32 +// GC threads will execute the closure.
    1.33 +
    1.34 +class Par_KlassRememberingOopClosure : public KlassRememberingOopClosure {
    1.35 + public:
    1.36 +  Par_KlassRememberingOopClosure(CMSCollector* collector,
    1.37 +                                 ReferenceProcessor* rp,
    1.38 +                                 CMSMarkStack* revisit_stack):
    1.39 +    KlassRememberingOopClosure(collector, rp, revisit_stack) {}
    1.40 +  virtual void remember_klass(Klass* k);
    1.41 +};
    1.42 +
    1.43  // The non-parallel version (the parallel version appears further below).
    1.44 -class PushAndMarkClosure: public OopClosure {
    1.45 +class PushAndMarkClosure: public KlassRememberingOopClosure {
    1.46   private:
    1.47 -  CMSCollector* _collector;
    1.48    MemRegion     _span;
    1.49    CMSBitMap*    _bit_map;
    1.50    CMSBitMap*    _mod_union_table;
    1.51    CMSMarkStack* _mark_stack;
    1.52 -  CMSMarkStack* _revisit_stack;
    1.53    bool          _concurrent_precleaning;
    1.54 -  bool const    _should_remember_klasses;
    1.55   protected:
    1.56    DO_OOP_WORK_DEFN
    1.57   public:
    1.58 @@ -122,10 +155,6 @@
    1.59    Prefetch::style prefetch_style() {
    1.60      return Prefetch::do_read;
    1.61    }
    1.62 -  virtual const bool should_remember_klasses() const {
    1.63 -    return _should_remember_klasses;
    1.64 -  }
    1.65 -  virtual void remember_klass(Klass* k);
    1.66  };
    1.67  
    1.68  // In the parallel case, the revisit stack, the bit map and the
    1.69 @@ -134,14 +163,11 @@
    1.70  // synchronization (for instance, via CAS). The marking stack
    1.71  // used in the non-parallel case above is here replaced with
    1.72  // an OopTaskQueue structure to allow efficient work stealing.
    1.73 -class Par_PushAndMarkClosure: public OopClosure {
    1.74 +class Par_PushAndMarkClosure: public Par_KlassRememberingOopClosure {
    1.75   private:
    1.76 -  CMSCollector* _collector;
    1.77    MemRegion     _span;
    1.78    CMSBitMap*    _bit_map;
    1.79    OopTaskQueue* _work_queue;
    1.80 -  CMSMarkStack* _revisit_stack;
    1.81 -  bool const    _should_remember_klasses;
    1.82   protected:
    1.83    DO_OOP_WORK_DEFN
    1.84   public:
    1.85 @@ -159,10 +185,6 @@
    1.86    Prefetch::style prefetch_style() {
    1.87      return Prefetch::do_read;
    1.88    }
    1.89 -  virtual const bool should_remember_klasses() const {
    1.90 -    return _should_remember_klasses;
    1.91 -  }
    1.92 -  virtual void remember_klass(Klass* k);
    1.93  };
    1.94  
    1.95  // The non-parallel version (the parallel version appears further below).
    1.96 @@ -201,6 +223,12 @@
    1.97    void set_freelistLock(Mutex* m) {
    1.98      _freelistLock = m;
    1.99    }
   1.100 +  virtual const bool should_remember_klasses() const {
   1.101 +    return _pushAndMarkClosure.should_remember_klasses();
   1.102 +  }
   1.103 +  virtual void remember_klass(Klass* k) {
   1.104 +    _pushAndMarkClosure.remember_klass(k);
   1.105 +  }
   1.106  
   1.107   private:
   1.108    inline void do_yield_check();
   1.109 @@ -234,6 +262,16 @@
   1.110    inline void do_oop_nv(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
   1.111    bool do_header() { return true; }
   1.112    virtual const bool do_nmethods() const { return true; }
   1.113 +  // When ScanMarkedObjectsAgainClosure is used,
   1.114 +  // it passes [Par_]MarkRefsIntoAndScanClosure to oop_oop_iterate(),
   1.115 +  // and this delegation is used.
   1.116 +  virtual const bool should_remember_klasses() const {
   1.117 +    return _par_pushAndMarkClosure.should_remember_klasses();
   1.118 +  }
   1.119 +  // See comment on should_remember_klasses() above.
   1.120 +  virtual void remember_klass(Klass* k) {
   1.121 +    _par_pushAndMarkClosure.remember_klass(k);
   1.122 +  }
   1.123    Prefetch::style prefetch_style() {
   1.124      return Prefetch::do_read;
   1.125    }
   1.126 @@ -243,17 +281,14 @@
   1.127  // This closure is used during the concurrent marking phase
   1.128  // following the first checkpoint. Its use is buried in
   1.129  // the closure MarkFromRootsClosure.
   1.130 -class PushOrMarkClosure: public OopClosure {
   1.131 +class PushOrMarkClosure: public KlassRememberingOopClosure {
   1.132   private:
   1.133 -  CMSCollector*   _collector;
   1.134    MemRegion       _span;
   1.135    CMSBitMap*      _bitMap;
   1.136    CMSMarkStack*   _markStack;
   1.137 -  CMSMarkStack*   _revisitStack;
   1.138    HeapWord* const _finger;
   1.139    MarkFromRootsClosure* const
   1.140                    _parent;
   1.141 -  bool const      _should_remember_klasses;
   1.142   protected:
   1.143    DO_OOP_WORK_DEFN
   1.144   public:
   1.145 @@ -268,10 +303,6 @@
   1.146    virtual void do_oop(narrowOop* p);
   1.147    inline void do_oop_nv(oop* p)       { PushOrMarkClosure::do_oop_work(p); }
   1.148    inline void do_oop_nv(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
   1.149 -  virtual const bool should_remember_klasses() const {
   1.150 -    return _should_remember_klasses;
   1.151 -  }
   1.152 -  virtual void remember_klass(Klass* k);
   1.153    // Deal with a stack overflow condition
   1.154    void handle_stack_overflow(HeapWord* lost);
   1.155   private:
   1.156 @@ -282,20 +313,17 @@
   1.157  // This closure is used during the concurrent marking phase
   1.158  // following the first checkpoint. Its use is buried in
   1.159  // the closure Par_MarkFromRootsClosure.
   1.160 -class Par_PushOrMarkClosure: public OopClosure {
   1.161 +class Par_PushOrMarkClosure: public Par_KlassRememberingOopClosure {
   1.162   private:
   1.163 -  CMSCollector*    _collector;
   1.164    MemRegion        _whole_span;
   1.165    MemRegion        _span;        // local chunk
   1.166    CMSBitMap*       _bit_map;
   1.167    OopTaskQueue*    _work_queue;
   1.168    CMSMarkStack*    _overflow_stack;
   1.169 -  CMSMarkStack*    _revisit_stack;
   1.170    HeapWord*  const _finger;
   1.171    HeapWord** const _global_finger_addr;
   1.172    Par_MarkFromRootsClosure* const
   1.173                     _parent;
   1.174 -  bool const       _should_remember_klasses;
   1.175   protected:
   1.176    DO_OOP_WORK_DEFN
   1.177   public:
   1.178 @@ -312,10 +340,6 @@
   1.179    virtual void do_oop(narrowOop* p);
   1.180    inline void do_oop_nv(oop* p)       { Par_PushOrMarkClosure::do_oop_work(p); }
   1.181    inline void do_oop_nv(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
   1.182 -  virtual const bool should_remember_klasses() const {
   1.183 -    return _should_remember_klasses;
   1.184 -  }
   1.185 -  virtual void remember_klass(Klass* k);
   1.186    // Deal with a stack overflow condition
   1.187    void handle_stack_overflow(HeapWord* lost);
   1.188   private:
   1.189 @@ -328,9 +352,8 @@
   1.190  // processing phase of the CMS final checkpoint step, as
   1.191  // well as during the concurrent precleaning of the discovered
   1.192  // reference lists.
   1.193 -class CMSKeepAliveClosure: public OopClosure {
   1.194 +class CMSKeepAliveClosure: public KlassRememberingOopClosure {
   1.195   private:
   1.196 -  CMSCollector* _collector;
   1.197    const MemRegion _span;
   1.198    CMSMarkStack* _mark_stack;
   1.199    CMSBitMap*    _bit_map;
   1.200 @@ -340,14 +363,7 @@
   1.201   public:
   1.202    CMSKeepAliveClosure(CMSCollector* collector, MemRegion span,
   1.203                        CMSBitMap* bit_map, CMSMarkStack* mark_stack,
   1.204 -                      bool cpc):
   1.205 -    _collector(collector),
   1.206 -    _span(span),
   1.207 -    _bit_map(bit_map),
   1.208 -    _mark_stack(mark_stack),
   1.209 -    _concurrent_precleaning(cpc) {
   1.210 -    assert(!_span.is_empty(), "Empty span could spell trouble");
   1.211 -  }
   1.212 +                      CMSMarkStack* revisit_stack, bool cpc);
   1.213    bool    concurrent_precleaning() const { return _concurrent_precleaning; }
   1.214    virtual void do_oop(oop* p);
   1.215    virtual void do_oop(narrowOop* p);
   1.216 @@ -355,9 +371,8 @@
   1.217    inline void do_oop_nv(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
   1.218  };
   1.219  
   1.220 -class CMSInnerParMarkAndPushClosure: public OopClosure {
   1.221 +class CMSInnerParMarkAndPushClosure: public Par_KlassRememberingOopClosure {
   1.222   private:
   1.223 -  CMSCollector* _collector;
   1.224    MemRegion     _span;
   1.225    OopTaskQueue* _work_queue;
   1.226    CMSBitMap*    _bit_map;
   1.227 @@ -366,11 +381,8 @@
   1.228   public:
   1.229    CMSInnerParMarkAndPushClosure(CMSCollector* collector,
   1.230                                  MemRegion span, CMSBitMap* bit_map,
   1.231 -                                OopTaskQueue* work_queue):
   1.232 -    _collector(collector),
   1.233 -    _span(span),
   1.234 -    _bit_map(bit_map),
   1.235 -    _work_queue(work_queue) { }
   1.236 +                                CMSMarkStack* revisit_stack,
   1.237 +                                OopTaskQueue* work_queue);
   1.238    virtual void do_oop(oop* p);
   1.239    virtual void do_oop(narrowOop* p);
   1.240    inline void do_oop_nv(oop* p)       { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
   1.241 @@ -380,9 +392,8 @@
   1.242  // A parallel (MT) version of the above, used when
   1.243  // reference processing is parallel; the only difference
   1.244  // is in the do_oop method.
   1.245 -class CMSParKeepAliveClosure: public OopClosure {
   1.246 +class CMSParKeepAliveClosure: public Par_KlassRememberingOopClosure {
   1.247   private:
   1.248 -  CMSCollector* _collector;
   1.249    MemRegion     _span;
   1.250    OopTaskQueue* _work_queue;
   1.251    CMSBitMap*    _bit_map;
   1.252 @@ -394,7 +405,8 @@
   1.253    DO_OOP_WORK_DEFN
   1.254   public:
   1.255    CMSParKeepAliveClosure(CMSCollector* collector, MemRegion span,
   1.256 -                         CMSBitMap* bit_map, OopTaskQueue* work_queue);
   1.257 +                         CMSBitMap* bit_map, CMSMarkStack* revisit_stack,
   1.258 +                         OopTaskQueue* work_queue);
   1.259    virtual void do_oop(oop* p);
   1.260    virtual void do_oop(narrowOop* p);
   1.261    inline void do_oop_nv(oop* p)       { CMSParKeepAliveClosure::do_oop_work(p); }
     2.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp	Fri Aug 14 13:44:15 2009 -0700
     2.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp	Mon Aug 24 10:36:31 2009 -0700
     2.3 @@ -37,16 +37,25 @@
     2.4    }
     2.5  }
     2.6  
     2.7 -inline void PushOrMarkClosure::remember_klass(Klass* k) {
     2.8 -  if (!_revisitStack->push(oop(k))) {
     2.9 +#ifndef PRODUCT
    2.10 +void KlassRememberingOopClosure::check_remember_klasses() const {
    2.11 +  assert(_should_remember_klasses == must_remember_klasses(),
    2.12 +    "Should remember klasses in this context.");
    2.13 +}
    2.14 +#endif
    2.15 +
    2.16 +void KlassRememberingOopClosure::remember_klass(Klass* k) {
    2.17 +  if (!_revisit_stack->push(oop(k))) {
    2.18      fatal("Revisit stack overflow in PushOrMarkClosure");
    2.19    }
    2.20 +  check_remember_klasses();
    2.21  }
    2.22  
    2.23 -inline void Par_PushOrMarkClosure::remember_klass(Klass* k) {
    2.24 +void Par_KlassRememberingOopClosure::remember_klass(Klass* k) {
    2.25    if (!_revisit_stack->par_push(oop(k))) {
    2.26      fatal("Revisit stack overflow in PushOrMarkClosure");
    2.27    }
    2.28 +  check_remember_klasses();
    2.29  }
    2.30  
    2.31  inline void PushOrMarkClosure::do_yield_check() {
     3.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Fri Aug 14 13:44:15 2009 -0700
     3.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Mon Aug 24 10:36:31 2009 -0700
     3.3 @@ -2276,7 +2276,7 @@
     3.4  
     3.5            VM_CMS_Final_Remark final_remark_op(this);
     3.6            VMThread::execute(&final_remark_op);
     3.7 -          }
     3.8 +        }
     3.9          assert(_foregroundGCShouldWait, "block post-condition");
    3.10          break;
    3.11        case Sweeping:
    3.12 @@ -3499,6 +3499,7 @@
    3.13    ref_processor()->set_enqueuing_is_done(false);
    3.14  
    3.15    {
    3.16 +    // This is not needed. DEBUG_ONLY(RememberKlassesChecker imx(true);)
    3.17      COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
    3.18      gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
    3.19      gch->gen_process_strong_roots(_cmsGen->level(),
    3.20 @@ -3623,6 +3624,8 @@
    3.21    verify_overflow_empty();
    3.22    assert(_revisitStack.isEmpty(), "tabula rasa");
    3.23  
    3.24 +  DEBUG_ONLY(RememberKlassesChecker cmx(CMSClassUnloadingEnabled);)
    3.25 +
    3.26    bool result = false;
    3.27    if (CMSConcurrentMTEnabled && ParallelCMSThreads > 0) {
    3.28      result = do_marking_mt(asynch);
    3.29 @@ -3958,24 +3961,24 @@
    3.30    pst->all_tasks_completed();
    3.31  }
    3.32  
    3.33 -class Par_ConcMarkingClosure: public OopClosure {
    3.34 +class Par_ConcMarkingClosure: public Par_KlassRememberingOopClosure {
    3.35   private:
    3.36 -  CMSCollector* _collector;
    3.37    MemRegion     _span;
    3.38    CMSBitMap*    _bit_map;
    3.39    CMSMarkStack* _overflow_stack;
    3.40 -  CMSMarkStack* _revisit_stack;     // XXXXXX Check proper use
    3.41    OopTaskQueue* _work_queue;
    3.42   protected:
    3.43    DO_OOP_WORK_DEFN
    3.44   public:
    3.45    Par_ConcMarkingClosure(CMSCollector* collector, OopTaskQueue* work_queue,
    3.46 -                         CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
    3.47 -    _collector(collector),
    3.48 +                         CMSBitMap* bit_map, CMSMarkStack* overflow_stack,
    3.49 +                         CMSMarkStack* revisit_stack):
    3.50 +    Par_KlassRememberingOopClosure(collector, NULL, revisit_stack),
    3.51      _span(_collector->_span),
    3.52      _work_queue(work_queue),
    3.53      _bit_map(bit_map),
    3.54 -    _overflow_stack(overflow_stack) { }   // need to initialize revisit stack etc.
    3.55 +    _overflow_stack(overflow_stack)
    3.56 +  { }
    3.57    virtual void do_oop(oop* p);
    3.58    virtual void do_oop(narrowOop* p);
    3.59    void trim_queue(size_t max);
    3.60 @@ -4063,8 +4066,9 @@
    3.61    oop obj_to_scan;
    3.62    CMSBitMap* bm = &(_collector->_markBitMap);
    3.63    CMSMarkStack* ovflw = &(_collector->_markStack);
    3.64 +  CMSMarkStack* revisit = &(_collector->_revisitStack);
    3.65    int* seed = _collector->hash_seed(i);
    3.66 -  Par_ConcMarkingClosure cl(_collector, work_q, bm, ovflw);
    3.67 +  Par_ConcMarkingClosure cl(_collector, work_q, bm, ovflw, revisit);
    3.68    while (true) {
    3.69      cl.trim_queue(0);
    3.70      assert(work_q->size() == 0, "Should have been emptied above");
    3.71 @@ -4089,6 +4093,7 @@
    3.72    assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
    3.73           "CMS thread should hold CMS token");
    3.74  
    3.75 +  DEBUG_ONLY(RememberKlassesChecker mux(false);)
    3.76    // First give up the locks, then yield, then re-lock
    3.77    // We should probably use a constructor/destructor idiom to
    3.78    // do this unlock/lock or modify the MutexUnlocker class to
    3.79 @@ -4165,6 +4170,8 @@
    3.80    // multi-threaded marking phase.
    3.81    ReferenceProcessorMTMutator mt(ref_processor(), num_workers > 1);
    3.82  
    3.83 +  DEBUG_ONLY(RememberKlassesChecker cmx(CMSClassUnloadingEnabled);)
    3.84 +
    3.85    conc_workers()->start_task(&tsk);
    3.86    while (tsk.yielded()) {
    3.87      tsk.coordinator_yield();
    3.88 @@ -4404,7 +4411,8 @@
    3.89      CMSPrecleanRefsYieldClosure yield_cl(this);
    3.90      assert(rp->span().equals(_span), "Spans should be equal");
    3.91      CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
    3.92 -                                   &_markStack, true /* preclean */);
    3.93 +                                   &_markStack, &_revisitStack,
    3.94 +                                   true /* preclean */);
    3.95      CMSDrainMarkingStackClosure complete_trace(this,
    3.96                                     _span, &_markBitMap, &_markStack,
    3.97                                     &keep_alive, true /* preclean */);
    3.98 @@ -4424,6 +4432,7 @@
    3.99                              bitMapLock());
   3.100      startTimer();
   3.101      sample_eden();
   3.102 +
   3.103      // The following will yield to allow foreground
   3.104      // collection to proceed promptly. XXX YSR:
   3.105      // The code in this method may need further
   3.106 @@ -4453,6 +4462,7 @@
   3.107      SurvivorSpacePrecleanClosure
   3.108        sss_cl(this, _span, &_markBitMap, &_markStack,
   3.109               &pam_cl, before_count, CMSYield);
   3.110 +    DEBUG_ONLY(RememberKlassesChecker mx(CMSClassUnloadingEnabled);)
   3.111      dng->from()->object_iterate_careful(&sss_cl);
   3.112      dng->to()->object_iterate_careful(&sss_cl);
   3.113    }
   3.114 @@ -4554,6 +4564,13 @@
   3.115    verify_work_stacks_empty();
   3.116    verify_overflow_empty();
   3.117  
   3.118 +  // Turn off checking for this method but turn it back on
   3.119 +  // selectively.  There are yield points in this method
   3.120 +  // but it is difficult to turn the checking off just around
   3.121 +  // the yield points.  It is simpler to selectively turn
   3.122 +  // it on.
   3.123 +  DEBUG_ONLY(RememberKlassesChecker mux(false);)
   3.124 +
   3.125    // strategy: starting with the first card, accumulate contiguous
   3.126    // ranges of dirty cards; clear these cards, then scan the region
   3.127    // covered by these cards.
   3.128 @@ -4582,6 +4599,7 @@
   3.129      MemRegion dirtyRegion;
   3.130      {
   3.131        stopTimer();
   3.132 +      // Potential yield point
   3.133        CMSTokenSync ts(true);
   3.134        startTimer();
   3.135        sample_eden();
   3.136 @@ -4607,6 +4625,7 @@
   3.137        assert(numDirtyCards > 0, "consistency check");
   3.138        HeapWord* stop_point = NULL;
   3.139        stopTimer();
   3.140 +      // Potential yield point
   3.141        CMSTokenSyncWithLocks ts(true, gen->freelistLock(),
   3.142                                 bitMapLock());
   3.143        startTimer();
   3.144 @@ -4614,6 +4633,7 @@
   3.145          verify_work_stacks_empty();
   3.146          verify_overflow_empty();
   3.147          sample_eden();
   3.148 +        DEBUG_ONLY(RememberKlassesChecker mx(CMSClassUnloadingEnabled);)
   3.149          stop_point =
   3.150            gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
   3.151        }
   3.152 @@ -4701,6 +4721,7 @@
   3.153        sample_eden();
   3.154        verify_work_stacks_empty();
   3.155        verify_overflow_empty();
   3.156 +      DEBUG_ONLY(RememberKlassesChecker mx(CMSClassUnloadingEnabled);)
   3.157        HeapWord* stop_point =
   3.158          gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
   3.159        if (stop_point != NULL) {
   3.160 @@ -4800,6 +4821,7 @@
   3.161    assert(haveFreelistLocks(), "must have free list locks");
   3.162    assert_lock_strong(bitMapLock());
   3.163  
   3.164 +  DEBUG_ONLY(RememberKlassesChecker fmx(CMSClassUnloadingEnabled);)
   3.165    if (!init_mark_was_synchronous) {
   3.166      // We might assume that we need not fill TLAB's when
   3.167      // CMSScavengeBeforeRemark is set, because we may have just done
   3.168 @@ -4903,6 +4925,9 @@
   3.169    _markStack._hit_limit = 0;
   3.170    _markStack._failed_double = 0;
   3.171  
   3.172 +  // Check that all the klasses have been checked
   3.173 +  assert(_revisitStack.isEmpty(), "Not all klasses revisited");
   3.174 +
   3.175    if ((VerifyAfterGC || VerifyDuringGC) &&
   3.176        GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
   3.177      verify_after_remark();
   3.178 @@ -5574,9 +5599,13 @@
   3.179  void CMSRefProcTaskProxy::work(int i) {
   3.180    assert(_collector->_span.equals(_span), "Inconsistency in _span");
   3.181    CMSParKeepAliveClosure par_keep_alive(_collector, _span,
   3.182 -                                        _mark_bit_map, work_queue(i));
   3.183 +                                        _mark_bit_map,
   3.184 +                                        &_collector->_revisitStack,
   3.185 +                                        work_queue(i));
   3.186    CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span,
   3.187 -                                                 _mark_bit_map, work_queue(i));
   3.188 +                                                 _mark_bit_map,
   3.189 +                                                 &_collector->_revisitStack,
   3.190 +                                                 work_queue(i));
   3.191    CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map);
   3.192    _task.work(i, is_alive_closure, par_keep_alive, par_drain_stack);
   3.193    if (_task.marks_oops_alive()) {
   3.194 @@ -5604,12 +5633,13 @@
   3.195  };
   3.196  
   3.197  CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector,
   3.198 -  MemRegion span, CMSBitMap* bit_map, OopTaskQueue* work_queue):
   3.199 -   _collector(collector),
   3.200 +  MemRegion span, CMSBitMap* bit_map, CMSMarkStack* revisit_stack,
   3.201 +  OopTaskQueue* work_queue):
   3.202 +   Par_KlassRememberingOopClosure(collector, NULL, revisit_stack),
   3.203     _span(span),
   3.204     _bit_map(bit_map),
   3.205     _work_queue(work_queue),
   3.206 -   _mark_and_push(collector, span, bit_map, work_queue),
   3.207 +   _mark_and_push(collector, span, bit_map, revisit_stack, work_queue),
   3.208     _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
   3.209                          (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads)))
   3.210  { }
   3.211 @@ -5696,7 +5726,8 @@
   3.212    verify_work_stacks_empty();
   3.213  
   3.214    CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
   3.215 -                                          &_markStack, false /* !preclean */);
   3.216 +                                          &_markStack, &_revisitStack,
   3.217 +                                          false /* !preclean */);
   3.218    CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
   3.219                                  _span, &_markBitMap, &_markStack,
   3.220                                  &cmsKeepAliveClosure, false /* !preclean */);
   3.221 @@ -6531,6 +6562,7 @@
   3.222    assert_lock_strong(_freelistLock);
   3.223    assert_lock_strong(_bit_map->lock());
   3.224    // relinquish the free_list_lock and bitMaplock()
   3.225 +  DEBUG_ONLY(RememberKlassesChecker mux(false);)
   3.226    _bit_map->lock()->unlock();
   3.227    _freelistLock->unlock();
   3.228    ConcurrentMarkSweepThread::desynchronize(true);
   3.229 @@ -6703,6 +6735,7 @@
   3.230           "CMS thread should hold CMS token");
   3.231    assert_lock_strong(_freelistLock);
   3.232    assert_lock_strong(_bitMap->lock());
   3.233 +  DEBUG_ONLY(RememberKlassesChecker mux(false);)
   3.234    // relinquish the free_list_lock and bitMaplock()
   3.235    _bitMap->lock()->unlock();
   3.236    _freelistLock->unlock();
   3.237 @@ -6779,6 +6812,7 @@
   3.238    assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
   3.239           "CMS thread should hold CMS token");
   3.240    assert_lock_strong(_bit_map->lock());
   3.241 +  DEBUG_ONLY(RememberKlassesChecker smx(false);)
   3.242    // Relinquish the bit map lock
   3.243    _bit_map->lock()->unlock();
   3.244    ConcurrentMarkSweepThread::desynchronize(true);
   3.245 @@ -6941,6 +6975,7 @@
   3.246    assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
   3.247           "CMS thread should hold CMS token");
   3.248    assert_lock_strong(_bitMap->lock());
   3.249 +  DEBUG_ONLY(RememberKlassesChecker mux(false);)
   3.250    _bitMap->lock()->unlock();
   3.251    ConcurrentMarkSweepThread::desynchronize(true);
   3.252    ConcurrentMarkSweepThread::acknowledge_yield_request();
   3.253 @@ -7295,15 +7330,12 @@
   3.254                       CMSBitMap* bitMap, CMSMarkStack*  markStack,
   3.255                       CMSMarkStack*  revisitStack,
   3.256                       HeapWord* finger, MarkFromRootsClosure* parent) :
   3.257 -  OopClosure(collector->ref_processor()),
   3.258 -  _collector(collector),
   3.259 +  KlassRememberingOopClosure(collector, collector->ref_processor(), revisitStack),
   3.260    _span(span),
   3.261    _bitMap(bitMap),
   3.262    _markStack(markStack),
   3.263 -  _revisitStack(revisitStack),
   3.264    _finger(finger),
   3.265 -  _parent(parent),
   3.266 -  _should_remember_klasses(collector->should_unload_classes())
   3.267 +  _parent(parent)
   3.268  { }
   3.269  
   3.270  Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
   3.271 @@ -7315,18 +7347,17 @@
   3.272                       HeapWord* finger,
   3.273                       HeapWord** global_finger_addr,
   3.274                       Par_MarkFromRootsClosure* parent) :
   3.275 -  OopClosure(collector->ref_processor()),
   3.276 -  _collector(collector),
   3.277 +  Par_KlassRememberingOopClosure(collector,
   3.278 +                            collector->ref_processor(),
   3.279 +                            revisit_stack),
   3.280    _whole_span(collector->_span),
   3.281    _span(span),
   3.282    _bit_map(bit_map),
   3.283    _work_queue(work_queue),
   3.284    _overflow_stack(overflow_stack),
   3.285 -  _revisit_stack(revisit_stack),
   3.286    _finger(finger),
   3.287    _global_finger_addr(global_finger_addr),
   3.288 -  _parent(parent),
   3.289 -  _should_remember_klasses(collector->should_unload_classes())
   3.290 +  _parent(parent)
   3.291  { }
   3.292  
   3.293  // Assumes thread-safe access by callers, who are
   3.294 @@ -7456,6 +7487,14 @@
   3.295  void Par_PushOrMarkClosure::do_oop(oop* p)       { Par_PushOrMarkClosure::do_oop_work(p); }
   3.296  void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
   3.297  
   3.298 +KlassRememberingOopClosure::KlassRememberingOopClosure(CMSCollector* collector,
   3.299 +                                             ReferenceProcessor* rp,
   3.300 +                                             CMSMarkStack* revisit_stack) :
   3.301 +  OopClosure(rp),
   3.302 +  _collector(collector),
   3.303 +  _revisit_stack(revisit_stack),
   3.304 +  _should_remember_klasses(collector->should_unload_classes()) {}
   3.305 +
   3.306  PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
   3.307                                         MemRegion span,
   3.308                                         ReferenceProcessor* rp,
   3.309 @@ -7464,15 +7503,12 @@
   3.310                                         CMSMarkStack*  mark_stack,
   3.311                                         CMSMarkStack*  revisit_stack,
   3.312                                         bool           concurrent_precleaning):
   3.313 -  OopClosure(rp),
   3.314 -  _collector(collector),
   3.315 +  KlassRememberingOopClosure(collector, rp, revisit_stack),
   3.316    _span(span),
   3.317    _bit_map(bit_map),
   3.318    _mod_union_table(mod_union_table),
   3.319    _mark_stack(mark_stack),
   3.320 -  _revisit_stack(revisit_stack),
   3.321 -  _concurrent_precleaning(concurrent_precleaning),
   3.322 -  _should_remember_klasses(collector->should_unload_classes())
   3.323 +  _concurrent_precleaning(concurrent_precleaning)
   3.324  {
   3.325    assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
   3.326  }
   3.327 @@ -7540,13 +7576,10 @@
   3.328                                                 CMSBitMap* bit_map,
   3.329                                                 OopTaskQueue* work_queue,
   3.330                                                 CMSMarkStack* revisit_stack):
   3.331 -  OopClosure(rp),
   3.332 -  _collector(collector),
   3.333 +  Par_KlassRememberingOopClosure(collector, rp, revisit_stack),
   3.334    _span(span),
   3.335    _bit_map(bit_map),
   3.336 -  _work_queue(work_queue),
   3.337 -  _revisit_stack(revisit_stack),
   3.338 -  _should_remember_klasses(collector->should_unload_classes())
   3.339 +  _work_queue(work_queue)
   3.340  {
   3.341    assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
   3.342  }
   3.343 @@ -7599,19 +7632,8 @@
   3.344  void Par_PushAndMarkClosure::do_oop(oop* p)       { Par_PushAndMarkClosure::do_oop_work(p); }
   3.345  void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
   3.346  
   3.347 -void PushAndMarkClosure::remember_klass(Klass* k) {
   3.348 -  if (!_revisit_stack->push(oop(k))) {
   3.349 -    fatal("Revisit stack overflowed in PushAndMarkClosure");
   3.350 -  }
   3.351 -}
   3.352 -
   3.353 -void Par_PushAndMarkClosure::remember_klass(Klass* k) {
   3.354 -  if (!_revisit_stack->par_push(oop(k))) {
   3.355 -    fatal("Revist stack overflowed in Par_PushAndMarkClosure");
   3.356 -  }
   3.357 -}
   3.358 -
   3.359  void CMSPrecleanRefsYieldClosure::do_yield_work() {
   3.360 +  DEBUG_ONLY(RememberKlassesChecker mux(false);)
   3.361    Mutex* bml = _collector->bitMapLock();
   3.362    assert_lock_strong(bml);
   3.363    assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
   3.364 @@ -8302,6 +8324,19 @@
   3.365           (!_span.contains(addr) || _bit_map->isMarked(addr));
   3.366  }
   3.367  
   3.368 +CMSKeepAliveClosure::CMSKeepAliveClosure( CMSCollector* collector,
   3.369 +                      MemRegion span,
   3.370 +                      CMSBitMap* bit_map, CMSMarkStack* mark_stack,
   3.371 +                      CMSMarkStack* revisit_stack, bool cpc):
   3.372 +  KlassRememberingOopClosure(collector, NULL, revisit_stack),
   3.373 +  _span(span),
   3.374 +  _bit_map(bit_map),
   3.375 +  _mark_stack(mark_stack),
   3.376 +  _concurrent_precleaning(cpc) {
   3.377 +  assert(!_span.is_empty(), "Empty span could spell trouble");
   3.378 +}
   3.379 +
   3.380 +
   3.381  // CMSKeepAliveClosure: the serial version
   3.382  void CMSKeepAliveClosure::do_oop(oop obj) {
   3.383    HeapWord* addr = (HeapWord*)obj;
   3.384 @@ -8385,6 +8420,16 @@
   3.385    }
   3.386  }
   3.387  
   3.388 +CMSInnerParMarkAndPushClosure::CMSInnerParMarkAndPushClosure(
   3.389 +                                CMSCollector* collector,
   3.390 +                                MemRegion span, CMSBitMap* bit_map,
   3.391 +                                CMSMarkStack* revisit_stack,
   3.392 +                                OopTaskQueue* work_queue):
   3.393 +  Par_KlassRememberingOopClosure(collector, NULL, revisit_stack),
   3.394 +  _span(span),
   3.395 +  _bit_map(bit_map),
   3.396 +  _work_queue(work_queue) { }
   3.397 +
   3.398  void CMSInnerParMarkAndPushClosure::do_oop(oop obj) {
   3.399    HeapWord* addr = (HeapWord*)obj;
   3.400    if (_span.contains(addr) &&
     4.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Fri Aug 14 13:44:15 2009 -0700
     4.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Mon Aug 24 10:36:31 2009 -0700
     4.3 @@ -1790,12 +1790,13 @@
     4.4   public:
     4.5    CMSParDrainMarkingStackClosure(CMSCollector* collector,
     4.6                                   MemRegion span, CMSBitMap* bit_map,
     4.7 +                                 CMSMarkStack* revisit_stack,
     4.8                                   OopTaskQueue* work_queue):
     4.9      _collector(collector),
    4.10      _span(span),
    4.11      _bit_map(bit_map),
    4.12      _work_queue(work_queue),
    4.13 -    _mark_and_push(collector, span, bit_map, work_queue) { }
    4.14 +    _mark_and_push(collector, span, bit_map, revisit_stack, work_queue) { }
    4.15  
    4.16   public:
    4.17    void trim_queue(uint max);
     5.1 --- a/src/share/vm/gc_implementation/includeDB_gc_concurrentMarkSweep	Fri Aug 14 13:44:15 2009 -0700
     5.2 +++ b/src/share/vm/gc_implementation/includeDB_gc_concurrentMarkSweep	Mon Aug 24 10:36:31 2009 -0700
     5.3 @@ -145,6 +145,7 @@
     5.4  concurrentMarkSweepGeneration.cpp       globals_extension.hpp
     5.5  concurrentMarkSweepGeneration.cpp       handles.inline.hpp
     5.6  concurrentMarkSweepGeneration.cpp       isGCActiveMark.hpp
     5.7 +concurrentMarkSweepGeneration.cpp       iterator.hpp
     5.8  concurrentMarkSweepGeneration.cpp       java.hpp
     5.9  concurrentMarkSweepGeneration.cpp       jvmtiExport.hpp
    5.10  concurrentMarkSweepGeneration.cpp       oop.inline.hpp
     6.1 --- a/src/share/vm/memory/iterator.cpp	Fri Aug 14 13:44:15 2009 -0700
     6.2 +++ b/src/share/vm/memory/iterator.cpp	Mon Aug 24 10:36:31 2009 -0700
     6.3 @@ -25,6 +25,10 @@
     6.4  # include "incls/_precompiled.incl"
     6.5  # include "incls/_iterator.cpp.incl"
     6.6  
     6.7 +#ifdef ASSERT
     6.8 +bool OopClosure::_must_remember_klasses = false;
     6.9 +#endif
    6.10 +
    6.11  void ObjectToOopClosure::do_object(oop obj) {
    6.12    obj->oop_iterate(_cl);
    6.13  }
    6.14 @@ -32,3 +36,13 @@
    6.15  void VoidClosure::do_void() {
    6.16    ShouldNotCallThis();
    6.17  }
    6.18 +
    6.19 +#ifdef ASSERT
    6.20 +bool OopClosure::must_remember_klasses() {
    6.21 +  return _must_remember_klasses;
    6.22 +}
    6.23 +void OopClosure::set_must_remember_klasses(bool v) {
    6.24 +  _must_remember_klasses = v;
    6.25 +}
    6.26 +#endif
    6.27 +
     7.1 --- a/src/share/vm/memory/iterator.hpp	Fri Aug 14 13:44:15 2009 -0700
     7.2 +++ b/src/share/vm/memory/iterator.hpp	Mon Aug 24 10:36:31 2009 -0700
     7.3 @@ -54,7 +54,12 @@
     7.4  
     7.5    // In support of post-processing of weak links of KlassKlass objects;
     7.6    // see KlassKlass::oop_oop_iterate().
     7.7 -  virtual const bool should_remember_klasses() const { return false;    }
     7.8 +
     7.9 +  virtual const bool should_remember_klasses() const {
    7.10 +    assert(!must_remember_klasses(), "Should have overridden this method.");
    7.11 +    return false;
    7.12 +  }
    7.13 +
    7.14    virtual void remember_klass(Klass* k) { /* do nothing */ }
    7.15  
    7.16    // If "true", invoke on nmethods (when scanning compiled frames).
    7.17 @@ -74,6 +79,12 @@
    7.18    // location without an intervening "major reset" (like the end of a GC).
    7.19    virtual bool idempotent() { return false; }
    7.20    virtual bool apply_to_weak_ref_discovered_field() { return false; }
    7.21 +
    7.22 +#ifdef ASSERT
    7.23 +  static bool _must_remember_klasses;
    7.24 +  static bool must_remember_klasses();
    7.25 +  static void set_must_remember_klasses(bool v);
    7.26 +#endif
    7.27  };
    7.28  
    7.29  // ObjectClosure is used for iterating through an object space
    7.30 @@ -219,3 +230,38 @@
    7.31    // correct length.
    7.32    virtual void do_tag(int tag) = 0;
    7.33  };
    7.34 +
    7.35 +#ifdef ASSERT
    7.36 +// This class is used to flag phases of a collection that
    7.37 +// can unload classes and which should override the
    7.38 +// should_remember_klasses() and remember_klass() of OopClosure.
    7.39 +// The _must_remember_klasses is set in the constructor and restored
    7.40 +// in the destructor.  _must_remember_klasses is checked in assertions
    7.41 +// in the OopClosure implementations of should_remember_klasses() and
    7.42 +// remember_klass() and the expectation is that the OopClosure
    7.43 +// implementation should not be in use if _must_remember_klasses is set.
    7.44 +// Instances of RememberKlassesChecker can be placed in
    7.45 +// marking phases of collections which can do class unloading.
    7.46 +// RememberKlassesChecker can be passed "false" to turn off checking.
    7.47 +// It is used by CMS when CMS yields to a different collector.
    7.48 +class RememberKlassesChecker: StackObj {
    7.49 + bool _state;
    7.50 + bool _skip;
    7.51 + public:
    7.52 +  RememberKlassesChecker(bool checking_on) : _state(false), _skip(false) {
    7.53 +    _skip = !(ClassUnloading && !UseConcMarkSweepGC ||
    7.54 +              CMSClassUnloadingEnabled && UseConcMarkSweepGC);
    7.55 +    if (_skip) {
    7.56 +      return;
    7.57 +    }
    7.58 +    _state = OopClosure::must_remember_klasses();
    7.59 +    OopClosure::set_must_remember_klasses(checking_on);
    7.60 +  }
    7.61 +  ~RememberKlassesChecker() {
    7.62 +    if (_skip) {
    7.63 +      return;
    7.64 +    }
    7.65 +    OopClosure::set_must_remember_klasses(_state);
    7.66 +  }
    7.67 +};
    7.68 +#endif  // ASSERT
     8.1 --- a/src/share/vm/memory/referenceProcessor.cpp	Fri Aug 14 13:44:15 2009 -0700
     8.2 +++ b/src/share/vm/memory/referenceProcessor.cpp	Mon Aug 24 10:36:31 2009 -0700
     8.3 @@ -1231,6 +1231,11 @@
     8.4  
     8.5    NOT_PRODUCT(verify_ok_to_handle_reflists());
     8.6  
     8.7 +#ifdef ASSERT
     8.8 +  bool must_remember_klasses = ClassUnloading && !UseConcMarkSweepGC ||
     8.9 +                               CMSClassUnloadingEnabled && UseConcMarkSweepGC;
    8.10 +  RememberKlassesChecker mx(must_remember_klasses);
    8.11 +#endif
    8.12    // Soft references
    8.13    {
    8.14      TraceTime tt("Preclean SoftReferences", PrintGCDetails && PrintReferenceGC,

mercurial