src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp

changeset 1370:05f89f00a864
parent    1233:fe1574da39fc
child     1376:8b46c4d82093
     1.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Fri Aug 14 13:44:15 2009 -0700
     1.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Mon Aug 24 10:36:31 2009 -0700
     1.3 @@ -2276,7 +2276,7 @@
     1.4  
     1.5            VM_CMS_Final_Remark final_remark_op(this);
     1.6            VMThread::execute(&final_remark_op);
     1.7 -          }
     1.8 +        }
     1.9          assert(_foregroundGCShouldWait, "block post-condition");
    1.10          break;
    1.11        case Sweeping:
    1.12 @@ -3499,6 +3499,7 @@
    1.13    ref_processor()->set_enqueuing_is_done(false);
    1.14  
    1.15    {
    1.16 +    // This is not needed. DEBUG_ONLY(RememberKlassesChecker imx(true);)
    1.17      COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
    1.18      gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
    1.19      gch->gen_process_strong_roots(_cmsGen->level(),
    1.20 @@ -3623,6 +3624,8 @@
    1.21    verify_overflow_empty();
    1.22    assert(_revisitStack.isEmpty(), "tabula rasa");
    1.23  
    1.24 +  DEBUG_ONLY(RememberKlassesChecker cmx(CMSClassUnloadingEnabled);)
    1.25 +
    1.26    bool result = false;
    1.27    if (CMSConcurrentMTEnabled && ParallelCMSThreads > 0) {
    1.28      result = do_marking_mt(asynch);
    1.29 @@ -3958,24 +3961,24 @@
    1.30    pst->all_tasks_completed();
    1.31  }
    1.32  
    1.33 -class Par_ConcMarkingClosure: public OopClosure {
    1.34 +class Par_ConcMarkingClosure: public Par_KlassRememberingOopClosure {
    1.35   private:
    1.36 -  CMSCollector* _collector;
    1.37    MemRegion     _span;
    1.38    CMSBitMap*    _bit_map;
    1.39    CMSMarkStack* _overflow_stack;
    1.40 -  CMSMarkStack* _revisit_stack;     // XXXXXX Check proper use
    1.41    OopTaskQueue* _work_queue;
    1.42   protected:
    1.43    DO_OOP_WORK_DEFN
    1.44   public:
    1.45    Par_ConcMarkingClosure(CMSCollector* collector, OopTaskQueue* work_queue,
    1.46 -                         CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
    1.47 -    _collector(collector),
    1.48 +                         CMSBitMap* bit_map, CMSMarkStack* overflow_stack,
    1.49 +                         CMSMarkStack* revisit_stack):
    1.50 +    Par_KlassRememberingOopClosure(collector, NULL, revisit_stack),
    1.51      _span(_collector->_span),
    1.52      _work_queue(work_queue),
    1.53      _bit_map(bit_map),
    1.54 -    _overflow_stack(overflow_stack) { }   // need to initialize revisit stack etc.
    1.55 +    _overflow_stack(overflow_stack)
    1.56 +  { }
    1.57    virtual void do_oop(oop* p);
    1.58    virtual void do_oop(narrowOop* p);
    1.59    void trim_queue(size_t max);
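
The hunk above is representative of the mechanical part of this change: Par_ConcMarkingClosure stops carrying its own _collector and _revisit_stack and instead derives from Par_KlassRememberingOopClosure, which owns that state for the parallel CMS closures. The following is a self-contained sketch of the shape of that refactoring; the *Stub types merely stand in for CMSCollector, CMSMarkStack, ReferenceProcessor and OopClosure so the example compiles on its own, and the base-class body mirrors the serial KlassRememberingOopClosure constructor added later in this diff.

// Stand-in types so the sketch is self-contained; in HotSpot these are the
// real CMSCollector, CMSMarkStack, ReferenceProcessor and OopClosure.
struct CMSCollectorStub { bool should_unload_classes() const { return true; } };
struct CMSMarkStackStub {};
struct RefProcessorStub {};
struct OopClosureStub {
  explicit OopClosureStub(RefProcessorStub* rp) : _ref_processor(rp) {}
  RefProcessorStub* _ref_processor;
};

// Common state shared by the klass-remembering parallel closures moves here ...
class ParKlassRememberingBase : public OopClosureStub {
 protected:
  CMSCollectorStub* _collector;
  CMSMarkStackStub* _revisit_stack;
  const bool        _should_remember_klasses;
 public:
  ParKlassRememberingBase(CMSCollectorStub* c, RefProcessorStub* rp,
                          CMSMarkStackStub* revisit_stack)
    : OopClosureStub(rp),
      _collector(c),
      _revisit_stack(revisit_stack),
      _should_remember_klasses(c->should_unload_classes()) {}
};

// ... and a concrete closure keeps only what is genuinely its own, forwarding
// the revisit stack to the base constructor exactly as the hunk above does.
class ConcMarkingClosureSketch : public ParKlassRememberingBase {
 public:
  ConcMarkingClosureSketch(CMSCollectorStub* c, CMSMarkStackStub* revisit_stack)
    : ParKlassRememberingBase(c, /* rp = */ 0, revisit_stack) {}
};
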
    1.60 @@ -4063,8 +4066,9 @@
    1.61    oop obj_to_scan;
    1.62    CMSBitMap* bm = &(_collector->_markBitMap);
    1.63    CMSMarkStack* ovflw = &(_collector->_markStack);
    1.64 +  CMSMarkStack* revisit = &(_collector->_revisitStack);
    1.65    int* seed = _collector->hash_seed(i);
    1.66 -  Par_ConcMarkingClosure cl(_collector, work_q, bm, ovflw);
    1.67 +  Par_ConcMarkingClosure cl(_collector, work_q, bm, ovflw, revisit);
    1.68    while (true) {
    1.69      cl.trim_queue(0);
    1.70      assert(work_q->size() == 0, "Should have been emptied above");
    1.71 @@ -4089,6 +4093,7 @@
    1.72    assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
    1.73           "CMS thread should hold CMS token");
    1.74  
    1.75 +  DEBUG_ONLY(RememberKlassesChecker mux(false);)
    1.76    // First give up the locks, then yield, then re-lock
    1.77    // We should probably use a constructor/destructor idiom to
    1.78    // do this unlock/lock or modify the MutexUnlocker class to
    1.79 @@ -4165,6 +4170,8 @@
    1.80    // multi-threaded marking phase.
    1.81    ReferenceProcessorMTMutator mt(ref_processor(), num_workers > 1);
    1.82  
    1.83 +  DEBUG_ONLY(RememberKlassesChecker cmx(CMSClassUnloadingEnabled);)
    1.84 +
    1.85    conc_workers()->start_task(&tsk);
    1.86    while (tsk.yielded()) {
    1.87      tsk.coordinator_yield();
    1.88 @@ -4404,7 +4411,8 @@
    1.89      CMSPrecleanRefsYieldClosure yield_cl(this);
    1.90      assert(rp->span().equals(_span), "Spans should be equal");
    1.91      CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
    1.92 -                                   &_markStack, true /* preclean */);
    1.93 +                                   &_markStack, &_revisitStack,
    1.94 +                                   true /* preclean */);
    1.95      CMSDrainMarkingStackClosure complete_trace(this,
    1.96                                     _span, &_markBitMap, &_markStack,
    1.97                                     &keep_alive, true /* preclean */);
    1.98 @@ -4424,6 +4432,7 @@
    1.99                              bitMapLock());
   1.100      startTimer();
   1.101      sample_eden();
   1.102 +
   1.103      // The following will yield to allow foreground
   1.104      // collection to proceed promptly. XXX YSR:
   1.105      // The code in this method may need further
   1.106 @@ -4453,6 +4462,7 @@
   1.107      SurvivorSpacePrecleanClosure
   1.108        sss_cl(this, _span, &_markBitMap, &_markStack,
   1.109               &pam_cl, before_count, CMSYield);
   1.110 +    DEBUG_ONLY(RememberKlassesChecker mx(CMSClassUnloadingEnabled);)
   1.111      dng->from()->object_iterate_careful(&sss_cl);
   1.112      dng->to()->object_iterate_careful(&sss_cl);
   1.113    }
   1.114 @@ -4554,6 +4564,13 @@
   1.115    verify_work_stacks_empty();
   1.116    verify_overflow_empty();
   1.117  
   1.118 +  // Turn off checking for this method but turn it back on
   1.119 +  // selectively.  There are yield points in this method
   1.120 +  // but it is difficult to turn the checking off just around
   1.121 +  // the yield points.  It is simpler to selectively turn
   1.122 +  // it on.
   1.123 +  DEBUG_ONLY(RememberKlassesChecker mux(false);)
   1.124 +
   1.125    // strategy: starting with the first card, accumulate contiguous
   1.126    // ranges of dirty cards; clear these cards, then scan the region
   1.127    // covered by these cards.
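
The comment added above states the policy in words: checking is too awkward to toggle around each yield point, so it is switched off for the whole method and switched back on only where the careful object iteration actually needs it. Structurally that is just two nested guard scopes. The sketch below spells the pattern out with stand-ins for DEBUG_ONLY, RememberKlassesChecker and CMSClassUnloadingEnabled; the real method is the dirty-card preclean loop in this file, not this toy.

#include <cassert>

#define DEBUG_ONLY(code) code                  // stand-in for HotSpot's debug-build macro
static bool ClassUnloadingEnabledStub = true;  // stand-in for CMSClassUnloadingEnabled

class RememberKlassesCheckerStub {             // same RAII shape as sketched earlier
 public:
  explicit RememberKlassesCheckerStub(bool on) : _saved(flag()) { flag() = on; }
  ~RememberKlassesCheckerStub() { flag() = _saved; }
  static bool enabled() { return flag(); }
 private:
  static bool& flag() { static bool f = false; return f; }
  bool _saved;
};

void preclean_like_method() {
  // Whole-method scope: checking off, because the method yields in places
  // where klass remembering is legitimately not happening.
  DEBUG_ONLY(RememberKlassesCheckerStub mux(false);)
  for (int card_range = 0; card_range < 3; ++card_range) {   // stands in for the dirty-card loop
    // ... lock juggling and potential yield points run with checking off ...
    {
      // Inner scope: checking back on only around the object iteration,
      // and only when class unloading is enabled.
      DEBUG_ONLY(RememberKlassesCheckerStub mx(ClassUnloadingEnabledStub);)
      assert((RememberKlassesCheckerStub::enabled() == ClassUnloadingEnabledStub)
             && "checking matches the flag inside the inner scope");
      // object_iterate_careful_m(dirtyRegion, cl) would run here
    }
    assert(!RememberKlassesCheckerStub::enabled()
           && "checking is off again outside the inner scope");
  }
}
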
   1.128 @@ -4582,6 +4599,7 @@
   1.129      MemRegion dirtyRegion;
   1.130      {
   1.131        stopTimer();
   1.132 +      // Potential yield point
   1.133        CMSTokenSync ts(true);
   1.134        startTimer();
   1.135        sample_eden();
   1.136 @@ -4607,6 +4625,7 @@
   1.137        assert(numDirtyCards > 0, "consistency check");
   1.138        HeapWord* stop_point = NULL;
   1.139        stopTimer();
   1.140 +      // Potential yield point
   1.141        CMSTokenSyncWithLocks ts(true, gen->freelistLock(),
   1.142                                 bitMapLock());
   1.143        startTimer();
   1.144 @@ -4614,6 +4633,7 @@
   1.145          verify_work_stacks_empty();
   1.146          verify_overflow_empty();
   1.147          sample_eden();
   1.148 +        DEBUG_ONLY(RememberKlassesChecker mx(CMSClassUnloadingEnabled);)
   1.149          stop_point =
   1.150            gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
   1.151        }
   1.152 @@ -4701,6 +4721,7 @@
   1.153        sample_eden();
   1.154        verify_work_stacks_empty();
   1.155        verify_overflow_empty();
   1.156 +      DEBUG_ONLY(RememberKlassesChecker mx(CMSClassUnloadingEnabled);)
   1.157        HeapWord* stop_point =
   1.158          gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
   1.159        if (stop_point != NULL) {
   1.160 @@ -4800,6 +4821,7 @@
   1.161    assert(haveFreelistLocks(), "must have free list locks");
   1.162    assert_lock_strong(bitMapLock());
   1.163  
   1.164 +  DEBUG_ONLY(RememberKlassesChecker fmx(CMSClassUnloadingEnabled);)
   1.165    if (!init_mark_was_synchronous) {
   1.166      // We might assume that we need not fill TLAB's when
   1.167      // CMSScavengeBeforeRemark is set, because we may have just done
   1.168 @@ -4903,6 +4925,9 @@
   1.169    _markStack._hit_limit = 0;
   1.170    _markStack._failed_double = 0;
   1.171  
   1.172 +  // Check that all the klasses have been checked
   1.173 +  assert(_revisitStack.isEmpty(), "Not all klasses revisited");
   1.174 +
   1.175    if ((VerifyAfterGC || VerifyDuringGC) &&
   1.176        GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
   1.177      verify_after_remark();
   1.178 @@ -5574,9 +5599,13 @@
   1.179  void CMSRefProcTaskProxy::work(int i) {
   1.180    assert(_collector->_span.equals(_span), "Inconsistency in _span");
   1.181    CMSParKeepAliveClosure par_keep_alive(_collector, _span,
   1.182 -                                        _mark_bit_map, work_queue(i));
   1.183 +                                        _mark_bit_map,
   1.184 +                                        &_collector->_revisitStack,
   1.185 +                                        work_queue(i));
   1.186    CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span,
   1.187 -                                                 _mark_bit_map, work_queue(i));
   1.188 +                                                 _mark_bit_map,
   1.189 +                                                 &_collector->_revisitStack,
   1.190 +                                                 work_queue(i));
   1.191    CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map);
   1.192    _task.work(i, is_alive_closure, par_keep_alive, par_drain_stack);
   1.193    if (_task.marks_oops_alive()) {
   1.194 @@ -5604,12 +5633,13 @@
   1.195  };
   1.196  
   1.197  CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector,
   1.198 -  MemRegion span, CMSBitMap* bit_map, OopTaskQueue* work_queue):
   1.199 -   _collector(collector),
   1.200 +  MemRegion span, CMSBitMap* bit_map, CMSMarkStack* revisit_stack,
   1.201 +  OopTaskQueue* work_queue):
   1.202 +   Par_KlassRememberingOopClosure(collector, NULL, revisit_stack),
   1.203     _span(span),
   1.204     _bit_map(bit_map),
   1.205     _work_queue(work_queue),
   1.206 -   _mark_and_push(collector, span, bit_map, work_queue),
   1.207 +   _mark_and_push(collector, span, bit_map, revisit_stack, work_queue),
   1.208     _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
   1.209                          (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads)))
   1.210  { }
   1.211 @@ -5696,7 +5726,8 @@
   1.212    verify_work_stacks_empty();
   1.213  
   1.214    CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
   1.215 -                                          &_markStack, false /* !preclean */);
   1.216 +                                          &_markStack, &_revisitStack,
   1.217 +                                          false /* !preclean */);
   1.218    CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
   1.219                                  _span, &_markBitMap, &_markStack,
   1.220                                  &cmsKeepAliveClosure, false /* !preclean */);
   1.221 @@ -6531,6 +6562,7 @@
   1.222    assert_lock_strong(_freelistLock);
   1.223    assert_lock_strong(_bit_map->lock());
   1.224    // relinquish the free_list_lock and bitMaplock()
   1.225 +  DEBUG_ONLY(RememberKlassesChecker mux(false);)
   1.226    _bit_map->lock()->unlock();
   1.227    _freelistLock->unlock();
   1.228    ConcurrentMarkSweepThread::desynchronize(true);
   1.229 @@ -6703,6 +6735,7 @@
   1.230           "CMS thread should hold CMS token");
   1.231    assert_lock_strong(_freelistLock);
   1.232    assert_lock_strong(_bitMap->lock());
   1.233 +  DEBUG_ONLY(RememberKlassesChecker mux(false);)
   1.234    // relinquish the free_list_lock and bitMaplock()
   1.235    _bitMap->lock()->unlock();
   1.236    _freelistLock->unlock();
   1.237 @@ -6779,6 +6812,7 @@
   1.238    assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
   1.239           "CMS thread should hold CMS token");
   1.240    assert_lock_strong(_bit_map->lock());
   1.241 +  DEBUG_ONLY(RememberKlassesChecker smx(false);)
   1.242    // Relinquish the bit map lock
   1.243    _bit_map->lock()->unlock();
   1.244    ConcurrentMarkSweepThread::desynchronize(true);
   1.245 @@ -6941,6 +6975,7 @@
   1.246    assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
   1.247           "CMS thread should hold CMS token");
   1.248    assert_lock_strong(_bitMap->lock());
   1.249 +  DEBUG_ONLY(RememberKlassesChecker mux(false);)
   1.250    _bitMap->lock()->unlock();
   1.251    ConcurrentMarkSweepThread::desynchronize(true);
   1.252    ConcurrentMarkSweepThread::acknowledge_yield_request();
   1.253 @@ -7295,15 +7330,12 @@
   1.254                       CMSBitMap* bitMap, CMSMarkStack*  markStack,
   1.255                       CMSMarkStack*  revisitStack,
   1.256                       HeapWord* finger, MarkFromRootsClosure* parent) :
   1.257 -  OopClosure(collector->ref_processor()),
   1.258 -  _collector(collector),
   1.259 +  KlassRememberingOopClosure(collector, collector->ref_processor(), revisitStack),
   1.260    _span(span),
   1.261    _bitMap(bitMap),
   1.262    _markStack(markStack),
   1.263 -  _revisitStack(revisitStack),
   1.264    _finger(finger),
   1.265 -  _parent(parent),
   1.266 -  _should_remember_klasses(collector->should_unload_classes())
   1.267 +  _parent(parent)
   1.268  { }
   1.269  
   1.270  Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
   1.271 @@ -7315,18 +7347,17 @@
   1.272                       HeapWord* finger,
   1.273                       HeapWord** global_finger_addr,
   1.274                       Par_MarkFromRootsClosure* parent) :
   1.275 -  OopClosure(collector->ref_processor()),
   1.276 -  _collector(collector),
   1.277 +  Par_KlassRememberingOopClosure(collector,
   1.278 +                            collector->ref_processor(),
   1.279 +                            revisit_stack),
   1.280    _whole_span(collector->_span),
   1.281    _span(span),
   1.282    _bit_map(bit_map),
   1.283    _work_queue(work_queue),
   1.284    _overflow_stack(overflow_stack),
   1.285 -  _revisit_stack(revisit_stack),
   1.286    _finger(finger),
   1.287    _global_finger_addr(global_finger_addr),
   1.288 -  _parent(parent),
   1.289 -  _should_remember_klasses(collector->should_unload_classes())
   1.290 +  _parent(parent)
   1.291  { }
   1.292  
   1.293  // Assumes thread-safe access by callers, who are
   1.294 @@ -7456,6 +7487,14 @@
   1.295  void Par_PushOrMarkClosure::do_oop(oop* p)       { Par_PushOrMarkClosure::do_oop_work(p); }
   1.296  void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
   1.297  
   1.298 +KlassRememberingOopClosure::KlassRememberingOopClosure(CMSCollector* collector,
   1.299 +                                             ReferenceProcessor* rp,
   1.300 +                                             CMSMarkStack* revisit_stack) :
   1.301 +  OopClosure(rp),
   1.302 +  _collector(collector),
   1.303 +  _revisit_stack(revisit_stack),
   1.304 +  _should_remember_klasses(collector->should_unload_classes()) {}
   1.305 +
   1.306  PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
   1.307                                         MemRegion span,
   1.308                                         ReferenceProcessor* rp,
   1.309 @@ -7464,15 +7503,12 @@
   1.310                                         CMSMarkStack*  mark_stack,
   1.311                                         CMSMarkStack*  revisit_stack,
   1.312                                         bool           concurrent_precleaning):
   1.313 -  OopClosure(rp),
   1.314 -  _collector(collector),
   1.315 +  KlassRememberingOopClosure(collector, rp, revisit_stack),
   1.316    _span(span),
   1.317    _bit_map(bit_map),
   1.318    _mod_union_table(mod_union_table),
   1.319    _mark_stack(mark_stack),
   1.320 -  _revisit_stack(revisit_stack),
   1.321 -  _concurrent_precleaning(concurrent_precleaning),
   1.322 -  _should_remember_klasses(collector->should_unload_classes())
   1.323 +  _concurrent_precleaning(concurrent_precleaning)
   1.324  {
   1.325    assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
   1.326  }
   1.327 @@ -7540,13 +7576,10 @@
   1.328                                                 CMSBitMap* bit_map,
   1.329                                                 OopTaskQueue* work_queue,
   1.330                                                 CMSMarkStack* revisit_stack):
   1.331 -  OopClosure(rp),
   1.332 -  _collector(collector),
   1.333 +  Par_KlassRememberingOopClosure(collector, rp, revisit_stack),
   1.334    _span(span),
   1.335    _bit_map(bit_map),
   1.336 -  _work_queue(work_queue),
   1.337 -  _revisit_stack(revisit_stack),
   1.338 -  _should_remember_klasses(collector->should_unload_classes())
   1.339 +  _work_queue(work_queue)
   1.340  {
   1.341    assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
   1.342  }
   1.343 @@ -7599,19 +7632,8 @@
   1.344  void Par_PushAndMarkClosure::do_oop(oop* p)       { Par_PushAndMarkClosure::do_oop_work(p); }
   1.345  void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
   1.346  
   1.347 -void PushAndMarkClosure::remember_klass(Klass* k) {
   1.348 -  if (!_revisit_stack->push(oop(k))) {
   1.349 -    fatal("Revisit stack overflowed in PushAndMarkClosure");
   1.350 -  }
   1.351 -}
   1.352 -
   1.353 -void Par_PushAndMarkClosure::remember_klass(Klass* k) {
   1.354 -  if (!_revisit_stack->par_push(oop(k))) {
   1.355 -    fatal("Revist stack overflowed in Par_PushAndMarkClosure");
   1.356 -  }
   1.357 -}
   1.358 -
   1.359  void CMSPrecleanRefsYieldClosure::do_yield_work() {
   1.360 +  DEBUG_ONLY(RememberKlassesChecker mux(false);)
   1.361    Mutex* bml = _collector->bitMapLock();
   1.362    assert_lock_strong(bml);
   1.363    assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
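
The two remember_klass bodies deleted above are not lost: with the revisit stack now owned by the klass-remembering base closures, a single serial and a single parallel implementation can presumably live there instead of being repeated per closure (the header side of this changeset is not shown in this file, so the exact placement is an assumption). A compact sketch of that consolidation, with a stand-in stack type; HotSpot's versions push oop(k) onto a CMSMarkStack and call fatal() on overflow, as the deleted lines show.

#include <cstdio>
#include <cstdlib>

// Stand-in for CMSMarkStack: push() is the serial path, par_push() the
// lock-free parallel path used by the Par_* closures.
struct MarkStackStub {
  bool push(void* p)     { (void)p; return true; }
  bool par_push(void* p) { (void)p; return true; }
};

static void overflow_fatal(const char* who) {  // stands in for HotSpot's fatal()
  fprintf(stderr, "Revisit stack overflowed in %s\n", who);
  abort();
}

struct KlassRememberingSketch {                // serial base closure
  MarkStackStub* _revisit_stack;
  void remember_klass(void* k) {
    if (!_revisit_stack->push(k)) overflow_fatal("KlassRememberingOopClosure");
  }
};

struct ParKlassRememberingSketch {             // parallel base closure
  MarkStackStub* _revisit_stack;
  void remember_klass(void* k) {
    if (!_revisit_stack->par_push(k)) overflow_fatal("Par_KlassRememberingOopClosure");
  }
};
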
   1.364 @@ -8302,6 +8324,19 @@
   1.365           (!_span.contains(addr) || _bit_map->isMarked(addr));
   1.366  }
   1.367  
   1.368 +CMSKeepAliveClosure::CMSKeepAliveClosure( CMSCollector* collector,
   1.369 +                      MemRegion span,
   1.370 +                      CMSBitMap* bit_map, CMSMarkStack* mark_stack,
   1.371 +                      CMSMarkStack* revisit_stack, bool cpc):
   1.372 +  KlassRememberingOopClosure(collector, NULL, revisit_stack),
   1.373 +  _span(span),
   1.374 +  _bit_map(bit_map),
   1.375 +  _mark_stack(mark_stack),
   1.376 +  _concurrent_precleaning(cpc) {
   1.377 +  assert(!_span.is_empty(), "Empty span could spell trouble");
   1.378 +}
   1.379 +
   1.380 +
   1.381  // CMSKeepAliveClosure: the serial version
   1.382  void CMSKeepAliveClosure::do_oop(oop obj) {
   1.383    HeapWord* addr = (HeapWord*)obj;
   1.384 @@ -8385,6 +8420,16 @@
   1.385    }
   1.386  }
   1.387  
   1.388 +CMSInnerParMarkAndPushClosure::CMSInnerParMarkAndPushClosure(
   1.389 +                                CMSCollector* collector,
   1.390 +                                MemRegion span, CMSBitMap* bit_map,
   1.391 +                                CMSMarkStack* revisit_stack,
   1.392 +                                OopTaskQueue* work_queue):
   1.393 +  Par_KlassRememberingOopClosure(collector, NULL, revisit_stack),
   1.394 +  _span(span),
   1.395 +  _bit_map(bit_map),
   1.396 +  _work_queue(work_queue) { }
   1.397 +
   1.398  void CMSInnerParMarkAndPushClosure::do_oop(oop obj) {
   1.399    HeapWord* addr = (HeapWord*)obj;
   1.400    if (_span.contains(addr) &&
