src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp

changeset 529:0834225a7916
parent    444:173195ff483a
child     548:ba764ed4b6f2
     1.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Tue Mar 11 14:19:53 2008 -0700
     1.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Sun Mar 16 21:57:25 2008 -0700
     1.3 @@ -225,6 +225,34 @@
     1.4    assert(_dilatation_factor >= 1.0, "from previous assert");
     1.5  }
     1.6  
     1.7 +
     1.8 +// The field "_initiating_occupancy" represents the occupancy percentage
     1.9 +// at which we trigger a new collection cycle.  Unless explicitly specified
    1.10 +// via CMSInitiating[Perm]OccupancyFraction (argument "io" below), it
    1.11 +// is calculated by:
    1.12 +//
    1.13 +//   Let "f" be MinHeapFreeRatio in
    1.14 +//
     1.15 +//    _initiating_occupancy = 100-f +
    1.16 +//                           f * (CMSTrigger[Perm]Ratio/100)
    1.17 +//   where CMSTrigger[Perm]Ratio is the argument "tr" below.
    1.18 +//
    1.19 +// That is, if we assume the heap is at its desired maximum occupancy at the
    1.20 +// end of a collection, we let CMSTrigger[Perm]Ratio of the (purported) free
    1.21 +// space be allocated before initiating a new collection cycle.
    1.22 +//
    1.23 +void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, intx tr) {
    1.24 +  assert(io <= 100 && tr >= 0 && tr <= 100, "Check the arguments");
    1.25 +  if (io >= 0) {
    1.26 +    _initiating_occupancy = (double)io / 100.0;
    1.27 +  } else {
    1.28 +    _initiating_occupancy = ((100 - MinHeapFreeRatio) +
    1.29 +                             (double)(tr * MinHeapFreeRatio) / 100.0)
    1.30 +                            / 100.0;
    1.31 +  }
    1.32 +}
    1.33 +
    1.34 +
    1.35  void ConcurrentMarkSweepGeneration::ref_processor_init() {
    1.36    assert(collector() != NULL, "no collector");
    1.37    collector()->ref_processor_init();
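
The hunk above introduces init_initiating_occupancy(), which turns an explicit CMSInitiating[Perm]OccupancyFraction, or otherwise the MinHeapFreeRatio/CMSTrigger[Perm]Ratio combination, into a fraction in [0, 1]. As a rough standalone illustration of the same arithmetic (not HotSpot code; the function name, argument types, and the main() driver are invented for the example):

    #include <cassert>
    #include <cstdio>

    // io: explicit occupancy percentage, or negative if unspecified
    // tr: CMSTrigger[Perm]Ratio, f: MinHeapFreeRatio (both percentages)
    static double initiating_occupancy(long io, long tr, long f) {
      assert(io <= 100 && tr >= 0 && tr <= 100);
      if (io >= 0) {
        return (double)io / 100.0;                 // explicit setting wins
      }
      // (100 - f) already occupied, plus tr percent of the free ratio f
      return ((100 - f) + (double)(tr * f) / 100.0) / 100.0;
    }

    int main() {
      // e.g. MinHeapFreeRatio = 40, CMSTriggerRatio = 80, no explicit fraction:
      // 60 + 0.8 * 40 = 92, i.e. 0.92
      std::printf("%.2f\n", initiating_occupancy(-1, 80, 40));
      return 0;
    }
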
    1.38 @@ -520,8 +548,8 @@
    1.39    _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
    1.40    _completed_initialization(false),
    1.41    _collector_policy(cp),
    1.42 -  _unload_classes(false),
    1.43 -  _unloaded_classes_last_cycle(false),
    1.44 +  _should_unload_classes(false),
    1.45 +  _concurrent_cycles_since_last_unload(0),
    1.46    _sweep_estimate(CMS_SweepWeight, CMS_SweepPadding)
    1.47  {
    1.48    if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
    1.49 @@ -642,26 +670,11 @@
    1.50      }
    1.51    }
    1.52  
    1.53 -  // "initiatingOccupancy" is the occupancy ratio at which we trigger
    1.54 -  // a new collection cycle.  Unless explicitly specified via
    1.55 -  // CMSTriggerRatio, it is calculated by:
    1.56 -  //   Let "f" be MinHeapFreeRatio in
    1.57 -  //
    1.58 -  //    intiatingOccupancy = 100-f +
    1.59 -  //                         f * (CMSTriggerRatio/100)
    1.60 -  // That is, if we assume the heap is at its desired maximum occupancy at the
    1.61 -  // end of a collection, we let CMSTriggerRatio of the (purported) free
    1.62 -  // space be allocated before initiating a new collection cycle.
    1.63 -  if (CMSInitiatingOccupancyFraction > 0) {
    1.64 -    _initiatingOccupancy = (double)CMSInitiatingOccupancyFraction / 100.0;
    1.65 -  } else {
    1.66 -    _initiatingOccupancy = ((100 - MinHeapFreeRatio) +
    1.67 -                           (double)(CMSTriggerRatio *
    1.68 -                                    MinHeapFreeRatio) / 100.0)
    1.69 -                           / 100.0;
    1.70 -  }
    1.71 +  _cmsGen ->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);
    1.72 +  _permGen->init_initiating_occupancy(CMSInitiatingPermOccupancyFraction, CMSTriggerPermRatio);
    1.73 +
    1.74    // Clip CMSBootstrapOccupancy between 0 and 100.
    1.75 -  _bootstrap_occupancy = ((double)MIN2((intx)100, MAX2((intx)0, CMSBootstrapOccupancy)))
    1.76 +  _bootstrap_occupancy = ((double)MIN2((uintx)100, MAX2((uintx)0, CMSBootstrapOccupancy)))
    1.77                           /(double)100;
    1.78  
    1.79    _full_gcs_since_conc_gc = 0;
    1.80 @@ -1413,7 +1426,8 @@
    1.81      gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate());
    1.82      gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate());
    1.83      gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy());
    1.84 -    gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", initiatingOccupancy());
    1.85 +    gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
    1.86 +    gclog_or_tty->print_cr("initiatingPermOccupancy=%3.7f", _permGen->initiating_occupancy());
    1.87    }
    1.88    // ------------------------------------------------------------------
    1.89  
    1.90 @@ -1446,84 +1460,91 @@
    1.91    // old gen want a collection cycle started. Each may use
    1.92    // an appropriate criterion for making this decision.
    1.93    // XXX We need to make sure that the gen expansion
    1.94 -  // criterion dovetails well with this.
    1.95 -  if (_cmsGen->shouldConcurrentCollect(initiatingOccupancy())) {
    1.96 +  // criterion dovetails well with this. XXX NEED TO FIX THIS
    1.97 +  if (_cmsGen->should_concurrent_collect()) {
    1.98      if (Verbose && PrintGCDetails) {
    1.99        gclog_or_tty->print_cr("CMS old gen initiated");
   1.100      }
   1.101      return true;
   1.102    }
   1.103  
   1.104 -  if (cms_should_unload_classes() &&
   1.105 -      _permGen->shouldConcurrentCollect(initiatingOccupancy())) {
   1.106 -    if (Verbose && PrintGCDetails) {
   1.107 -     gclog_or_tty->print_cr("CMS perm gen initiated");
   1.108 -    }
   1.109 -    return true;
   1.110 -  }
   1.111 -
   1.112 -  return false;
   1.113 -}
   1.114 -
   1.115 -// Clear _expansion_cause fields of constituent generations
   1.116 -void CMSCollector::clear_expansion_cause() {
   1.117 -  _cmsGen->clear_expansion_cause();
   1.118 -  _permGen->clear_expansion_cause();
   1.119 -}
   1.120 -
   1.121 -bool ConcurrentMarkSweepGeneration::shouldConcurrentCollect(
   1.122 -  double initiatingOccupancy) {
   1.123 -  // We should be conservative in starting a collection cycle.  To
   1.124 -  // start too eagerly runs the risk of collecting too often in the
   1.125 -  // extreme.  To collect too rarely falls back on full collections,
   1.126 -  // which works, even if not optimum in terms of concurrent work.
   1.127 -  // As a work around for too eagerly collecting, use the flag
   1.128 -  // UseCMSInitiatingOccupancyOnly.  This also has the advantage of
   1.129 -  // giving the user an easily understandable way of controlling the
   1.130 -  // collections.
   1.131 -  // We want to start a new collection cycle if any of the following
   1.132 -  // conditions hold:
   1.133 -  // . our current occupancy exceeds the initiating occupancy, or
   1.134 -  // . we recently needed to expand and have not since that expansion,
   1.135 -  //   collected, or
   1.136 -  // . we are not using adaptive free lists and linear allocation is
   1.137 -  //   going to fail, or
   1.138 -  // . (for old gen) incremental collection has already failed or
   1.139 -  //   may soon fail in the near future as we may not be able to absorb
   1.140 -  //   promotions.
   1.141 -  assert_lock_strong(freelistLock());
   1.142 -
   1.143 -  if (occupancy() > initiatingOccupancy) {
   1.144 -    if (PrintGCDetails && Verbose) {
   1.145 -      gclog_or_tty->print(" %s: collect because of occupancy %f / %f  ",
   1.146 -        short_name(), occupancy(), initiatingOccupancy);
   1.147 -    }
   1.148 -    return true;
   1.149 -  }
   1.150 -  if (UseCMSInitiatingOccupancyOnly) {
   1.151 -    return false;
   1.152 -  }
   1.153 -  if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
   1.154 -    if (PrintGCDetails && Verbose) {
   1.155 -      gclog_or_tty->print(" %s: collect because expanded for allocation ",
   1.156 -        short_name());
   1.157 -    }
   1.158 -    return true;
   1.159 -  }
   1.160 +  // We start a collection if we believe an incremental collection may fail;
   1.161 +  // this is not likely to be productive in practice because it's probably too
   1.162 +  // late anyway.
   1.163    GenCollectedHeap* gch = GenCollectedHeap::heap();
   1.164    assert(gch->collector_policy()->is_two_generation_policy(),
   1.165           "You may want to check the correctness of the following");
   1.166    if (gch->incremental_collection_will_fail()) {
   1.167      if (PrintGCDetails && Verbose) {
   1.168 -      gclog_or_tty->print(" %s: collect because incremental collection will fail ",
   1.169 +      gclog_or_tty->print("CMSCollector: collect because incremental collection will fail ");
   1.170 +    }
   1.171 +    return true;
   1.172 +  }
   1.173 +
   1.174 +  if (CMSClassUnloadingEnabled && _permGen->should_concurrent_collect()) {
   1.175 +    bool res = update_should_unload_classes();
   1.176 +    if (res) {
   1.177 +      if (Verbose && PrintGCDetails) {
   1.178 +        gclog_or_tty->print_cr("CMS perm gen initiated");
   1.179 +      }
   1.180 +      return true;
   1.181 +    }
   1.182 +  }
   1.183 +  return false;
   1.184 +}
   1.185 +
   1.186 +// Clear _expansion_cause fields of constituent generations
   1.187 +void CMSCollector::clear_expansion_cause() {
   1.188 +  _cmsGen->clear_expansion_cause();
   1.189 +  _permGen->clear_expansion_cause();
   1.190 +}
   1.191 +
   1.192 +// We should be conservative in starting a collection cycle.  To
   1.193 +// start too eagerly runs the risk of collecting too often in the
   1.194 +// extreme.  To collect too rarely falls back on full collections,
   1.195 +// which works, even if not optimum in terms of concurrent work.
   1.196 +// As a work around for too eagerly collecting, use the flag
   1.197 +// UseCMSInitiatingOccupancyOnly.  This also has the advantage of
   1.198 +// giving the user an easily understandable way of controlling the
   1.199 +// collections.
   1.200 +// We want to start a new collection cycle if any of the following
   1.201 +// conditions hold:
   1.202 +// . our current occupancy exceeds the configured initiating occupancy
   1.203 +//   for this generation, or
   1.204 +// . we recently needed to expand this space and have not, since that
   1.205 +//   expansion, done a collection of this generation, or
   1.206 +// . the underlying space believes that it may be a good idea to initiate
   1.207 +//   a concurrent collection (this may be based on criteria such as the
   1.208 +//   following: the space uses linear allocation and linear allocation is
   1.209 +//   going to fail, or there is believed to be excessive fragmentation in
   1.210 +//   the generation, etc... or ...
   1.211 +// [.(currently done by CMSCollector::shouldConcurrentCollect() only for
   1.212 +//   the case of the old generation, not the perm generation; see CR 6543076):
   1.213 +//   we may be approaching a point at which allocation requests may fail because
   1.214 +//   we will be out of sufficient free space given allocation rate estimates.]
   1.215 +bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const {
   1.216 +
   1.217 +  assert_lock_strong(freelistLock());
   1.218 +  if (occupancy() > initiating_occupancy()) {
   1.219 +    if (PrintGCDetails && Verbose) {
   1.220 +      gclog_or_tty->print(" %s: collect because of occupancy %f / %f  ",
   1.221 +        short_name(), occupancy(), initiating_occupancy());
   1.222 +    }
   1.223 +    return true;
   1.224 +  }
   1.225 +  if (UseCMSInitiatingOccupancyOnly) {
   1.226 +    return false;
   1.227 +  }
   1.228 +  if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
   1.229 +    if (PrintGCDetails && Verbose) {
   1.230 +      gclog_or_tty->print(" %s: collect because expanded for allocation ",
   1.231          short_name());
   1.232      }
   1.233      return true;
   1.234    }
   1.235 -  if (!_cmsSpace->adaptive_freelists() &&
   1.236 -      _cmsSpace->linearAllocationWouldFail()) {
   1.237 +  if (_cmsSpace->should_concurrent_collect()) {
   1.238      if (PrintGCDetails && Verbose) {
   1.239 -      gclog_or_tty->print(" %s: collect because of linAB ",
   1.240 +      gclog_or_tty->print(" %s: collect because cmsSpace says so ",
   1.241          short_name());
   1.242      }
   1.243      return true;
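
The rewritten ConcurrentMarkSweepGeneration::should_concurrent_collect() in the hunk above checks, in order: the per-generation initiating occupancy, the UseCMSInitiatingOccupancyOnly short-circuit, a recent expansion done to satisfy allocation, and finally whatever heuristics the underlying space applies. A minimal sketch of that decision order, using hypothetical stand-in types (none of these names are the HotSpot API):

    #include <cstdio>

    enum ExpansionCause { kNone, kSatisfyAllocation };

    struct GenState {
      double occupancy;               // current fraction of the generation in use
      double initiating_occupancy;    // threshold from init_initiating_occupancy()
      ExpansionCause expansion_cause;
      bool space_wants_collection;    // stand-in for the space's own heuristics
    };

    static bool should_concurrent_collect(const GenState& g,
                                          bool use_initiating_occupancy_only) {
      if (g.occupancy > g.initiating_occupancy) return true;     // threshold crossed
      if (use_initiating_occupancy_only)        return false;    // threshold is the only trigger
      if (g.expansion_cause == kSatisfyAllocation) return true;  // expanded, not yet collected
      return g.space_wants_collection;                           // e.g. fragmentation, linAB failure
    }

    int main() {
      GenState g = { 0.75, 0.92, kNone, false };
      std::printf("%d\n", should_concurrent_collect(g, false));  // prints 0: no trigger yet
      return 0;
    }
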
   1.244 @@ -1970,8 +1991,9 @@
   1.245           "Should have been NULL'd before baton was passed");
   1.246    reset(false /* == !asynch */);
   1.247    _cmsGen->reset_after_compaction();
   1.248 -
   1.249 -  if (verifying() && !cms_should_unload_classes()) {
   1.250 +  _concurrent_cycles_since_last_unload = 0;
   1.251 +
   1.252 +  if (verifying() && !should_unload_classes()) {
   1.253      perm_gen_verify_bit_map()->clear_all();
   1.254    }
   1.255  
   1.256 @@ -2098,6 +2120,7 @@
   1.257    {
   1.258      bool safepoint_check = Mutex::_no_safepoint_check_flag;
   1.259      MutexLockerEx hl(Heap_lock, safepoint_check);
   1.260 +    FreelistLocker fll(this);
   1.261      MutexLockerEx x(CGC_lock, safepoint_check);
   1.262      if (_foregroundGCIsActive || !UseAsyncConcMarkSweepGC) {
   1.263        // The foreground collector is active or we're
   1.264 @@ -2112,13 +2135,9 @@
   1.265        // a new cycle.
   1.266        clear_expansion_cause();
   1.267      }
   1.268 -    _unloaded_classes_last_cycle = cms_should_unload_classes(); // ... from last cycle
   1.269 -    // This controls class unloading in response to an explicit gc request.
   1.270 -    // If ExplicitGCInvokesConcurrentAndUnloadsClasses is set, then
   1.271 -    // we will unload classes even if CMSClassUnloadingEnabled is not set.
   1.272 -    // See CR 6541037 and related CRs.
   1.273 -    _unload_classes = _full_gc_requested                      // ... for this cycle
   1.274 -                      && ExplicitGCInvokesConcurrentAndUnloadsClasses;
   1.275 +    // Decide if we want to enable class unloading as part of the
   1.276 +    // ensuing concurrent GC cycle.
   1.277 +    update_should_unload_classes();
   1.278      _full_gc_requested = false;           // acks all outstanding full gc requests
   1.279      // Signal that we are about to start a collection
   1.280      gch->increment_total_full_collections();  // ... starting a collection cycle
   1.281 @@ -3047,21 +3066,62 @@
   1.282  }
   1.283  #endif // PRODUCT
   1.284  
   1.285 +// Decide if we want to enable class unloading as part of the
   1.286 +// ensuing concurrent GC cycle. We will collect the perm gen and
   1.287 +// unload classes if it's the case that:
   1.288 +// (1) an explicit gc request has been made and the flag
   1.289 +//     ExplicitGCInvokesConcurrentAndUnloadsClasses is set, OR
   1.290 +// (2) (a) class unloading is enabled at the command line, and
   1.291 +//     (b) (i)   perm gen threshold has been crossed, or
   1.292 +//         (ii)  old gen is getting really full, or
   1.293 +//         (iii) the previous N CMS collections did not collect the
   1.294 +//               perm gen
   1.295 +// NOTE: Provided there is no change in the state of the heap between
   1.296 +// calls to this method, it should have idempotent results. Moreover,
   1.297 +// its results should be monotonically increasing (i.e. going from 0 to 1,
   1.298 +// but not 1 to 0) between successive calls between which the heap was
   1.299 +// not collected. For the implementation below, it must thus rely on
   1.300 +// the property that concurrent_cycles_since_last_unload()
   1.301 +// will not decrease unless a collection cycle happened and that
   1.302 +// _permGen->should_concurrent_collect() and _cmsGen->is_too_full() are
   1.303 +// themselves also monotonic in that sense. See check_monotonicity()
   1.304 +// below.
   1.305 +bool CMSCollector::update_should_unload_classes() {
   1.306 +  _should_unload_classes = false;
   1.307 +  // Condition 1 above
   1.308 +  if (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) {
   1.309 +    _should_unload_classes = true;
   1.310 +  } else if (CMSClassUnloadingEnabled) { // Condition 2.a above
   1.311 +    // Disjuncts 2.b.(i,ii,iii) above
   1.312 +    _should_unload_classes = (concurrent_cycles_since_last_unload() >=
   1.313 +                              CMSClassUnloadingMaxInterval)
   1.314 +                           || _permGen->should_concurrent_collect()
   1.315 +                           || _cmsGen->is_too_full();
   1.316 +  }
   1.317 +  return _should_unload_classes;
   1.318 +}
   1.319 +
   1.320 +bool ConcurrentMarkSweepGeneration::is_too_full() const {
   1.321 +  bool res = should_concurrent_collect();
   1.322 +  res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0);
   1.323 +  return res;
   1.324 +}
   1.325 +
   1.326  void CMSCollector::setup_cms_unloading_and_verification_state() {
   1.327    const  bool should_verify =    VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
   1.328                               || VerifyBeforeExit;
   1.329    const  int  rso           =    SharedHeap::SO_Symbols | SharedHeap::SO_Strings
   1.330                               |   SharedHeap::SO_CodeCache;
   1.331  
   1.332 -  if (cms_should_unload_classes()) {   // Should unload classes this cycle
   1.333 +  if (should_unload_classes()) {   // Should unload classes this cycle
   1.334      remove_root_scanning_option(rso);  // Shrink the root set appropriately
   1.335      set_verifying(should_verify);    // Set verification state for this cycle
   1.336      return;                            // Nothing else needs to be done at this time
   1.337    }
   1.338  
   1.339    // Not unloading classes this cycle
   1.340 -  assert(!cms_should_unload_classes(), "Inconsitency!");
   1.341 -  if ((!verifying() || cms_unloaded_classes_last_cycle()) && should_verify) {
    1.342 +  assert(!should_unload_classes(), "Inconsistency!");
   1.343 +  if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
   1.344      // We were not verifying, or we _were_ unloading classes in the last cycle,
   1.345      // AND some verification options are enabled this cycle; in this case,
   1.346      // we must make sure that the deadness map is allocated if not already so,
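
update_should_unload_classes(), added in the hunk above, reduces to a small boolean decision: an explicit GC request combined with ExplicitGCInvokesConcurrentAndUnloadsClasses forces unloading; otherwise, with CMSClassUnloadingEnabled set, unloading happens when the perm gen wants a collection, the old gen looks too full, or too many cycles have passed since the last unload. A hedged sketch of that logic with illustrative parameter names (the thresholds and the driver are invented for the example):

    #include <cstdio>

    static bool decide_unload_classes(bool full_gc_requested,
                                      bool explicit_gc_unloads_classes,
                                      bool class_unloading_enabled,
                                      unsigned cycles_since_last_unload,
                                      unsigned max_interval,          // CMSClassUnloadingMaxInterval
                                      bool perm_gen_wants_collection, // condition 2.b.(i)
                                      bool old_gen_too_full) {        // condition 2.b.(ii)
      if (full_gc_requested && explicit_gc_unloads_classes) {         // condition 1
        return true;
      }
      if (class_unloading_enabled) {                                  // condition 2.a
        return perm_gen_wants_collection
            || old_gen_too_full
            || cycles_since_last_unload >= max_interval;              // condition 2.b.(iii)
      }
      return false;
    }

    int main() {
      // Third cycle without unloading, with an interval of 2: unload this cycle.
      std::printf("%d\n", decide_unload_classes(false, false, true, 3, 2, false, false));
      return 0;
    }
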
   1.347 @@ -4693,7 +4753,7 @@
   1.348  
   1.349    GenCollectedHeap* gch = GenCollectedHeap::heap();
   1.350  
   1.351 -  if (cms_should_unload_classes()) {
   1.352 +  if (should_unload_classes()) {
   1.353      CodeCache::gc_prologue();
   1.354    }
   1.355    assert(haveFreelistLocks(), "must have free list locks");
   1.356 @@ -4753,7 +4813,7 @@
   1.357    verify_work_stacks_empty();
   1.358    verify_overflow_empty();
   1.359  
   1.360 -  if (cms_should_unload_classes()) {
   1.361 +  if (should_unload_classes()) {
   1.362      CodeCache::gc_epilogue();
   1.363    }
   1.364  
   1.365 @@ -5623,7 +5683,7 @@
   1.366      verify_work_stacks_empty();
   1.367    }
   1.368  
   1.369 -  if (cms_should_unload_classes()) {
   1.370 +  if (should_unload_classes()) {
   1.371      {
   1.372        TraceTime t("class unloading", PrintGCDetails, false, gclog_or_tty);
   1.373  
   1.374 @@ -5726,7 +5786,7 @@
   1.375    // this cycle, we preserve the perm gen object "deadness" information
   1.376    // in the perm_gen_verify_bit_map. In order to do that we traverse
   1.377    // all blocks in perm gen and mark all dead objects.
   1.378 -  if (verifying() && !cms_should_unload_classes()) {
   1.379 +  if (verifying() && !should_unload_classes()) {
   1.380      assert(perm_gen_verify_bit_map()->sizeInBits() != 0,
   1.381             "Should have already been allocated");
   1.382      MarkDeadObjectsClosure mdo(this, _permGen->cmsSpace(),
   1.383 @@ -5753,7 +5813,7 @@
   1.384      }
   1.385  
   1.386      // Now repeat for perm gen
   1.387 -    if (cms_should_unload_classes()) {
   1.388 +    if (should_unload_classes()) {
   1.389        CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
   1.390                               bitMapLock());
   1.391        sweepWork(_permGen, asynch);
   1.392 @@ -5775,7 +5835,7 @@
   1.393      // already have needed locks
   1.394      sweepWork(_cmsGen,  asynch);
   1.395  
   1.396 -    if (cms_should_unload_classes()) {
   1.397 +    if (should_unload_classes()) {
   1.398        sweepWork(_permGen, asynch);
   1.399      }
   1.400      // Update heap occupancy information which is used as
   1.401 @@ -5937,6 +5997,11 @@
   1.402    }
   1.403    gen->cmsSpace()->sweep_completed();
   1.404    gen->cmsSpace()->endSweepFLCensus(sweepCount());
   1.405 +  if (should_unload_classes()) {                // unloaded classes this cycle,
   1.406 +    _concurrent_cycles_since_last_unload = 0;   // ... reset count
   1.407 +  } else {                                      // did not unload classes,
   1.408 +    _concurrent_cycles_since_last_unload++;     // ... increment count
   1.409 +  }
   1.410  }
   1.411  
   1.412  // Reset CMS data structures (for now just the marking bit map)
   1.413 @@ -7194,7 +7259,7 @@
   1.414    _revisitStack(revisitStack),
   1.415    _finger(finger),
   1.416    _parent(parent),
   1.417 -  _should_remember_klasses(collector->cms_should_unload_classes())
   1.418 +  _should_remember_klasses(collector->should_unload_classes())
   1.419  { }
   1.420  
   1.421  Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
   1.422 @@ -7217,7 +7282,7 @@
   1.423    _finger(finger),
   1.424    _global_finger_addr(global_finger_addr),
   1.425    _parent(parent),
   1.426 -  _should_remember_klasses(collector->cms_should_unload_classes())
   1.427 +  _should_remember_klasses(collector->should_unload_classes())
   1.428  { }
   1.429  
   1.430  
   1.431 @@ -7360,7 +7425,7 @@
   1.432    _mark_stack(mark_stack),
   1.433    _revisit_stack(revisit_stack),
   1.434    _concurrent_precleaning(concurrent_precleaning),
   1.435 -  _should_remember_klasses(collector->cms_should_unload_classes())
   1.436 +  _should_remember_klasses(collector->should_unload_classes())
   1.437  {
   1.438    assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
   1.439  }
   1.440 @@ -7422,7 +7487,7 @@
   1.441    _bit_map(bit_map),
   1.442    _work_queue(work_queue),
   1.443    _revisit_stack(revisit_stack),
   1.444 -  _should_remember_klasses(collector->cms_should_unload_classes())
   1.445 +  _should_remember_klasses(collector->should_unload_classes())
   1.446  {
   1.447    assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
   1.448  }
   1.449 @@ -7944,7 +8009,7 @@
   1.450  
   1.451      #ifdef DEBUG
   1.452        if (oop(addr)->klass() != NULL &&
   1.453 -          (   !_collector->cms_should_unload_classes()
   1.454 +          (   !_collector->should_unload_classes()
   1.455             || oop(addr)->is_parsable())) {
   1.456          // Ignore mark word because we are running concurrent with mutators
   1.457          assert(oop(addr)->is_oop(true), "live block should be an oop");
   1.458 @@ -7957,7 +8022,7 @@
   1.459    } else {
   1.460      // This should be an initialized object that's alive.
   1.461      assert(oop(addr)->klass() != NULL &&
   1.462 -           (!_collector->cms_should_unload_classes()
   1.463 +           (!_collector->should_unload_classes()
   1.464              || oop(addr)->is_parsable()),
   1.465             "Should be an initialized object");
   1.466      // Ignore mark word because we are running concurrent with mutators