changeset:   532:f38a25e2458a
parent:      525:cf4e16e9ca60
parent:      531:2acabb781f53
child:       544:9f4457a14b58
user:        kamg
date:        Wed, 09 Apr 2008 10:38:30 -0400
summary:     Merge

     1.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Fri Apr 04 10:48:43 2008 -0400
     1.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Wed Apr 09 10:38:30 2008 -0400
     1.3 @@ -225,6 +225,34 @@
     1.4    assert(_dilatation_factor >= 1.0, "from previous assert");
     1.5  }
     1.6  
     1.7 +
     1.8 +// The field "_initiating_occupancy" represents the occupancy percentage
     1.9 +// at which we trigger a new collection cycle.  Unless explicitly specified
    1.10 +// via CMSInitiating[Perm]OccupancyFraction (argument "io" below), it
    1.11 +// is calculated by:
    1.12 +//
    1.13 +//   Let "f" be MinHeapFreeRatio in
    1.14 +//
    1.15 +//    _initiating_occupancy = 100-f +
    1.16 +//                           f * (CMSTrigger[Perm]Ratio/100)
    1.17 +//   where CMSTrigger[Perm]Ratio is the argument "tr" below.
    1.18 +//
    1.19 +// That is, if we assume the heap is at its desired maximum occupancy at the
    1.20 +// end of a collection, we let CMSTrigger[Perm]Ratio of the (purported) free
    1.21 +// space be allocated before initiating a new collection cycle.
    1.22 +//
    1.23 +void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, intx tr) {
    1.24 +  assert(io <= 100 && tr >= 0 && tr <= 100, "Check the arguments");
    1.25 +  if (io >= 0) {
    1.26 +    _initiating_occupancy = (double)io / 100.0;
    1.27 +  } else {
    1.28 +    _initiating_occupancy = ((100 - MinHeapFreeRatio) +
    1.29 +                             (double)(tr * MinHeapFreeRatio) / 100.0)
    1.30 +                            / 100.0;
    1.31 +  }
    1.32 +}
    1.33 +
    1.34 +
    1.35  void ConcurrentMarkSweepGeneration::ref_processor_init() {
    1.36    assert(collector() != NULL, "no collector");
    1.37    collector()->ref_processor_init();
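
A worked example of the formula in the hunk above, assuming the HotSpot defaults of the period (MinHeapFreeRatio = 40, CMSTriggerRatio = 80) and "io" left negative so the ratio path is taken:

    _initiating_occupancy = ((100 - 40) + (80 * 40) / 100.0) / 100.0
                          = (60 + 32) / 100.0
                          = 0.92

That is, a new collection cycle is initiated once the generation is about 92% occupied.
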
    1.38 @@ -520,8 +548,8 @@
    1.39    _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
    1.40    _completed_initialization(false),
    1.41    _collector_policy(cp),
    1.42 -  _unload_classes(false),
    1.43 -  _unloaded_classes_last_cycle(false),
    1.44 +  _should_unload_classes(false),
    1.45 +  _concurrent_cycles_since_last_unload(0),
    1.46    _sweep_estimate(CMS_SweepWeight, CMS_SweepPadding)
    1.47  {
    1.48    if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
    1.49 @@ -642,26 +670,11 @@
    1.50      }
    1.51    }
    1.52  
    1.53 -  // "initiatingOccupancy" is the occupancy ratio at which we trigger
    1.54 -  // a new collection cycle.  Unless explicitly specified via
    1.55 -  // CMSTriggerRatio, it is calculated by:
    1.56 -  //   Let "f" be MinHeapFreeRatio in
    1.57 -  //
    1.58 -  //    intiatingOccupancy = 100-f +
    1.59 -  //                         f * (CMSTriggerRatio/100)
    1.60 -  // That is, if we assume the heap is at its desired maximum occupancy at the
    1.61 -  // end of a collection, we let CMSTriggerRatio of the (purported) free
    1.62 -  // space be allocated before initiating a new collection cycle.
    1.63 -  if (CMSInitiatingOccupancyFraction > 0) {
    1.64 -    _initiatingOccupancy = (double)CMSInitiatingOccupancyFraction / 100.0;
    1.65 -  } else {
    1.66 -    _initiatingOccupancy = ((100 - MinHeapFreeRatio) +
    1.67 -                           (double)(CMSTriggerRatio *
    1.68 -                                    MinHeapFreeRatio) / 100.0)
    1.69 -                           / 100.0;
    1.70 -  }
    1.71 +  _cmsGen ->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);
    1.72 +  _permGen->init_initiating_occupancy(CMSInitiatingPermOccupancyFraction, CMSTriggerPermRatio);
    1.73 +
    1.74    // Clip CMSBootstrapOccupancy between 0 and 100.
    1.75 -  _bootstrap_occupancy = ((double)MIN2((intx)100, MAX2((intx)0, CMSBootstrapOccupancy)))
    1.76 +  _bootstrap_occupancy = ((double)MIN2((uintx)100, MAX2((uintx)0, CMSBootstrapOccupancy)))
    1.77                           /(double)100;
    1.78  
    1.79    _full_gcs_since_conc_gc = 0;
    1.80 @@ -1413,7 +1426,8 @@
    1.81      gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate());
    1.82      gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate());
    1.83      gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy());
    1.84 -    gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", initiatingOccupancy());
    1.85 +    gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
    1.86 +    gclog_or_tty->print_cr("initiatingPermOccupancy=%3.7f", _permGen->initiating_occupancy());
    1.87    }
    1.88    // ------------------------------------------------------------------
    1.89  
    1.90 @@ -1446,84 +1460,91 @@
    1.91    // old gen want a collection cycle started. Each may use
    1.92    // an appropriate criterion for making this decision.
    1.93    // XXX We need to make sure that the gen expansion
    1.94 -  // criterion dovetails well with this.
    1.95 -  if (_cmsGen->shouldConcurrentCollect(initiatingOccupancy())) {
    1.96 +  // criterion dovetails well with this. XXX NEED TO FIX THIS
    1.97 +  if (_cmsGen->should_concurrent_collect()) {
    1.98      if (Verbose && PrintGCDetails) {
    1.99        gclog_or_tty->print_cr("CMS old gen initiated");
   1.100      }
   1.101      return true;
   1.102    }
   1.103  
   1.104 -  if (cms_should_unload_classes() &&
   1.105 -      _permGen->shouldConcurrentCollect(initiatingOccupancy())) {
   1.106 -    if (Verbose && PrintGCDetails) {
   1.107 -     gclog_or_tty->print_cr("CMS perm gen initiated");
   1.108 -    }
   1.109 -    return true;
   1.110 -  }
   1.111 -
   1.112 -  return false;
   1.113 -}
   1.114 -
   1.115 -// Clear _expansion_cause fields of constituent generations
   1.116 -void CMSCollector::clear_expansion_cause() {
   1.117 -  _cmsGen->clear_expansion_cause();
   1.118 -  _permGen->clear_expansion_cause();
   1.119 -}
   1.120 -
   1.121 -bool ConcurrentMarkSweepGeneration::shouldConcurrentCollect(
   1.122 -  double initiatingOccupancy) {
   1.123 -  // We should be conservative in starting a collection cycle.  To
   1.124 -  // start too eagerly runs the risk of collecting too often in the
   1.125 -  // extreme.  To collect too rarely falls back on full collections,
   1.126 -  // which works, even if not optimum in terms of concurrent work.
   1.127 -  // As a work around for too eagerly collecting, use the flag
   1.128 -  // UseCMSInitiatingOccupancyOnly.  This also has the advantage of
   1.129 -  // giving the user an easily understandable way of controlling the
   1.130 -  // collections.
   1.131 -  // We want to start a new collection cycle if any of the following
   1.132 -  // conditions hold:
   1.133 -  // . our current occupancy exceeds the initiating occupancy, or
   1.134 -  // . we recently needed to expand and have not since that expansion,
   1.135 -  //   collected, or
   1.136 -  // . we are not using adaptive free lists and linear allocation is
   1.137 -  //   going to fail, or
   1.138 -  // . (for old gen) incremental collection has already failed or
   1.139 -  //   may soon fail in the near future as we may not be able to absorb
   1.140 -  //   promotions.
   1.141 -  assert_lock_strong(freelistLock());
   1.142 -
   1.143 -  if (occupancy() > initiatingOccupancy) {
   1.144 -    if (PrintGCDetails && Verbose) {
   1.145 -      gclog_or_tty->print(" %s: collect because of occupancy %f / %f  ",
   1.146 -        short_name(), occupancy(), initiatingOccupancy);
   1.147 -    }
   1.148 -    return true;
   1.149 -  }
   1.150 -  if (UseCMSInitiatingOccupancyOnly) {
   1.151 -    return false;
   1.152 -  }
   1.153 -  if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
   1.154 -    if (PrintGCDetails && Verbose) {
   1.155 -      gclog_or_tty->print(" %s: collect because expanded for allocation ",
   1.156 -        short_name());
   1.157 -    }
   1.158 -    return true;
   1.159 -  }
   1.160 +  // We start a collection if we believe an incremental collection may fail;
   1.161 +  // this is not likely to be productive in practice because it's probably too
   1.162 +  // late anyway.
   1.163    GenCollectedHeap* gch = GenCollectedHeap::heap();
   1.164    assert(gch->collector_policy()->is_two_generation_policy(),
   1.165           "You may want to check the correctness of the following");
   1.166    if (gch->incremental_collection_will_fail()) {
   1.167      if (PrintGCDetails && Verbose) {
   1.168 -      gclog_or_tty->print(" %s: collect because incremental collection will fail ",
   1.169 +      gclog_or_tty->print("CMSCollector: collect because incremental collection will fail ");
   1.170 +    }
   1.171 +    return true;
   1.172 +  }
   1.173 +
   1.174 +  if (CMSClassUnloadingEnabled && _permGen->should_concurrent_collect()) {
   1.175 +    bool res = update_should_unload_classes();
   1.176 +    if (res) {
   1.177 +      if (Verbose && PrintGCDetails) {
   1.178 +        gclog_or_tty->print_cr("CMS perm gen initiated");
   1.179 +      }
   1.180 +      return true;
   1.181 +    }
   1.182 +  }
   1.183 +  return false;
   1.184 +}
   1.185 +
   1.186 +// Clear _expansion_cause fields of constituent generations
   1.187 +void CMSCollector::clear_expansion_cause() {
   1.188 +  _cmsGen->clear_expansion_cause();
   1.189 +  _permGen->clear_expansion_cause();
   1.190 +}
   1.191 +
   1.192 +// We should be conservative in starting a collection cycle.  To
   1.193 +// start too eagerly runs the risk of collecting too often in the
   1.194 +// extreme.  To collect too rarely falls back on full collections,
   1.195 +// which works, even if not optimum in terms of concurrent work.
   1.196 +// As a work around for too eagerly collecting, use the flag
   1.197 +// UseCMSInitiatingOccupancyOnly.  This also has the advantage of
   1.198 +// giving the user an easily understandable way of controlling the
   1.199 +// collections.
   1.200 +// We want to start a new collection cycle if any of the following
   1.201 +// conditions hold:
   1.202 +// . our current occupancy exceeds the configured initiating occupancy
   1.203 +//   for this generation, or
   1.204 +// . we recently needed to expand this space and have not, since that
   1.205 +//   expansion, done a collection of this generation, or
   1.206 +// . the underlying space believes that it may be a good idea to initiate
   1.207 +//   a concurrent collection (this may be based on criteria such as the
   1.208 +//   following: the space uses linear allocation and linear allocation is
   1.209 +//   going to fail, or there is believed to be excessive fragmentation in
   1.210 +//   the generation, etc... or ...
   1.211 +// [.(currently done by CMSCollector::shouldConcurrentCollect() only for
   1.212 +//   the case of the old generation, not the perm generation; see CR 6543076):
   1.213 +//   we may be approaching a point at which allocation requests may fail because
   1.214 +//   we will be out of sufficient free space given allocation rate estimates.]
   1.215 +bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const {
   1.216 +
   1.217 +  assert_lock_strong(freelistLock());
   1.218 +  if (occupancy() > initiating_occupancy()) {
   1.219 +    if (PrintGCDetails && Verbose) {
   1.220 +      gclog_or_tty->print(" %s: collect because of occupancy %f / %f  ",
   1.221 +        short_name(), occupancy(), initiating_occupancy());
   1.222 +    }
   1.223 +    return true;
   1.224 +  }
   1.225 +  if (UseCMSInitiatingOccupancyOnly) {
   1.226 +    return false;
   1.227 +  }
   1.228 +  if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
   1.229 +    if (PrintGCDetails && Verbose) {
   1.230 +      gclog_or_tty->print(" %s: collect because expanded for allocation ",
   1.231          short_name());
   1.232      }
   1.233      return true;
   1.234    }
   1.235 -  if (!_cmsSpace->adaptive_freelists() &&
   1.236 -      _cmsSpace->linearAllocationWouldFail()) {
   1.237 +  if (_cmsSpace->should_concurrent_collect()) {
   1.238      if (PrintGCDetails && Verbose) {
   1.239 -      gclog_or_tty->print(" %s: collect because of linAB ",
   1.240 +      gclog_or_tty->print(" %s: collect because cmsSpace says so ",
   1.241          short_name());
   1.242      }
   1.243      return true;
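
The hunk above folds the old occupancy and expansion tests into the generation itself and delegates the allocation-failure criteria to the underlying space. A condensed, self-contained sketch of the resulting per-generation test (the free-function shape and parameter names are ours, for illustration only):

    // Model of ConcurrentMarkSweepGeneration::should_concurrent_collect();
    // in HotSpot the inputs come from the generation, its space, and the
    // UseCMSInitiatingOccupancyOnly flag rather than from parameters.
    bool should_start_cms_cycle(double occupancy,               // used/capacity
                                double initiating_occupancy,    // configured trigger
                                bool   use_occupancy_only,      // UseCMSInitiatingOccupancyOnly
                                bool   expanded_for_allocation, // grew since last collection
                                bool   space_wants_collection)  // e.g. linear allocation would fail
    {
      if (occupancy > initiating_occupancy) return true;
      if (use_occupancy_only)               return false;  // occupancy is the sole criterion
      if (expanded_for_allocation)          return true;
      if (space_wants_collection)           return true;
      return false;
    }
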
   1.244 @@ -1970,8 +1991,9 @@
   1.245           "Should have been NULL'd before baton was passed");
   1.246    reset(false /* == !asynch */);
   1.247    _cmsGen->reset_after_compaction();
   1.248 -
   1.249 -  if (verifying() && !cms_should_unload_classes()) {
   1.250 +  _concurrent_cycles_since_last_unload = 0;
   1.251 +
   1.252 +  if (verifying() && !should_unload_classes()) {
   1.253      perm_gen_verify_bit_map()->clear_all();
   1.254    }
   1.255  
   1.256 @@ -2098,6 +2120,7 @@
   1.257    {
   1.258      bool safepoint_check = Mutex::_no_safepoint_check_flag;
   1.259      MutexLockerEx hl(Heap_lock, safepoint_check);
   1.260 +    FreelistLocker fll(this);
   1.261      MutexLockerEx x(CGC_lock, safepoint_check);
   1.262      if (_foregroundGCIsActive || !UseAsyncConcMarkSweepGC) {
   1.263        // The foreground collector is active or we're
   1.264 @@ -2112,13 +2135,9 @@
   1.265        // a new cycle.
   1.266        clear_expansion_cause();
   1.267      }
   1.268 -    _unloaded_classes_last_cycle = cms_should_unload_classes(); // ... from last cycle
   1.269 -    // This controls class unloading in response to an explicit gc request.
   1.270 -    // If ExplicitGCInvokesConcurrentAndUnloadsClasses is set, then
   1.271 -    // we will unload classes even if CMSClassUnloadingEnabled is not set.
   1.272 -    // See CR 6541037 and related CRs.
   1.273 -    _unload_classes = _full_gc_requested                      // ... for this cycle
   1.274 -                      && ExplicitGCInvokesConcurrentAndUnloadsClasses;
   1.275 +    // Decide if we want to enable class unloading as part of the
   1.276 +    // ensuing concurrent GC cycle.
   1.277 +    update_should_unload_classes();
   1.278      _full_gc_requested = false;           // acks all outstanding full gc requests
   1.279      // Signal that we are about to start a collection
   1.280      gch->increment_total_full_collections();  // ... starting a collection cycle
   1.281 @@ -3047,21 +3066,62 @@
   1.282  }
   1.283  #endif // PRODUCT
   1.284  
   1.285 +// Decide if we want to enable class unloading as part of the
   1.286 +// ensuing concurrent GC cycle. We will collect the perm gen and
   1.287 +// unload classes if it's the case that:
   1.288 +// (1) an explicit gc request has been made and the flag
   1.289 +//     ExplicitGCInvokesConcurrentAndUnloadsClasses is set, OR
   1.290 +// (2) (a) class unloading is enabled at the command line, and
   1.291 +//     (b) (i)   perm gen threshold has been crossed, or
   1.292 +//         (ii)  old gen is getting really full, or
   1.293 +//         (iii) the previous N CMS collections did not collect the
   1.294 +//               perm gen
   1.295 +// NOTE: Provided there is no change in the state of the heap between
   1.296 +// calls to this method, it should have idempotent results. Moreover,
   1.297 +// its results should be monotonically increasing (i.e. going from 0 to 1,
   1.298 +// but not 1 to 0) between successive calls between which the heap was
   1.299 +// not collected. For the implementation below, it must thus rely on
   1.300 +// the property that concurrent_cycles_since_last_unload()
   1.301 +// will not decrease unless a collection cycle happened and that
   1.302 +// _permGen->should_concurrent_collect() and _cmsGen->is_too_full() are
   1.303 +// themselves also monotonic in that sense. See check_monotonicity()
   1.304 +// below.
   1.305 +bool CMSCollector::update_should_unload_classes() {
   1.306 +  _should_unload_classes = false;
   1.307 +  // Condition 1 above
   1.308 +  if (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) {
   1.309 +    _should_unload_classes = true;
   1.310 +  } else if (CMSClassUnloadingEnabled) { // Condition 2.a above
   1.311 +    // Disjuncts 2.b.(i,ii,iii) above
   1.312 +    _should_unload_classes = (concurrent_cycles_since_last_unload() >=
   1.313 +                              CMSClassUnloadingMaxInterval)
   1.314 +                           || _permGen->should_concurrent_collect()
   1.315 +                           || _cmsGen->is_too_full();
   1.316 +  }
   1.317 +  return _should_unload_classes;
   1.318 +}
   1.319 +
   1.320 +bool ConcurrentMarkSweepGeneration::is_too_full() const {
   1.321 +  bool res = should_concurrent_collect();
   1.322 +  res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0);
   1.323 +  return res;
   1.324 +}
   1.325 +
   1.326  void CMSCollector::setup_cms_unloading_and_verification_state() {
   1.327    const  bool should_verify =    VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
   1.328                               || VerifyBeforeExit;
   1.329    const  int  rso           =    SharedHeap::SO_Symbols | SharedHeap::SO_Strings
   1.330                               |   SharedHeap::SO_CodeCache;
   1.331  
   1.332 -  if (cms_should_unload_classes()) {   // Should unload classes this cycle
   1.333 +  if (should_unload_classes()) {   // Should unload classes this cycle
   1.334      remove_root_scanning_option(rso);  // Shrink the root set appropriately
   1.335      set_verifying(should_verify);    // Set verification state for this cycle
   1.336      return;                            // Nothing else needs to be done at this time
   1.337    }
   1.338  
   1.339    // Not unloading classes this cycle
   1.340 -  assert(!cms_should_unload_classes(), "Inconsitency!");
   1.341 -  if ((!verifying() || cms_unloaded_classes_last_cycle()) && should_verify) {
   1.342 +  assert(!should_unload_classes(), "Inconsistency!");
   1.343 +  if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
   1.344      // We were not verifying, or we _were_ unloading classes in the last cycle,
   1.345      // AND some verification options are enabled this cycle; in this case,
   1.346      // we must make sure that the deadness map is allocated if not already so,
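
Collapsed into a single expression (a paraphrase of update_should_unload_classes() above, not additional HotSpot code), the decision taken at the start of each concurrent cycle is:

    _should_unload_classes =
         (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) // condition (1)
      || (CMSClassUnloadingEnabled &&                                         // condition (2.a)
          (   concurrent_cycles_since_last_unload() >= CMSClassUnloadingMaxInterval // (2.b.iii)
           || _permGen->should_concurrent_collect()                           // (2.b.i)
           || _cmsGen->is_too_full()));                                       // (2.b.ii)

Note that with the default CMSClassUnloadingMaxInterval of 0 the counter comparison always holds, so enabling CMSClassUnloadingEnabled still unloads classes on every cycle, preserving the previous behavior.
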
   1.347 @@ -4693,7 +4753,7 @@
   1.348  
   1.349    GenCollectedHeap* gch = GenCollectedHeap::heap();
   1.350  
   1.351 -  if (cms_should_unload_classes()) {
   1.352 +  if (should_unload_classes()) {
   1.353      CodeCache::gc_prologue();
   1.354    }
   1.355    assert(haveFreelistLocks(), "must have free list locks");
   1.356 @@ -4753,7 +4813,7 @@
   1.357    verify_work_stacks_empty();
   1.358    verify_overflow_empty();
   1.359  
   1.360 -  if (cms_should_unload_classes()) {
   1.361 +  if (should_unload_classes()) {
   1.362      CodeCache::gc_epilogue();
   1.363    }
   1.364  
   1.365 @@ -5623,7 +5683,7 @@
   1.366      verify_work_stacks_empty();
   1.367    }
   1.368  
   1.369 -  if (cms_should_unload_classes()) {
   1.370 +  if (should_unload_classes()) {
   1.371      {
   1.372        TraceTime t("class unloading", PrintGCDetails, false, gclog_or_tty);
   1.373  
   1.374 @@ -5726,7 +5786,7 @@
   1.375    // this cycle, we preserve the perm gen object "deadness" information
   1.376    // in the perm_gen_verify_bit_map. In order to do that we traverse
   1.377    // all blocks in perm gen and mark all dead objects.
   1.378 -  if (verifying() && !cms_should_unload_classes()) {
   1.379 +  if (verifying() && !should_unload_classes()) {
   1.380      assert(perm_gen_verify_bit_map()->sizeInBits() != 0,
   1.381             "Should have already been allocated");
   1.382      MarkDeadObjectsClosure mdo(this, _permGen->cmsSpace(),
   1.383 @@ -5753,7 +5813,7 @@
   1.384      }
   1.385  
   1.386      // Now repeat for perm gen
   1.387 -    if (cms_should_unload_classes()) {
   1.388 +    if (should_unload_classes()) {
   1.389        CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
   1.390                               bitMapLock());
   1.391        sweepWork(_permGen, asynch);
   1.392 @@ -5775,7 +5835,7 @@
   1.393      // already have needed locks
   1.394      sweepWork(_cmsGen,  asynch);
   1.395  
   1.396 -    if (cms_should_unload_classes()) {
   1.397 +    if (should_unload_classes()) {
   1.398        sweepWork(_permGen, asynch);
   1.399      }
   1.400      // Update heap occupancy information which is used as
   1.401 @@ -5937,6 +5997,11 @@
   1.402    }
   1.403    gen->cmsSpace()->sweep_completed();
   1.404    gen->cmsSpace()->endSweepFLCensus(sweepCount());
   1.405 +  if (should_unload_classes()) {                // unloaded classes this cycle,
   1.406 +    _concurrent_cycles_since_last_unload = 0;   // ... reset count
   1.407 +  } else {                                      // did not unload classes,
   1.408 +    _concurrent_cycles_since_last_unload++;     // ... increment count
   1.409 +  }
   1.410  }
   1.411  
   1.412  // Reset CMS data structures (for now just the marking bit map)
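
A minimal standalone simulation of the counter maintenance above, assuming class unloading is enabled, no other unloading trigger fires, and a hypothetical -XX:CMSClassUnloadingMaxInterval=5:

    #include <cstdio>

    int main() {
      const unsigned max_interval = 5;        // hypothetical CMSClassUnloadingMaxInterval
      unsigned cycles_since_last_unload = 0;  // _concurrent_cycles_since_last_unload

      for (int cycle = 1; cycle <= 12; cycle++) {
        // Decision made when the cycle starts (interval disjunct only):
        bool unload = cycles_since_last_unload >= max_interval;
        // Counter maintenance when the sweep completes, as in the hunk above:
        if (unload) {
          cycles_since_last_unload = 0;
        } else {
          cycles_since_last_unload++;
        }
        printf("cycle %2d: unload classes = %s\n", cycle, unload ? "yes" : "no");
      }
      return 0;
    }

Under these assumptions classes are unloaded on every sixth cycle; with the default interval of 0 they are unloaded on every cycle.
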
   1.413 @@ -7194,7 +7259,7 @@
   1.414    _revisitStack(revisitStack),
   1.415    _finger(finger),
   1.416    _parent(parent),
   1.417 -  _should_remember_klasses(collector->cms_should_unload_classes())
   1.418 +  _should_remember_klasses(collector->should_unload_classes())
   1.419  { }
   1.420  
   1.421  Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
   1.422 @@ -7217,7 +7282,7 @@
   1.423    _finger(finger),
   1.424    _global_finger_addr(global_finger_addr),
   1.425    _parent(parent),
   1.426 -  _should_remember_klasses(collector->cms_should_unload_classes())
   1.427 +  _should_remember_klasses(collector->should_unload_classes())
   1.428  { }
   1.429  
   1.430  
   1.431 @@ -7360,7 +7425,7 @@
   1.432    _mark_stack(mark_stack),
   1.433    _revisit_stack(revisit_stack),
   1.434    _concurrent_precleaning(concurrent_precleaning),
   1.435 -  _should_remember_klasses(collector->cms_should_unload_classes())
   1.436 +  _should_remember_klasses(collector->should_unload_classes())
   1.437  {
   1.438    assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
   1.439  }
   1.440 @@ -7422,7 +7487,7 @@
   1.441    _bit_map(bit_map),
   1.442    _work_queue(work_queue),
   1.443    _revisit_stack(revisit_stack),
   1.444 -  _should_remember_klasses(collector->cms_should_unload_classes())
   1.445 +  _should_remember_klasses(collector->should_unload_classes())
   1.446  {
   1.447    assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
   1.448  }
   1.449 @@ -7944,7 +8009,7 @@
   1.450  
   1.451      #ifdef DEBUG
   1.452        if (oop(addr)->klass() != NULL &&
   1.453 -          (   !_collector->cms_should_unload_classes()
   1.454 +          (   !_collector->should_unload_classes()
   1.455             || oop(addr)->is_parsable())) {
   1.456          // Ignore mark word because we are running concurrent with mutators
   1.457          assert(oop(addr)->is_oop(true), "live block should be an oop");
   1.458 @@ -7957,7 +8022,7 @@
   1.459    } else {
   1.460      // This should be an initialized object that's alive.
   1.461      assert(oop(addr)->klass() != NULL &&
   1.462 -           (!_collector->cms_should_unload_classes()
   1.463 +           (!_collector->should_unload_classes()
   1.464              || oop(addr)->is_parsable()),
   1.465             "Should be an initialized object");
   1.466      // Ignore mark word because we are running concurrent with mutators
     2.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Fri Apr 04 10:48:43 2008 -0400
     2.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Wed Apr 09 10:38:30 2008 -0400
     2.3 @@ -535,13 +535,16 @@
     2.4    // In support of ExplicitGCInvokesConcurrent
     2.5    static   bool _full_gc_requested;
     2.6    unsigned int  _collection_count_start;
     2.7 +
     2.8    // Should we unload classes this concurrent cycle?
     2.9 -  // Set in response to a concurrent full gc request.
    2.10 -  bool _unload_classes;
    2.11 -  bool _unloaded_classes_last_cycle;
    2.12 +  bool _should_unload_classes;
    2.13 +  unsigned int  _concurrent_cycles_since_last_unload;
    2.14 +  unsigned int concurrent_cycles_since_last_unload() const {
    2.15 +    return _concurrent_cycles_since_last_unload;
    2.16 +  }
    2.17    // Did we (allow) unload classes in the previous concurrent cycle?
    2.18 -  bool cms_unloaded_classes_last_cycle() const {
    2.19 -    return _unloaded_classes_last_cycle || CMSClassUnloadingEnabled;
    2.20 +  bool unloaded_classes_last_cycle() const {
    2.21 +    return concurrent_cycles_since_last_unload() == 0;
    2.22    }
    2.23  
    2.24    // Verification support
    2.25 @@ -651,8 +654,6 @@
    2.26    // number of full gc's since the last concurrent gc.
    2.27    uint   _full_gcs_since_conc_gc;
    2.28  
    2.29 -  // if occupancy exceeds this, start a new gc cycle
    2.30 -  double _initiatingOccupancy;
    2.31    // occupancy used for bootstrapping stats
    2.32    double _bootstrap_occupancy;
    2.33  
    2.34 @@ -825,7 +826,6 @@
    2.35  
    2.36    Mutex* bitMapLock()        const { return _markBitMap.lock();    }
    2.37    static CollectorState abstract_state() { return _collectorState;  }
    2.38 -  double initiatingOccupancy() const { return _initiatingOccupancy; }
    2.39  
    2.40    bool should_abort_preclean() const; // Whether preclean should be aborted.
    2.41    size_t get_eden_used() const;
    2.42 @@ -849,11 +849,10 @@
    2.43    // In support of ExplicitGCInvokesConcurrent
    2.44    static void request_full_gc(unsigned int full_gc_count);
    2.45    // Should we unload classes in a particular concurrent cycle?
    2.46 -  bool cms_should_unload_classes() const {
    2.47 -    assert(!_unload_classes ||  ExplicitGCInvokesConcurrentAndUnloadsClasses,
    2.48 -           "Inconsistency; see CR 6541037");
    2.49 -    return _unload_classes || CMSClassUnloadingEnabled;
    2.50 +  bool should_unload_classes() const {
    2.51 +    return _should_unload_classes;
    2.52    }
    2.53 +  bool update_should_unload_classes();
    2.54  
    2.55    void direct_allocated(HeapWord* start, size_t size);
    2.56  
    2.57 @@ -1022,6 +1021,10 @@
    2.58      _incremental_collection_failed = false;
    2.59    }
    2.60  
    2.61 +  // accessors
    2.62 +  void set_expansion_cause(CMSExpansionCause::Cause v) { _expansion_cause = v;}
    2.63 +  CMSExpansionCause::Cause expansion_cause() const { return _expansion_cause; }
    2.64 +
    2.65   private:
    2.66    // For parallel young-gen GC support.
    2.67    CMSParGCThreadState** _par_gc_thread_states;
    2.68 @@ -1029,10 +1032,6 @@
    2.69    // Reason generation was expanded
    2.70    CMSExpansionCause::Cause _expansion_cause;
    2.71  
    2.72 -  // accessors
    2.73 -  void set_expansion_cause(CMSExpansionCause::Cause v) { _expansion_cause = v;}
    2.74 -  CMSExpansionCause::Cause expansion_cause() { return _expansion_cause; }
    2.75 -
    2.76    // In support of MinChunkSize being larger than min object size
    2.77    const double _dilatation_factor;
    2.78  
    2.79 @@ -1045,6 +1044,10 @@
    2.80  
    2.81    CollectionTypes _debug_collection_type;
    2.82  
    2.83 +  // Fraction of current occupancy at which to start a CMS collection which
    2.84 +  // will collect this generation (at least).
    2.85 +  double _initiating_occupancy;
    2.86 +
    2.87   protected:
    2.88    // Grow generation by specified size (returns false if unable to grow)
    2.89    bool grow_by(size_t bytes);
    2.90 @@ -1060,6 +1063,10 @@
    2.91    // space.
    2.92    size_t max_available() const;
    2.93  
    2.94 +  // getter and initializer for _initiating_occupancy field.
    2.95 +  double initiating_occupancy() const { return _initiating_occupancy; }
    2.96 +  void   init_initiating_occupancy(intx io, intx tr);
    2.97 +
    2.98   public:
    2.99    ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
   2.100                                  int level, CardTableRS* ct,
   2.101 @@ -1103,7 +1110,7 @@
   2.102    size_t capacity() const;
   2.103    size_t used() const;
   2.104    size_t free() const;
   2.105 -  double occupancy()      { return ((double)used())/((double)capacity()); }
   2.106 +  double occupancy() const { return ((double)used())/((double)capacity()); }
   2.107    size_t contiguous_available() const;
   2.108    size_t unsafe_max_alloc_nogc() const;
   2.109  
   2.110 @@ -1158,8 +1165,8 @@
   2.111      bool younger_handles_promotion_failure) const;
   2.112  
   2.113    bool should_collect(bool full, size_t size, bool tlab);
   2.114 -    // XXXPERM
   2.115 -  bool shouldConcurrentCollect(double initiatingOccupancy); // XXXPERM
   2.116 +  virtual bool should_concurrent_collect() const;
   2.117 +  virtual bool is_too_full() const;
   2.118    void collect(bool   full,
   2.119                 bool   clear_all_soft_refs,
   2.120                 size_t size,
     3.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp	Fri Apr 04 10:48:43 2008 -0400
     3.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp	Wed Apr 09 10:38:30 2008 -0400
     3.3 @@ -267,7 +267,7 @@
     3.4           (_permGen->cmsSpace()->is_in_reserved(addr)
     3.5            && _permGen->cmsSpace()->block_is_obj(addr)),
     3.6           "must be object");
     3.7 -  return  cms_should_unload_classes() &&
     3.8 +  return  should_unload_classes() &&
     3.9            _collectorState == Sweeping &&
    3.10           !_markBitMap.isMarked(addr);
    3.11  }
     4.1 --- a/src/share/vm/runtime/globals.hpp	Fri Apr 04 10:48:43 2008 -0400
     4.2 +++ b/src/share/vm/runtime/globals.hpp	Wed Apr 09 10:38:30 2008 -0400
     4.3 @@ -1319,6 +1319,10 @@
     4.4    product(bool, CMSClassUnloadingEnabled, false,                            \
     4.5            "Whether class unloading enabled when using CMS GC")              \
     4.6                                                                              \
     4.7 +  product(uintx, CMSClassUnloadingMaxInterval, 0,                           \
     4.8 +          "When CMS class unloading is enabled, the maximum CMS cycle count"\
     4.9 +          " for which classes may not be unloaded")                         \
    4.10 +                                                                            \
    4.11    product(bool, CMSCompactWhenClearAllSoftRefs, true,                       \
    4.12            "Compact when asked to collect CMS gen with clear_all_soft_refs") \
    4.13                                                                              \
    4.14 @@ -1504,17 +1508,30 @@
    4.15            "Percentage of MinHeapFreeRatio in CMS generation that is "       \
    4.16            "  allocated before a CMS collection cycle commences")            \
    4.17                                                                              \
    4.18 -  product(intx, CMSBootstrapOccupancy, 50,                                  \
    4.19 +  product(intx, CMSTriggerPermRatio, 80,                                    \
    4.20 +          "Percentage of MinHeapFreeRatio in the CMS perm generation that"  \
    4.21 +          "  is allocated before a CMS collection cycle commences, that  "  \
    4.22 +          "  also collects the perm generation")                            \
    4.23 +                                                                            \
    4.24 +  product(uintx, CMSBootstrapOccupancy, 50,                                 \
    4.25            "Percentage CMS generation occupancy at which to "                \
    4.26            " initiate CMS collection for bootstrapping collection stats")    \
    4.27                                                                              \
    4.28    product(intx, CMSInitiatingOccupancyFraction, -1,                         \
    4.29            "Percentage CMS generation occupancy to start a CMS collection "  \
    4.30 -          " cycle (A negative value means that CMSTirggerRatio is used)")   \
    4.31 +          " cycle (A negative value means that CMSTriggerRatio is used)")   \
    4.32 +                                                                            \
    4.33 +  product(intx, CMSInitiatingPermOccupancyFraction, -1,                     \
    4.34 +          "Percentage CMS perm generation occupancy to start a CMS collection"\
    4.35 +          " cycle (A negative value means that CMSTriggerPermRatio is used)")\
    4.36                                                                              \
    4.37    product(bool, UseCMSInitiatingOccupancyOnly, false,                       \
    4.38           "Only use occupancy as a criterion for starting a CMS collection") \
    4.39                                                                              \
    4.40 +  product(intx, CMSIsTooFullPercentage, 98,                                 \
    4.41 +          "An absolute ceiling above which CMS will always consider the"    \
    4.42 +          " perm gen ripe for collection")                                  \
    4.43 +                                                                            \
    4.44    develop(bool, CMSTestInFreeList, false,                                   \
    4.45            "Check if the coalesced range is already in the "                 \
    4.46            "free lists as claimed.")                                         \
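
An illustrative invocation exercising the new flags (the numeric values and the MyApp class name are placeholders, not recommendations):

    java -XX:+UseConcMarkSweepGC -XX:+CMSClassUnloadingEnabled \
         -XX:CMSClassUnloadingMaxInterval=5 \
         -XX:CMSInitiatingPermOccupancyFraction=85 \
         MyApp

When CMSInitiatingPermOccupancyFraction is non-negative it is used directly and CMSTriggerPermRatio is ignored, mirroring the existing CMSInitiatingOccupancyFraction/CMSTriggerRatio pair.
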
