src/share/vm/gc_implementation/shared/adaptiveSizePolicy.cpp

changeset 0: f90c822e73f8
child 6876: 710a3c8b516e
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.cpp	Wed Apr 27 01:25:04 2016 +0800
     1.3 @@ -0,0 +1,669 @@
     1.4 +/*
     1.5 + * Copyright (c) 2004, 2014, Oracle and/or its affiliates. All rights reserved.
     1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.7 + *
     1.8 + * This code is free software; you can redistribute it and/or modify it
     1.9 + * under the terms of the GNU General Public License version 2 only, as
    1.10 + * published by the Free Software Foundation.
    1.11 + *
    1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    1.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    1.15 + * version 2 for more details (a copy is included in the LICENSE file that
    1.16 + * accompanied this code).
    1.17 + *
    1.18 + * You should have received a copy of the GNU General Public License version
    1.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    1.21 + *
    1.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    1.23 + * or visit www.oracle.com if you need additional information or have any
    1.24 + * questions.
    1.25 + *
    1.26 + */
    1.27 +
    1.28 +#include "precompiled.hpp"
    1.29 +#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
    1.30 +#include "gc_interface/gcCause.hpp"
    1.31 +#include "memory/collectorPolicy.hpp"
    1.32 +#include "runtime/timer.hpp"
    1.33 +#include "utilities/ostream.hpp"
    1.34 +#include "utilities/workgroup.hpp"
    1.35 +elapsedTimer AdaptiveSizePolicy::_minor_timer;
    1.36 +elapsedTimer AdaptiveSizePolicy::_major_timer;
    1.37 +bool AdaptiveSizePolicy::_debug_perturbation = false;
    1.38 +
    1.39 +// The throughput goal is implemented as
    1.40 +//      _throughput_goal = 1 - ( 1 / (1 + gc_cost_ratio))
    1.41 +// gc_cost_ratio is the ratio
    1.42 +//      application cost / gc cost
     1.43 +// For example, a gc_cost_ratio of 4 translates into a
    1.44 +// throughput goal of .80
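As a quick standalone sanity check of the formula above (an illustrative sketch, not part of this changeset; plain C++ with no HotSpot types -- the value 99 appears only because it is the usual GCTimeRatio default):

    // throughput_goal_demo.cpp -- illustrative sketch only, not HotSpot code.
    #include <cstdio>

    // Mirrors the constructor initializer:
    //   _throughput_goal = 1 - 1 / (1 + gc_cost_ratio)
    static double throughput_goal(unsigned gc_cost_ratio) {
      return 1.0 - 1.0 / (1.0 + (double) gc_cost_ratio);
    }

    int main() {
      const unsigned ratios[] = { 1, 4, 19, 99 };
      for (unsigned r : ratios) {
        // ratio 4 -> 0.80 (as in the comment above), ratio 99 -> 0.99
        std::printf("gc_cost_ratio = %3u  =>  throughput goal = %.2f\n",
                    r, throughput_goal(r));
      }
      return 0;
    }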
    1.45 +
    1.46 +AdaptiveSizePolicy::AdaptiveSizePolicy(size_t init_eden_size,
    1.47 +                                       size_t init_promo_size,
    1.48 +                                       size_t init_survivor_size,
    1.49 +                                       double gc_pause_goal_sec,
    1.50 +                                       uint gc_cost_ratio) :
    1.51 +    _eden_size(init_eden_size),
    1.52 +    _promo_size(init_promo_size),
    1.53 +    _survivor_size(init_survivor_size),
    1.54 +    _gc_pause_goal_sec(gc_pause_goal_sec),
    1.55 +    _throughput_goal(1.0 - double(1.0 / (1.0 + (double) gc_cost_ratio))),
    1.56 +    _gc_overhead_limit_exceeded(false),
    1.57 +    _print_gc_overhead_limit_would_be_exceeded(false),
    1.58 +    _gc_overhead_limit_count(0),
    1.59 +    _latest_minor_mutator_interval_seconds(0),
    1.60 +    _threshold_tolerance_percent(1.0 + ThresholdTolerance/100.0),
    1.61 +    _young_gen_change_for_minor_throughput(0),
    1.62 +    _old_gen_change_for_major_throughput(0) {
    1.63 +  assert(AdaptiveSizePolicyGCTimeLimitThreshold > 0,
    1.64 +    "No opportunity to clear SoftReferences before GC overhead limit");
    1.65 +  _avg_minor_pause    =
    1.66 +    new AdaptivePaddedAverage(AdaptiveTimeWeight, PausePadding);
    1.67 +  _avg_minor_interval = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
    1.68 +  _avg_minor_gc_cost  = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
    1.69 +  _avg_major_gc_cost  = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
    1.70 +
    1.71 +  _avg_young_live     = new AdaptiveWeightedAverage(AdaptiveSizePolicyWeight);
    1.72 +  _avg_old_live       = new AdaptiveWeightedAverage(AdaptiveSizePolicyWeight);
    1.73 +  _avg_eden_live      = new AdaptiveWeightedAverage(AdaptiveSizePolicyWeight);
    1.74 +
    1.75 +  _avg_survived       = new AdaptivePaddedAverage(AdaptiveSizePolicyWeight,
    1.76 +                                                  SurvivorPadding);
    1.77 +  _avg_pretenured     = new AdaptivePaddedNoZeroDevAverage(
    1.78 +                                                  AdaptiveSizePolicyWeight,
    1.79 +                                                  SurvivorPadding);
    1.80 +
    1.81 +  _minor_pause_old_estimator =
    1.82 +    new LinearLeastSquareFit(AdaptiveSizePolicyWeight);
    1.83 +  _minor_pause_young_estimator =
    1.84 +    new LinearLeastSquareFit(AdaptiveSizePolicyWeight);
    1.85 +  _minor_collection_estimator =
    1.86 +    new LinearLeastSquareFit(AdaptiveSizePolicyWeight);
    1.87 +  _major_collection_estimator =
    1.88 +    new LinearLeastSquareFit(AdaptiveSizePolicyWeight);
    1.89 +
    1.90 +  // Start the timers
    1.91 +  _minor_timer.start();
    1.92 +
    1.93 +  _young_gen_policy_is_ready = false;
    1.94 +}
    1.95 +
    1.96 +//  If the number of GC threads was set on the command line,
    1.97 +// use it.
    1.98 +//  Else
    1.99 +//    Calculate the number of GC threads based on the number of Java threads.
   1.100 +//    Calculate the number of GC threads based on the size of the heap.
   1.101 +//    Use the larger.
   1.102 +
   1.103 +int AdaptiveSizePolicy::calc_default_active_workers(uintx total_workers,
   1.104 +                                            const uintx min_workers,
   1.105 +                                            uintx active_workers,
   1.106 +                                            uintx application_workers) {
   1.107 +  // If the user has specifically set the number of
   1.108 +  // GC threads, use them.
   1.109 +
   1.110 +  // If the user has turned off using a dynamic number of GC threads
    1.111 +  // or the user has requested a specific number, set the active
   1.112 +  // number of workers to all the workers.
   1.113 +
   1.114 +  uintx new_active_workers = total_workers;
   1.115 +  uintx prev_active_workers = active_workers;
   1.116 +  uintx active_workers_by_JT = 0;
   1.117 +  uintx active_workers_by_heap_size = 0;
   1.118 +
   1.119 +  // Always use at least min_workers but use up to
    1.120 +  // GCWorkersPerJavaThread * application threads.
   1.121 +  active_workers_by_JT =
   1.122 +    MAX2((uintx) GCWorkersPerJavaThread * application_workers,
   1.123 +         min_workers);
   1.124 +
   1.125 +  // Choose a number of GC threads based on the current size
   1.126 +  // of the heap.  This may be complicated because the size of
    1.127 +  // the heap depends on factors such as the throughput goal.
    1.128 +  // Still, a large heap should be collected by more GC threads.
   1.129 +  active_workers_by_heap_size =
   1.130 +      MAX2((size_t) 2U, Universe::heap()->capacity() / HeapSizePerGCThread);
   1.131 +
   1.132 +  uintx max_active_workers =
   1.133 +    MAX2(active_workers_by_JT, active_workers_by_heap_size);
   1.134 +
    1.135 +  // Limit the number of workers to the number created
   1.136 +  // (workers()).
   1.137 +  new_active_workers = MIN2(max_active_workers,
   1.138 +                                (uintx) total_workers);
   1.139 +
   1.140 +  // Increase GC workers instantly but decrease them more
   1.141 +  // slowly.
   1.142 +  if (new_active_workers < prev_active_workers) {
   1.143 +    new_active_workers =
   1.144 +      MAX2(min_workers, (prev_active_workers + new_active_workers) / 2);
   1.145 +  }
   1.146 +
   1.147 +  // Check once more that the number of workers is within the limits.
   1.148 +  assert(min_workers <= total_workers, "Minimum workers not consistent with total workers");
   1.149 +  assert(new_active_workers >= min_workers, "Minimum workers not observed");
   1.150 +  assert(new_active_workers <= total_workers, "Total workers not observed");
   1.151 +
   1.152 +  if (ForceDynamicNumberOfGCThreads) {
   1.153 +    // Assume this is debugging and jiggle the number of GC threads.
   1.154 +    if (new_active_workers == prev_active_workers) {
   1.155 +      if (new_active_workers < total_workers) {
   1.156 +        new_active_workers++;
   1.157 +      } else if (new_active_workers > min_workers) {
   1.158 +        new_active_workers--;
   1.159 +      }
   1.160 +    }
   1.161 +    if (new_active_workers == total_workers) {
   1.162 +      if (_debug_perturbation) {
   1.163 +        new_active_workers =  min_workers;
   1.164 +      }
   1.165 +      _debug_perturbation = !_debug_perturbation;
   1.166 +    }
   1.167 +    assert((new_active_workers <= (uintx) ParallelGCThreads) &&
   1.168 +           (new_active_workers >= min_workers),
   1.169 +      "Jiggled active workers too much");
   1.170 +  }
   1.171 +
   1.172 +  if (TraceDynamicGCThreads) {
   1.173 +     gclog_or_tty->print_cr("GCTaskManager::calc_default_active_workers() : "
   1.174 +       "active_workers(): %d  new_acitve_workers: %d  "
   1.175 +       "prev_active_workers: %d\n"
   1.176 +       " active_workers_by_JT: %d  active_workers_by_heap_size: %d",
   1.177 +       (int) active_workers, (int) new_active_workers, (int) prev_active_workers,
   1.178 +       (int) active_workers_by_JT, (int) active_workers_by_heap_size);
   1.179 +  }
   1.180 +  assert(new_active_workers > 0, "Always need at least 1");
   1.181 +  return new_active_workers;
   1.182 +}
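The sizing heuristic above can be exercised outside the VM with a small standalone sketch. The constants below are illustrative stand-ins for the GCWorkersPerJavaThread and HeapSizePerGCThread flags, and the function is a simplification of, not a substitute for, the code above:

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    // Illustrative stand-ins for the flags referenced above.
    const uint64_t kGCWorkersPerJavaThread = 2;
    const uint64_t kHeapSizePerGCThread    = 64ULL * 1024 * 1024;  // 64M, made up for the demo

    uint64_t default_active_workers(uint64_t total_workers,
                                    uint64_t min_workers,
                                    uint64_t prev_active_workers,
                                    uint64_t application_workers,
                                    uint64_t heap_capacity) {
      // Scale with the number of Java threads, but never below the minimum.
      uint64_t by_java_threads =
          std::max(kGCWorkersPerJavaThread * application_workers, min_workers);
      // Scale with heap size: larger heaps get more GC threads.
      uint64_t by_heap_size =
          std::max<uint64_t>(2, heap_capacity / kHeapSizePerGCThread);
      // Use the larger of the two, capped at the number of workers created.
      uint64_t candidate =
          std::min(std::max(by_java_threads, by_heap_size), total_workers);
      // Increase instantly, but decrease only half-way toward the new value.
      if (candidate < prev_active_workers) {
        candidate = std::max(min_workers, (prev_active_workers + candidate) / 2);
      }
      return candidate;
    }

    int main() {
      // 8 created workers, 3 Java threads, 512M heap, 8 previously active.
      std::printf("active workers: %llu\n",
                  (unsigned long long) default_active_workers(8, 2, 8, 3, 512ULL << 20));
      return 0;
    }

With those numbers: by_java_threads = 6 and by_heap_size = 8, so the candidate stays at 8 and no decay is applied.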
   1.183 +
   1.184 +int AdaptiveSizePolicy::calc_active_workers(uintx total_workers,
   1.185 +                                            uintx active_workers,
   1.186 +                                            uintx application_workers) {
   1.187 +  // If the user has specifically set the number of
   1.188 +  // GC threads, use them.
   1.189 +
   1.190 +  // If the user has turned off using a dynamic number of GC threads
    1.191 +  // or the user has requested a specific number, set the active
   1.192 +  // number of workers to all the workers.
   1.193 +
   1.194 +  int new_active_workers;
   1.195 +  if (!UseDynamicNumberOfGCThreads ||
   1.196 +     (!FLAG_IS_DEFAULT(ParallelGCThreads) && !ForceDynamicNumberOfGCThreads)) {
   1.197 +    new_active_workers = total_workers;
   1.198 +  } else {
   1.199 +    new_active_workers = calc_default_active_workers(total_workers,
   1.200 +                                                     2, /* Minimum number of workers */
   1.201 +                                                     active_workers,
   1.202 +                                                     application_workers);
   1.203 +  }
   1.204 +  assert(new_active_workers > 0, "Always need at least 1");
   1.205 +  return new_active_workers;
   1.206 +}
   1.207 +
   1.208 +int AdaptiveSizePolicy::calc_active_conc_workers(uintx total_workers,
   1.209 +                                                 uintx active_workers,
   1.210 +                                                 uintx application_workers) {
   1.211 +  if (!UseDynamicNumberOfGCThreads ||
   1.212 +     (!FLAG_IS_DEFAULT(ConcGCThreads) && !ForceDynamicNumberOfGCThreads)) {
   1.213 +    return ConcGCThreads;
   1.214 +  } else {
   1.215 +    int no_of_gc_threads = calc_default_active_workers(
   1.216 +                             total_workers,
   1.217 +                             1, /* Minimum number of workers */
   1.218 +                             active_workers,
   1.219 +                             application_workers);
   1.220 +    return no_of_gc_threads;
   1.221 +  }
   1.222 +}
   1.223 +
   1.224 +bool AdaptiveSizePolicy::tenuring_threshold_change() const {
   1.225 +  return decrement_tenuring_threshold_for_gc_cost() ||
   1.226 +         increment_tenuring_threshold_for_gc_cost() ||
   1.227 +         decrement_tenuring_threshold_for_survivor_limit();
   1.228 +}
   1.229 +
   1.230 +void AdaptiveSizePolicy::minor_collection_begin() {
   1.231 +  // Update the interval time
   1.232 +  _minor_timer.stop();
   1.233 +  // Save most recent collection time
   1.234 +  _latest_minor_mutator_interval_seconds = _minor_timer.seconds();
   1.235 +  _minor_timer.reset();
   1.236 +  _minor_timer.start();
   1.237 +}
   1.238 +
   1.239 +void AdaptiveSizePolicy::update_minor_pause_young_estimator(
   1.240 +    double minor_pause_in_ms) {
   1.241 +  double eden_size_in_mbytes = ((double)_eden_size)/((double)M);
   1.242 +  _minor_pause_young_estimator->update(eden_size_in_mbytes,
   1.243 +    minor_pause_in_ms);
   1.244 +}
   1.245 +
   1.246 +void AdaptiveSizePolicy::minor_collection_end(GCCause::Cause gc_cause) {
   1.247 +  // Update the pause time.
   1.248 +  _minor_timer.stop();
   1.249 +
   1.250 +  if (gc_cause != GCCause::_java_lang_system_gc ||
   1.251 +      UseAdaptiveSizePolicyWithSystemGC) {
   1.252 +    double minor_pause_in_seconds = _minor_timer.seconds();
   1.253 +    double minor_pause_in_ms = minor_pause_in_seconds * MILLIUNITS;
   1.254 +
   1.255 +    // Sample for performance counter
   1.256 +    _avg_minor_pause->sample(minor_pause_in_seconds);
   1.257 +
   1.258 +    // Cost of collection (unit-less)
   1.259 +    double collection_cost = 0.0;
   1.260 +    if ((_latest_minor_mutator_interval_seconds > 0.0) &&
   1.261 +        (minor_pause_in_seconds > 0.0)) {
   1.262 +      double interval_in_seconds =
   1.263 +        _latest_minor_mutator_interval_seconds + minor_pause_in_seconds;
   1.264 +      collection_cost =
   1.265 +        minor_pause_in_seconds / interval_in_seconds;
   1.266 +      _avg_minor_gc_cost->sample(collection_cost);
   1.267 +      // Sample for performance counter
   1.268 +      _avg_minor_interval->sample(interval_in_seconds);
   1.269 +    }
   1.270 +
   1.271 +    // The policy does not have enough data until at least some
   1.272 +    // minor collections have been done.
   1.273 +    _young_gen_policy_is_ready =
   1.274 +      (_avg_minor_gc_cost->count() >= AdaptiveSizePolicyReadyThreshold);
   1.275 +
   1.276 +    // Calculate variables used to estimate pause time vs. gen sizes
   1.277 +    double eden_size_in_mbytes = ((double)_eden_size)/((double)M);
   1.278 +    update_minor_pause_young_estimator(minor_pause_in_ms);
   1.279 +    update_minor_pause_old_estimator(minor_pause_in_ms);
   1.280 +
   1.281 +    if (PrintAdaptiveSizePolicy && Verbose) {
   1.282 +      gclog_or_tty->print("AdaptiveSizePolicy::minor_collection_end: "
   1.283 +        "minor gc cost: %f  average: %f", collection_cost,
   1.284 +        _avg_minor_gc_cost->average());
   1.285 +      gclog_or_tty->print_cr("  minor pause: %f minor period %f",
   1.286 +        minor_pause_in_ms,
   1.287 +        _latest_minor_mutator_interval_seconds * MILLIUNITS);
   1.288 +    }
   1.289 +
   1.290 +    // Calculate variable used to estimate collection cost vs. gen sizes
   1.291 +    assert(collection_cost >= 0.0, "Expected to be non-negative");
   1.292 +    _minor_collection_estimator->update(eden_size_in_mbytes, collection_cost);
   1.293 +  }
   1.294 +
   1.295 +  // Interval times use this timer to measure the mutator time.
   1.296 +  // Reset the timer after the GC pause.
   1.297 +  _minor_timer.reset();
   1.298 +  _minor_timer.start();
   1.299 +}
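As a small aside on the "cost of collection" computed above: it is simply the pause time divided by the pause-plus-mutator window, so a 20 ms pause after 980 ms of mutator time is a 2% minor-gc cost. A standalone arithmetic sketch (illustrative only, not HotSpot code):

    #include <cstdio>

    // collection_cost = pause / (mutator_interval + pause), unit-less in [0, 1).
    double minor_collection_cost(double mutator_interval_s, double pause_s) {
      return pause_s / (mutator_interval_s + pause_s);
    }

    int main() {
      std::printf("cost = %.3f\n", minor_collection_cost(0.980, 0.020));  // prints 0.020
      return 0;
    }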
   1.300 +
   1.301 +size_t AdaptiveSizePolicy::eden_increment(size_t cur_eden,
   1.302 +                                            uint percent_change) {
   1.303 +  size_t eden_heap_delta;
   1.304 +  eden_heap_delta = cur_eden / 100 * percent_change;
   1.305 +  return eden_heap_delta;
   1.306 +}
   1.307 +
   1.308 +size_t AdaptiveSizePolicy::eden_increment(size_t cur_eden) {
   1.309 +  return eden_increment(cur_eden, YoungGenerationSizeIncrement);
   1.310 +}
   1.311 +
   1.312 +size_t AdaptiveSizePolicy::eden_decrement(size_t cur_eden) {
   1.313 +  size_t eden_heap_delta = eden_increment(cur_eden) /
   1.314 +    AdaptiveSizeDecrementScaleFactor;
   1.315 +  return eden_heap_delta;
   1.316 +}
   1.317 +
   1.318 +size_t AdaptiveSizePolicy::promo_increment(size_t cur_promo,
   1.319 +                                             uint percent_change) {
   1.320 +  size_t promo_heap_delta;
   1.321 +  promo_heap_delta = cur_promo / 100 * percent_change;
   1.322 +  return promo_heap_delta;
   1.323 +}
   1.324 +
   1.325 +size_t AdaptiveSizePolicy::promo_increment(size_t cur_promo) {
   1.326 +  return promo_increment(cur_promo, TenuredGenerationSizeIncrement);
   1.327 +}
   1.328 +
   1.329 +size_t AdaptiveSizePolicy::promo_decrement(size_t cur_promo) {
   1.330 +  size_t promo_heap_delta = promo_increment(cur_promo);
   1.331 +  promo_heap_delta = promo_heap_delta / AdaptiveSizeDecrementScaleFactor;
   1.332 +  return promo_heap_delta;
   1.333 +}
   1.334 +
   1.335 +double AdaptiveSizePolicy::time_since_major_gc() const {
   1.336 +  _major_timer.stop();
   1.337 +  double result = _major_timer.seconds();
   1.338 +  _major_timer.start();
   1.339 +  return result;
   1.340 +}
   1.341 +
   1.342 +// Linear decay of major gc cost
   1.343 +double AdaptiveSizePolicy::decaying_major_gc_cost() const {
   1.344 +  double major_interval = major_gc_interval_average_for_decay();
   1.345 +  double major_gc_cost_average = major_gc_cost();
   1.346 +  double decayed_major_gc_cost = major_gc_cost_average;
    1.347 +  if (time_since_major_gc() > 0.0) {
   1.348 +    decayed_major_gc_cost = major_gc_cost() *
   1.349 +      (((double) AdaptiveSizeMajorGCDecayTimeScale) * major_interval)
   1.350 +      / time_since_major_gc();
   1.351 +  }
   1.352 +
   1.353 +  // The decayed cost should always be smaller than the
   1.354 +  // average cost but the vagaries of finite arithmetic could
   1.355 +  // produce a larger value in decayed_major_gc_cost so protect
   1.356 +  // against that.
   1.357 +  return MIN2(major_gc_cost_average, decayed_major_gc_cost);
   1.358 +}
   1.359 +
   1.360 +// Use a value of the major gc cost that has been decayed
   1.361 +// by the factor
   1.362 +//
   1.363 +//      average-interval-between-major-gc * AdaptiveSizeMajorGCDecayTimeScale /
   1.364 +//        time-since-last-major-gc
   1.365 +//
   1.366 +// if the average-interval-between-major-gc * AdaptiveSizeMajorGCDecayTimeScale
   1.367 +// is less than time-since-last-major-gc.
   1.368 +//
   1.369 +// In cases where there are initial major gc's that
   1.370 +// are of a relatively high cost but no later major
   1.371 +// gc's, the total gc cost can remain high because
   1.372 +// the major gc cost remains unchanged (since there are no major
   1.373 +// gc's).  In such a situation the value of the unchanging
   1.374 +// major gc cost can keep the mutator throughput below
   1.375 +// the goal when in fact the major gc cost is becoming diminishingly
   1.376 +// small.  Use the decaying gc cost only to decide whether to
   1.377 +// adjust for throughput.  Using it also to determine the adjustment
    1.378 +// to be made for throughput seems reasonable, but there is no test
    1.379 +// case with which to decide whether that is the right thing to do,
    1.380 +// so don't do it yet.
   1.381 +
   1.382 +double AdaptiveSizePolicy::decaying_gc_cost() const {
   1.383 +  double decayed_major_gc_cost = major_gc_cost();
   1.384 +  double avg_major_interval = major_gc_interval_average_for_decay();
   1.385 +  if (UseAdaptiveSizeDecayMajorGCCost &&
   1.386 +      (AdaptiveSizeMajorGCDecayTimeScale > 0) &&
   1.387 +      (avg_major_interval > 0.00)) {
   1.388 +    double time_since_last_major_gc = time_since_major_gc();
   1.389 +
   1.390 +    // Decay the major gc cost?
   1.391 +    if (time_since_last_major_gc >
   1.392 +        ((double) AdaptiveSizeMajorGCDecayTimeScale) * avg_major_interval) {
   1.393 +
   1.394 +      // Decay using the time-since-last-major-gc
   1.395 +      decayed_major_gc_cost = decaying_major_gc_cost();
   1.396 +      if (PrintGCDetails && Verbose) {
   1.397 +        gclog_or_tty->print_cr("\ndecaying_gc_cost: major interval average:"
   1.398 +          " %f  time since last major gc: %f",
   1.399 +          avg_major_interval, time_since_last_major_gc);
   1.400 +        gclog_or_tty->print_cr("  major gc cost: %f  decayed major gc cost: %f",
   1.401 +          major_gc_cost(), decayed_major_gc_cost);
   1.402 +      }
   1.403 +    }
   1.404 +  }
   1.405 +  double result = MIN2(1.0, decayed_major_gc_cost + minor_gc_cost());
   1.406 +  return result;
   1.407 +}
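To make the decay factor described above concrete, here is a standalone sketch of the arithmetic (the time-scale value is illustrative; the real knob is the AdaptiveSizeMajorGCDecayTimeScale flag):

    #include <algorithm>
    #include <cstdio>

    const double kDecayTimeScale = 10.0;  // illustrative stand-in for the flag

    // Decay applies once time_since_major_gc exceeds
    // kDecayTimeScale * avg_major_interval; the reported cost then shrinks
    // in proportion to the time since the last major collection.
    double decayed_major_gc_cost(double major_gc_cost,
                                 double avg_major_interval,
                                 double time_since_major_gc) {
      if (time_since_major_gc <= kDecayTimeScale * avg_major_interval) {
        return major_gc_cost;
      }
      double decayed = major_gc_cost *
          (kDecayTimeScale * avg_major_interval) / time_since_major_gc;
      // Never report more than the undecayed average.
      return std::min(major_gc_cost, decayed);
    }

    int main() {
      // Cost 0.30, average major interval 5s: 100s after the last major gc
      // the decayed cost is 0.30 * (10 * 5) / 100 = 0.15.
      std::printf("decayed cost = %.2f\n", decayed_major_gc_cost(0.30, 5.0, 100.0));
      return 0;
    }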
   1.408 +
   1.409 +
   1.410 +void AdaptiveSizePolicy::clear_generation_free_space_flags() {
   1.411 +  set_change_young_gen_for_min_pauses(0);
   1.412 +  set_change_old_gen_for_maj_pauses(0);
   1.413 +
   1.414 +  set_change_old_gen_for_throughput(0);
   1.415 +  set_change_young_gen_for_throughput(0);
   1.416 +  set_decrease_for_footprint(0);
   1.417 +  set_decide_at_full_gc(0);
   1.418 +}
   1.419 +
   1.420 +void AdaptiveSizePolicy::check_gc_overhead_limit(
   1.421 +                                          size_t young_live,
   1.422 +                                          size_t eden_live,
   1.423 +                                          size_t max_old_gen_size,
   1.424 +                                          size_t max_eden_size,
   1.425 +                                          bool   is_full_gc,
   1.426 +                                          GCCause::Cause gc_cause,
   1.427 +                                          CollectorPolicy* collector_policy) {
   1.428 +
   1.429 +  // Ignore explicit GC's.  Exiting here does not set the flag and
   1.430 +  // does not reset the count.  Updating of the averages for system
   1.431 +  // GC's is still controlled by UseAdaptiveSizePolicyWithSystemGC.
   1.432 +  if (GCCause::is_user_requested_gc(gc_cause) ||
   1.433 +      GCCause::is_serviceability_requested_gc(gc_cause)) {
   1.434 +    return;
   1.435 +  }
   1.436 +  // eden_limit is the upper limit on the size of eden based on
   1.437 +  // the maximum size of the young generation and the sizes
   1.438 +  // of the survivor space.
   1.439 +  // The question being asked is whether the gc costs are high
   1.440 +  // and the space being recovered by a collection is low.
   1.441 +  // free_in_young_gen is the free space in the young generation
   1.442 +  // after a collection and promo_live is the free space in the old
   1.443 +  // generation after a collection.
   1.444 +  //
   1.445 +  // Use the minimum of the current value of the live in the
   1.446 +  // young gen or the average of the live in the young gen.
   1.447 +  // If the current value drops quickly, that should be taken
   1.448 +  // into account (i.e., don't trigger if the amount of free
   1.449 +  // space has suddenly jumped up).  If the current is much
   1.450 +  // higher than the average, use the average since it represents
    1.451 +  // the longer term behavior.
   1.452 +  const size_t live_in_eden =
   1.453 +    MIN2(eden_live, (size_t) avg_eden_live()->average());
   1.454 +  const size_t free_in_eden = max_eden_size > live_in_eden ?
   1.455 +    max_eden_size - live_in_eden : 0;
   1.456 +  const size_t free_in_old_gen = (size_t)(max_old_gen_size - avg_old_live()->average());
   1.457 +  const size_t total_free_limit = free_in_old_gen + free_in_eden;
   1.458 +  const size_t total_mem = max_old_gen_size + max_eden_size;
   1.459 +  const double mem_free_limit = total_mem * (GCHeapFreeLimit/100.0);
   1.460 +  const double mem_free_old_limit = max_old_gen_size * (GCHeapFreeLimit/100.0);
   1.461 +  const double mem_free_eden_limit = max_eden_size * (GCHeapFreeLimit/100.0);
   1.462 +  const double gc_cost_limit = GCTimeLimit/100.0;
   1.463 +  size_t promo_limit = (size_t)(max_old_gen_size - avg_old_live()->average());
   1.464 +  // But don't force a promo size below the current promo size. Otherwise,
   1.465 +  // the promo size will shrink for no good reason.
   1.466 +  promo_limit = MAX2(promo_limit, _promo_size);
   1.467 +
   1.468 +
   1.469 +  if (PrintAdaptiveSizePolicy && (Verbose ||
   1.470 +      (free_in_old_gen < (size_t) mem_free_old_limit &&
   1.471 +       free_in_eden < (size_t) mem_free_eden_limit))) {
   1.472 +    gclog_or_tty->print_cr(
   1.473 +          "PSAdaptiveSizePolicy::check_gc_overhead_limit:"
   1.474 +          " promo_limit: " SIZE_FORMAT
   1.475 +          " max_eden_size: " SIZE_FORMAT
   1.476 +          " total_free_limit: " SIZE_FORMAT
   1.477 +          " max_old_gen_size: " SIZE_FORMAT
   1.478 +          " max_eden_size: " SIZE_FORMAT
   1.479 +          " mem_free_limit: " SIZE_FORMAT,
   1.480 +          promo_limit, max_eden_size, total_free_limit,
   1.481 +          max_old_gen_size, max_eden_size,
   1.482 +          (size_t) mem_free_limit);
   1.483 +  }
   1.484 +
   1.485 +  bool print_gc_overhead_limit_would_be_exceeded = false;
   1.486 +  if (is_full_gc) {
   1.487 +    if (gc_cost() > gc_cost_limit &&
   1.488 +      free_in_old_gen < (size_t) mem_free_old_limit &&
   1.489 +      free_in_eden < (size_t) mem_free_eden_limit) {
   1.490 +      // Collections, on average, are taking too much time, and
   1.491 +      //      gc_cost() > gc_cost_limit
   1.492 +      // we have too little space available after a full gc.
   1.493 +      //      total_free_limit < mem_free_limit
   1.494 +      // where
   1.495 +      //   total_free_limit is the free space available in
   1.496 +      //     both generations
   1.497 +      //   total_mem is the total space available for allocation
   1.498 +      //     in both generations (survivor spaces are not included
   1.499 +      //     just as they are not included in eden_limit).
   1.500 +      //   mem_free_limit is a fraction of total_mem judged to be an
   1.501 +      //     acceptable amount that is still unused.
   1.502 +      // The heap can ask for the value of this variable when deciding
    1.503 +      // whether to throw an OutOfMemory error.
   1.504 +      // Note that the gc time limit test only works for the collections
   1.505 +      // of the young gen + tenured gen and not for collections of the
   1.506 +      // permanent gen.  That is because the calculation of the space
   1.507 +      // freed by the collection is the free space in the young gen +
   1.508 +      // tenured gen.
   1.509 +      // At this point the GC overhead limit is being exceeded.
   1.510 +      inc_gc_overhead_limit_count();
   1.511 +      if (UseGCOverheadLimit) {
   1.512 +        if (gc_overhead_limit_count() >=
    1.513 +            AdaptiveSizePolicyGCTimeLimitThreshold) {
   1.514 +          // All conditions have been met for throwing an out-of-memory
   1.515 +          set_gc_overhead_limit_exceeded(true);
   1.516 +          // Avoid consecutive OOM due to the gc time limit by resetting
   1.517 +          // the counter.
   1.518 +          reset_gc_overhead_limit_count();
   1.519 +        } else {
    1.520 +          // The required number of consecutive collections that exceed
    1.521 +          // the GC time limit has not yet been reached.  We are
    1.522 +          // approaching that condition, so, in order not to
    1.523 +          // throw an out-of-memory before all SoftReferences have been
   1.524 +          // cleared, set _should_clear_all_soft_refs in CollectorPolicy.
   1.525 +          // The clearing will be done on the next GC.
   1.526 +          bool near_limit = gc_overhead_limit_near();
   1.527 +          if (near_limit) {
   1.528 +            collector_policy->set_should_clear_all_soft_refs(true);
   1.529 +            if (PrintGCDetails && Verbose) {
   1.530 +              gclog_or_tty->print_cr("  Nearing GC overhead limit, "
   1.531 +                "will be clearing all SoftReference");
   1.532 +            }
   1.533 +          }
   1.534 +        }
   1.535 +      }
   1.536 +      // Set this even when the overhead limit will not
    1.537 +      // cause an out-of-memory.  A diagnostic message indicating
   1.538 +      // that the overhead limit is being exceeded is sometimes
   1.539 +      // printed.
   1.540 +      print_gc_overhead_limit_would_be_exceeded = true;
   1.541 +
   1.542 +    } else {
   1.543 +      // Did not exceed overhead limits
   1.544 +      reset_gc_overhead_limit_count();
   1.545 +    }
   1.546 +  }
   1.547 +
   1.548 +  if (UseGCOverheadLimit && PrintGCDetails && Verbose) {
   1.549 +    if (gc_overhead_limit_exceeded()) {
   1.550 +      gclog_or_tty->print_cr("      GC is exceeding overhead limit "
   1.551 +        "of %d%%", (int) GCTimeLimit);
   1.552 +      reset_gc_overhead_limit_count();
   1.553 +    } else if (print_gc_overhead_limit_would_be_exceeded) {
   1.554 +      assert(gc_overhead_limit_count() > 0, "Should not be printing");
   1.555 +      gclog_or_tty->print_cr("      GC would exceed overhead limit "
   1.556 +        "of %d%% %d consecutive time(s)",
   1.557 +        (int) GCTimeLimit, gc_overhead_limit_count());
   1.558 +    }
   1.559 +  }
   1.560 +}
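The triggering condition above boils down to "GC cost above GCTimeLimit and free space in both generations below GCHeapFreeLimit". A minimal standalone sketch of that test (the two percentages are hard-coded here for illustration; in the VM they come from the GCTimeLimit and GCHeapFreeLimit flags):

    #include <cstdio>

    const double kGCTimeLimitPercent     = 98.0;  // illustrative, mirrors GCTimeLimit
    const double kGCHeapFreeLimitPercent = 2.0;   // illustrative, mirrors GCHeapFreeLimit

    bool overhead_limit_hit(double gc_cost,           // fraction of time in GC, 0..1
                            double free_in_old_gen,   // bytes free after collection
                            double max_old_gen_size,
                            double free_in_eden,
                            double max_eden_size) {
      bool gc_too_expensive = gc_cost > kGCTimeLimitPercent / 100.0;
      bool old_too_full  = free_in_old_gen < max_old_gen_size * (kGCHeapFreeLimitPercent / 100.0);
      bool eden_too_full = free_in_eden    < max_eden_size    * (kGCHeapFreeLimitPercent / 100.0);
      return gc_too_expensive && old_too_full && eden_too_full;
    }

    int main() {
      // 99% of time in GC with only 1% of the old gen and 1% of eden free.
      bool hit = overhead_limit_hit(0.99, 1.0 * (1 << 20), 100.0 * (1 << 20),
                                    1.0 * (1 << 20), 100.0 * (1 << 20));
      std::printf("%s\n", hit ? "overhead limit exceeded" : "ok");
      return 0;
    }

Only after this condition holds for AdaptiveSizePolicyGCTimeLimitThreshold consecutive full collections (and UseGCOverheadLimit is on) does the policy actually report the limit as exceeded, as the code above shows.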
   1.561 +// Printing
   1.562 +
   1.563 +bool AdaptiveSizePolicy::print_adaptive_size_policy_on(outputStream* st) const {
   1.564 +
   1.565 +  //  Should only be used with adaptive size policy turned on.
   1.566 +  // Otherwise, there may be variables that are undefined.
   1.567 +  if (!UseAdaptiveSizePolicy) return false;
   1.568 +
   1.569 +  // Print goal for which action is needed.
   1.570 +  char* action = NULL;
   1.571 +  bool change_for_pause = false;
   1.572 +  if ((change_old_gen_for_maj_pauses() ==
   1.573 +         decrease_old_gen_for_maj_pauses_true) ||
   1.574 +      (change_young_gen_for_min_pauses() ==
   1.575 +         decrease_young_gen_for_min_pauses_true)) {
   1.576 +    action = (char*) " *** pause time goal ***";
   1.577 +    change_for_pause = true;
   1.578 +  } else if ((change_old_gen_for_throughput() ==
   1.579 +               increase_old_gen_for_throughput_true) ||
   1.580 +            (change_young_gen_for_throughput() ==
   1.581 +               increase_young_gen_for_througput_true)) {
   1.582 +    action = (char*) " *** throughput goal ***";
   1.583 +  } else if (decrease_for_footprint()) {
   1.584 +    action = (char*) " *** reduced footprint ***";
   1.585 +  } else {
   1.586 +    // No actions were taken.  This can legitimately be the
   1.587 +    // situation if not enough data has been gathered to make
   1.588 +    // decisions.
   1.589 +    return false;
   1.590 +  }
   1.591 +
   1.592 +  // Pauses
   1.593 +  // Currently the size of the old gen is only adjusted to
   1.594 +  // change the major pause times.
   1.595 +  char* young_gen_action = NULL;
   1.596 +  char* tenured_gen_action = NULL;
   1.597 +
   1.598 +  char* shrink_msg = (char*) "(attempted to shrink)";
   1.599 +  char* grow_msg = (char*) "(attempted to grow)";
   1.600 +  char* no_change_msg = (char*) "(no change)";
   1.601 +  if (change_young_gen_for_min_pauses() ==
   1.602 +      decrease_young_gen_for_min_pauses_true) {
   1.603 +    young_gen_action = shrink_msg;
   1.604 +  } else if (change_for_pause) {
   1.605 +    young_gen_action = no_change_msg;
   1.606 +  }
   1.607 +
   1.608 +  if (change_old_gen_for_maj_pauses() == decrease_old_gen_for_maj_pauses_true) {
   1.609 +    tenured_gen_action = shrink_msg;
   1.610 +  } else if (change_for_pause) {
   1.611 +    tenured_gen_action = no_change_msg;
   1.612 +  }
   1.613 +
   1.614 +  // Throughput
   1.615 +  if (change_old_gen_for_throughput() == increase_old_gen_for_throughput_true) {
   1.616 +    assert(change_young_gen_for_throughput() ==
   1.617 +           increase_young_gen_for_througput_true,
   1.618 +           "Both generations should be growing");
   1.619 +    young_gen_action = grow_msg;
   1.620 +    tenured_gen_action = grow_msg;
   1.621 +  } else if (change_young_gen_for_throughput() ==
   1.622 +             increase_young_gen_for_througput_true) {
   1.623 +    // Only the young generation may grow at start up (before
   1.624 +    // enough full collections have been done to grow the old generation).
   1.625 +    young_gen_action = grow_msg;
   1.626 +    tenured_gen_action = no_change_msg;
   1.627 +  }
   1.628 +
   1.629 +  // Minimum footprint
   1.630 +  if (decrease_for_footprint() != 0) {
   1.631 +    young_gen_action = shrink_msg;
   1.632 +    tenured_gen_action = shrink_msg;
   1.633 +  }
   1.634 +
   1.635 +  st->print_cr("    UseAdaptiveSizePolicy actions to meet %s", action);
   1.636 +  st->print_cr("                       GC overhead (%%)");
   1.637 +  st->print_cr("    Young generation:     %7.2f\t  %s",
   1.638 +    100.0 * avg_minor_gc_cost()->average(),
   1.639 +    young_gen_action);
   1.640 +  st->print_cr("    Tenured generation:   %7.2f\t  %s",
   1.641 +    100.0 * avg_major_gc_cost()->average(),
   1.642 +    tenured_gen_action);
   1.643 +  return true;
   1.644 +}
   1.645 +
   1.646 +bool AdaptiveSizePolicy::print_adaptive_size_policy_on(
   1.647 +                                            outputStream* st,
   1.648 +                                            uint tenuring_threshold_arg) const {
   1.649 +  if (!AdaptiveSizePolicy::print_adaptive_size_policy_on(st)) {
   1.650 +    return false;
   1.651 +  }
   1.652 +
   1.653 +  // Tenuring threshold
   1.654 +  bool tenuring_threshold_changed = true;
   1.655 +  if (decrement_tenuring_threshold_for_survivor_limit()) {
   1.656 +    st->print("    Tenuring threshold:    (attempted to decrease to avoid"
   1.657 +              " survivor space overflow) = ");
   1.658 +  } else if (decrement_tenuring_threshold_for_gc_cost()) {
   1.659 +    st->print("    Tenuring threshold:    (attempted to decrease to balance"
   1.660 +              " GC costs) = ");
   1.661 +  } else if (increment_tenuring_threshold_for_gc_cost()) {
   1.662 +    st->print("    Tenuring threshold:    (attempted to increase to balance"
   1.663 +              " GC costs) = ");
   1.664 +  } else {
   1.665 +    tenuring_threshold_changed = false;
   1.666 +    assert(!tenuring_threshold_change(), "(no change was attempted)");
   1.667 +  }
   1.668 +  if (tenuring_threshold_changed) {
   1.669 +    st->print_cr("%u", tenuring_threshold_arg);
   1.670 +  }
   1.671 +  return true;
   1.672 +}
