src/share/vm/gc_implementation/shared/adaptiveSizePolicy.cpp

author:      johnc
date:        Thu, 20 Sep 2012 09:52:56 -0700
changeset:   4067 b2ef234911c9
parent:      3294 bca17e38de00
child:       4129 22b8d3d181d9
permissions: -rw-r--r--

7190666: G1: assert(_unused == 0) failed: Inconsistency in PLAB stats
Summary: Reset the fields in ParGCAllocBuffer, that are used for accumulating values for the ResizePLAB sensors in PLABStats, to zero after flushing the values to the PLABStats fields. Flush PLABStats values only when retiring the final allocation buffers prior to disposing of a G1ParScanThreadState object, rather than when retiring every allocation buffer.
Reviewed-by: jwilhelm, jmasa, ysr

/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
#include "gc_interface/gcCause.hpp"
#include "memory/collectorPolicy.hpp"
#include "runtime/timer.hpp"
#include "utilities/ostream.hpp"
#include "utilities/workgroup.hpp"
elapsedTimer AdaptiveSizePolicy::_minor_timer;
elapsedTimer AdaptiveSizePolicy::_major_timer;
bool AdaptiveSizePolicy::_debug_perturbation = false;

// The throughput goal is implemented as
//      _throughput_goal = 1 - ( 1 / (1 + gc_cost_ratio))
// gc_cost_ratio is the ratio
//      application cost / gc cost
// For example, a gc_cost_ratio of 4 translates into a
// throughput goal of .80
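// More generally, a gc_cost_ratio of N yields a goal of N/(N+1); e.g., a
// ratio of 19 gives .95 and a ratio of 99 gives .99.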

AdaptiveSizePolicy::AdaptiveSizePolicy(size_t init_eden_size,
                                       size_t init_promo_size,
                                       size_t init_survivor_size,
                                       double gc_pause_goal_sec,
                                       uint gc_cost_ratio) :
    _eden_size(init_eden_size),
    _promo_size(init_promo_size),
    _survivor_size(init_survivor_size),
    _gc_pause_goal_sec(gc_pause_goal_sec),
    _throughput_goal(1.0 - double(1.0 / (1.0 + (double) gc_cost_ratio))),
    _gc_overhead_limit_exceeded(false),
    _print_gc_overhead_limit_would_be_exceeded(false),
    _gc_overhead_limit_count(0),
    _latest_minor_mutator_interval_seconds(0),
    _threshold_tolerance_percent(1.0 + ThresholdTolerance/100.0),
    _young_gen_change_for_minor_throughput(0),
    _old_gen_change_for_major_throughput(0) {
  assert(AdaptiveSizePolicyGCTimeLimitThreshold > 0,
    "No opportunity to clear SoftReferences before GC overhead limit");
  _avg_minor_pause    =
    new AdaptivePaddedAverage(AdaptiveTimeWeight, PausePadding);
  _avg_minor_interval = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
  _avg_minor_gc_cost  = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
  _avg_major_gc_cost  = new AdaptiveWeightedAverage(AdaptiveTimeWeight);

  _avg_young_live     = new AdaptiveWeightedAverage(AdaptiveSizePolicyWeight);
  _avg_old_live       = new AdaptiveWeightedAverage(AdaptiveSizePolicyWeight);
  _avg_eden_live      = new AdaptiveWeightedAverage(AdaptiveSizePolicyWeight);

  _avg_survived       = new AdaptivePaddedAverage(AdaptiveSizePolicyWeight,
                                                  SurvivorPadding);
  _avg_pretenured     = new AdaptivePaddedNoZeroDevAverage(
                                                  AdaptiveSizePolicyWeight,
                                                  SurvivorPadding);

  _minor_pause_old_estimator =
    new LinearLeastSquareFit(AdaptiveSizePolicyWeight);
  _minor_pause_young_estimator =
    new LinearLeastSquareFit(AdaptiveSizePolicyWeight);
  _minor_collection_estimator =
    new LinearLeastSquareFit(AdaptiveSizePolicyWeight);
  _major_collection_estimator =
    new LinearLeastSquareFit(AdaptiveSizePolicyWeight);

  // Start the timers
  _minor_timer.start();

  _young_gen_policy_is_ready = false;
}

//  If the number of GC threads was set on the command line,
// use it.
//  Else
//    Calculate the number of GC threads based on the number of Java threads.
//    Calculate the number of GC threads based on the size of the heap.
//    Use the larger.
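//
// Illustrative example (numbers are hypothetical): with GCWorkersPerJavaThread
// of 2 and 3 application threads, the thread-based estimate is 6 workers; a
// heap whose capacity is 8 * HeapSizePerGCThread gives a heap-based estimate
// of 8.  The larger value (8) is then capped at the number of workers that
// were actually created.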
int AdaptiveSizePolicy::calc_default_active_workers(uintx total_workers,
                                            const uintx min_workers,
                                            uintx active_workers,
                                            uintx application_workers) {
  // If the user has specifically set the number of
  // GC threads, use them.

  // If the user has turned off using a dynamic number of GC threads
  // or the user has requested a specific number, set the active
  // number of workers to all the workers.

  uintx new_active_workers = total_workers;
  uintx prev_active_workers = active_workers;
  uintx active_workers_by_JT = 0;
  uintx active_workers_by_heap_size = 0;

  // Always use at least min_workers but use up to
  // GCWorkersPerJavaThread * application threads.
  active_workers_by_JT =
    MAX2((uintx) GCWorkersPerJavaThread * application_workers,
         min_workers);

  // Choose a number of GC threads based on the current size
  // of the heap.  This may be complicated because the size of
  // the heap depends on factors such as the throughput goal.
  // Still, a large heap should be collected by more GC threads.
  active_workers_by_heap_size =
      MAX2((size_t) 2U, Universe::heap()->capacity() / HeapSizePerGCThread);

  uintx max_active_workers =
    MAX2(active_workers_by_JT, active_workers_by_heap_size);

  // Limit the number of workers to the number created,
  // (workers()).
  new_active_workers = MIN2(max_active_workers,
                                (uintx) total_workers);

  // Increase GC workers instantly but decrease them more
  // slowly.
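  // For example (hypothetical numbers), going from 8 active workers to a
  // computed value of 2 steps down through (8 + 2) / 2 = 5, then 3, then 2
  // over successive calls, while an increase takes effect immediately.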
  if (new_active_workers < prev_active_workers) {
    new_active_workers =
      MAX2(min_workers, (prev_active_workers + new_active_workers) / 2);
  }

  // Check once more that the number of workers is within the limits.
  assert(min_workers <= total_workers, "Minimum workers not consistent with total workers");
  assert(new_active_workers >= min_workers, "Minimum workers not observed");
  assert(new_active_workers <= total_workers, "Total workers not observed");

  if (ForceDynamicNumberOfGCThreads) {
    // Assume this is debugging and jiggle the number of GC threads.
    if (new_active_workers == prev_active_workers) {
      if (new_active_workers < total_workers) {
        new_active_workers++;
      } else if (new_active_workers > min_workers) {
        new_active_workers--;
      }
    }
    if (new_active_workers == total_workers) {
      if (_debug_perturbation) {
        new_active_workers = min_workers;
      }
      _debug_perturbation = !_debug_perturbation;
    }
    assert((new_active_workers <= (uintx) ParallelGCThreads) &&
           (new_active_workers >= min_workers),
      "Jiggled active workers too much");
  }

  if (TraceDynamicGCThreads) {
     gclog_or_tty->print_cr("GCTaskManager::calc_default_active_workers() : "
       "active_workers(): %d  new_active_workers: %d  "
       "prev_active_workers: %d\n"
       " active_workers_by_JT: %d  active_workers_by_heap_size: %d",
       active_workers, new_active_workers, prev_active_workers,
       active_workers_by_JT, active_workers_by_heap_size);
  }
  assert(new_active_workers > 0, "Always need at least 1");
  return new_active_workers;
}

int AdaptiveSizePolicy::calc_active_workers(uintx total_workers,
                                            uintx active_workers,
                                            uintx application_workers) {
  // If the user has specifically set the number of
  // GC threads, use them.

  // If the user has turned off using a dynamic number of GC threads
  // or the user has requested a specific number, set the active
  // number of workers to all the workers.
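  //
  // For example, if ParallelGCThreads was set explicitly on the command line
  // and ForceDynamicNumberOfGCThreads is off, the dynamic calculation below is
  // skipped and all of total_workers are made active.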

  int new_active_workers;
  if (!UseDynamicNumberOfGCThreads ||
     (!FLAG_IS_DEFAULT(ParallelGCThreads) && !ForceDynamicNumberOfGCThreads)) {
    new_active_workers = total_workers;
  } else {
    new_active_workers = calc_default_active_workers(total_workers,
                                                     2, /* Minimum number of workers */
                                                     active_workers,
                                                     application_workers);
  }
  assert(new_active_workers > 0, "Always need at least 1");
  return new_active_workers;
}

int AdaptiveSizePolicy::calc_active_conc_workers(uintx total_workers,
                                                 uintx active_workers,
                                                 uintx application_workers) {
  if (!UseDynamicNumberOfGCThreads ||
     (!FLAG_IS_DEFAULT(ConcGCThreads) && !ForceDynamicNumberOfGCThreads)) {
    return ConcGCThreads;
  } else {
    int no_of_gc_threads = calc_default_active_workers(
                             total_workers,
                             1, /* Minimum number of workers */
                             active_workers,
                             application_workers);
    return no_of_gc_threads;
  }
}

bool AdaptiveSizePolicy::tenuring_threshold_change() const {
  return decrement_tenuring_threshold_for_gc_cost() ||
         increment_tenuring_threshold_for_gc_cost() ||
         decrement_tenuring_threshold_for_survivor_limit();
}

void AdaptiveSizePolicy::minor_collection_begin() {
  // Update the interval time
  _minor_timer.stop();
  // Save the most recent mutator interval time
  _latest_minor_mutator_interval_seconds = _minor_timer.seconds();
  _minor_timer.reset();
  _minor_timer.start();
}

void AdaptiveSizePolicy::update_minor_pause_young_estimator(
    double minor_pause_in_ms) {
  double eden_size_in_mbytes = ((double)_eden_size)/((double)M);
  _minor_pause_young_estimator->update(eden_size_in_mbytes,
    minor_pause_in_ms);
}

void AdaptiveSizePolicy::minor_collection_end(GCCause::Cause gc_cause) {
  // Update the pause time.
  _minor_timer.stop();

  if (gc_cause != GCCause::_java_lang_system_gc ||
      UseAdaptiveSizePolicyWithSystemGC) {
    double minor_pause_in_seconds = _minor_timer.seconds();
    double minor_pause_in_ms = minor_pause_in_seconds * MILLIUNITS;

    // Sample for performance counter
    _avg_minor_pause->sample(minor_pause_in_seconds);

    // Cost of collection (unit-less)
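    // (pause time as a fraction of the mutator-interval-plus-pause time;
    // e.g., a 20 ms pause after 1.98 s of mutator time is a cost of 0.01)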
    double collection_cost = 0.0;
    if ((_latest_minor_mutator_interval_seconds > 0.0) &&
        (minor_pause_in_seconds > 0.0)) {
      double interval_in_seconds =
        _latest_minor_mutator_interval_seconds + minor_pause_in_seconds;
      collection_cost =
        minor_pause_in_seconds / interval_in_seconds;
      _avg_minor_gc_cost->sample(collection_cost);
      // Sample for performance counter
      _avg_minor_interval->sample(interval_in_seconds);
    }

    // The policy does not have enough data until at least some
    // minor collections have been done.
    _young_gen_policy_is_ready =
      (_avg_minor_gc_cost->count() >= AdaptiveSizePolicyReadyThreshold);

    // Calculate variables used to estimate pause time vs. gen sizes
    double eden_size_in_mbytes = ((double)_eden_size)/((double)M);
    update_minor_pause_young_estimator(minor_pause_in_ms);
    update_minor_pause_old_estimator(minor_pause_in_ms);

    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print("AdaptiveSizePolicy::minor_collection_end: "
        "minor gc cost: %f  average: %f", collection_cost,
        _avg_minor_gc_cost->average());
      gclog_or_tty->print_cr("  minor pause: %f minor period %f",
        minor_pause_in_ms,
        _latest_minor_mutator_interval_seconds * MILLIUNITS);
    }

    // Calculate variable used to estimate collection cost vs. gen sizes
    assert(collection_cost >= 0.0, "Expected to be non-negative");
    _minor_collection_estimator->update(eden_size_in_mbytes, collection_cost);
  }

  // Interval times use this timer to measure the mutator time.
  // Reset the timer after the GC pause.
  _minor_timer.reset();
  _minor_timer.start();
}

size_t AdaptiveSizePolicy::eden_increment(size_t cur_eden,
                                            uint percent_change) {
  size_t eden_heap_delta;
  eden_heap_delta = cur_eden / 100 * percent_change;
  return eden_heap_delta;
}

size_t AdaptiveSizePolicy::eden_increment(size_t cur_eden) {
  return eden_increment(cur_eden, YoungGenerationSizeIncrement);
}

size_t AdaptiveSizePolicy::eden_decrement(size_t cur_eden) {
  size_t eden_heap_delta = eden_increment(cur_eden) /
    AdaptiveSizeDecrementScaleFactor;
  return eden_heap_delta;
}
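// For example, a percent_change of 20 grows a 500M eden by 100M, while the
// corresponding decrement is that amount divided by
// AdaptiveSizeDecrementScaleFactor (25M for a scale factor of 4).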

size_t AdaptiveSizePolicy::promo_increment(size_t cur_promo,
                                             uint percent_change) {
  size_t promo_heap_delta;
  promo_heap_delta = cur_promo / 100 * percent_change;
  return promo_heap_delta;
}

size_t AdaptiveSizePolicy::promo_increment(size_t cur_promo) {
  return promo_increment(cur_promo, TenuredGenerationSizeIncrement);
}

size_t AdaptiveSizePolicy::promo_decrement(size_t cur_promo) {
  size_t promo_heap_delta = promo_increment(cur_promo);
  promo_heap_delta = promo_heap_delta / AdaptiveSizeDecrementScaleFactor;
  return promo_heap_delta;
}

double AdaptiveSizePolicy::time_since_major_gc() const {
  _major_timer.stop();
  double result = _major_timer.seconds();
  _major_timer.start();
  return result;
}

// Linear decay of major gc cost
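// (e.g., with a major gc cost of 0.10, an average major interval of 10 s,
// AdaptiveSizeMajorGCDecayTimeScale of 10, and 400 s since the last major gc,
// the decayed cost is 0.10 * (10 * 10) / 400 = 0.025)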
double AdaptiveSizePolicy::decaying_major_gc_cost() const {
  double major_interval = major_gc_interval_average_for_decay();
  double major_gc_cost_average = major_gc_cost();
  double decayed_major_gc_cost = major_gc_cost_average;
  if (time_since_major_gc() > 0.0) {
    decayed_major_gc_cost = major_gc_cost() *
      (((double) AdaptiveSizeMajorGCDecayTimeScale) * major_interval)
      / time_since_major_gc();
  }

  // The decayed cost should always be smaller than the
  // average cost but the vagaries of finite arithmetic could
  // produce a larger value in decayed_major_gc_cost so protect
  // against that.
  return MIN2(major_gc_cost_average, decayed_major_gc_cost);
}

// Use a value of the major gc cost that has been decayed
// by the factor
//
//      average-interval-between-major-gc * AdaptiveSizeMajorGCDecayTimeScale /
//        time-since-last-major-gc
//
// if the average-interval-between-major-gc * AdaptiveSizeMajorGCDecayTimeScale
// is less than time-since-last-major-gc.
//
// In cases where there are initial major gc's that
// are of a relatively high cost but no later major
// gc's, the total gc cost can remain high because
// the major gc cost remains unchanged (since there are no major
// gc's).  In such a situation the value of the unchanging
// major gc cost can keep the mutator throughput below
// the goal when in fact the major gc cost is becoming diminishingly
// small.  Use the decaying gc cost only to decide whether to
// adjust for throughput.  Using it also to determine the adjustment
// to be made for throughput also seems reasonable but there is
// no test case to use to decide if it is the right thing to do,
// so don't do it yet.

double AdaptiveSizePolicy::decaying_gc_cost() const {
  double decayed_major_gc_cost = major_gc_cost();
  double avg_major_interval = major_gc_interval_average_for_decay();
  if (UseAdaptiveSizeDecayMajorGCCost &&
      (AdaptiveSizeMajorGCDecayTimeScale > 0) &&
      (avg_major_interval > 0.00)) {
    double time_since_last_major_gc = time_since_major_gc();

    // Decay the major gc cost?
    if (time_since_last_major_gc >
        ((double) AdaptiveSizeMajorGCDecayTimeScale) * avg_major_interval) {

      // Decay using the time-since-last-major-gc
      decayed_major_gc_cost = decaying_major_gc_cost();
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("\ndecaying_gc_cost: major interval average:"
          " %f  time since last major gc: %f",
          avg_major_interval, time_since_last_major_gc);
        gclog_or_tty->print_cr("  major gc cost: %f  decayed major gc cost: %f",
          major_gc_cost(), decayed_major_gc_cost);
      }
    }
  }
  double result = MIN2(1.0, decayed_major_gc_cost + minor_gc_cost());
  return result;
}


void AdaptiveSizePolicy::clear_generation_free_space_flags() {
  set_change_young_gen_for_min_pauses(0);
  set_change_old_gen_for_maj_pauses(0);

  set_change_old_gen_for_throughput(0);
  set_change_young_gen_for_throughput(0);
  set_decrease_for_footprint(0);
  set_decide_at_full_gc(0);
}

void AdaptiveSizePolicy::check_gc_overhead_limit(
                                          size_t young_live,
                                          size_t eden_live,
                                          size_t max_old_gen_size,
                                          size_t max_eden_size,
                                          bool   is_full_gc,
                                          GCCause::Cause gc_cause,
                                          CollectorPolicy* collector_policy) {

  // Ignore explicit GC's.  Exiting here does not set the flag and
  // does not reset the count.  Updating of the averages for system
  // GC's is still controlled by UseAdaptiveSizePolicyWithSystemGC.
  if (GCCause::is_user_requested_gc(gc_cause) ||
      GCCause::is_serviceability_requested_gc(gc_cause)) {
    return;
  }
  // eden_limit is the upper limit on the size of eden based on
  // the maximum size of the young generation and the sizes
  // of the survivor spaces.
  // The question being asked is whether the gc costs are high
  // and the space being recovered by a collection is low.
  // free_in_young_gen is the free space in the young generation
  // after a collection and promo_live is the free space in the old
  // generation after a collection.
  //
  // Use the minimum of the current value of the live in the
  // young gen or the average of the live in the young gen.
  // If the current value drops quickly, that should be taken
  // into account (i.e., don't trigger if the amount of free
  // space has suddenly jumped up).  If the current is much
  // higher than the average, use the average since it represents
  // the longer term behavior.
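  // (e.g., if eden_live spikes to 300M while the average is 100M, 100M is
  // used; if eden_live drops to 50M against a 100M average, 50M is used)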
  const size_t live_in_eden =
    MIN2(eden_live, (size_t) avg_eden_live()->average());
  const size_t free_in_eden = max_eden_size > live_in_eden ?
    max_eden_size - live_in_eden : 0;
  const size_t free_in_old_gen = (size_t)(max_old_gen_size - avg_old_live()->average());
  const size_t total_free_limit = free_in_old_gen + free_in_eden;
  const size_t total_mem = max_old_gen_size + max_eden_size;
  const double mem_free_limit = total_mem * (GCHeapFreeLimit/100.0);
  const double mem_free_old_limit = max_old_gen_size * (GCHeapFreeLimit/100.0);
  const double mem_free_eden_limit = max_eden_size * (GCHeapFreeLimit/100.0);
  const double gc_cost_limit = GCTimeLimit/100.0;
  size_t promo_limit = (size_t)(max_old_gen_size - avg_old_live()->average());
  // But don't force a promo size below the current promo size. Otherwise,
  // the promo size will shrink for no good reason.
  promo_limit = MAX2(promo_limit, _promo_size);
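  // With the default GCTimeLimit (98) and GCHeapFreeLimit (2), the overhead
  // limit machinery below only triggers when more than 98% of the total time
  // is being spent in GC and less than 2% of the old generation and of eden
  // is free after a full collection.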


  if (PrintAdaptiveSizePolicy && (Verbose ||
      (free_in_old_gen < (size_t) mem_free_old_limit &&
       free_in_eden < (size_t) mem_free_eden_limit))) {
    gclog_or_tty->print_cr(
          "PSAdaptiveSizePolicy::compute_generation_free_space limits:"
          " promo_limit: " SIZE_FORMAT
          " max_eden_size: " SIZE_FORMAT
          " total_free_limit: " SIZE_FORMAT
          " max_old_gen_size: " SIZE_FORMAT
          " max_eden_size: " SIZE_FORMAT
          " mem_free_limit: " SIZE_FORMAT,
          promo_limit, max_eden_size, total_free_limit,
          max_old_gen_size, max_eden_size,
          (size_t) mem_free_limit);
  }

  bool print_gc_overhead_limit_would_be_exceeded = false;
  if (is_full_gc) {
    if (gc_cost() > gc_cost_limit &&
      free_in_old_gen < (size_t) mem_free_old_limit &&
      free_in_eden < (size_t) mem_free_eden_limit) {
      // Collections, on average, are taking too much time, and
      //      gc_cost() > gc_cost_limit
      // we have too little space available after a full gc.
      //      total_free_limit < mem_free_limit
      // where
      //   total_free_limit is the free space available in
      //     both generations
      //   total_mem is the total space available for allocation
      //     in both generations (survivor spaces are not included
      //     just as they are not included in eden_limit).
      //   mem_free_limit is a fraction of total_mem judged to be an
      //     acceptable amount that is still unused.
      // The heap can ask for the value of this variable when deciding
      // whether to throw an OutOfMemory error.
      // Note that the gc time limit test only works for the collections
      // of the young gen + tenured gen and not for collections of the
      // permanent gen.  That is because the calculation of the space
      // freed by the collection is the free space in the young gen +
      // tenured gen.
      // At this point the GC overhead limit is being exceeded.
      inc_gc_overhead_limit_count();
      if (UseGCOverheadLimit) {
        if (gc_overhead_limit_count() >=
            AdaptiveSizePolicyGCTimeLimitThreshold) {
          // All conditions have been met for throwing an out-of-memory
          set_gc_overhead_limit_exceeded(true);
          // Avoid consecutive OOM due to the gc time limit by resetting
          // the counter.
          reset_gc_overhead_limit_count();
        } else {
          // The required consecutive collections which exceed the
          // GC time limit may or may not have been reached. We
          // are approaching that condition and so as not to
          // throw an out-of-memory before all SoftRef's have been
          // cleared, set _should_clear_all_soft_refs in CollectorPolicy.
          // The clearing will be done on the next GC.
          bool near_limit = gc_overhead_limit_near();
          if (near_limit) {
            collector_policy->set_should_clear_all_soft_refs(true);
            if (PrintGCDetails && Verbose) {
              gclog_or_tty->print_cr("  Nearing GC overhead limit, "
                "will be clearing all SoftReference");
            }
          }
        }
      }
      // Set this even when the overhead limit will not
      // cause an out-of-memory.  A diagnostic message indicating
      // that the overhead limit is being exceeded is sometimes
      // printed.
      print_gc_overhead_limit_would_be_exceeded = true;

    } else {
      // Did not exceed overhead limits
      reset_gc_overhead_limit_count();
    }
  }

  if (UseGCOverheadLimit && PrintGCDetails && Verbose) {
    if (gc_overhead_limit_exceeded()) {
      gclog_or_tty->print_cr("      GC is exceeding overhead limit "
        "of %d%%", GCTimeLimit);
      reset_gc_overhead_limit_count();
    } else if (print_gc_overhead_limit_would_be_exceeded) {
      assert(gc_overhead_limit_count() > 0, "Should not be printing");
      gclog_or_tty->print_cr("      GC would exceed overhead limit "
        "of %d%% %d consecutive time(s)",
        GCTimeLimit, gc_overhead_limit_count());
    }
  }
}
// Printing
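//
// Illustrative output of print_adaptive_size_policy_on (values are made up):
//
//     UseAdaptiveSizePolicy actions to meet  *** throughput goal ***
//                        GC overhead (%)
//     Young generation:       12.00    (attempted to grow)
//     Tenured generation:      3.00    (attempted to grow)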

bool AdaptiveSizePolicy::print_adaptive_size_policy_on(outputStream* st) const {

  //  Should only be used with adaptive size policy turned on.
  // Otherwise, there may be variables that are undefined.
  if (!UseAdaptiveSizePolicy) return false;

  // Print goal for which action is needed.
  char* action = NULL;
  bool change_for_pause = false;
  if ((change_old_gen_for_maj_pauses() ==
         decrease_old_gen_for_maj_pauses_true) ||
      (change_young_gen_for_min_pauses() ==
         decrease_young_gen_for_min_pauses_true)) {
    action = (char*) " *** pause time goal ***";
    change_for_pause = true;
  } else if ((change_old_gen_for_throughput() ==
               increase_old_gen_for_throughput_true) ||
            (change_young_gen_for_throughput() ==
               increase_young_gen_for_througput_true)) {
    action = (char*) " *** throughput goal ***";
  } else if (decrease_for_footprint()) {
    action = (char*) " *** reduced footprint ***";
  } else {
    // No actions were taken.  This can legitimately be the
    // situation if not enough data has been gathered to make
    // decisions.
    return false;
  }

  // Pauses
  // Currently the size of the old gen is only adjusted to
  // change the major pause times.
  char* young_gen_action = NULL;
  char* tenured_gen_action = NULL;

  char* shrink_msg = (char*) "(attempted to shrink)";
  char* grow_msg = (char*) "(attempted to grow)";
  char* no_change_msg = (char*) "(no change)";
  if (change_young_gen_for_min_pauses() ==
      decrease_young_gen_for_min_pauses_true) {
    young_gen_action = shrink_msg;
  } else if (change_for_pause) {
    young_gen_action = no_change_msg;
  }

  if (change_old_gen_for_maj_pauses() == decrease_old_gen_for_maj_pauses_true) {
    tenured_gen_action = shrink_msg;
  } else if (change_for_pause) {
    tenured_gen_action = no_change_msg;
  }

  // Throughput
  if (change_old_gen_for_throughput() == increase_old_gen_for_throughput_true) {
    assert(change_young_gen_for_throughput() ==
           increase_young_gen_for_througput_true,
           "Both generations should be growing");
    young_gen_action = grow_msg;
    tenured_gen_action = grow_msg;
  } else if (change_young_gen_for_throughput() ==
             increase_young_gen_for_througput_true) {
    // Only the young generation may grow at start up (before
    // enough full collections have been done to grow the old generation).
    young_gen_action = grow_msg;
    tenured_gen_action = no_change_msg;
  }

  // Minimum footprint
  if (decrease_for_footprint() != 0) {
    young_gen_action = shrink_msg;
    tenured_gen_action = shrink_msg;
  }

  st->print_cr("    UseAdaptiveSizePolicy actions to meet %s", action);
  st->print_cr("                       GC overhead (%%)");
  st->print_cr("    Young generation:     %7.2f\t  %s",
    100.0 * avg_minor_gc_cost()->average(),
    young_gen_action);
  st->print_cr("    Tenured generation:   %7.2f\t  %s",
    100.0 * avg_major_gc_cost()->average(),
    tenured_gen_action);
  return true;
}

bool AdaptiveSizePolicy::print_adaptive_size_policy_on(
                                            outputStream* st,
                                            int tenuring_threshold_arg) const {
  if (!AdaptiveSizePolicy::print_adaptive_size_policy_on(st)) {
    return false;
  }

  // Tenuring threshold
  bool tenuring_threshold_changed = true;
  if (decrement_tenuring_threshold_for_survivor_limit()) {
    st->print("    Tenuring threshold:    (attempted to decrease to avoid"
              " survivor space overflow) = ");
  } else if (decrement_tenuring_threshold_for_gc_cost()) {
    st->print("    Tenuring threshold:    (attempted to decrease to balance"
              " GC costs) = ");
  } else if (increment_tenuring_threshold_for_gc_cost()) {
    st->print("    Tenuring threshold:    (attempted to increase to balance"
              " GC costs) = ");
  } else {
    tenuring_threshold_changed = false;
    assert(!tenuring_threshold_change(), "(no change was attempted)");
  }
  if (tenuring_threshold_changed) {
    st->print_cr("%d", tenuring_threshold_arg);
  }
  return true;
}
