src/share/vm/gc_implementation/shared/adaptiveSizePolicy.cpp

author:      kbarrett
date:        Wed, 31 Jul 2019 14:28:51 -0400
changeset:   9787:9f28a4cac6d9
parent:      8432:79351ea143ee
child:       8604:04d83ba48607
permissions: -rw-r--r--

8048556: Unnecessary GCLocker-initiated young GCs
Summary: Fixed recognition of unnecessary GCLocker collections.
Reviewed-by: pliden, tschatzl
Contributed-by: johnc@azul.com

/*
 * Copyright (c) 2004, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
#include "gc_interface/gcCause.hpp"
#include "memory/collectorPolicy.hpp"
#include "runtime/timer.hpp"
#include "utilities/ostream.hpp"
#include "utilities/workgroup.hpp"
elapsedTimer AdaptiveSizePolicy::_minor_timer;
elapsedTimer AdaptiveSizePolicy::_major_timer;
bool AdaptiveSizePolicy::_debug_perturbation = false;

// The throughput goal is implemented as
//      _throughput_goal = 1 - ( 1 / (1 + gc_cost_ratio))
// gc_cost_ratio is the ratio
//      application cost / gc cost
// For example, a gc_cost_ratio of 4 translates into a
// throughput goal of .80
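//
// For instance, callers such as the parallel collector typically pass the
// GCTimeRatio flag as gc_cost_ratio; assuming its usual default of 99 this
// yields
//      _throughput_goal = 1 - 1/(1 + 99) = 0.99
// i.e. the policy aims to spend no more than roughly 1% of total time in GC.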

AdaptiveSizePolicy::AdaptiveSizePolicy(size_t init_eden_size,
                                       size_t init_promo_size,
                                       size_t init_survivor_size,
                                       double gc_pause_goal_sec,
                                       uint gc_cost_ratio) :
    _eden_size(init_eden_size),
    _promo_size(init_promo_size),
    _survivor_size(init_survivor_size),
    _gc_pause_goal_sec(gc_pause_goal_sec),
    _throughput_goal(1.0 - double(1.0 / (1.0 + (double) gc_cost_ratio))),
    _gc_overhead_limit_exceeded(false),
    _print_gc_overhead_limit_would_be_exceeded(false),
    _gc_overhead_limit_count(0),
    _latest_minor_mutator_interval_seconds(0),
    _threshold_tolerance_percent(1.0 + ThresholdTolerance/100.0),
    _young_gen_change_for_minor_throughput(0),
    _old_gen_change_for_major_throughput(0) {
  assert(AdaptiveSizePolicyGCTimeLimitThreshold > 0,
    "No opportunity to clear SoftReferences before GC overhead limit");
  _avg_minor_pause    =
    new AdaptivePaddedAverage(AdaptiveTimeWeight, PausePadding);
  _avg_minor_interval = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
  _avg_minor_gc_cost  = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
  _avg_major_gc_cost  = new AdaptiveWeightedAverage(AdaptiveTimeWeight);

  _avg_young_live     = new AdaptiveWeightedAverage(AdaptiveSizePolicyWeight);
  _avg_old_live       = new AdaptiveWeightedAverage(AdaptiveSizePolicyWeight);
  _avg_eden_live      = new AdaptiveWeightedAverage(AdaptiveSizePolicyWeight);

  _avg_survived       = new AdaptivePaddedAverage(AdaptiveSizePolicyWeight,
                                                  SurvivorPadding);
  _avg_pretenured     = new AdaptivePaddedNoZeroDevAverage(
                                                  AdaptiveSizePolicyWeight,
                                                  SurvivorPadding);

  _minor_pause_old_estimator =
    new LinearLeastSquareFit(AdaptiveSizePolicyWeight);
  _minor_pause_young_estimator =
    new LinearLeastSquareFit(AdaptiveSizePolicyWeight);
  _minor_collection_estimator =
    new LinearLeastSquareFit(AdaptiveSizePolicyWeight);
  _major_collection_estimator =
    new LinearLeastSquareFit(AdaptiveSizePolicyWeight);

  // Start the timers
  _minor_timer.start();

  _young_gen_policy_is_ready = false;
}

// If the number of GC threads was set on the command line, use it.
// Else
//   Calculate the number of GC threads based on the number of Java threads.
//   Calculate the number of GC threads based on the size of the heap.
//   Use the larger.

int AdaptiveSizePolicy::calc_default_active_workers(uintx total_workers,
                                            const uintx min_workers,
                                            uintx active_workers,
                                            uintx application_workers) {
  // If the user has specifically set the number of
  // GC threads, use them.

  // If the user has turned off using a dynamic number of GC threads
  // or the user has requested a specific number, set the active
  // number of workers to all the workers.

  uintx new_active_workers = total_workers;
  uintx prev_active_workers = active_workers;
  uintx active_workers_by_JT = 0;
  uintx active_workers_by_heap_size = 0;

  // Always use at least min_workers but use up to
  // GCWorkersPerJavaThread * application threads.
  active_workers_by_JT =
    MAX2((uintx) GCWorkersPerJavaThread * application_workers,
         min_workers);

  // Choose a number of GC threads based on the current size
  // of the heap.  This may be complicated because the size of
  // the heap depends on factors such as the throughput goal.
  // Still, a large heap should be collected by more GC threads.
  active_workers_by_heap_size =
      MAX2((size_t) 2U, Universe::heap()->capacity() / HeapSizePerGCThread);

  uintx max_active_workers =
    MAX2(active_workers_by_JT, active_workers_by_heap_size);

  // Limit the number of workers to the number created,
  // (workers()).
  new_active_workers = MIN2(max_active_workers,
                                (uintx) total_workers);

  // Increase GC workers instantly but decrease them more
  // slowly.
  if (new_active_workers < prev_active_workers) {
    new_active_workers =
      MAX2(min_workers, (prev_active_workers + new_active_workers) / 2);
  }

  // Check once more that the number of workers is within the limits.
  assert(min_workers <= total_workers, "Minimum workers not consistent with total workers");
  assert(new_active_workers >= min_workers, "Minimum workers not observed");
  assert(new_active_workers <= total_workers, "Total workers not observed");

  if (ForceDynamicNumberOfGCThreads) {
    // Assume this is debugging and jiggle the number of GC threads.
    if (new_active_workers == prev_active_workers) {
      if (new_active_workers < total_workers) {
        new_active_workers++;
      } else if (new_active_workers > min_workers) {
        new_active_workers--;
      }
    }
    if (new_active_workers == total_workers) {
      if (_debug_perturbation) {
        new_active_workers = min_workers;
      }
      _debug_perturbation = !_debug_perturbation;
    }
    assert((new_active_workers <= (uintx) ParallelGCThreads) &&
           (new_active_workers >= min_workers),
      "Jiggled active workers too much");
  }

  if (TraceDynamicGCThreads) {
    gclog_or_tty->print_cr("AdaptiveSizePolicy::calc_default_active_workers() : "
      "active_workers(): %d  new_active_workers: %d  "
      "prev_active_workers: %d\n"
      " active_workers_by_JT: %d  active_workers_by_heap_size: %d",
      (int) active_workers, (int) new_active_workers, (int) prev_active_workers,
      (int) active_workers_by_JT, (int) active_workers_by_heap_size);
  }
  assert(new_active_workers > 0, "Always need at least 1");
  return new_active_workers;
}
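
// Worked example with hypothetical values: total_workers = 8, min_workers = 2,
// previous active_workers = 8, 3 Java application threads, and (assumed flag
// settings) GCWorkersPerJavaThread = 2 with a 256M heap and
// HeapSizePerGCThread = 64M:
//   active_workers_by_JT        = MAX2(2 * 3, 2)     = 6
//   active_workers_by_heap_size = MAX2(2, 256M/64M)  = 4
//   max_active_workers          = MAX2(6, 4)         = 6
//   new_active_workers          = MIN2(6, 8)         = 6
// Because 6 is less than the previous 8, the decrease is damped to
//   MAX2(2, (8 + 6) / 2) = 7.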

int AdaptiveSizePolicy::calc_active_workers(uintx total_workers,
                                            uintx active_workers,
                                            uintx application_workers) {
  // If the user has specifically set the number of
  // GC threads, use them.

  // If the user has turned off using a dynamic number of GC threads
  // or the user has requested a specific number, set the active
  // number of workers to all the workers.

  int new_active_workers;
  if (!UseDynamicNumberOfGCThreads ||
     (!FLAG_IS_DEFAULT(ParallelGCThreads) && !ForceDynamicNumberOfGCThreads)) {
    new_active_workers = total_workers;
  } else {
    uintx min_workers = (total_workers == 1) ? 1 : 2;
    new_active_workers = calc_default_active_workers(total_workers,
                                                     min_workers,
                                                     active_workers,
                                                     application_workers);
  }
  assert(new_active_workers > 0, "Always need at least 1");
  return new_active_workers;
}

int AdaptiveSizePolicy::calc_active_conc_workers(uintx total_workers,
                                                 uintx active_workers,
                                                 uintx application_workers) {
  if (!UseDynamicNumberOfGCThreads ||
     (!FLAG_IS_DEFAULT(ConcGCThreads) && !ForceDynamicNumberOfGCThreads)) {
    return ConcGCThreads;
  } else {
    int no_of_gc_threads = calc_default_active_workers(
                             total_workers,
                             1, /* Minimum number of workers */
                             active_workers,
                             application_workers);
    return no_of_gc_threads;
  }
}

bool AdaptiveSizePolicy::tenuring_threshold_change() const {
  return decrement_tenuring_threshold_for_gc_cost() ||
         increment_tenuring_threshold_for_gc_cost() ||
         decrement_tenuring_threshold_for_survivor_limit();
}

void AdaptiveSizePolicy::minor_collection_begin() {
  // Update the interval time
  _minor_timer.stop();
  // Save most recent collection time
  _latest_minor_mutator_interval_seconds = _minor_timer.seconds();
  _minor_timer.reset();
  _minor_timer.start();
}

void AdaptiveSizePolicy::update_minor_pause_young_estimator(
    double minor_pause_in_ms) {
  double eden_size_in_mbytes = ((double)_eden_size)/((double)M);
  _minor_pause_young_estimator->update(eden_size_in_mbytes,
    minor_pause_in_ms);
}
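
// Note: the estimator above is a linear least-squares fit of minor pause time
// in milliseconds (the y value) against eden size in megabytes (the x value),
// so its fitted slope approximates the expected change in minor pause time per
// megabyte of eden.  Collector-specific subclasses can consult that fit (for
// example via the estimator's slope) when deciding whether shrinking eden is
// likely to reduce minor pauses.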

void AdaptiveSizePolicy::minor_collection_end(GCCause::Cause gc_cause) {
  // Update the pause time.
  _minor_timer.stop();

  if (gc_cause != GCCause::_java_lang_system_gc ||
      UseAdaptiveSizePolicyWithSystemGC) {
    double minor_pause_in_seconds = _minor_timer.seconds();
    double minor_pause_in_ms = minor_pause_in_seconds * MILLIUNITS;

    // Sample for performance counter
    _avg_minor_pause->sample(minor_pause_in_seconds);

    // Cost of collection (unit-less)
    double collection_cost = 0.0;
    if ((_latest_minor_mutator_interval_seconds > 0.0) &&
        (minor_pause_in_seconds > 0.0)) {
      double interval_in_seconds =
        _latest_minor_mutator_interval_seconds + minor_pause_in_seconds;
      collection_cost =
        minor_pause_in_seconds / interval_in_seconds;
      _avg_minor_gc_cost->sample(collection_cost);
      // Sample for performance counter
      _avg_minor_interval->sample(interval_in_seconds);
    }

    // The policy does not have enough data until at least some
    // minor collections have been done.
    _young_gen_policy_is_ready =
      (_avg_minor_gc_cost->count() >= AdaptiveSizePolicyReadyThreshold);

    // Calculate variables used to estimate pause time vs. gen sizes
    double eden_size_in_mbytes = ((double)_eden_size)/((double)M);
    update_minor_pause_young_estimator(minor_pause_in_ms);
    update_minor_pause_old_estimator(minor_pause_in_ms);

    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print("AdaptiveSizePolicy::minor_collection_end: "
        "minor gc cost: %f  average: %f", collection_cost,
        _avg_minor_gc_cost->average());
      gclog_or_tty->print_cr("  minor pause: %f minor period %f",
        minor_pause_in_ms,
        _latest_minor_mutator_interval_seconds * MILLIUNITS);
    }

    // Calculate variable used to estimate collection cost vs. gen sizes
    assert(collection_cost >= 0.0, "Expected to be non-negative");
    _minor_collection_estimator->update(eden_size_in_mbytes, collection_cost);
  }

  // Interval times use this timer to measure the mutator time.
  // Reset the timer after the GC pause.
  _minor_timer.reset();
  _minor_timer.start();
}
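
// For example (hypothetical numbers, for illustration only): a minor pause of
// 0.05s following 0.95s of mutator time gives
//   collection_cost = 0.05 / (0.95 + 0.05) = 0.05
// i.e. roughly 5% of that interval was spent in the minor collection; the
// exponentially weighted average _avg_minor_gc_cost smooths these samples.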

size_t AdaptiveSizePolicy::eden_increment(size_t cur_eden,
                                            uint percent_change) {
  size_t eden_heap_delta;
  eden_heap_delta = cur_eden / 100 * percent_change;
  return eden_heap_delta;
}

size_t AdaptiveSizePolicy::eden_increment(size_t cur_eden) {
  return eden_increment(cur_eden, YoungGenerationSizeIncrement);
}

size_t AdaptiveSizePolicy::eden_decrement(size_t cur_eden) {
  size_t eden_heap_delta = eden_increment(cur_eden) /
    AdaptiveSizeDecrementScaleFactor;
  return eden_heap_delta;
}

size_t AdaptiveSizePolicy::promo_increment(size_t cur_promo,
                                             uint percent_change) {
  size_t promo_heap_delta;
  promo_heap_delta = cur_promo / 100 * percent_change;
  return promo_heap_delta;
}

size_t AdaptiveSizePolicy::promo_increment(size_t cur_promo) {
  return promo_increment(cur_promo, TenuredGenerationSizeIncrement);
}

size_t AdaptiveSizePolicy::promo_decrement(size_t cur_promo) {
  size_t promo_heap_delta = promo_increment(cur_promo);
  promo_heap_delta = promo_heap_delta / AdaptiveSizeDecrementScaleFactor;
  return promo_heap_delta;
}
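
// Sizing arithmetic, with hypothetical values: for cur_eden = 200M and
// (assumed) YoungGenerationSizeIncrement = 20 and
// AdaptiveSizeDecrementScaleFactor = 4,
//   eden_increment(200M) = 200M / 100 * 20 = 40M
//   eden_decrement(200M) = 40M / 4         = 10M
// so the policy grows eden in larger steps than it shrinks it; the promo_*
// variants do the same for the old (tenured) generation using
// TenuredGenerationSizeIncrement.  Note that cur_eden / 100 uses integer
// division, so very small sizes can produce a zero delta.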

double AdaptiveSizePolicy::time_since_major_gc() const {
  _major_timer.stop();
  double result = _major_timer.seconds();
  _major_timer.start();
  return result;
}

// Linear decay of major gc cost
double AdaptiveSizePolicy::decaying_major_gc_cost() const {
  double major_interval = major_gc_interval_average_for_decay();
  double major_gc_cost_average = major_gc_cost();
  double decayed_major_gc_cost = major_gc_cost_average;
  if (time_since_major_gc() > 0.0) {
    decayed_major_gc_cost = major_gc_cost() *
      (((double) AdaptiveSizeMajorGCDecayTimeScale) * major_interval)
      / time_since_major_gc();
  }

  // The decayed cost should always be smaller than the
  // average cost but the vagaries of finite arithmetic could
  // produce a larger value in decayed_major_gc_cost so protect
  // against that.
  return MIN2(major_gc_cost_average, decayed_major_gc_cost);
}
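
// Worked example (hypothetical numbers): with an average major gc cost of
// 0.30, an average interval between major gcs of 10s, an (assumed)
// AdaptiveSizeMajorGCDecayTimeScale of 10, and 200s since the last major gc,
//   decayed_major_gc_cost = 0.30 * (10 * 10s) / 200s = 0.15
// so the longer the application runs without a major collection, the smaller
// the contribution of old major gcs to the total gc cost.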

// Use a value of the major gc cost that has been decayed
// by the factor
//
//      average-interval-between-major-gc * AdaptiveSizeMajorGCDecayTimeScale /
//        time-since-last-major-gc
//
// if the average-interval-between-major-gc * AdaptiveSizeMajorGCDecayTimeScale
// is less than time-since-last-major-gc.
//
// In cases where there are initial major gc's that
// are of a relatively high cost but no later major
// gc's, the total gc cost can remain high because
// the major gc cost remains unchanged (since there are no major
// gc's).  In such a situation the value of the unchanging
// major gc cost can keep the mutator throughput below
// the goal when in fact the major gc cost is becoming diminishingly
// small.  Use the decaying gc cost only to decide whether to
// adjust for throughput.  Using it to determine the adjustment
// to be made for throughput also seems reasonable, but there is
// no test case to use to decide whether it is the right thing to do,
// so don't do it yet.

double AdaptiveSizePolicy::decaying_gc_cost() const {
  double decayed_major_gc_cost = major_gc_cost();
  double avg_major_interval = major_gc_interval_average_for_decay();
  if (UseAdaptiveSizeDecayMajorGCCost &&
      (AdaptiveSizeMajorGCDecayTimeScale > 0) &&
      (avg_major_interval > 0.00)) {
    double time_since_last_major_gc = time_since_major_gc();

    // Decay the major gc cost?
    if (time_since_last_major_gc >
        ((double) AdaptiveSizeMajorGCDecayTimeScale) * avg_major_interval) {

      // Decay using the time-since-last-major-gc
      decayed_major_gc_cost = decaying_major_gc_cost();
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("\ndecaying_gc_cost: major interval average:"
          " %f  time since last major gc: %f",
          avg_major_interval, time_since_last_major_gc);
        gclog_or_tty->print_cr("  major gc cost: %f  decayed major gc cost: %f",
          major_gc_cost(), decayed_major_gc_cost);
      }
    }
  }
  double result = MIN2(1.0, decayed_major_gc_cost + minor_gc_cost());
  return result;
}


void AdaptiveSizePolicy::clear_generation_free_space_flags() {
  set_change_young_gen_for_min_pauses(0);
  set_change_old_gen_for_maj_pauses(0);

  set_change_old_gen_for_throughput(0);
  set_change_young_gen_for_throughput(0);
  set_decrease_for_footprint(0);
  set_decide_at_full_gc(0);
}

void AdaptiveSizePolicy::check_gc_overhead_limit(
                                          size_t young_live,
                                          size_t eden_live,
                                          size_t max_old_gen_size,
                                          size_t max_eden_size,
                                          bool   is_full_gc,
                                          GCCause::Cause gc_cause,
                                          CollectorPolicy* collector_policy) {

  // Ignore explicit GC's.  Exiting here does not set the flag and
  // does not reset the count.  Updating of the averages for system
  // GC's is still controlled by UseAdaptiveSizePolicyWithSystemGC.
  if (GCCause::is_user_requested_gc(gc_cause) ||
      GCCause::is_serviceability_requested_gc(gc_cause)) {
    return;
  }
  // eden_limit is the upper limit on the size of eden based on
  // the maximum size of the young generation and the sizes
  // of the survivor spaces.
  // The question being asked is whether the gc costs are high
  // and the space being recovered by a collection is low.
  // free_in_eden is the free space in eden after a collection
  // and free_in_old_gen is the free space in the old generation
  // after a collection.
  //
  // Use the minimum of the current value of the live in the
  // young gen or the average of the live in the young gen.
  // If the current value drops quickly, that should be taken
  // into account (i.e., don't trigger if the amount of free
  // space has suddenly jumped up).  If the current is much
  // higher than the average, use the average since it represents
  // the longer term behavior.
  const size_t live_in_eden =
    MIN2(eden_live, (size_t) avg_eden_live()->average());
  const size_t free_in_eden = max_eden_size > live_in_eden ?
    max_eden_size - live_in_eden : 0;
  const size_t free_in_old_gen = (size_t)(max_old_gen_size - avg_old_live()->average());
  const size_t total_free_limit = free_in_old_gen + free_in_eden;
  const size_t total_mem = max_old_gen_size + max_eden_size;
  const double mem_free_limit = total_mem * (GCHeapFreeLimit/100.0);
  const double mem_free_old_limit = max_old_gen_size * (GCHeapFreeLimit/100.0);
  const double mem_free_eden_limit = max_eden_size * (GCHeapFreeLimit/100.0);
  const double gc_cost_limit = GCTimeLimit/100.0;
  size_t promo_limit = (size_t)(max_old_gen_size - avg_old_live()->average());
  // But don't force a promo size below the current promo size. Otherwise,
  // the promo size will shrink for no good reason.
  promo_limit = MAX2(promo_limit, _promo_size);

  if (PrintAdaptiveSizePolicy && (Verbose ||
      (free_in_old_gen < (size_t) mem_free_old_limit &&
       free_in_eden < (size_t) mem_free_eden_limit))) {
    gclog_or_tty->print_cr(
          "PSAdaptiveSizePolicy::check_gc_overhead_limit:"
          " promo_limit: " SIZE_FORMAT
          " max_eden_size: " SIZE_FORMAT
          " total_free_limit: " SIZE_FORMAT
          " max_old_gen_size: " SIZE_FORMAT
          " max_eden_size: " SIZE_FORMAT
          " mem_free_limit: " SIZE_FORMAT,
          promo_limit, max_eden_size, total_free_limit,
          max_old_gen_size, max_eden_size,
          (size_t) mem_free_limit);
  }

  bool print_gc_overhead_limit_would_be_exceeded = false;
  if (is_full_gc) {
    if (gc_cost() > gc_cost_limit &&
      free_in_old_gen < (size_t) mem_free_old_limit &&
      free_in_eden < (size_t) mem_free_eden_limit) {
      // Collections, on average, are taking too much time, and
      //      gc_cost() > gc_cost_limit
      // we have too little space available after a full gc.
      //      total_free_limit < mem_free_limit
      // where
      //   total_free_limit is the free space available in
      //     both generations
      //   total_mem is the total space available for allocation
      //     in both generations (survivor spaces are not included
      //     just as they are not included in eden_limit).
      //   mem_free_limit is a fraction of total_mem judged to be an
      //     acceptable amount that is still unused.
      // The heap can ask for the value of this variable when deciding
      // whether to throw an OutOfMemory error.
      // Note that the gc time limit test only works for the collections
      // of the young gen + tenured gen and not for collections of the
      // permanent gen.  That is because the calculation of the space
      // freed by the collection is the free space in the young gen +
      // tenured gen.
      // At this point the GC overhead limit is being exceeded.
      inc_gc_overhead_limit_count();
      if (UseGCOverheadLimit) {
        if (gc_overhead_limit_count() >=
            AdaptiveSizePolicyGCTimeLimitThreshold){
          // All conditions have been met for throwing an out-of-memory
          set_gc_overhead_limit_exceeded(true);
          // Avoid consecutive OOM due to the gc time limit by resetting
          // the counter.
          reset_gc_overhead_limit_count();
        } else {
          // The required consecutive collections which exceed the
          // GC time limit may or may not have been reached. We
          // are approaching that condition and so as not to
          // throw an out-of-memory before all SoftRef's have been
          // cleared, set _should_clear_all_soft_refs in CollectorPolicy.
          // The clearing will be done on the next GC.
          bool near_limit = gc_overhead_limit_near();
          if (near_limit) {
            collector_policy->set_should_clear_all_soft_refs(true);
            if (PrintGCDetails && Verbose) {
              gclog_or_tty->print_cr("  Nearing GC overhead limit, "
                "will be clearing all SoftReference");
            }
          }
        }
      }
      // Set this even when the overhead limit will not
      // cause an out-of-memory.  A diagnostic message indicating
      // that the overhead limit is being exceeded is sometimes
      // printed.
      print_gc_overhead_limit_would_be_exceeded = true;

    } else {
      // Did not exceed overhead limits
      reset_gc_overhead_limit_count();
    }
  }

  if (UseGCOverheadLimit && PrintGCDetails && Verbose) {
    if (gc_overhead_limit_exceeded()) {
      gclog_or_tty->print_cr("      GC is exceeding overhead limit "
        "of %d%%", (int) GCTimeLimit);
      reset_gc_overhead_limit_count();
    } else if (print_gc_overhead_limit_would_be_exceeded) {
      assert(gc_overhead_limit_count() > 0, "Should not be printing");
      gclog_or_tty->print_cr("      GC would exceed overhead limit "
        "of %d%% %d consecutive time(s)",
        (int) GCTimeLimit, gc_overhead_limit_count());
    }
  }
}
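
// Putting the limits in check_gc_overhead_limit() together, with the (assumed)
// default flag values of GCTimeLimit = 98 and GCHeapFreeLimit = 2, a full gc
// counts against the overhead limit only when
//   gc_cost()       > 0.98                 (over 98% of recent time in GC)
//   free_in_old_gen < 2% of max_old_gen_size
//   free_in_eden    < 2% of max_eden_size
// and, with UseGCOverheadLimit enabled, the limit is considered exceeded
// (potentially leading the caller to throw an OutOfMemory error) once this
// happens for AdaptiveSizePolicyGCTimeLimitThreshold consecutive full
// collections.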
// Printing

bool AdaptiveSizePolicy::print_adaptive_size_policy_on(outputStream* st) const {

  // Should only be used with adaptive size policy turned on.
  // Otherwise, there may be variables that are undefined.
  if (!UseAdaptiveSizePolicy) return false;

  // Print goal for which action is needed.
  char* action = NULL;
  bool change_for_pause = false;
  if ((change_old_gen_for_maj_pauses() ==
         decrease_old_gen_for_maj_pauses_true) ||
      (change_young_gen_for_min_pauses() ==
         decrease_young_gen_for_min_pauses_true)) {
    action = (char*) " *** pause time goal ***";
    change_for_pause = true;
  } else if ((change_old_gen_for_throughput() ==
               increase_old_gen_for_throughput_true) ||
            (change_young_gen_for_throughput() ==
               increase_young_gen_for_througput_true)) {
    action = (char*) " *** throughput goal ***";
  } else if (decrease_for_footprint()) {
    action = (char*) " *** reduced footprint ***";
  } else {
    // No actions were taken.  This can legitimately be the
    // situation if not enough data has been gathered to make
    // decisions.
    return false;
  }

  // Pauses
  // Currently the size of the old gen is only adjusted to
  // change the major pause times.
  char* young_gen_action = NULL;
  char* tenured_gen_action = NULL;

  char* shrink_msg = (char*) "(attempted to shrink)";
  char* grow_msg = (char*) "(attempted to grow)";
  char* no_change_msg = (char*) "(no change)";
  if (change_young_gen_for_min_pauses() ==
      decrease_young_gen_for_min_pauses_true) {
    young_gen_action = shrink_msg;
  } else if (change_for_pause) {
    young_gen_action = no_change_msg;
  }

  if (change_old_gen_for_maj_pauses() == decrease_old_gen_for_maj_pauses_true) {
    tenured_gen_action = shrink_msg;
  } else if (change_for_pause) {
    tenured_gen_action = no_change_msg;
  }

  // Throughput
  if (change_old_gen_for_throughput() == increase_old_gen_for_throughput_true) {
    assert(change_young_gen_for_throughput() ==
           increase_young_gen_for_througput_true,
           "Both generations should be growing");
    young_gen_action = grow_msg;
    tenured_gen_action = grow_msg;
  } else if (change_young_gen_for_throughput() ==
             increase_young_gen_for_througput_true) {
    // Only the young generation may grow at start up (before
    // enough full collections have been done to grow the old generation).
    young_gen_action = grow_msg;
    tenured_gen_action = no_change_msg;
  }

  // Minimum footprint
  if (decrease_for_footprint() != 0) {
    young_gen_action = shrink_msg;
    tenured_gen_action = shrink_msg;
  }

  st->print_cr("    UseAdaptiveSizePolicy actions to meet %s", action);
  st->print_cr("                       GC overhead (%%)");
  st->print_cr("    Young generation:     %7.2f\t  %s",
    100.0 * avg_minor_gc_cost()->average(),
    young_gen_action);
  st->print_cr("    Tenured generation:   %7.2f\t  %s",
    100.0 * avg_major_gc_cost()->average(),
    tenured_gen_action);
  return true;
}

bool AdaptiveSizePolicy::print_adaptive_size_policy_on(
                                            outputStream* st,
                                            uint tenuring_threshold_arg) const {
  if (!AdaptiveSizePolicy::print_adaptive_size_policy_on(st)) {
    return false;
  }

  // Tenuring threshold
  bool tenuring_threshold_changed = true;
  if (decrement_tenuring_threshold_for_survivor_limit()) {
    st->print("    Tenuring threshold:    (attempted to decrease to avoid"
              " survivor space overflow) = ");
  } else if (decrement_tenuring_threshold_for_gc_cost()) {
    st->print("    Tenuring threshold:    (attempted to decrease to balance"
              " GC costs) = ");
  } else if (increment_tenuring_threshold_for_gc_cost()) {
    st->print("    Tenuring threshold:    (attempted to increase to balance"
              " GC costs) = ");
  } else {
    tenuring_threshold_changed = false;
    assert(!tenuring_threshold_change(), "(no change was attempted)");
  }
  if (tenuring_threshold_changed) {
    st->print_cr("%u", tenuring_threshold_arg);
  }
  return true;
}
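
// With the format strings above, the adaptive size policy report looks roughly
// like the following (hypothetical numbers, shown only to illustrate the
// layout produced by the two print_adaptive_size_policy_on() overloads):
//
//    UseAdaptiveSizePolicy actions to meet  *** throughput goal ***
//                       GC overhead (%)
//    Young generation:        3.25    (attempted to grow)
//    Tenured generation:      1.10    (attempted to grow)
//    Tenuring threshold:    (attempted to increase to balance GC costs) = 7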
