src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp

author:      tschatzl
date:        Fri, 10 Oct 2014 15:51:58 +0200
changeset:   7257:e7d0505c8a30
parent:      7195:c02ec279b062
child:       7369:b840813adfcc
permissions: -rw-r--r--

8059758: Footprint regressions with JDK-8038423
Summary: The changes in JDK-8038423 always initialize (zero out) the virtual memory used for G1's auxiliary data structures. This causes a footprint regression in startup benchmarks: they never touch that memory, so the operating system never actually commits those pages. The fix is to skip initialization entirely when the requested initialization value matches the default contents of freshly committed memory (zero).
Reviewed-by: jwilhelm, brutisso
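
The idea of the fix, as a minimal sketch (the helper name and parameters below are illustrative, not the actual G1 code):

    #include <string.h>

    // Hypothetical helper that initializes a freshly committed backing store.
    // Anonymously committed pages are handed out by the OS reading as zero,
    // so writing zeros would only force physical commits and inflate the
    // footprint.
    static void initialize_committed_range(char* base, size_t size, char init_value) {
      if (init_value == 0) {
        return; // already the content of just-committed memory; stay lazy
      }
      memset(base, init_value, size); // non-zero patterns must be written out
    }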

/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef __clang_major__
#define ATTRIBUTE_PRINTF(x,y) // FIXME, formats are a mess.
#endif

#include "precompiled.hpp"
#include "gc_implementation/g1/concurrentG1Refine.hpp"
#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
#include "gc_implementation/g1/g1GCPhaseTimes.hpp"
#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "runtime/arguments.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/debug.hpp"

// Different defaults for different number of GC threads
// They were chosen by running GCOld and SPECjbb on debris with different
//   numbers of GC threads and choosing them based on the results

// all the same
static double rs_length_diff_defaults[] = {
  0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
};

static double cost_per_card_ms_defaults[] = {
  0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
};

// all the same
static double young_cards_per_entry_ratio_defaults[] = {
  1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
};

static double cost_per_entry_ms_defaults[] = {
  0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005
};

static double cost_per_byte_ms_defaults[] = {
  0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009
};

// these should be pretty consistent
static double constant_other_time_ms_defaults[] = {
  5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0
};

static double young_other_cost_per_region_ms_defaults[] = {
  0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1
};

static double non_young_other_cost_per_region_ms_defaults[] = {
  1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
};

G1CollectorPolicy::G1CollectorPolicy() :
  _parallel_gc_threads(G1CollectedHeap::use_parallel_gc_threads()
                        ? ParallelGCThreads : 1),

  _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _stop_world_start(0.0),

  _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),

  _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _prev_collection_pause_end_ms(0.0),
  _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  _mixed_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _mixed_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
  _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _non_young_other_cost_per_region_ms_seq(
                                         new TruncatedSeq(TruncatedSeqLength)),

  _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
  _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),

  _pause_time_target_ms((double) MaxGCPauseMillis),

  _gcs_are_young(true),

  _during_marking(false),
  _in_marking_window(false),
  _in_marking_window_im(false),

  _recent_prev_end_times_for_all_gcs_sec(
                                new TruncatedSeq(NumPrevPausesForHeuristics)),

  _recent_avg_pause_time_ratio(0.0),

  _initiate_conc_mark_if_possible(false),
  _during_initial_mark_pause(false),
  _last_young_gc(false),
  _last_gc_was_young(false),

  _eden_used_bytes_before_gc(0),
  _survivor_used_bytes_before_gc(0),
  _heap_used_bytes_before_gc(0),
  _metaspace_used_bytes_before_gc(0),
  _eden_capacity_bytes_before_gc(0),
  _heap_capacity_bytes_before_gc(0),

  _eden_cset_region_length(0),
  _survivor_cset_region_length(0),
  _old_cset_region_length(0),

  _collection_set(NULL),
  _collection_set_bytes_used_before(0),

  // Incremental CSet attributes
  _inc_cset_build_state(Inactive),
  _inc_cset_head(NULL),
  _inc_cset_tail(NULL),
  _inc_cset_bytes_used_before(0),
  _inc_cset_max_finger(NULL),
  _inc_cset_recorded_rs_lengths(0),
  _inc_cset_recorded_rs_lengths_diffs(0),
  _inc_cset_predicted_elapsed_time_ms(0.0),
  _inc_cset_predicted_elapsed_time_ms_diffs(0.0),

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

  _short_lived_surv_rate_group(new SurvRateGroup(this, "Short Lived",
                                                 G1YoungSurvRateNumRegionsSummary)),
  _survivor_surv_rate_group(new SurvRateGroup(this, "Survivor",
                                              G1YoungSurvRateNumRegionsSummary)),
  // add here any more surv rate groups
  _recorded_survivor_regions(0),
  _recorded_survivor_head(NULL),
  _recorded_survivor_tail(NULL),
  _survivors_age_table(true),

  _gc_overhead_perc(0.0) {

  // Set up the region size and associated fields. Given that the
  // policy is created before the heap, we have to set this up here,
  // so it's done as soon as possible.

  // It would have been natural to pass initial_heap_byte_size() and
  // max_heap_byte_size() to setup_heap_region_size() but those have
  // not been set up at this point since they should be aligned with
  // the region size. So, there is a circular dependency here. We base
  // the region size on the heap size, but the heap size should be
  // aligned with the region size. To get around this we use the
  // unaligned values for the heap.
  HeapRegion::setup_heap_region_size(InitialHeapSize, MaxHeapSize);
  HeapRegionRemSet::setup_remset_size();

  G1ErgoVerbose::initialize();
  if (PrintAdaptiveSizePolicy) {
    // Currently, we only use a single switch for all the heuristics.
    G1ErgoVerbose::set_enabled(true);
    // Given that we don't currently have a verboseness level
    // parameter, we'll hardcode this to high. This can be easily
    // changed in the future.
    G1ErgoVerbose::set_level(ErgoHigh);
  } else {
    G1ErgoVerbose::set_enabled(false);
  }

  // Verify PLAB sizes
  const size_t region_size = HeapRegion::GrainWords;
  if (YoungPLABSize > region_size || OldPLABSize > region_size) {
    char buffer[128];
    jio_snprintf(buffer, sizeof(buffer), "%sPLABSize should be at most "SIZE_FORMAT,
                 OldPLABSize > region_size ? "Old" : "Young", region_size);
    vm_exit_during_initialization(buffer);
  }

  _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
  _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;

  _phase_times = new G1GCPhaseTimes(_parallel_gc_threads);

  int index = MIN2(_parallel_gc_threads - 1, 7);

  _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
  _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
  _young_cards_per_entry_ratio_seq->add(
                                  young_cards_per_entry_ratio_defaults[index]);
  _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
  _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
  _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
  _young_other_cost_per_region_ms_seq->add(
                               young_other_cost_per_region_ms_defaults[index]);
  _non_young_other_cost_per_region_ms_seq->add(
                           non_young_other_cost_per_region_ms_defaults[index]);

  // Below, we might need to calculate the pause time target based on
  // the pause interval. When we do so we are going to give G1 maximum
  // flexibility and allow it to do pauses when it needs to. So, we'll
  // arrange for the pause interval to be pause time target + 1 to
  // ensure that a) the pause time target is maximized with respect to
  // the pause interval and b) we maintain the invariant that pause
  // time target < pause interval. If the user does not want this
  // maximum flexibility, they will have to set the pause interval
  // explicitly.
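  // For example: with the default MaxGCPauseMillis of 200 and no
  // user-set interval, the code below leaves GCPauseIntervalMillis
  // at 201 (pause time target + 1).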

  // First make sure that, if either parameter is set, its value is
  // reasonable.
  if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (MaxGCPauseMillis < 1) {
      vm_exit_during_initialization("MaxGCPauseMillis should be "
                                    "greater than 0");
    }
  }
  if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    if (GCPauseIntervalMillis < 1) {
      vm_exit_during_initialization("GCPauseIntervalMillis should be "
                                    "greater than 0");
    }
  }

  // Then, if the pause time target parameter was not set, set it to
  // the default value.
  if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
      // The default pause time target in G1 is 200ms
      FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
    } else {
      // We do not allow the pause interval to be set without the
      // pause time target
      vm_exit_during_initialization("GCPauseIntervalMillis cannot be set "
                                    "without setting MaxGCPauseMillis");
    }
  }

  // Then, if the interval parameter was not set, set it according to
  // the pause time target (this will also deal with the case when the
  // pause time target is the default value).
  if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
  }

  // Finally, make sure that the two parameters are consistent.
  if (MaxGCPauseMillis >= GCPauseIntervalMillis) {
    char buffer[256];
    jio_snprintf(buffer, 256,
                 "MaxGCPauseMillis (%u) should be less than "
                 "GCPauseIntervalMillis (%u)",
                 MaxGCPauseMillis, GCPauseIntervalMillis);
    vm_exit_during_initialization(buffer);
  }

  double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
  double time_slice  = (double) GCPauseIntervalMillis / 1000.0;
  _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);

  uintx confidence_perc = G1ConfidencePercent;
  // Put an artificial ceiling on this so that it's not set to a silly value.
  if (confidence_perc > 100) {
    confidence_perc = 100;
    warning("G1ConfidencePercent is set to a value that is too large, "
            "it's been updated to %u", confidence_perc);
  }
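  // sigma() below is used as a confidence multiplier on predictions; for
  // example, predict_will_fit() requires 2.0 * sigma() times the predicted
  // bytes to copy to be free before it declares that a young length fits.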
  _sigma = (double) confidence_perc / 100.0;

  // start conservatively (around 50ms is about right)
  _concurrent_mark_remark_times_ms->add(0.05);
  _concurrent_mark_cleanup_times_ms->add(0.20);
  _tenuring_threshold = MaxTenuringThreshold;
  // _max_survivor_regions will be calculated by
  // update_young_list_target_length() during initialization.
  _max_survivor_regions = 0;

  assert(GCTimeRatio > 0,
         "we should have set it to a default value in set_g1_gc_flags() "
         "if a user set it to 0");
  _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));
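  // Example: with GCTimeRatio = 9 (the G1 default established in
  // set_g1_gc_flags()), this works out to 100.0 * (1.0 / 10.0) = 10.0,
  // i.e. a 10% GC overhead target.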

  uintx reserve_perc = G1ReservePercent;
  // Put an artificial ceiling on this so that it's not set to a silly value.
  if (reserve_perc > 50) {
    reserve_perc = 50;
    warning("G1ReservePercent is set to a value that is too large, "
            "it's been updated to %u", reserve_perc);
  }
  _reserve_factor = (double) reserve_perc / 100.0;
  // This will be set when the heap is expanded
  // for the first time during initialization.
  _reserve_regions = 0;

  _collectionSetChooser = new CollectionSetChooser();
}

void G1CollectorPolicy::initialize_alignments() {
  _space_alignment = HeapRegion::GrainBytes;
  size_t card_table_alignment = GenRemSet::max_alignment_constraint(GenRemSet::CardTable);
  size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
  _heap_alignment = MAX3(card_table_alignment, _space_alignment, page_size);
}

void G1CollectorPolicy::initialize_flags() {
  if (G1HeapRegionSize != HeapRegion::GrainBytes) {
    FLAG_SET_ERGO(uintx, G1HeapRegionSize, HeapRegion::GrainBytes);
  }

  if (SurvivorRatio < 1) {
    vm_exit_during_initialization("Invalid survivor ratio specified");
  }
  CollectorPolicy::initialize_flags();
  _young_gen_sizer = new G1YoungGenSizer(); // Must be after call to initialize_flags
}

void G1CollectorPolicy::post_heap_initialize() {
  uintx max_regions = G1CollectedHeap::heap()->max_regions();
  size_t max_young_size = (size_t)_young_gen_sizer->max_young_length(max_regions) * HeapRegion::GrainBytes;
  if (max_young_size != MaxNewSize) {
    FLAG_SET_ERGO(uintx, MaxNewSize, max_young_size);
  }
}

G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(true),
        _min_desired_young_length(0), _max_desired_young_length(0) {
  if (FLAG_IS_CMDLINE(NewRatio)) {
    if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
      warning("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
    } else {
      _sizer_kind = SizerNewRatio;
      _adaptive_size = false;
      return;
    }
  }

  if (NewSize > MaxNewSize) {
    if (FLAG_IS_CMDLINE(MaxNewSize)) {
      warning("NewSize (" SIZE_FORMAT "k) is greater than the MaxNewSize (" SIZE_FORMAT "k). "
              "A new max generation size of " SIZE_FORMAT "k will be used.",
              NewSize/K, MaxNewSize/K, NewSize/K);
    }
    MaxNewSize = NewSize;
  }

  if (FLAG_IS_CMDLINE(NewSize)) {
    _min_desired_young_length = MAX2((uint) (NewSize / HeapRegion::GrainBytes),
                                     1U);
    if (FLAG_IS_CMDLINE(MaxNewSize)) {
      _max_desired_young_length =
                             MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes),
                                  1U);
      _sizer_kind = SizerMaxAndNewSize;
      // The young gen size can only adapt if the bounds leave it a range.
      _adaptive_size = _min_desired_young_length != _max_desired_young_length;
    } else {
      _sizer_kind = SizerNewSizeOnly;
    }
  } else if (FLAG_IS_CMDLINE(MaxNewSize)) {
    _max_desired_young_length =
                             MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes),
                                  1U);
    _sizer_kind = SizerMaxNewSizeOnly;
  }
}

uint G1YoungGenSizer::calculate_default_min_length(uint new_number_of_heap_regions) {
  uint default_value = (new_number_of_heap_regions * G1NewSizePercent) / 100;
  return MAX2(1U, default_value);
}

uint G1YoungGenSizer::calculate_default_max_length(uint new_number_of_heap_regions) {
  uint default_value = (new_number_of_heap_regions * G1MaxNewSizePercent) / 100;
  return MAX2(1U, default_value);
}

void G1YoungGenSizer::recalculate_min_max_young_length(uint number_of_heap_regions, uint* min_young_length, uint* max_young_length) {
  assert(number_of_heap_regions > 0, "Heap must be initialized");

  switch (_sizer_kind) {
    case SizerDefaults:
      *min_young_length = calculate_default_min_length(number_of_heap_regions);
      *max_young_length = calculate_default_max_length(number_of_heap_regions);
      break;
    case SizerNewSizeOnly:
      *max_young_length = calculate_default_max_length(number_of_heap_regions);
      *max_young_length = MAX2(*min_young_length, *max_young_length);
      break;
    case SizerMaxNewSizeOnly:
      *min_young_length = calculate_default_min_length(number_of_heap_regions);
      *min_young_length = MIN2(*min_young_length, *max_young_length);
      break;
    case SizerMaxAndNewSize:
      // Do nothing. Values set on the command line, don't update them at runtime.
      break;
    case SizerNewRatio:
      *min_young_length = number_of_heap_regions / (NewRatio + 1);
      *max_young_length = *min_young_length;
      break;
    default:
      ShouldNotReachHere();
  }

  assert(*min_young_length <= *max_young_length, "Invalid min/max young gen size values");
}

uint G1YoungGenSizer::max_young_length(uint number_of_heap_regions) {
  // We need to pass the desired values because recalculation may not update these
  // values in some cases.
  uint temp = _min_desired_young_length;
  uint result = _max_desired_young_length;
  recalculate_min_max_young_length(number_of_heap_regions, &temp, &result);
  return result;
}

void G1YoungGenSizer::heap_size_changed(uint new_number_of_heap_regions) {
  recalculate_min_max_young_length(new_number_of_heap_regions, &_min_desired_young_length,
          &_max_desired_young_length);
}

void G1CollectorPolicy::init() {
  // Set aside an initial future to_space.
  _g1 = G1CollectedHeap::heap();

  assert(Heap_lock->owned_by_self(), "Locking discipline.");

  initialize_gc_policy_counters();

  if (adaptive_young_list_length()) {
    _young_list_fixed_length = 0;
  } else {
    _young_list_fixed_length = _young_gen_sizer->min_desired_young_length();
  }
  _free_regions_at_end_of_collection = _g1->num_free_regions();
  update_young_list_target_length();

  // We may immediately start allocating regions and placing them on the
  // collection set list. Initialize the per-collection set info
  start_incremental_cset_building();
}

// Create the jstat counters for the policy.
void G1CollectorPolicy::initialize_gc_policy_counters() {
  _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3);
}

bool G1CollectorPolicy::predict_will_fit(uint young_length,
                                         double base_time_ms,
                                         uint base_free_regions,
                                         double target_pause_time_ms) {
  if (young_length >= base_free_regions) {
    // end condition 1: not enough space for the young regions
    return false;
  }

  double accum_surv_rate = accum_yg_surv_rate_pred((int) young_length - 1);
  size_t bytes_to_copy =
               (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
  double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
  double young_other_time_ms = predict_young_other_time_ms(young_length);
  double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms;
  if (pause_time_ms > target_pause_time_ms) {
    // end condition 2: prediction is over the target pause time
    return false;
  }

  size_t free_bytes =
                   (base_free_regions - young_length) * HeapRegion::GrainBytes;
  if ((2.0 * sigma()) * (double) bytes_to_copy > (double) free_bytes) {
    // end condition 3: out-of-space (conservatively!)
    return false;
  }

  // success!
  return true;
}

void G1CollectorPolicy::record_new_heap_size(uint new_number_of_regions) {
  // re-calculate the necessary reserve
  double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
  // We use ceiling so that if reserve_regions_d is > 0.0 (but
  // smaller than 1.0) we'll get 1.
  _reserve_regions = (uint) ceil(reserve_regions_d);
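  // e.g. 1000 regions with the default G1ReservePercent of 10 gives
  // ceil(1000 * 0.1) = 100 reserve regions.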

  _young_gen_sizer->heap_size_changed(new_number_of_regions);
}

uint G1CollectorPolicy::calculate_young_list_desired_min_length(
                                                       uint base_min_length) {
  uint desired_min_length = 0;
  if (adaptive_young_list_length()) {
    if (_alloc_rate_ms_seq->num() > 3) {
      double now_sec = os::elapsedTime();
      double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
      double alloc_rate_ms = predict_alloc_rate_ms();
      desired_min_length = (uint) ceil(alloc_rate_ms * when_ms);
    } else {
      // otherwise we don't have enough info to make the prediction
    }
  }
  desired_min_length += base_min_length;
  // make sure we don't go below any user-defined minimum bound
  return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length);
}

uint G1CollectorPolicy::calculate_young_list_desired_max_length() {
  // Here, we might want to also take into account any additional
  // constraints (i.e., user-defined minimum bound). Currently, we
  // effectively don't set this bound.
  return _young_gen_sizer->max_desired_young_length();
}

void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
  if (rs_lengths == (size_t) -1) {
    // if it's set to the default value (-1), we should predict it;
    // otherwise, use the given value.
    rs_lengths = (size_t) get_new_prediction(_rs_lengths_seq);
  }

  // Calculate the absolute and desired min bounds.

  // This is how many young regions we already have (currently: the survivors).
  uint base_min_length = recorded_survivor_regions();
  // This is the absolute minimum young length, which ensures that we
  // can allocate one eden region in the worst-case.
  uint absolute_min_length = base_min_length + 1;
  uint desired_min_length =
                     calculate_young_list_desired_min_length(base_min_length);
  if (desired_min_length < absolute_min_length) {
    desired_min_length = absolute_min_length;
  }

  // Calculate the absolute and desired max bounds.

  // We will try our best not to "eat" into the reserve.
  uint absolute_max_length = 0;
  if (_free_regions_at_end_of_collection > _reserve_regions) {
    absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
  }
  uint desired_max_length = calculate_young_list_desired_max_length();
  if (desired_max_length > absolute_max_length) {
    desired_max_length = absolute_max_length;
  }

  uint young_list_target_length = 0;
  if (adaptive_young_list_length()) {
    if (gcs_are_young()) {
      young_list_target_length =
                        calculate_young_list_target_length(rs_lengths,
                                                           base_min_length,
                                                           desired_min_length,
                                                           desired_max_length);
      _rs_lengths_prediction = rs_lengths;
    } else {
      // Don't calculate anything and let the code below bound it to
      // the desired_min_length, i.e., do the next GC as soon as
      // possible to maximize how many old regions we can add to it.
    }
  } else {
    // The user asked for a fixed young gen so we'll fix the young gen
    // whether the next GC is young or mixed.
    young_list_target_length = _young_list_fixed_length;
  }

  // Make sure we don't go over the desired max length, nor under the
  // desired min length. In case they clash, desired_min_length wins
  // which is why that test is second.
  if (young_list_target_length > desired_max_length) {
    young_list_target_length = desired_max_length;
  }
  if (young_list_target_length < desired_min_length) {
    young_list_target_length = desired_min_length;
  }

  assert(young_list_target_length > recorded_survivor_regions(),
         "we should be able to allocate at least one eden region");
  assert(young_list_target_length >= absolute_min_length, "post-condition");
  _young_list_target_length = young_list_target_length;

  update_max_gc_locker_expansion();
}

uint
G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
                                                     uint base_min_length,
                                                     uint desired_min_length,
                                                     uint desired_max_length) {
  assert(adaptive_young_list_length(), "pre-condition");
  assert(gcs_are_young(), "only call this for young GCs");

  // In case some edge-condition makes the desired max length too small...
  if (desired_max_length <= desired_min_length) {
    return desired_min_length;
  }

  // We'll adjust min_young_length and max_young_length not to include
  // the already allocated young regions (i.e., so they reflect the
  // min and max eden regions we'll allocate). The base_min_length
  // will be reflected in the predictions by the
  // survivor_regions_evac_time prediction.
  assert(desired_min_length > base_min_length, "invariant");
  uint min_young_length = desired_min_length - base_min_length;
  assert(desired_max_length > base_min_length, "invariant");
  uint max_young_length = desired_max_length - base_min_length;

  double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
  double survivor_regions_evac_time = predict_survivor_regions_evac_time();
  size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq);
  size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();
  size_t scanned_cards = predict_young_card_num(adj_rs_lengths);
  double base_time_ms =
    predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
    survivor_regions_evac_time;
  uint available_free_regions = _free_regions_at_end_of_collection;
  uint base_free_regions = 0;
  if (available_free_regions > _reserve_regions) {
    base_free_regions = available_free_regions - _reserve_regions;
  }

  // Here, we will make sure that the shortest young length that
  // makes sense fits within the target pause time.

  if (predict_will_fit(min_young_length, base_time_ms,
                       base_free_regions, target_pause_time_ms)) {
    // The shortest young length will fit into the target pause time;
    // we'll now check whether the absolute maximum number of young
    // regions will fit in the target pause time. If not, we'll do
    // a binary search between min_young_length and max_young_length.
    if (predict_will_fit(max_young_length, base_time_ms,
                         base_free_regions, target_pause_time_ms)) {
      // The maximum young length will fit into the target pause time.
      // We are done so set min young length to the maximum length (as
      // the result is assumed to be returned in min_young_length).
      min_young_length = max_young_length;
    } else {
      // The maximum possible number of young regions will not fit within
      // the target pause time so we'll search for the optimal
      // length. The loop invariants are:
      //
      // min_young_length < max_young_length
      // min_young_length is known to fit into the target pause time
      // max_young_length is known not to fit into the target pause time
      //
      // Going into the loop we know the above hold as we've just
      // checked them. Every time around the loop we check whether
      // the middle value between min_young_length and
      // max_young_length fits into the target pause time. If it
      // does, it becomes the new min. If it doesn't, it becomes
      // the new max. This way we maintain the loop invariants.

      assert(min_young_length < max_young_length, "invariant");
      uint diff = (max_young_length - min_young_length) / 2;
      while (diff > 0) {
        uint young_length = min_young_length + diff;
        if (predict_will_fit(young_length, base_time_ms,
                             base_free_regions, target_pause_time_ms)) {
          min_young_length = young_length;
        } else {
          max_young_length = young_length;
        }
        assert(min_young_length < max_young_length, "invariant");
        diff = (max_young_length - min_young_length) / 2;
      }
      // The result is min_young_length which, according to the
      // loop invariants, should fit within the target pause time.

      // These are the post-conditions of the binary search above:
      assert(min_young_length < max_young_length,
             "otherwise we should have discovered that max_young_length "
             "fits into the pause target and not done the binary search");
      assert(predict_will_fit(min_young_length, base_time_ms,
                              base_free_regions, target_pause_time_ms),
             "min_young_length, the result of the binary search, should "
             "fit into the pause target");
      assert(!predict_will_fit(min_young_length + 1, base_time_ms,
                               base_free_regions, target_pause_time_ms),
             "min_young_length, the result of the binary search, should be "
             "optimal, so no larger length should fit into the pause target");
    }
  } else {
    // Even the minimum length doesn't fit into the pause time
    // target, return it as the result nevertheless.
  }
  return base_min_length + min_young_length;
}

double G1CollectorPolicy::predict_survivor_regions_evac_time() {
  double survivor_regions_evac_time = 0.0;
  for (HeapRegion* r = _recorded_survivor_head;
       r != NULL && r != _recorded_survivor_tail->get_next_young_region();
       r = r->get_next_young_region()) {
    survivor_regions_evac_time += predict_region_elapsed_time_ms(r, gcs_are_young());
  }
  return survivor_regions_evac_time;
}

void G1CollectorPolicy::revise_young_list_target_length_if_necessary() {
  guarantee( adaptive_young_list_length(), "should not call this otherwise" );

  size_t rs_lengths = _g1->young_list()->sampled_rs_lengths();
  if (rs_lengths > _rs_lengths_prediction) {
    // add 10% to avoid having to recalculate often
    size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
    update_young_list_target_length(rs_lengths_prediction);
  }
}

HeapWord* G1CollectorPolicy::mem_allocate_work(size_t size,
                                               bool is_tlab,
                                               bool* gc_overhead_limit_was_exceeded) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}

// This method controls how a collector handles one or more
// of its generations being fully allocated.
HeapWord* G1CollectorPolicy::satisfy_failed_allocation(size_t size,
                                                       bool is_tlab) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}

#ifndef PRODUCT
bool G1CollectorPolicy::verify_young_ages() {
  HeapRegion* head = _g1->young_list()->first_region();
  return
    verify_young_ages(head, _short_lived_surv_rate_group);
  // also call verify_young_ages on any additional surv rate groups
}

bool
G1CollectorPolicy::verify_young_ages(HeapRegion* head,
                                     SurvRateGroup *surv_rate_group) {
  guarantee( surv_rate_group != NULL, "pre-condition" );

  const char* name = surv_rate_group->name();
  bool ret = true;
  int prev_age = -1;

  for (HeapRegion* curr = head;
       curr != NULL;
       curr = curr->get_next_young_region()) {
    SurvRateGroup* group = curr->surv_rate_group();
    if (group == NULL && !curr->is_survivor()) {
      gclog_or_tty->print_cr("## %s: encountered NULL surv_rate_group", name);
      ret = false;
    }

    if (surv_rate_group == group) {
      int age = curr->age_in_surv_rate_group();

      if (age < 0) {
        gclog_or_tty->print_cr("## %s: encountered negative age", name);
        ret = false;
      }

      if (age <= prev_age) {
        gclog_or_tty->print_cr("## %s: region ages are not strictly increasing "
                               "(%d, %d)", name, age, prev_age);
        ret = false;
      }
      prev_age = age;
    }
  }

  return ret;
}
#endif // PRODUCT

void G1CollectorPolicy::record_full_collection_start() {
  _full_collection_start_sec = os::elapsedTime();
  record_heap_size_info_at_start(true /* full */);
  // Release the future to-space so that it is available for compaction into.
  _g1->set_full_collection();
}

void G1CollectorPolicy::record_full_collection_end() {
  // Consider this like a collection pause for the purposes of allocation
  // since last pause.
  double end_sec = os::elapsedTime();
  double full_gc_time_sec = end_sec - _full_collection_start_sec;
  double full_gc_time_ms = full_gc_time_sec * 1000.0;

  _trace_gen1_time_data.record_full_collection(full_gc_time_ms);

  update_recent_gc_times(end_sec, full_gc_time_ms);

  _g1->clear_full_collection();

  // "Nuke" the heuristics that control the young/mixed GC
  // transitions and make sure we start with young GCs after the Full GC.
  set_gcs_are_young(true);
  _last_young_gc = false;
  clear_initiate_conc_mark_if_possible();
  clear_during_initial_mark_pause();
  _in_marking_window = false;
  _in_marking_window_im = false;

  _short_lived_surv_rate_group->start_adding_regions();
  // also call this on any additional surv rate groups

  record_survivor_regions(0, NULL, NULL);

  _free_regions_at_end_of_collection = _g1->num_free_regions();
  // Reset survivors SurvRateGroup.
  _survivor_surv_rate_group->reset();
  update_young_list_target_length();
  _collectionSetChooser->clear();
}

void G1CollectorPolicy::record_stop_world_start() {
  _stop_world_start = os::elapsedTime();
}

void G1CollectorPolicy::record_collection_pause_start(double start_time_sec) {
  // We only need to do this here as the policy will only be applied
  // to the GC we're about to start, so there is no point in calculating
  // this every time we calculate / recalculate the target young length.
  update_survivors_policy();

  assert(_g1->used() == _g1->recalculate_used(),
         err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT,
                 _g1->used(), _g1->recalculate_used()));

  double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
  _trace_gen0_time_data.record_start_collection(s_w_t_ms);
  _stop_world_start = 0.0;

  record_heap_size_info_at_start(false /* full */);

  phase_times()->record_cur_collection_start_sec(start_time_sec);
  _pending_cards = _g1->pending_card_num();

  _collection_set_bytes_used_before = 0;
  _bytes_copied_during_gc = 0;

  _last_gc_was_young = false;

  // do that for any other surv rate groups
  _short_lived_surv_rate_group->stop_adding_regions();
  _survivors_age_table.clear();

  assert( verify_young_ages(), "region age verification" );
}

void G1CollectorPolicy::record_concurrent_mark_init_end(double
                                                   mark_init_elapsed_time_ms) {
  _during_marking = true;
  assert(!initiate_conc_mark_if_possible(), "we should have cleared it by now");
  clear_during_initial_mark_pause();
  _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
}

void G1CollectorPolicy::record_concurrent_mark_remark_start() {
  _mark_remark_start_sec = os::elapsedTime();
  _during_marking = false;
}

void G1CollectorPolicy::record_concurrent_mark_remark_end() {
  double end_time_sec = os::elapsedTime();
  double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
  _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
  _cur_mark_stop_world_time_ms += elapsed_time_ms;
  _prev_collection_pause_end_ms += elapsed_time_ms;

  _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec, true);
}

void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
  _mark_cleanup_start_sec = os::elapsedTime();
}

void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
  _last_young_gc = true;
  _in_marking_window = false;
}

void G1CollectorPolicy::record_concurrent_pause() {
  if (_stop_world_start > 0.0) {
    double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
    _trace_gen0_time_data.record_yield_time(yield_ms);
  }
}

bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
  if (_g1->concurrent_mark()->cmThread()->during_cycle()) {
    return false;
  }

  size_t marking_initiating_used_threshold =
    (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
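  // e.g. a 1 GB heap with the default InitiatingHeapOccupancyPercent
  // of 45 gives a threshold of roughly 460 MB of non-young occupancy.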
  size_t cur_used_bytes = _g1->non_young_capacity_bytes();
  size_t alloc_byte_size = alloc_word_size * HeapWordSize;

  if ((cur_used_bytes + alloc_byte_size) > marking_initiating_used_threshold) {
    if (gcs_are_young() && !_last_young_gc) {
      ergo_verbose5(ErgoConcCycles,
        "request concurrent cycle initiation",
        ergo_format_reason("occupancy higher than threshold")
        ergo_format_byte("occupancy")
        ergo_format_byte("allocation request")
        ergo_format_byte_perc("threshold")
        ergo_format_str("source"),
        cur_used_bytes,
        alloc_byte_size,
        marking_initiating_used_threshold,
        (double) InitiatingHeapOccupancyPercent,
        source);
      return true;
    } else {
      ergo_verbose5(ErgoConcCycles,
        "do not request concurrent cycle initiation",
        ergo_format_reason("still doing mixed collections")
        ergo_format_byte("occupancy")
        ergo_format_byte("allocation request")
        ergo_format_byte_perc("threshold")
        ergo_format_str("source"),
        cur_used_bytes,
        alloc_byte_size,
        marking_initiating_used_threshold,
        (double) InitiatingHeapOccupancyPercent,
        source);
    }
  }

  return false;
}

// Anything below this value is considered to be zero
#define MIN_TIMER_GRANULARITY 0.0000001

void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, EvacuationInfo& evacuation_info) {
  double end_time_sec = os::elapsedTime();
  assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(),
         "otherwise, the subtraction below does not make sense");
  size_t rs_size =
            _cur_collection_pause_used_regions_at_start - cset_region_length();
  size_t cur_used_bytes = _g1->used();
  assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
  bool last_pause_included_initial_mark = false;
  bool update_stats = !_g1->evacuation_failed();

#ifndef PRODUCT
  if (G1YoungSurvRateVerbose) {
    gclog_or_tty->cr();
    _short_lived_surv_rate_group->print();
    // do that for any other surv rate groups too
  }
#endif // PRODUCT

  last_pause_included_initial_mark = during_initial_mark_pause();
  if (last_pause_included_initial_mark) {
    record_concurrent_mark_init_end(0.0);
  } else if (need_to_start_conc_mark("end of GC")) {
    // Note: this might have already been set, if during the last
    // pause we decided to start a cycle but at the beginning of
    // this pause we decided to postpone it. That's OK.
    set_initiate_conc_mark_if_possible();
  }

  _mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0,
                          end_time_sec, false);

  evacuation_info.set_collectionset_used_before(_collection_set_bytes_used_before);
  evacuation_info.set_bytes_copied(_bytes_copied_during_gc);

  if (update_stats) {
    _trace_gen0_time_data.record_end_collection(pause_time_ms, phase_times());
    // this is where we update the allocation rate of the application
    double app_time_ms =
      (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms);
    if (app_time_ms < MIN_TIMER_GRANULARITY) {
      // This usually happens due to the timer not having the required
      // granularity. Some Linuxes are the usual culprits.
      // We'll just set it to something (arbitrarily) small.
      app_time_ms = 1.0;
    }
    // We maintain the invariant that all objects allocated by mutator
    // threads will be allocated out of eden regions. So, we can use
    // the eden region number allocated since the previous GC to
    // calculate the application's allocation rate. The only exception
    // to that is humongous objects that are allocated separately. But
    // given that humongous object allocations do not really affect
    // either the pause's duration or when the next pause will take
    // place we can safely ignore them here.
    uint regions_allocated = eden_cset_region_length();
    double alloc_rate_ms = (double) regions_allocated / app_time_ms;
    _alloc_rate_ms_seq->add(alloc_rate_ms);

    double interval_ms =
      (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;
    update_recent_gc_times(end_time_sec, pause_time_ms);
    _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms;
    if (recent_avg_pause_time_ratio() < 0.0 ||
        (recent_avg_pause_time_ratio() - 1.0 > 0.0)) {
#ifndef PRODUCT
      // Dump info to allow post-facto debugging
      gclog_or_tty->print_cr("recent_avg_pause_time_ratio() out of bounds");
      gclog_or_tty->print_cr("-------------------------------------------");
      gclog_or_tty->print_cr("Recent GC Times (ms):");
      _recent_gc_times_ms->dump();
      gclog_or_tty->print_cr("(End Time=%3.3f) Recent GC End Times (s):", end_time_sec);
      _recent_prev_end_times_for_all_gcs_sec->dump();
      gclog_or_tty->print_cr("GC = %3.3f, Interval = %3.3f, Ratio = %3.3f",
                             _recent_gc_times_ms->sum(), interval_ms, recent_avg_pause_time_ratio());
      // In debug mode, terminate the JVM if the user wants to debug at this point.
      assert(!G1FailOnFPError, "Debugging data for CR 6898948 has been dumped above");
#endif  // !PRODUCT
      // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
      // CR 6902692 by redoing the manner in which the ratio is incrementally computed.
      if (_recent_avg_pause_time_ratio < 0.0) {
        _recent_avg_pause_time_ratio = 0.0;
      } else {
        assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
        _recent_avg_pause_time_ratio = 1.0;
      }
    }
  }

  bool new_in_marking_window = _in_marking_window;
  bool new_in_marking_window_im = false;
  if (last_pause_included_initial_mark) {
    new_in_marking_window = true;
    new_in_marking_window_im = true;
  }

  if (_last_young_gc) {
    // This is supposed to be the "last young GC" before we start
    // doing mixed GCs. Here we decide whether to start mixed GCs or not.

    if (!last_pause_included_initial_mark) {
      if (next_gc_should_be_mixed("start mixed GCs",
                                  "do not start mixed GCs")) {
        set_gcs_are_young(false);
      }
    } else {
      ergo_verbose0(ErgoMixedGCs,
                    "do not start mixed GCs",
                    ergo_format_reason("concurrent cycle is about to start"));
    }
    _last_young_gc = false;
  }

  if (!_last_gc_was_young) {
    // This is a mixed GC. Here we decide whether to continue doing
    // mixed GCs or not.

    if (!next_gc_should_be_mixed("continue mixed GCs",
                                 "do not continue mixed GCs")) {
      set_gcs_are_young(true);
    }
  }

  _short_lived_surv_rate_group->start_adding_regions();
  // do that for any other surv rate groups

  if (update_stats) {
    double cost_per_card_ms = 0.0;
    if (_pending_cards > 0) {
      cost_per_card_ms = phase_times()->average_last_update_rs_time() / (double) _pending_cards;
      _cost_per_card_ms_seq->add(cost_per_card_ms);
    }

    size_t cards_scanned = _g1->cards_scanned();

    double cost_per_entry_ms = 0.0;
    if (cards_scanned > 10) {
      cost_per_entry_ms = phase_times()->average_last_scan_rs_time() / (double) cards_scanned;
      if (_last_gc_was_young) {
        _cost_per_entry_ms_seq->add(cost_per_entry_ms);
      } else {
        _mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms);
      }
    }

    if (_max_rs_lengths > 0) {
      double cards_per_entry_ratio =
        (double) cards_scanned / (double) _max_rs_lengths;
      if (_last_gc_was_young) {
        _young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
      } else {
        _mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
      }
    }

    // This is defensive. For a while _max_rs_lengths could get
    // smaller than _recorded_rs_lengths which was causing
    // rs_length_diff to get very large and mess up the RSet length
    // predictions. The reason was unsafe concurrent updates to the
    // _inc_cset_recorded_rs_lengths field which the code below guards
    // against (see CR 7118202). This bug has now been fixed (see CR
    // 7119027). However, I'm still worried that
    // _inc_cset_recorded_rs_lengths might still end up somewhat
    // inaccurate. The concurrent refinement thread calculates an
    // RSet's length concurrently with other CR threads updating it
    // which might cause it to calculate the length incorrectly (if,
    // say, it's in mid-coarsening). So I'll leave in the defensive
    // conditional below just in case.
    size_t rs_length_diff = 0;
    if (_max_rs_lengths > _recorded_rs_lengths) {
      rs_length_diff = _max_rs_lengths - _recorded_rs_lengths;
    }
    _rs_length_diff_seq->add((double) rs_length_diff);

    size_t freed_bytes = _heap_used_bytes_before_gc - cur_used_bytes;
    size_t copied_bytes = _collection_set_bytes_used_before - freed_bytes;
    double cost_per_byte_ms = 0.0;

    if (copied_bytes > 0) {
      cost_per_byte_ms = phase_times()->average_last_obj_copy_time() / (double) copied_bytes;
      if (_in_marking_window) {
        _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
      } else {
        _cost_per_byte_ms_seq->add(cost_per_byte_ms);
      }
    }

    double all_other_time_ms = pause_time_ms -
      (phase_times()->average_last_update_rs_time() + phase_times()->average_last_scan_rs_time()
      + phase_times()->average_last_obj_copy_time() + phase_times()->average_last_termination_time());

    double young_other_time_ms = 0.0;
    if (young_cset_region_length() > 0) {
      young_other_time_ms =
        phase_times()->young_cset_choice_time_ms() +
        phase_times()->young_free_cset_time_ms();
      _young_other_cost_per_region_ms_seq->add(young_other_time_ms /
                                          (double) young_cset_region_length());
    }
    double non_young_other_time_ms = 0.0;
    if (old_cset_region_length() > 0) {
      non_young_other_time_ms =
        phase_times()->non_young_cset_choice_time_ms() +
        phase_times()->non_young_free_cset_time_ms();

      _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms /
                                            (double) old_cset_region_length());
    }

    double constant_other_time_ms = all_other_time_ms -
      (young_other_time_ms + non_young_other_time_ms);
    _constant_other_time_ms_seq->add(constant_other_time_ms);

    double survival_ratio = 0.0;
    if (_collection_set_bytes_used_before > 0) {
      survival_ratio = (double) _bytes_copied_during_gc /
                                   (double) _collection_set_bytes_used_before;
    }

    _pending_cards_seq->add((double) _pending_cards);
    _rs_lengths_seq->add((double) _max_rs_lengths);
  }

  _in_marking_window = new_in_marking_window;
  _in_marking_window_im = new_in_marking_window_im;
  _free_regions_at_end_of_collection = _g1->num_free_regions();
  update_young_list_target_length();

  // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
  double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
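  // e.g. a 200 ms pause target with the default
  // G1RSetUpdatingPauseTimePercent of 10 yields a 20 ms goal for
  // remembered set updating.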
  adjust_concurrent_refinement(phase_times()->average_last_update_rs_time(),
                               phase_times()->sum_last_update_rs_processed_buffers(), update_rs_time_goal_ms);

  _collectionSetChooser->verify();
}

#define EXT_SIZE_FORMAT "%.1f%s"
#define EXT_SIZE_PARAMS(bytes)                                  \
  byte_size_in_proper_unit((double)(bytes)),                    \
  proper_unit_for_byte_size((bytes))

void G1CollectorPolicy::record_heap_size_info_at_start(bool full) {
  YoungList* young_list = _g1->young_list();
  _eden_used_bytes_before_gc = young_list->eden_used_bytes();
  _survivor_used_bytes_before_gc = young_list->survivor_used_bytes();
  _heap_capacity_bytes_before_gc = _g1->capacity();
  _heap_used_bytes_before_gc = _g1->used();
  _cur_collection_pause_used_regions_at_start = _g1->num_used_regions();

  _eden_capacity_bytes_before_gc =
         (_young_list_target_length * HeapRegion::GrainBytes) - _survivor_used_bytes_before_gc;

  if (full) {
    _metaspace_used_bytes_before_gc = MetaspaceAux::used_bytes();
  }
}

void G1CollectorPolicy::print_heap_transition() {
  _g1->print_size_transition(gclog_or_tty,
                             _heap_used_bytes_before_gc,
                             _g1->used(),
                             _g1->capacity());
}
  1222 void G1CollectorPolicy::print_detailed_heap_transition(bool full) {
  1223   YoungList* young_list = _g1->young_list();
  1225   size_t eden_used_bytes_after_gc = young_list->eden_used_bytes();
  1226   size_t survivor_used_bytes_after_gc = young_list->survivor_used_bytes();
  1227   size_t heap_used_bytes_after_gc = _g1->used();
  1229   size_t heap_capacity_bytes_after_gc = _g1->capacity();
  1230   size_t eden_capacity_bytes_after_gc =
  1231     (_young_list_target_length * HeapRegion::GrainBytes) - survivor_used_bytes_after_gc;
  1233   gclog_or_tty->print(
  1234     "   [Eden: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT") "
  1235     "Survivors: "EXT_SIZE_FORMAT"->"EXT_SIZE_FORMAT" "
  1236     "Heap: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"
  1237     EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")]",
  1238     EXT_SIZE_PARAMS(_eden_used_bytes_before_gc),
  1239     EXT_SIZE_PARAMS(_eden_capacity_bytes_before_gc),
  1240     EXT_SIZE_PARAMS(eden_used_bytes_after_gc),
  1241     EXT_SIZE_PARAMS(eden_capacity_bytes_after_gc),
  1242     EXT_SIZE_PARAMS(_survivor_used_bytes_before_gc),
  1243     EXT_SIZE_PARAMS(survivor_used_bytes_after_gc),
  1244     EXT_SIZE_PARAMS(_heap_used_bytes_before_gc),
  1245     EXT_SIZE_PARAMS(_heap_capacity_bytes_before_gc),
  1246     EXT_SIZE_PARAMS(heap_used_bytes_after_gc),
  1247     EXT_SIZE_PARAMS(heap_capacity_bytes_after_gc));
  1249   if (full) {
  1250     MetaspaceAux::print_metaspace_change(_metaspace_used_bytes_before_gc);
  1251   }
  1253   gclog_or_tty->cr();
  1254 }
  1256 void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
  1257                                                      double update_rs_processed_buffers,
  1258                                                      double goal_ms) {
  1259   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  1260   ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();
  1262   if (G1UseAdaptiveConcRefinement) {
  1263     const int k_gy = 3, k_gr = 6;
  1264     const double inc_k = 1.1, dec_k = 0.9;
  1266     int g = cg1r->green_zone();
  1267     if (update_rs_time > goal_ms) {
  1268       g = (int)(g * dec_k);  // Can become 0, that's OK; it would mean mutator-only processing.
  1269     } else {
  1270       if (update_rs_time < goal_ms && update_rs_processed_buffers > g) {
  1271         g = (int)MAX2(g * inc_k, g + 1.0);
  1272       }
  1273     }
  1274     // Change the refinement threads params
  1275     cg1r->set_green_zone(g);
  1276     cg1r->set_yellow_zone(g * k_gy);
  1277     cg1r->set_red_zone(g * k_gr);
  1278     cg1r->reinitialize_threads();
  1280     int processing_threshold_delta = MAX2((int)(cg1r->green_zone() * sigma()), 1);
  1281     int processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta,
  1282                                     cg1r->yellow_zone());
  1283     // Change the barrier params
  1284     dcqs.set_process_completed_threshold(processing_threshold);
  1285     dcqs.set_max_completed_queue(cg1r->red_zone());
  1286   }
  1288   int curr_queue_size = dcqs.completed_buffers_num();
  1289   if (curr_queue_size >= cg1r->yellow_zone()) {
  1290     dcqs.set_completed_queue_padding(curr_queue_size);
  1291   } else {
  1292     dcqs.set_completed_queue_padding(0);
  1293   }
  1294   dcqs.notify_if_necessary();
  1295 }
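// A minimal standalone sketch of the adaptive refinement control above,
// assuming the same constants (inc_k = 1.1, dec_k = 0.9, yellow = 3 * green,
// red = 6 * green); the type and function names here are illustrative, not
// HotSpot APIs.
#include <algorithm>
#include <cstdio>

struct Zones { int green; int yellow; int red; };

// Shrink the green zone multiplicatively when update-RS work overshot its
// goal; grow it when there was slack and more buffers were processed than
// the current green zone. Yellow and red stay fixed multiples of green.
static Zones adapt(Zones z, double update_rs_ms, double processed_buffers,
                   double goal_ms) {
  int g = z.green;
  if (update_rs_ms > goal_ms) {
    g = (int)(g * 0.9);                   // may reach 0: mutator-only processing
  } else if (update_rs_ms < goal_ms && processed_buffers > g) {
    g = (int)std::max(g * 1.1, g + 1.0);  // grow by at least one buffer
  }
  Zones out = { g, g * 3, g * 6 };
  return out;
}

int main() {
  Zones z = { 16, 48, 96 };
  z = adapt(z, 12.0 /* ms spent */, 40.0 /* buffers */, 10.0 /* goal ms */);
  std::printf("green=%d yellow=%d red=%d\n", z.green, z.yellow, z.red); // 14 42 84
  return 0;
}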
  1297 double
  1298 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
  1299                                                 size_t scanned_cards) {
  1300   return
  1301     predict_rs_update_time_ms(pending_cards) +
  1302     predict_rs_scan_time_ms(scanned_cards) +
  1303     predict_constant_other_time_ms();
  1304 }
  1306 double
  1307 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) {
  1308   size_t rs_length = predict_rs_length_diff();
  1309   size_t card_num;
  1310   if (gcs_are_young()) {
  1311     card_num = predict_young_card_num(rs_length);
  1312   } else {
  1313     card_num = predict_non_young_card_num(rs_length);
  1314   }
  1315   return predict_base_elapsed_time_ms(pending_cards, card_num);
  1316 }
  1318 size_t G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
  1319   size_t bytes_to_copy;
  1320   if (hr->is_marked())
  1321     bytes_to_copy = hr->max_live_bytes();
  1322   else {
  1323     assert(hr->is_young() && hr->age_in_surv_rate_group() != -1, "invariant");
  1324     int age = hr->age_in_surv_rate_group();
  1325     double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
  1326     bytes_to_copy = (size_t) ((double) hr->used() * yg_surv_rate);
  1327   }
  1328   return bytes_to_copy;
  1329 }
  1331 double
  1332 G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
  1333                                                   bool for_young_gc) {
  1334   size_t rs_length = hr->rem_set()->occupied();
  1335   size_t card_num;
  1337   // Predicting the number of cards is based on which type of GC
  1338   // we're predicting for.
  1339   if (for_young_gc) {
  1340     card_num = predict_young_card_num(rs_length);
  1341   } else {
  1342     card_num = predict_non_young_card_num(rs_length);
  1343   }
  1344   size_t bytes_to_copy = predict_bytes_to_copy(hr);
  1346   double region_elapsed_time_ms =
  1347     predict_rs_scan_time_ms(card_num) +
  1348     predict_object_copy_time_ms(bytes_to_copy);
  1350   // The prediction of the "other" time for this region is based
  1351   // upon the region type and NOT the GC type.
  1352   if (hr->is_young()) {
  1353     region_elapsed_time_ms += predict_young_other_time_ms(1);
  1354   } else {
  1355     region_elapsed_time_ms += predict_non_young_other_time_ms(1);
  1356   }
  1357   return region_elapsed_time_ms;
  1358 }
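// A minimal sketch of the per-region cost model used above, with made-up
// per-unit costs standing in for the sampled sequences (the real code pulls
// these from TruncatedSeq averages); names here are illustrative only.
#include <cstddef>
#include <cstdio>

struct CostModel {
  double cost_per_card_ms;       // RS scan cost per card
  double cost_per_byte_ms;       // object copy cost per byte
  double young_other_ms;         // fixed per-young-region overhead
  double non_young_other_ms;     // fixed per-old-region overhead
};

// Predicted pause contribution of one region: scan its remembered-set cards,
// copy its surviving bytes, plus a per-region constant that depends on the
// region type (young vs old), mirroring predict_region_elapsed_time_ms().
double predict_region_ms(const CostModel& m, size_t cards,
                         size_t bytes_to_copy, bool is_young) {
  double ms = cards * m.cost_per_card_ms + bytes_to_copy * m.cost_per_byte_ms;
  ms += is_young ? m.young_other_ms : m.non_young_other_ms;
  return ms;
}

int main() {
  CostModel m = { 0.005, 0.0000002, 0.2, 0.5 };
  // e.g. an old region with 2000 RS cards and 300 KB predicted live data:
  std::printf("%.3f ms\n", predict_region_ms(m, 2000, 300 * 1024, false));
  return 0;
}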
  1360 void
  1361 G1CollectorPolicy::init_cset_region_lengths(uint eden_cset_region_length,
  1362                                             uint survivor_cset_region_length) {
  1363   _eden_cset_region_length     = eden_cset_region_length;
  1364   _survivor_cset_region_length = survivor_cset_region_length;
  1365   _old_cset_region_length      = 0;
  1366 }
  1368 void G1CollectorPolicy::set_recorded_rs_lengths(size_t rs_lengths) {
  1369   _recorded_rs_lengths = rs_lengths;
  1370 }
  1372 void G1CollectorPolicy::update_recent_gc_times(double end_time_sec,
  1373                                                double elapsed_ms) {
  1374   _recent_gc_times_ms->add(elapsed_ms);
  1375   _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec);
  1376   _prev_collection_pause_end_ms = end_time_sec * 1000.0;
  1377 }
  1379 size_t G1CollectorPolicy::expansion_amount() {
  1380   double recent_gc_overhead = recent_avg_pause_time_ratio() * 100.0;
  1381   double threshold = _gc_overhead_perc;
  1382   if (recent_gc_overhead > threshold) {
  1383     // We will double the existing space, or take
  1384     // G1ExpandByPercentOfAvailable % of the available expansion
  1385     // space, whichever is smaller, bounded below by a minimum
  1386     // expansion (unless that's all that's left.)
  1387     const size_t min_expand_bytes = 1*M;
  1388     size_t reserved_bytes = _g1->max_capacity();
  1389     size_t committed_bytes = _g1->capacity();
  1390     size_t uncommitted_bytes = reserved_bytes - committed_bytes;
  1391     size_t expand_bytes;
  1392     size_t expand_bytes_via_pct =
  1393       uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
  1394     expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
  1395     expand_bytes = MAX2(expand_bytes, min_expand_bytes);
  1396     expand_bytes = MIN2(expand_bytes, uncommitted_bytes);
  1398     ergo_verbose5(ErgoHeapSizing,
  1399                   "attempt heap expansion",
  1400                   ergo_format_reason("recent GC overhead higher than "
  1401                                      "threshold after GC")
  1402                   ergo_format_perc("recent GC overhead")
  1403                   ergo_format_perc("threshold")
  1404                   ergo_format_byte("uncommitted")
  1405                   ergo_format_byte_perc("calculated expansion amount"),
  1406                   recent_gc_overhead, threshold,
  1407                   uncommitted_bytes,
  1408                   expand_bytes_via_pct, (double) G1ExpandByPercentOfAvailable);
  1410     return expand_bytes;
  1411   } else {
  1412     return 0;
  1413   }
  1414 }
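// A worked example of the clamping chain above, under assumed sizes: the
// expansion is G1ExpandByPercentOfAvailable percent of uncommitted space,
// capped at doubling the committed size, raised to at least 1M, and finally
// capped at what is actually left uncommitted.
#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
  const size_t M = 1024 * 1024;
  size_t reserved_bytes  = 4096 * M;   // assumed -Xmx
  size_t committed_bytes = 1024 * M;   // current capacity
  size_t pct             = 20;         // G1ExpandByPercentOfAvailable default

  size_t uncommitted = reserved_bytes - committed_bytes;       // 3072M
  size_t expand = uncommitted * pct / 100;                     // ~614M
  expand = std::min(expand, committed_bytes);                  // at most double
  expand = std::max(expand, (size_t)(1 * M));                  // at least 1M
  expand = std::min(expand, uncommitted);                      // at most what's left

  std::printf("expand by %zu MB\n", expand / M);               // 614 MB
  return 0;
}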
  1416 void G1CollectorPolicy::print_tracing_info() const {
  1417   _trace_gen0_time_data.print();
  1418   _trace_gen1_time_data.print();
  1419 }
  1421 void G1CollectorPolicy::print_yg_surv_rate_info() const {
  1422 #ifndef PRODUCT
  1423   _short_lived_surv_rate_group->print_surv_rate_summary();
  1424   // add this call for any other surv rate groups
  1425 #endif // PRODUCT
  1426 }
  1428 uint G1CollectorPolicy::max_regions(int purpose) {
  1429   switch (purpose) {
  1430     case GCAllocForSurvived:
  1431       return _max_survivor_regions;
  1432     case GCAllocForTenured:
  1433       return REGIONS_UNLIMITED;
  1434     default:
  1435       ShouldNotReachHere();
  1436       return REGIONS_UNLIMITED;
  1437   };
  1438 }
  1440 void G1CollectorPolicy::update_max_gc_locker_expansion() {
  1441   uint expansion_region_num = 0;
  1442   if (GCLockerEdenExpansionPercent > 0) {
  1443     double perc = (double) GCLockerEdenExpansionPercent / 100.0;
  1444     double expansion_region_num_d = perc * (double) _young_list_target_length;
  1445     // We use ceiling so that if expansion_region_num_d is > 0.0 (but
  1446     // less than 1.0) we'll get 1.
  1447     expansion_region_num = (uint) ceil(expansion_region_num_d);
  1448   } else {
  1449     assert(expansion_region_num == 0, "sanity");
  1450   }
  1451   _young_list_max_length = _young_list_target_length + expansion_region_num;
  1452   assert(_young_list_target_length <= _young_list_max_length, "post-condition");
  1453 }
  1455 // Calculates survivor space parameters.
  1456 void G1CollectorPolicy::update_survivors_policy() {
  1457   double max_survivor_regions_d =
  1458                  (double) _young_list_target_length / (double) SurvivorRatio;
  1459   // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
  1460   // smaller than 1.0) we'll get 1.
  1461   _max_survivor_regions = (uint) ceil(max_survivor_regions_d);
  1463   _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
  1464         HeapRegion::GrainWords * _max_survivor_regions);
  1465 }
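// A small numeric sketch of the survivor sizing above: the survivor space is
// the young target divided by SurvivorRatio, rounded up so that any non-zero
// target yields at least one survivor region. The values here are assumed.
#include <cmath>
#include <cstdio>

int main() {
  const unsigned young_list_target_length = 50;   // regions (assumed)
  const unsigned survivor_ratio = 8;              // -XX:SurvivorRatio default

  double d = (double)young_list_target_length / (double)survivor_ratio;
  unsigned max_survivor_regions = (unsigned)std::ceil(d);

  std::printf("max survivor regions = %u\n", max_survivor_regions);  // 7
  return 0;
}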
  1467 bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(
  1468                                                      GCCause::Cause gc_cause) {
  1469   bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
  1470   if (!during_cycle) {
  1471     ergo_verbose1(ErgoConcCycles,
  1472                   "request concurrent cycle initiation",
  1473                   ergo_format_reason("requested by GC cause")
  1474                   ergo_format_str("GC cause"),
  1475                   GCCause::to_string(gc_cause));
  1476     set_initiate_conc_mark_if_possible();
  1477     return true;
  1478   } else {
  1479     ergo_verbose1(ErgoConcCycles,
  1480                   "do not request concurrent cycle initiation",
  1481                   ergo_format_reason("concurrent cycle already in progress")
  1482                   ergo_format_str("GC cause"),
  1483                   GCCause::to_string(gc_cause));
  1484     return false;
  1485   }
  1486 }
  1488 void
  1489 G1CollectorPolicy::decide_on_conc_mark_initiation() {
  1490   // We are about to decide on whether this pause will be an
  1491   // initial-mark pause.
  1493   // First, during_initial_mark_pause() should not be already set. We
  1494   // will set it here if we have to. However, it should be cleared by
  1495   // the end of the pause (it's only set for the duration of an
  1496   // initial-mark pause).
  1497   assert(!during_initial_mark_pause(), "pre-condition");
  1499   if (initiate_conc_mark_if_possible()) {
  1500     // We had noticed on a previous pause that the heap occupancy has
  1501     // gone over the initiating threshold and we should start a
  1502     // concurrent marking cycle. So we might initiate one.
  1504     bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
  1505     if (!during_cycle) {
  1506       // The concurrent marking thread is not "during a cycle", i.e.,
  1507       // it has completed the last one. So we can go ahead and
  1508       // initiate a new cycle.
  1510       set_during_initial_mark_pause();
  1511       // We do not allow mixed GCs during marking.
  1512       if (!gcs_are_young()) {
  1513         set_gcs_are_young(true);
  1514         ergo_verbose0(ErgoMixedGCs,
  1515                       "end mixed GCs",
  1516                       ergo_format_reason("concurrent cycle is about to start"));
  1517       }
  1519       // And we can now clear initiate_conc_mark_if_possible() as
  1520       // we've already acted on it.
  1521       clear_initiate_conc_mark_if_possible();
  1523       ergo_verbose0(ErgoConcCycles,
  1524                   "initiate concurrent cycle",
  1525                   ergo_format_reason("concurrent cycle initiation requested"));
  1526     } else {
  1527       // The concurrent marking thread is still finishing up the
  1528       // previous cycle. If we start one right now the two cycles
  1529       // overlap. In particular, the concurrent marking thread might
  1530       // be in the process of clearing the next marking bitmap (which
  1531       // we will use for the next cycle if we start one). Starting a
  1532       // cycle now will be bad given that parts of the marking
  1533       // information might get cleared by the marking thread. And we
  1534       // cannot wait for the marking thread to finish the cycle as it
  1535       // periodically yields while clearing the next marking bitmap
  1536       // and, if it's in a yield point, it's waiting for us to
  1537       // finish. So, at this point we will not start a cycle and we'll
  1538       // let the concurrent marking thread complete the last one.
  1539       ergo_verbose0(ErgoConcCycles,
  1540                     "do not initiate concurrent cycle",
  1541                     ergo_format_reason("concurrent cycle already in progress"));
  1542     }
  1543   }
  1544 }
  1546 class KnownGarbageClosure: public HeapRegionClosure {
  1547   G1CollectedHeap* _g1h;
  1548   CollectionSetChooser* _hrSorted;
  1550 public:
  1551   KnownGarbageClosure(CollectionSetChooser* hrSorted) :
  1552     _g1h(G1CollectedHeap::heap()), _hrSorted(hrSorted) { }
  1554   bool doHeapRegion(HeapRegion* r) {
  1555     // We only include humongous regions in collection
  1556     // sets when concurrent mark shows that their contained object is
  1557     // unreachable.
  1559     // Do we have any marking information for this region?
  1560     if (r->is_marked()) {
  1561       // We will skip any region that's currently used as an old GC
  1562       // alloc region (we should not consider those for collection
  1563       // before we fill them up).
  1564       if (_hrSorted->should_add(r) && !_g1h->is_old_gc_alloc_region(r)) {
  1565         _hrSorted->add_region(r);
  1566       }
  1567     }
  1568     return false;
  1569   }
  1570 };
  1572 class ParKnownGarbageHRClosure: public HeapRegionClosure {
  1573   G1CollectedHeap* _g1h;
  1574   CSetChooserParUpdater _cset_updater;
  1576 public:
  1577   ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
  1578                            uint chunk_size) :
  1579     _g1h(G1CollectedHeap::heap()),
  1580     _cset_updater(hrSorted, true /* parallel */, chunk_size) { }
  1582   bool doHeapRegion(HeapRegion* r) {
  1583     // Do we have any marking information for this region?
  1584     if (r->is_marked()) {
  1585       // We will skip any region that's currently used as an old GC
  1586       // alloc region (we should not consider those for collection
  1587       // before we fill them up).
  1588       if (_cset_updater.should_add(r) && !_g1h->is_old_gc_alloc_region(r)) {
  1589         _cset_updater.add_region(r);
  1590       }
  1591     }
  1592     return false;
  1593   }
  1594 };
  1596 class ParKnownGarbageTask: public AbstractGangTask {
  1597   CollectionSetChooser* _hrSorted;
  1598   uint _chunk_size;
  1599   G1CollectedHeap* _g1;
  1600 public:
  1601   ParKnownGarbageTask(CollectionSetChooser* hrSorted, uint chunk_size) :
  1602     AbstractGangTask("ParKnownGarbageTask"),
  1603     _hrSorted(hrSorted), _chunk_size(chunk_size),
  1604     _g1(G1CollectedHeap::heap()) { }
  1606   void work(uint worker_id) {
  1607     ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size);
  1609     // Back to zero for the claim value.
  1610     _g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, worker_id,
  1611                                          _g1->workers()->active_workers(),
  1612                                          HeapRegion::InitialClaimValue);
  1613   }
  1614 };
  1616 void
  1617 G1CollectorPolicy::record_concurrent_mark_cleanup_end(int no_of_gc_threads) {
  1618   _collectionSetChooser->clear();
  1620   uint region_num = _g1->num_regions();
  1621   if (G1CollectedHeap::use_parallel_gc_threads()) {
  1622     const uint OverpartitionFactor = 4;
  1623     uint WorkUnit;
  1624     // The use of MinChunkSize = 8 in the original code
  1625     // causes some assertion failures when the total number of
  1626     // regions is less than 8.  The code here tries to fix that.
  1627     // Should the original code also be fixed?
  1628     if (no_of_gc_threads > 0) {
  1629       const uint MinWorkUnit = MAX2(region_num / no_of_gc_threads, 1U);
  1630       WorkUnit = MAX2(region_num / (no_of_gc_threads * OverpartitionFactor),
  1631                       MinWorkUnit);
  1632     } else {
  1633       assert(no_of_gc_threads > 0,
  1634         "The active gc workers should be greater than 0");
  1635       // In a product build do something reasonable to avoid a crash.
  1636       const uint MinWorkUnit = MAX2(region_num / (uint) ParallelGCThreads, 1U);
  1637       WorkUnit =
  1638         MAX2(region_num / (uint) (ParallelGCThreads * OverpartitionFactor),
  1639              MinWorkUnit);
  1640     }
  1641     _collectionSetChooser->prepare_for_par_region_addition(_g1->num_regions(),
  1642                                                            WorkUnit);
  1643     ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser,
  1644                                             (int) WorkUnit);
  1645     _g1->workers()->run_task(&parKnownGarbageTask);
  1647     assert(_g1->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
  1648            "sanity check");
  1649   } else {
  1650     KnownGarbageClosure knownGarbagecl(_collectionSetChooser);
  1651     _g1->heap_region_iterate(&knownGarbagecl);
  1652   }
  1654   _collectionSetChooser->sort_regions();
  1656   double end_sec = os::elapsedTime();
  1657   double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
  1658   _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
  1659   _cur_mark_stop_world_time_ms += elapsed_time_ms;
  1660   _prev_collection_pause_end_ms += elapsed_time_ms;
  1661   _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_sec, true);
  1662 }
  1664 // Add the heap region at the head of the non-incremental collection set
  1665 void G1CollectorPolicy::add_old_region_to_cset(HeapRegion* hr) {
  1666   assert(_inc_cset_build_state == Active, "Precondition");
  1667   assert(hr->is_old(), "the region should be old");
  1669   assert(!hr->in_collection_set(), "should not already be in the CSet");
  1670   hr->set_in_collection_set(true);
  1671   hr->set_next_in_collection_set(_collection_set);
  1672   _collection_set = hr;
  1673   _collection_set_bytes_used_before += hr->used();
  1674   _g1->register_region_with_in_cset_fast_test(hr);
  1675   size_t rs_length = hr->rem_set()->occupied();
  1676   _recorded_rs_lengths += rs_length;
  1677   _old_cset_region_length += 1;
  1678 }
  1680 // Initialize the per-collection-set information
  1681 void G1CollectorPolicy::start_incremental_cset_building() {
  1682   assert(_inc_cset_build_state == Inactive, "Precondition");
  1684   _inc_cset_head = NULL;
  1685   _inc_cset_tail = NULL;
  1686   _inc_cset_bytes_used_before = 0;
  1688   _inc_cset_max_finger = 0;
  1689   _inc_cset_recorded_rs_lengths = 0;
  1690   _inc_cset_recorded_rs_lengths_diffs = 0;
  1691   _inc_cset_predicted_elapsed_time_ms = 0.0;
  1692   _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
  1693   _inc_cset_build_state = Active;
  1694 }
  1696 void G1CollectorPolicy::finalize_incremental_cset_building() {
  1697   assert(_inc_cset_build_state == Active, "Precondition");
  1698   assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
  1700   // The two "main" fields, _inc_cset_recorded_rs_lengths and
  1701   // _inc_cset_predicted_elapsed_time_ms, are updated by the thread
  1702   // that adds a new region to the CSet. Further updates by the
  1703   // concurrent refinement thread that samples the young RSet lengths
  1704   // are accumulated in the *_diffs fields. Here we add the diffs to
  1705   // the "main" fields.
  1707   if (_inc_cset_recorded_rs_lengths_diffs >= 0) {
  1708     _inc_cset_recorded_rs_lengths += _inc_cset_recorded_rs_lengths_diffs;
  1709   } else {
  1710     // This is defensive. In theory, the diff should always be positive
  1711     // as RSets can only grow between GCs. However, given that we
  1712     // sample their size concurrently with other threads updating them
  1713     // it's possible that we might get the wrong size back, which
  1714     // could make the calculations somewhat inaccurate.
  1715     size_t diffs = (size_t) (-_inc_cset_recorded_rs_lengths_diffs);
  1716     if (_inc_cset_recorded_rs_lengths >= diffs) {
  1717       _inc_cset_recorded_rs_lengths -= diffs;
  1718     } else {
  1719       _inc_cset_recorded_rs_lengths = 0;
  1720     }
  1721   }
  1722   _inc_cset_predicted_elapsed_time_ms +=
  1723                                      _inc_cset_predicted_elapsed_time_ms_diffs;
  1725   _inc_cset_recorded_rs_lengths_diffs = 0;
  1726   _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
  1727 }
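// A minimal sketch of the diff-folding idiom above: concurrent samplers only
// touch the signed *_diffs field, and the safepoint folds it into the main
// total, clamping at zero in case racy sampling produced a net-negative diff.
// The type and field names are illustrative, not the HotSpot ones.
#include <cstddef>
#include <cstdio>

struct IncCSetStats {
  size_t         recorded_rs_lengths;
  std::ptrdiff_t recorded_rs_lengths_diffs;  // signed: samples may shrink

  void fold_diffs() {
    if (recorded_rs_lengths_diffs >= 0) {
      recorded_rs_lengths += (size_t)recorded_rs_lengths_diffs;
    } else {
      // Defensive: RSets should only grow between GCs, but concurrent
      // sampling can race, so never let the total underflow.
      size_t dec = (size_t)(-recorded_rs_lengths_diffs);
      recorded_rs_lengths = (recorded_rs_lengths >= dec)
                          ? recorded_rs_lengths - dec : 0;
    }
    recorded_rs_lengths_diffs = 0;
  }
};

int main() {
  IncCSetStats s = { 100, -130 };  // racy samples drove the diff negative
  s.fold_diffs();
  std::printf("rs lengths = %zu\n", s.recorded_rs_lengths);  // clamped to 0
  return 0;
}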
  1729 void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) {
  1730   // This routine is used when:
  1731   // * adding survivor regions to the incremental cset at the end of an
  1732   //   evacuation pause,
  1733   // * adding the current allocation region to the incremental cset
  1734   //   when it is retired, and
  1735   // * updating existing policy information for a region in the
  1736   //   incremental cset via young list RSet sampling.
  1737   // Therefore this routine may be called at a safepoint by the
  1738   // VM thread, or in-between safepoints by mutator threads (when
  1739   // retiring the current allocation region) or a concurrent
  1740   // refine thread (RSet sampling).
  1742   double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
  1743   size_t used_bytes = hr->used();
  1744   _inc_cset_recorded_rs_lengths += rs_length;
  1745   _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
  1746   _inc_cset_bytes_used_before += used_bytes;
  1748   // Cache the values we have added to the aggregated information
  1749   // in the heap region in case we have to remove this region from
  1750   // the incremental collection set, or it is updated by the
  1751   // RSet sampling code.
  1752   hr->set_recorded_rs_length(rs_length);
  1753   hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);
  1754 }
  1756 void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr,
  1757                                                      size_t new_rs_length) {
  1758   // Update the CSet information that is dependent on the new RS length
  1759   assert(hr->is_young(), "Precondition");
  1760   assert(!SafepointSynchronize::is_at_safepoint(),
  1761                                                "should not be at a safepoint");
  1763   // We could have updated _inc_cset_recorded_rs_lengths and
  1764   // _inc_cset_predicted_elapsed_time_ms directly but we'd need to do
  1765   // that atomically, as this code is executed by a concurrent
  1766   // refinement thread, potentially concurrently with a mutator thread
  1767   // allocating a new region and also updating the same fields. To
  1768   // avoid the atomic operations we accumulate these updates on two
  1769   // separate fields (*_diffs) and we'll just add them to the "main"
  1770   // fields at the start of a GC.
  1772   ssize_t old_rs_length = (ssize_t) hr->recorded_rs_length();
  1773   ssize_t rs_lengths_diff = (ssize_t) new_rs_length - old_rs_length;
  1774   _inc_cset_recorded_rs_lengths_diffs += rs_lengths_diff;
  1776   double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
  1777   double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
  1778   double elapsed_ms_diff = new_region_elapsed_time_ms - old_elapsed_time_ms;
  1779   _inc_cset_predicted_elapsed_time_ms_diffs += elapsed_ms_diff;
  1781   hr->set_recorded_rs_length(new_rs_length);
  1782   hr->set_predicted_elapsed_time_ms(new_region_elapsed_time_ms);
  1783 }
  1785 void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) {
  1786   assert(hr->is_young(), "invariant");
  1787   assert(hr->young_index_in_cset() > -1, "should have already been set");
  1788   assert(_inc_cset_build_state == Active, "Precondition");
  1790   // We need to clear and set the cached recorded/cached collection set
  1791   // information in the heap region here (before the region gets added
  1792   // to the collection set). An individual heap region's cached values
  1793   // are calculated, aggregated with the policy collection set info,
  1794   // and cached in the heap region here (initially) and (subsequently)
  1795   // by the Young List sampling code.
  1797   size_t rs_length = hr->rem_set()->occupied();
  1798   add_to_incremental_cset_info(hr, rs_length);
  1800   HeapWord* hr_end = hr->end();
  1801   _inc_cset_max_finger = MAX2(_inc_cset_max_finger, hr_end);
  1803   assert(!hr->in_collection_set(), "invariant");
  1804   hr->set_in_collection_set(true);
  1805   assert(hr->next_in_collection_set() == NULL, "invariant");
  1807   _g1->register_region_with_in_cset_fast_test(hr);
  1808 }
  1810 // Add the region at the RHS of the incremental cset
  1811 void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) {
  1812   // We should only ever be appending survivors at the end of a pause
  1813   assert(hr->is_survivor(), "Logic");
  1815   // Do the 'common' stuff
  1816   add_region_to_incremental_cset_common(hr);
  1818   // Now add the region at the right hand side
  1819   if (_inc_cset_tail == NULL) {
  1820     assert(_inc_cset_head == NULL, "invariant");
  1821     _inc_cset_head = hr;
  1822   } else {
  1823     _inc_cset_tail->set_next_in_collection_set(hr);
  1824   }
  1825   _inc_cset_tail = hr;
  1826 }
  1828 // Add the region to the LHS of the incremental cset
  1829 void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) {
  1830   // Survivors should be added to the RHS at the end of a pause
  1831   assert(hr->is_eden(), "Logic");
  1833   // Do the 'common' stuff
  1834   add_region_to_incremental_cset_common(hr);
  1836   // Add the region at the left hand side
  1837   hr->set_next_in_collection_set(_inc_cset_head);
  1838   if (_inc_cset_head == NULL) {
  1839     assert(_inc_cset_tail == NULL, "Invariant");
  1840     _inc_cset_tail = hr;
  1841   }
  1842   _inc_cset_head = hr;
  1843 }
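// A toy version of the incremental CSet list discipline above: eden regions
// are pushed on the head (LHS) as they are retired, survivors are appended
// at the tail (RHS) at the end of a pause, with the same head/tail
// bookkeeping. "Region" here is a stand-in type, not the HotSpot HeapRegion.
#include <cstdio>

struct Region { const char* name; Region* next; };

struct IncCSet {
  Region* head;
  Region* tail;

  void add_lhs(Region* r) {          // eden: prepend at the head
    r->next = head;
    if (head == NULL) { tail = r; }
    head = r;
  }
  void add_rhs(Region* r) {          // survivor: append at the tail
    if (tail == NULL) { head = r; } else { tail->next = r; }
    tail = r;
  }
};

int main() {
  Region e1 = { "eden1", NULL }, e2 = { "eden2", NULL }, s1 = { "surv1", NULL };
  IncCSet cs = { NULL, NULL };
  cs.add_rhs(&s1);                   // survivors appended at pause end
  cs.add_lhs(&e1);                   // eden regions prepended as retired
  cs.add_lhs(&e2);
  for (Region* r = cs.head; r != NULL; r = r->next) {
    std::printf("%s ", r->name);     // prints: eden2 eden1 surv1
  }
  std::printf("\n");
  return 0;
}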
  1845 #ifndef PRODUCT
  1846 void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream* st) {
  1847   assert(list_head == inc_cset_head() || list_head == collection_set(), "must be");
  1849   st->print_cr("\nCollection_set:");
  1850   HeapRegion* csr = list_head;
  1851   while (csr != NULL) {
  1852     HeapRegion* next = csr->next_in_collection_set();
  1853     assert(csr->in_collection_set(), "bad CS");
  1854     st->print_cr("  "HR_FORMAT", P: "PTR_FORMAT", N: "PTR_FORMAT", age: %4d",
  1855                  HR_FORMAT_PARAMS(csr),
  1856                  csr->prev_top_at_mark_start(), csr->next_top_at_mark_start(),
  1857                  csr->age_in_surv_rate_group_cond());
  1858     csr = next;
  1859   }
  1860 }
  1861 #endif // !PRODUCT
  1863 double G1CollectorPolicy::reclaimable_bytes_perc(size_t reclaimable_bytes) {
  1864   // Returns the given amount of reclaimable bytes (that represents
  1865   // the amount of reclaimable space still to be collected) as a
  1866   // percentage of the current heap capacity.
  1867   size_t capacity_bytes = _g1->capacity();
  1868   return (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;
  1869 }
  1871 bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
  1872                                                 const char* false_action_str) {
  1873   CollectionSetChooser* cset_chooser = _collectionSetChooser;
  1874   if (cset_chooser->is_empty()) {
  1875     ergo_verbose0(ErgoMixedGCs,
  1876                   false_action_str,
  1877                   ergo_format_reason("candidate old regions not available"));
  1878     return false;
  1879   }
  1881   // Is the amount of uncollected reclaimable space above G1HeapWastePercent?
  1882   size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes();
  1883   double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
  1884   double threshold = (double) G1HeapWastePercent;
  1885   if (reclaimable_perc <= threshold) {
  1886     ergo_verbose4(ErgoMixedGCs,
  1887               false_action_str,
  1888               ergo_format_reason("reclaimable percentage not over threshold")
  1889               ergo_format_region("candidate old regions")
  1890               ergo_format_byte_perc("reclaimable")
  1891               ergo_format_perc("threshold"),
  1892               cset_chooser->remaining_regions(),
  1893               reclaimable_bytes,
  1894               reclaimable_perc, threshold);
  1895     return false;
  1896   }
  1898   ergo_verbose4(ErgoMixedGCs,
  1899                 true_action_str,
  1900                 ergo_format_reason("candidate old regions available")
  1901                 ergo_format_region("candidate old regions")
  1902                 ergo_format_byte_perc("reclaimable")
  1903                 ergo_format_perc("threshold"),
  1904                 cset_chooser->remaining_regions(),
  1905                 reclaimable_bytes,
  1906                 reclaimable_perc, threshold);
  1907   return true;
  1908 }
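// A worked example of the waste check above, with assumed numbers: mixed GCs
// continue only while the space reclaimable from candidate old regions stays
// above G1HeapWastePercent of the committed heap.
#include <cstddef>
#include <cstdio>

int main() {
  const size_t M = 1024 * 1024;
  size_t capacity_bytes    = 2048 * M;   // committed heap (assumed)
  size_t reclaimable_bytes = 80 * M;     // left in the CSet chooser (assumed)
  double threshold         = 5.0;        // -XX:G1HeapWastePercent default

  double reclaimable_perc = (double)reclaimable_bytes * 100.0
                          / (double)capacity_bytes;          // ~3.9%
  bool do_mixed = reclaimable_perc > threshold;

  std::printf("reclaimable = %.1f%%, mixed GC? %s\n",
              reclaimable_perc, do_mixed ? "yes" : "no");    // no
  return 0;
}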
  1910 uint G1CollectorPolicy::calc_min_old_cset_length() {
  1911   // The min old CSet region bound is based on the maximum desired
  1912   // number of mixed GCs after a cycle. I.e., even if some old regions
  1913   // look expensive, we should add them to the CSet anyway to make
  1914   // sure we go through the available old regions in no more than the
  1915   // maximum desired number of mixed GCs.
  1916   //
  1917   // The calculation is based on the number of marked regions we added
  1918   // to the CSet chooser in the first place, not how many remain, so
  1919   // that the result is the same during all mixed GCs that follow a cycle.
  1921   const size_t region_num = (size_t) _collectionSetChooser->length();
  1922   const size_t gc_num = (size_t) MAX2(G1MixedGCCountTarget, (uintx) 1);
  1923   size_t result = region_num / gc_num;
  1924   // emulate ceiling
  1925   if (result * gc_num < region_num) {
  1926     result += 1;
  1927   }
  1928   return (uint) result;
  1929 }
  1931 uint G1CollectorPolicy::calc_max_old_cset_length() {
  1932   // The max old CSet region bound is based on the threshold expressed
  1933   // as a percentage of the heap size. I.e., it should bound the
  1934   // number of old regions added to the CSet irrespective of how many
  1935   // of them are available.
  1937   G1CollectedHeap* g1h = G1CollectedHeap::heap();
  1938   const size_t region_num = g1h->num_regions();
  1939   const size_t perc = (size_t) G1OldCSetRegionThresholdPercent;
  1940   size_t result = region_num * perc / 100;
  1941   // emulate ceiling
  1942   if (100 * result < region_num * perc) {
  1943     result += 1;
  1944   }
  1945   return (uint) result;
  1946 }
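// The two bounds above as a standalone sketch with assumed flag values. Both
// use the same "emulate ceiling" trick: integer division rounded up, so a
// non-empty chooser always yields at least one old region per mixed GC.
#include <cstddef>
#include <cstdio>

int main() {
  size_t marked_regions = 130;   // added to the CSet chooser at cleanup
  size_t heap_regions   = 2048;  // total regions in the heap
  size_t gc_num         = 8;     // -XX:G1MixedGCCountTarget default
  size_t perc           = 10;    // -XX:G1OldCSetRegionThresholdPercent default

  size_t min_len = marked_regions / gc_num;
  if (min_len * gc_num < marked_regions) min_len += 1;      // ceil -> 17

  size_t max_len = heap_regions * perc / 100;
  if (100 * max_len < heap_regions * perc) max_len += 1;    // ceil -> 205

  std::printf("old CSet per mixed GC: min %zu, max %zu\n", min_len, max_len);
  return 0;
}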
  1949 void G1CollectorPolicy::finalize_cset(double target_pause_time_ms, EvacuationInfo& evacuation_info) {
  1950   double young_start_time_sec = os::elapsedTime();
  1952   YoungList* young_list = _g1->young_list();
  1953   finalize_incremental_cset_building();
  1955   guarantee(target_pause_time_ms > 0.0,
  1956             err_msg("target_pause_time_ms = %1.6lf should be positive",
  1957                     target_pause_time_ms));
  1958   guarantee(_collection_set == NULL, "Precondition");
  1960   double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
  1961   double predicted_pause_time_ms = base_time_ms;
  1962   double time_remaining_ms = MAX2(target_pause_time_ms - base_time_ms, 0.0);
  1964   ergo_verbose4(ErgoCSetConstruction | ErgoHigh,
  1965                 "start choosing CSet",
  1966                 ergo_format_size("_pending_cards")
  1967                 ergo_format_ms("predicted base time")
  1968                 ergo_format_ms("remaining time")
  1969                 ergo_format_ms("target pause time"),
  1970                 _pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms);
  1972   _last_gc_was_young = gcs_are_young();
  1974   if (_last_gc_was_young) {
  1975     _trace_gen0_time_data.increment_young_collection_count();
  1976   } else {
  1977     _trace_gen0_time_data.increment_mixed_collection_count();
  1978   }
  1980   // The young list is laid out with the survivor regions from the
  1981   // previous pause appended to the RHS of the young list, i.e.
  1982   //   [Newly Young Regions ++ Survivors from last pause].
  1984   uint survivor_region_length = young_list->survivor_length();
  1985   uint eden_region_length = young_list->length() - survivor_region_length;
  1986   init_cset_region_lengths(eden_region_length, survivor_region_length);
  1988   HeapRegion* hr = young_list->first_survivor_region();
  1989   while (hr != NULL) {
  1990     assert(hr->is_survivor(), "badly formed young list");
  1991     // There is a convention that all the young regions in the CSet
  1992     // are tagged as "eden", so we do this for the survivors here. We
  1993     // use the special set_eden_pre_gc() as it doesn't check that the
  1994     // region is free (which is not the case here).
  1995     hr->set_eden_pre_gc();
  1996     hr = hr->get_next_young_region();
  1997   }
  1999   // Clear the fields that point to the survivor list - they are all young now.
  2000   young_list->clear_survivors();
  2002   _collection_set = _inc_cset_head;
  2003   _collection_set_bytes_used_before = _inc_cset_bytes_used_before;
  2004   time_remaining_ms = MAX2(time_remaining_ms - _inc_cset_predicted_elapsed_time_ms, 0.0);
  2005   predicted_pause_time_ms += _inc_cset_predicted_elapsed_time_ms;
  2007   ergo_verbose3(ErgoCSetConstruction | ErgoHigh,
  2008                 "add young regions to CSet",
  2009                 ergo_format_region("eden")
  2010                 ergo_format_region("survivors")
  2011                 ergo_format_ms("predicted young region time"),
  2012                 eden_region_length, survivor_region_length,
  2013                 _inc_cset_predicted_elapsed_time_ms);
  2015   // The number of recorded young regions is the incremental
  2016   // collection set's current size
  2017   set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);
  2019   double young_end_time_sec = os::elapsedTime();
  2020   phase_times()->record_young_cset_choice_time_ms((young_end_time_sec - young_start_time_sec) * 1000.0);
  2022   // Set the start of the non-young choice time.
  2023   double non_young_start_time_sec = young_end_time_sec;
  2025   if (!gcs_are_young()) {
  2026     CollectionSetChooser* cset_chooser = _collectionSetChooser;
  2027     cset_chooser->verify();
  2028     const uint min_old_cset_length = calc_min_old_cset_length();
  2029     const uint max_old_cset_length = calc_max_old_cset_length();
  2031     uint expensive_region_num = 0;
  2032     bool check_time_remaining = adaptive_young_list_length();
  2034     HeapRegion* hr = cset_chooser->peek();
  2035     while (hr != NULL) {
  2036       if (old_cset_region_length() >= max_old_cset_length) {
  2037         // Added maximum number of old regions to the CSet.
  2038         ergo_verbose2(ErgoCSetConstruction,
  2039                       "finish adding old regions to CSet",
  2040                       ergo_format_reason("old CSet region num reached max")
  2041                       ergo_format_region("old")
  2042                       ergo_format_region("max"),
  2043                       old_cset_region_length(), max_old_cset_length);
  2044         break;
  2045       }
  2048       // Stop adding regions if the remaining reclaimable space is
  2049       // not above G1HeapWastePercent.
  2050       size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes();
  2051       double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
  2052       double threshold = (double) G1HeapWastePercent;
  2053       if (reclaimable_perc <= threshold) {
  2054         // We've added enough old regions that the amount of uncollected
  2055         // reclaimable space is at or below the waste threshold. Stop
  2056         // adding old regions to the CSet.
  2057         ergo_verbose5(ErgoCSetConstruction,
  2058                       "finish adding old regions to CSet",
  2059                       ergo_format_reason("reclaimable percentage not over threshold")
  2060                       ergo_format_region("old")
  2061                       ergo_format_region("max")
  2062                       ergo_format_byte_perc("reclaimable")
  2063                       ergo_format_perc("threshold"),
  2064                       old_cset_region_length(),
  2065                       max_old_cset_length,
  2066                       reclaimable_bytes,
  2067                       reclaimable_perc, threshold);
  2068         break;
  2069       }
  2071       double predicted_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
  2072       if (check_time_remaining) {
  2073         if (predicted_time_ms > time_remaining_ms) {
  2074           // Too expensive for the current CSet.
  2076           if (old_cset_region_length() >= min_old_cset_length) {
  2077             // We have added the minimum number of old regions to the CSet,
  2078             // we are done with this CSet.
  2079             ergo_verbose4(ErgoCSetConstruction,
  2080                           "finish adding old regions to CSet",
  2081                           ergo_format_reason("predicted time is too high")
  2082                           ergo_format_ms("predicted time")
  2083                           ergo_format_ms("remaining time")
  2084                           ergo_format_region("old")
  2085                           ergo_format_region("min"),
  2086                           predicted_time_ms, time_remaining_ms,
  2087                           old_cset_region_length(), min_old_cset_length);
  2088             break;
  2089           }
  2091           // We'll add it anyway given that we haven't reached the
  2092           // minimum number of old regions.
  2093           expensive_region_num += 1;
  2094         }
  2095       } else {
  2096         if (old_cset_region_length() >= min_old_cset_length) {
  2097           // In the non-auto-tuning case, we'll finish adding regions
  2098           // to the CSet if we reach the minimum.
  2099           ergo_verbose2(ErgoCSetConstruction,
  2100                         "finish adding old regions to CSet",
  2101                         ergo_format_reason("old CSet region num reached min")
  2102                         ergo_format_region("old")
  2103                         ergo_format_region("min"),
  2104                         old_cset_region_length(), min_old_cset_length);
  2105           break;
  2106         }
  2107       }
  2109       // We will add this region to the CSet.
  2110       time_remaining_ms = MAX2(time_remaining_ms - predicted_time_ms, 0.0);
  2111       predicted_pause_time_ms += predicted_time_ms;
  2112       cset_chooser->remove_and_move_to_next(hr);
  2113       _g1->old_set_remove(hr);
  2114       add_old_region_to_cset(hr);
  2116       hr = cset_chooser->peek();
  2117     }
  2118     if (hr == NULL) {
  2119       ergo_verbose0(ErgoCSetConstruction,
  2120                     "finish adding old regions to CSet",
  2121                     ergo_format_reason("candidate old regions not available"));
  2122     }
  2124     if (expensive_region_num > 0) {
  2125       // We print the information once here at the end, predicated on
  2126       // whether we added any apparently expensive regions or not, to
  2127       // avoid generating output per region.
  2128       ergo_verbose4(ErgoCSetConstruction,
  2129                     "added expensive regions to CSet",
  2130                     ergo_format_reason("old CSet region num not reached min")
  2131                     ergo_format_region("old")
  2132                     ergo_format_region("expensive")
  2133                     ergo_format_region("min")
  2134                     ergo_format_ms("remaining time"),
  2135                     old_cset_region_length(),
  2136                     expensive_region_num,
  2137                     min_old_cset_length,
  2138                     time_remaining_ms);
  2139     }
  2141     cset_chooser->verify();
  2142   }
  2144   stop_incremental_cset_building();
  2146   ergo_verbose5(ErgoCSetConstruction,
  2147                 "finish choosing CSet",
  2148                 ergo_format_region("eden")
  2149                 ergo_format_region("survivors")
  2150                 ergo_format_region("old")
  2151                 ergo_format_ms("predicted pause time")
  2152                 ergo_format_ms("target pause time"),
  2153                 eden_region_length, survivor_region_length,
  2154                 old_cset_region_length(),
  2155                 predicted_pause_time_ms, target_pause_time_ms);
  2157   double non_young_end_time_sec = os::elapsedTime();
  2158   phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);
  2159   evacuation_info.set_collectionset_regions(cset_region_length());
  2160 }
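// A condensed sketch of the old-region selection loop above, covering the
// max bound and the pause-time budget with the min-bound override (the
// reclaimable-space check is omitted for brevity). Candidate costs here are
// made-up inputs, not HotSpot predictions.
#include <cstdio>
#include <vector>

int main() {
  std::vector<double> candidate_cost_ms;   // predicted evacuation time each
  candidate_cost_ms.push_back(3.0);
  candidate_cost_ms.push_back(4.0);
  candidate_cost_ms.push_back(6.0);
  candidate_cost_ms.push_back(9.0);

  const unsigned min_old = 2, max_old = 4;
  double time_remaining_ms = 10.0;

  unsigned taken = 0;
  for (size_t i = 0; i < candidate_cost_ms.size(); ++i) {
    double cost = candidate_cost_ms[i];
    if (taken >= max_old) break;                   // reached the max bound
    if (cost > time_remaining_ms && taken >= min_old)
      break;                                       // too expensive, min met
    // Below the min bound a region is taken even if it busts the budget.
    time_remaining_ms = (time_remaining_ms > cost) ? time_remaining_ms - cost
                                                   : 0.0;
    ++taken;
  }
  std::printf("old regions chosen: %u\n", taken);  // prints 2
  return 0;
}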
  2162 void TraceGen0TimeData::record_start_collection(double time_to_stop_the_world_ms) {
  2163   if (TraceGen0Time) {
  2164     _all_stop_world_times_ms.add(time_to_stop_the_world_ms);
  2165   }
  2166 }
  2168 void TraceGen0TimeData::record_yield_time(double yield_time_ms) {
  2169   if (TraceGen0Time) {
  2170     _all_yield_times_ms.add(yield_time_ms);
  2171   }
  2172 }
  2174 void TraceGen0TimeData::record_end_collection(double pause_time_ms, G1GCPhaseTimes* phase_times) {
  2175   if (TraceGen0Time) {
  2176     _total.add(pause_time_ms);
  2177     _other.add(pause_time_ms - phase_times->accounted_time_ms());
  2178     _root_region_scan_wait.add(phase_times->root_region_scan_wait_time_ms());
  2179     _parallel.add(phase_times->cur_collection_par_time_ms());
  2180     _ext_root_scan.add(phase_times->average_last_ext_root_scan_time());
  2181     _satb_filtering.add(phase_times->average_last_satb_filtering_times_ms());
  2182     _update_rs.add(phase_times->average_last_update_rs_time());
  2183     _scan_rs.add(phase_times->average_last_scan_rs_time());
  2184     _obj_copy.add(phase_times->average_last_obj_copy_time());
  2185     _termination.add(phase_times->average_last_termination_time());
  2187     double parallel_known_time = phase_times->average_last_ext_root_scan_time() +
  2188       phase_times->average_last_satb_filtering_times_ms() +
  2189       phase_times->average_last_update_rs_time() +
  2190       phase_times->average_last_scan_rs_time() +
  2191       phase_times->average_last_obj_copy_time() +
  2192       phase_times->average_last_termination_time();
  2194     double parallel_other_time = phase_times->cur_collection_par_time_ms() - parallel_known_time;
  2195     _parallel_other.add(parallel_other_time);
  2196     _clear_ct.add(phase_times->cur_clear_ct_time_ms());
  2197   }
  2198 }
  2200 void TraceGen0TimeData::increment_young_collection_count() {
  2201   if (TraceGen0Time) {
  2202     ++_young_pause_num;
  2203   }
  2204 }
  2206 void TraceGen0TimeData::increment_mixed_collection_count() {
  2207   if (TraceGen0Time) {
  2208     ++_mixed_pause_num;
  2209   }
  2210 }
  2212 void TraceGen0TimeData::print_summary(const char* str,
  2213                                       const NumberSeq* seq) const {
  2214   double sum = seq->sum();
  2215   gclog_or_tty->print_cr("%-27s = %8.2lf s (avg = %8.2lf ms)",
  2216                 str, sum / 1000.0, seq->avg());
  2217 }
  2219 void TraceGen0TimeData::print_summary_sd(const char* str,
  2220                                          const NumberSeq* seq) const {
  2221   print_summary(str, seq);
  2222   gclog_or_tty->print_cr("%+45s = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
  2223                 "(num", seq->num(), seq->sd(), seq->maximum());
  2224 }
  2226 void TraceGen0TimeData::print() const {
  2227   if (!TraceGen0Time) {
  2228     return;
  2229   }
  2231   gclog_or_tty->print_cr("ALL PAUSES");
  2232   print_summary_sd("   Total", &_total);
  2233   gclog_or_tty->cr();
  2234   gclog_or_tty->cr();
  2235   gclog_or_tty->print_cr("   Young GC Pauses: %8d", _young_pause_num);
  2236   gclog_or_tty->print_cr("   Mixed GC Pauses: %8d", _mixed_pause_num);
  2237   gclog_or_tty->cr();
  2239   gclog_or_tty->print_cr("EVACUATION PAUSES");
  2241   if (_young_pause_num == 0 && _mixed_pause_num == 0) {
  2242     gclog_or_tty->print_cr("none");
  2243   } else {
  2244     print_summary_sd("   Evacuation Pauses", &_total);
  2245     print_summary("      Root Region Scan Wait", &_root_region_scan_wait);
  2246     print_summary("      Parallel Time", &_parallel);
  2247     print_summary("         Ext Root Scanning", &_ext_root_scan);
  2248     print_summary("         SATB Filtering", &_satb_filtering);
  2249     print_summary("         Update RS", &_update_rs);
  2250     print_summary("         Scan RS", &_scan_rs);
  2251     print_summary("         Object Copy", &_obj_copy);
  2252     print_summary("         Termination", &_termination);
  2253     print_summary("         Parallel Other", &_parallel_other);
  2254     print_summary("      Clear CT", &_clear_ct);
  2255     print_summary("      Other", &_other);
  2256   }
  2257   gclog_or_tty->cr();
  2259   gclog_or_tty->print_cr("MISC");
  2260   print_summary_sd("   Stop World", &_all_stop_world_times_ms);
  2261   print_summary_sd("   Yields", &_all_yield_times_ms);
  2262 }
  2264 void TraceGen1TimeData::record_full_collection(double full_gc_time_ms) {
  2265   if (TraceGen1Time) {
  2266     _all_full_gc_times.add(full_gc_time_ms);
  2267   }
  2268 }
  2270 void TraceGen1TimeData::print() const {
  2271   if (!TraceGen1Time) {
  2272     return;
  2273   }
  2275   if (_all_full_gc_times.num() > 0) {
  2276     gclog_or_tty->print("\n%4d full_gcs: total time = %8.2f s",
  2277       _all_full_gc_times.num(),
  2278       _all_full_gc_times.sum() / 1000.0);
  2279     gclog_or_tty->print_cr(" (avg = %8.2fms).", _all_full_gc_times.avg());
  2280     gclog_or_tty->print_cr("                     [std. dev = %8.2f ms, max = %8.2f ms]",
  2281       _all_full_gc_times.sd(),
  2282       _all_full_gc_times.maximum());
  2283   }
  2284 }
