src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp

Mon, 24 Mar 2014 15:30:14 +0100

author
tschatzl
date
Mon, 24 Mar 2014 15:30:14 +0100
changeset 6402
191174b49bec
parent 6085
8f07aa079343
child 6609
270d7cb38f40
permissions
-rw-r--r--

8035406: Improve data structure for Code Cache remembered sets
Summary: Change the code cache remembered sets data structure from a GrowableArray to a chunked list of nmethods. This makes the data structure more amenable to parallelization, and decreases freeing time.
Reviewed-by: mgerdin, brutisso

     1 /*
     2  * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #include "precompiled.hpp"
    26 #include "gc_implementation/g1/concurrentG1Refine.hpp"
    27 #include "gc_implementation/g1/concurrentMark.hpp"
    28 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
    29 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
    30 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
    31 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
    32 #include "gc_implementation/g1/g1GCPhaseTimes.hpp"
    33 #include "gc_implementation/g1/g1Log.hpp"
    34 #include "gc_implementation/g1/heapRegionRemSet.hpp"
    35 #include "gc_implementation/shared/gcPolicyCounters.hpp"
    36 #include "runtime/arguments.hpp"
    37 #include "runtime/java.hpp"
    38 #include "runtime/mutexLocker.hpp"
    39 #include "utilities/debug.hpp"
    41 // Different defaults for different number of GC threads
    42 // They were chosen by running GCOld and SPECjbb on debris with different
    43 //   numbers of GC threads and choosing them based on the results
    45 // all the same
    46 static double rs_length_diff_defaults[] = {
    47   0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
    48 };
    50 static double cost_per_card_ms_defaults[] = {
    51   0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
    52 };
    54 // all the same
    55 static double young_cards_per_entry_ratio_defaults[] = {
    56   1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
    57 };
    59 static double cost_per_entry_ms_defaults[] = {
    60   0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005
    61 };
    63 static double cost_per_byte_ms_defaults[] = {
    64   0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009
    65 };
    67 // these should be pretty consistent
    68 static double constant_other_time_ms_defaults[] = {
    69   5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0
    70 };
    73 static double young_other_cost_per_region_ms_defaults[] = {
    74   0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1
    75 };
    77 static double non_young_other_cost_per_region_ms_defaults[] = {
    78   1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
    79 };
    81 G1CollectorPolicy::G1CollectorPolicy() :
    82   _parallel_gc_threads(G1CollectedHeap::use_parallel_gc_threads()
    83                         ? ParallelGCThreads : 1),
    85   _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
    86   _stop_world_start(0.0),
    88   _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
    89   _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
    91   _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
    92   _prev_collection_pause_end_ms(0.0),
    93   _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
    94   _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
    95   _young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
    96   _mixed_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
    97   _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
    98   _mixed_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
    99   _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
   100   _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
   101   _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
   102   _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
   103   _non_young_other_cost_per_region_ms_seq(
   104                                          new TruncatedSeq(TruncatedSeqLength)),
   106   _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
   107   _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),
   109   _pause_time_target_ms((double) MaxGCPauseMillis),
   111   _gcs_are_young(true),
   113   _during_marking(false),
   114   _in_marking_window(false),
   115   _in_marking_window_im(false),
   117   _recent_prev_end_times_for_all_gcs_sec(
   118                                 new TruncatedSeq(NumPrevPausesForHeuristics)),
   120   _recent_avg_pause_time_ratio(0.0),
   122   _initiate_conc_mark_if_possible(false),
   123   _during_initial_mark_pause(false),
   124   _last_young_gc(false),
   125   _last_gc_was_young(false),
   127   _eden_used_bytes_before_gc(0),
   128   _survivor_used_bytes_before_gc(0),
   129   _heap_used_bytes_before_gc(0),
   130   _metaspace_used_bytes_before_gc(0),
   131   _eden_capacity_bytes_before_gc(0),
   132   _heap_capacity_bytes_before_gc(0),
   134   _eden_cset_region_length(0),
   135   _survivor_cset_region_length(0),
   136   _old_cset_region_length(0),
   138   _collection_set(NULL),
   139   _collection_set_bytes_used_before(0),
   141   // Incremental CSet attributes
   142   _inc_cset_build_state(Inactive),
   143   _inc_cset_head(NULL),
   144   _inc_cset_tail(NULL),
   145   _inc_cset_bytes_used_before(0),
   146   _inc_cset_max_finger(NULL),
   147   _inc_cset_recorded_rs_lengths(0),
   148   _inc_cset_recorded_rs_lengths_diffs(0),
   149   _inc_cset_predicted_elapsed_time_ms(0.0),
   150   _inc_cset_predicted_elapsed_time_ms_diffs(0.0),
   152 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
   153 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
   154 #endif // _MSC_VER
   156   _short_lived_surv_rate_group(new SurvRateGroup(this, "Short Lived",
   157                                                  G1YoungSurvRateNumRegionsSummary)),
   158   _survivor_surv_rate_group(new SurvRateGroup(this, "Survivor",
   159                                               G1YoungSurvRateNumRegionsSummary)),
   160   // add here any more surv rate groups
   161   _recorded_survivor_regions(0),
   162   _recorded_survivor_head(NULL),
   163   _recorded_survivor_tail(NULL),
   164   _survivors_age_table(true),
   166   _gc_overhead_perc(0.0) {
   168   // Set up the region size and associated fields. Given that the
   169   // policy is created before the heap, we have to set this up here,
   170   // so it's done as soon as possible.
   172   // It would have been natural to pass initial_heap_byte_size() and
   173   // max_heap_byte_size() to setup_heap_region_size() but those have
   174   // not been set up at this point since they should be aligned with
   175   // the region size. So, there is a circular dependency here. We base
   176   // the region size on the heap size, but the heap size should be
   177   // aligned with the region size. To get around this we use the
   178   // unaligned values for the heap.
   179   HeapRegion::setup_heap_region_size(InitialHeapSize, MaxHeapSize);
   180   HeapRegionRemSet::setup_remset_size();
   182   G1ErgoVerbose::initialize();
   183   if (PrintAdaptiveSizePolicy) {
   184     // Currently, we only use a single switch for all the heuristics.
   185     G1ErgoVerbose::set_enabled(true);
   186     // Given that we don't currently have a verboseness level
   187     // parameter, we'll hardcode this to high. This can be easily
   188     // changed in the future.
   189     G1ErgoVerbose::set_level(ErgoHigh);
   190   } else {
   191     G1ErgoVerbose::set_enabled(false);
   192   }
   194   // Verify PLAB sizes
   195   const size_t region_size = HeapRegion::GrainWords;
   196   if (YoungPLABSize > region_size || OldPLABSize > region_size) {
   197     char buffer[128];
   198     jio_snprintf(buffer, sizeof(buffer), "%sPLABSize should be at most "SIZE_FORMAT,
   199                  OldPLABSize > region_size ? "Old" : "Young", region_size);
   200     vm_exit_during_initialization(buffer);
   201   }
   203   _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
   204   _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;
   206   _phase_times = new G1GCPhaseTimes(_parallel_gc_threads);
   208   int index = MIN2(_parallel_gc_threads - 1, 7);
   210   _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
   211   _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
   212   _young_cards_per_entry_ratio_seq->add(
   213                                   young_cards_per_entry_ratio_defaults[index]);
   214   _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
   215   _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
   216   _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
   217   _young_other_cost_per_region_ms_seq->add(
   218                                young_other_cost_per_region_ms_defaults[index]);
   219   _non_young_other_cost_per_region_ms_seq->add(
   220                            non_young_other_cost_per_region_ms_defaults[index]);
   222   // Below, we might need to calculate the pause time target based on
   223   // the pause interval. When we do so we are going to give G1 maximum
   224   // flexibility and allow it to do pauses when it needs to. So, we'll
   225   // arrange that the pause interval to be pause time target + 1 to
   226   // ensure that a) the pause time target is maximized with respect to
   227   // the pause interval and b) we maintain the invariant that pause
   228   // time target < pause interval. If the user does not want this
   229   // maximum flexibility, they will have to set the pause interval
   230   // explicitly.
   232   // First make sure that, if either parameter is set, its value is
   233   // reasonable.
   234   if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
   235     if (MaxGCPauseMillis < 1) {
   236       vm_exit_during_initialization("MaxGCPauseMillis should be "
   237                                     "greater than 0");
   238     }
   239   }
   240   if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
   241     if (GCPauseIntervalMillis < 1) {
   242       vm_exit_during_initialization("GCPauseIntervalMillis should be "
   243                                     "greater than 0");
   244     }
   245   }
   247   // Then, if the pause time target parameter was not set, set it to
   248   // the default value.
   249   if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
   250     if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
   251       // The default pause time target in G1 is 200ms
   252       FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
   253     } else {
   254       // We do not allow the pause interval to be set without the
   255       // pause time target
   256       vm_exit_during_initialization("GCPauseIntervalMillis cannot be set "
   257                                     "without setting MaxGCPauseMillis");
   258     }
   259   }
   261   // Then, if the interval parameter was not set, set it according to
   262   // the pause time target (this will also deal with the case when the
   263   // pause time target is the default value).
   264   if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
   265     FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
   266   }
   268   // Finally, make sure that the two parameters are consistent.
   269   if (MaxGCPauseMillis >= GCPauseIntervalMillis) {
   270     char buffer[256];
   271     jio_snprintf(buffer, 256,
   272                  "MaxGCPauseMillis (%u) should be less than "
   273                  "GCPauseIntervalMillis (%u)",
   274                  MaxGCPauseMillis, GCPauseIntervalMillis);
   275     vm_exit_during_initialization(buffer);
   276   }
   278   double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
   279   double time_slice  = (double) GCPauseIntervalMillis / 1000.0;
   280   _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);
   282   uintx confidence_perc = G1ConfidencePercent;
   283   // Put an artificial ceiling on this so that it's not set to a silly value.
   284   if (confidence_perc > 100) {
   285     confidence_perc = 100;
   286     warning("G1ConfidencePercent is set to a value that is too large, "
   287             "it's been updated to %u", confidence_perc);
   288   }
   289   _sigma = (double) confidence_perc / 100.0;
   291   // start conservatively (around 50ms is about right)
   292   _concurrent_mark_remark_times_ms->add(0.05);
   293   _concurrent_mark_cleanup_times_ms->add(0.20);
   294   _tenuring_threshold = MaxTenuringThreshold;
   295   // _max_survivor_regions will be calculated by
   296   // update_young_list_target_length() during initialization.
   297   _max_survivor_regions = 0;
   299   assert(GCTimeRatio > 0,
   300          "we should have set it to a default value set_g1_gc_flags() "
   301          "if a user set it to 0");
   302   _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));
   304   uintx reserve_perc = G1ReservePercent;
   305   // Put an artificial ceiling on this so that it's not set to a silly value.
   306   if (reserve_perc > 50) {
   307     reserve_perc = 50;
   308     warning("G1ReservePercent is set to a value that is too large, "
   309             "it's been updated to %u", reserve_perc);
   310   }
   311   _reserve_factor = (double) reserve_perc / 100.0;
   312   // This will be set when the heap is expanded
   313   // for the first time during initialization.
   314   _reserve_regions = 0;
   316   _collectionSetChooser = new CollectionSetChooser();
   317 }
   319 void G1CollectorPolicy::initialize_alignments() {
   320   _space_alignment = HeapRegion::GrainBytes;
   321   size_t card_table_alignment = GenRemSet::max_alignment_constraint(GenRemSet::CardTable);
   322   size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
   323   _heap_alignment = MAX3(card_table_alignment, _space_alignment, page_size);
   324 }
   326 void G1CollectorPolicy::initialize_flags() {
   327   if (G1HeapRegionSize != HeapRegion::GrainBytes) {
   328     FLAG_SET_ERGO(uintx, G1HeapRegionSize, HeapRegion::GrainBytes);
   329   }
   331   if (SurvivorRatio < 1) {
   332     vm_exit_during_initialization("Invalid survivor ratio specified");
   333   }
   334   CollectorPolicy::initialize_flags();
   335   _young_gen_sizer = new G1YoungGenSizer(); // Must be after call to initialize_flags
   336 }
   338 void G1CollectorPolicy::post_heap_initialize() {
   339   uintx max_regions = G1CollectedHeap::heap()->max_regions();
   340   size_t max_young_size = (size_t)_young_gen_sizer->max_young_length(max_regions) * HeapRegion::GrainBytes;
   341   if (max_young_size != MaxNewSize) {
   342     FLAG_SET_ERGO(uintx, MaxNewSize, max_young_size);
   343   }
   344 }
   346 G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(true),
   347         _min_desired_young_length(0), _max_desired_young_length(0) {
   348   if (FLAG_IS_CMDLINE(NewRatio)) {
   349     if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
   350       warning("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
   351     } else {
   352       _sizer_kind = SizerNewRatio;
   353       _adaptive_size = false;
   354       return;
   355     }
   356   }
   358   if (NewSize > MaxNewSize) {
   359     if (FLAG_IS_CMDLINE(MaxNewSize)) {
   360       warning("NewSize (" SIZE_FORMAT "k) is greater than the MaxNewSize (" SIZE_FORMAT "k). "
   361               "A new max generation size of " SIZE_FORMAT "k will be used.",
   362               NewSize/K, MaxNewSize/K, NewSize/K);
   363     }
   364     MaxNewSize = NewSize;
   365   }
   367   if (FLAG_IS_CMDLINE(NewSize)) {
   368     _min_desired_young_length = MAX2((uint) (NewSize / HeapRegion::GrainBytes),
   369                                      1U);
   370     if (FLAG_IS_CMDLINE(MaxNewSize)) {
   371       _max_desired_young_length =
   372                              MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes),
   373                                   1U);
   374       _sizer_kind = SizerMaxAndNewSize;
   375       _adaptive_size = _min_desired_young_length == _max_desired_young_length;
   376     } else {
   377       _sizer_kind = SizerNewSizeOnly;
   378     }
   379   } else if (FLAG_IS_CMDLINE(MaxNewSize)) {
   380     _max_desired_young_length =
   381                              MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes),
   382                                   1U);
   383     _sizer_kind = SizerMaxNewSizeOnly;
   384   }
   385 }
   387 uint G1YoungGenSizer::calculate_default_min_length(uint new_number_of_heap_regions) {
   388   uint default_value = (new_number_of_heap_regions * G1NewSizePercent) / 100;
   389   return MAX2(1U, default_value);
   390 }
   392 uint G1YoungGenSizer::calculate_default_max_length(uint new_number_of_heap_regions) {
   393   uint default_value = (new_number_of_heap_regions * G1MaxNewSizePercent) / 100;
   394   return MAX2(1U, default_value);
   395 }
   397 void G1YoungGenSizer::recalculate_min_max_young_length(uint number_of_heap_regions, uint* min_young_length, uint* max_young_length) {
   398   assert(number_of_heap_regions > 0, "Heap must be initialized");
   400   switch (_sizer_kind) {
   401     case SizerDefaults:
   402       *min_young_length = calculate_default_min_length(number_of_heap_regions);
   403       *max_young_length = calculate_default_max_length(number_of_heap_regions);
   404       break;
   405     case SizerNewSizeOnly:
   406       *max_young_length = calculate_default_max_length(number_of_heap_regions);
   407       *max_young_length = MAX2(*min_young_length, *max_young_length);
   408       break;
   409     case SizerMaxNewSizeOnly:
   410       *min_young_length = calculate_default_min_length(number_of_heap_regions);
   411       *min_young_length = MIN2(*min_young_length, *max_young_length);
   412       break;
   413     case SizerMaxAndNewSize:
   414       // Do nothing. Values set on the command line, don't update them at runtime.
   415       break;
   416     case SizerNewRatio:
   417       *min_young_length = number_of_heap_regions / (NewRatio + 1);
   418       *max_young_length = *min_young_length;
   419       break;
   420     default:
   421       ShouldNotReachHere();
   422   }
   424   assert(*min_young_length <= *max_young_length, "Invalid min/max young gen size values");
   425 }
   427 uint G1YoungGenSizer::max_young_length(uint number_of_heap_regions) {
   428   // We need to pass the desired values because recalculation may not update these
   429   // values in some cases.
   430   uint temp = _min_desired_young_length;
   431   uint result = _max_desired_young_length;
   432   recalculate_min_max_young_length(number_of_heap_regions, &temp, &result);
   433   return result;
   434 }
   436 void G1YoungGenSizer::heap_size_changed(uint new_number_of_heap_regions) {
   437   recalculate_min_max_young_length(new_number_of_heap_regions, &_min_desired_young_length,
   438           &_max_desired_young_length);
   439 }
   441 void G1CollectorPolicy::init() {
   442   // Set aside an initial future to_space.
   443   _g1 = G1CollectedHeap::heap();
   445   assert(Heap_lock->owned_by_self(), "Locking discipline.");
   447   initialize_gc_policy_counters();
   449   if (adaptive_young_list_length()) {
   450     _young_list_fixed_length = 0;
   451   } else {
   452     _young_list_fixed_length = _young_gen_sizer->min_desired_young_length();
   453   }
   454   _free_regions_at_end_of_collection = _g1->free_regions();
   455   update_young_list_target_length();
   457   // We may immediately start allocating regions and placing them on the
   458   // collection set list. Initialize the per-collection set info
   459   start_incremental_cset_building();
   460 }
   462 // Create the jstat counters for the policy.
   463 void G1CollectorPolicy::initialize_gc_policy_counters() {
   464   _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3);
   465 }
   467 bool G1CollectorPolicy::predict_will_fit(uint young_length,
   468                                          double base_time_ms,
   469                                          uint base_free_regions,
   470                                          double target_pause_time_ms) {
   471   if (young_length >= base_free_regions) {
   472     // end condition 1: not enough space for the young regions
   473     return false;
   474   }
   476   double accum_surv_rate = accum_yg_surv_rate_pred((int) young_length - 1);
   477   size_t bytes_to_copy =
   478                (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
   479   double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
   480   double young_other_time_ms = predict_young_other_time_ms(young_length);
   481   double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms;
   482   if (pause_time_ms > target_pause_time_ms) {
   483     // end condition 2: prediction is over the target pause time
   484     return false;
   485   }
   487   size_t free_bytes =
   488                    (base_free_regions - young_length) * HeapRegion::GrainBytes;
   489   if ((2.0 * sigma()) * (double) bytes_to_copy > (double) free_bytes) {
   490     // end condition 3: out-of-space (conservatively!)
   491     return false;
   492   }
   494   // success!
   495   return true;
   496 }
   498 void G1CollectorPolicy::record_new_heap_size(uint new_number_of_regions) {
   499   // re-calculate the necessary reserve
   500   double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
   501   // We use ceiling so that if reserve_regions_d is > 0.0 (but
   502   // smaller than 1.0) we'll get 1.
   503   _reserve_regions = (uint) ceil(reserve_regions_d);
   505   _young_gen_sizer->heap_size_changed(new_number_of_regions);
   506 }
   508 uint G1CollectorPolicy::calculate_young_list_desired_min_length(
   509                                                        uint base_min_length) {
   510   uint desired_min_length = 0;
   511   if (adaptive_young_list_length()) {
   512     if (_alloc_rate_ms_seq->num() > 3) {
   513       double now_sec = os::elapsedTime();
   514       double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
   515       double alloc_rate_ms = predict_alloc_rate_ms();
   516       desired_min_length = (uint) ceil(alloc_rate_ms * when_ms);
   517     } else {
   518       // otherwise we don't have enough info to make the prediction
   519     }
   520   }
   521   desired_min_length += base_min_length;
   522   // make sure we don't go below any user-defined minimum bound
   523   return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length);
   524 }
   526 uint G1CollectorPolicy::calculate_young_list_desired_max_length() {
   527   // Here, we might want to also take into account any additional
   528   // constraints (i.e., user-defined minimum bound). Currently, we
   529   // effectively don't set this bound.
   530   return _young_gen_sizer->max_desired_young_length();
   531 }
   533 void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
   534   if (rs_lengths == (size_t) -1) {
   535     // if it's set to the default value (-1), we should predict it;
   536     // otherwise, use the given value.
   537     rs_lengths = (size_t) get_new_prediction(_rs_lengths_seq);
   538   }
   540   // Calculate the absolute and desired min bounds.
   542   // This is how many young regions we already have (currently: the survivors).
   543   uint base_min_length = recorded_survivor_regions();
   544   // This is the absolute minimum young length, which ensures that we
   545   // can allocate one eden region in the worst-case.
   546   uint absolute_min_length = base_min_length + 1;
   547   uint desired_min_length =
   548                      calculate_young_list_desired_min_length(base_min_length);
   549   if (desired_min_length < absolute_min_length) {
   550     desired_min_length = absolute_min_length;
   551   }
   553   // Calculate the absolute and desired max bounds.
   555   // We will try our best not to "eat" into the reserve.
   556   uint absolute_max_length = 0;
   557   if (_free_regions_at_end_of_collection > _reserve_regions) {
   558     absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
   559   }
   560   uint desired_max_length = calculate_young_list_desired_max_length();
   561   if (desired_max_length > absolute_max_length) {
   562     desired_max_length = absolute_max_length;
   563   }
   565   uint young_list_target_length = 0;
   566   if (adaptive_young_list_length()) {
   567     if (gcs_are_young()) {
   568       young_list_target_length =
   569                         calculate_young_list_target_length(rs_lengths,
   570                                                            base_min_length,
   571                                                            desired_min_length,
   572                                                            desired_max_length);
   573       _rs_lengths_prediction = rs_lengths;
   574     } else {
   575       // Don't calculate anything and let the code below bound it to
   576       // the desired_min_length, i.e., do the next GC as soon as
   577       // possible to maximize how many old regions we can add to it.
   578     }
   579   } else {
   580     // The user asked for a fixed young gen so we'll fix the young gen
   581     // whether the next GC is young or mixed.
   582     young_list_target_length = _young_list_fixed_length;
   583   }
   585   // Make sure we don't go over the desired max length, nor under the
   586   // desired min length. In case they clash, desired_min_length wins
   587   // which is why that test is second.
   588   if (young_list_target_length > desired_max_length) {
   589     young_list_target_length = desired_max_length;
   590   }
   591   if (young_list_target_length < desired_min_length) {
   592     young_list_target_length = desired_min_length;
   593   }
   595   assert(young_list_target_length > recorded_survivor_regions(),
   596          "we should be able to allocate at least one eden region");
   597   assert(young_list_target_length >= absolute_min_length, "post-condition");
   598   _young_list_target_length = young_list_target_length;
   600   update_max_gc_locker_expansion();
   601 }
   603 uint
   604 G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
   605                                                      uint base_min_length,
   606                                                      uint desired_min_length,
   607                                                      uint desired_max_length) {
   608   assert(adaptive_young_list_length(), "pre-condition");
   609   assert(gcs_are_young(), "only call this for young GCs");
   611   // In case some edge-condition makes the desired max length too small...
   612   if (desired_max_length <= desired_min_length) {
   613     return desired_min_length;
   614   }
   616   // We'll adjust min_young_length and max_young_length not to include
   617   // the already allocated young regions (i.e., so they reflect the
   618   // min and max eden regions we'll allocate). The base_min_length
   619   // will be reflected in the predictions by the
   620   // survivor_regions_evac_time prediction.
   621   assert(desired_min_length > base_min_length, "invariant");
   622   uint min_young_length = desired_min_length - base_min_length;
   623   assert(desired_max_length > base_min_length, "invariant");
   624   uint max_young_length = desired_max_length - base_min_length;
   626   double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
   627   double survivor_regions_evac_time = predict_survivor_regions_evac_time();
   628   size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq);
   629   size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();
   630   size_t scanned_cards = predict_young_card_num(adj_rs_lengths);
   631   double base_time_ms =
   632     predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
   633     survivor_regions_evac_time;
   634   uint available_free_regions = _free_regions_at_end_of_collection;
   635   uint base_free_regions = 0;
   636   if (available_free_regions > _reserve_regions) {
   637     base_free_regions = available_free_regions - _reserve_regions;
   638   }
   640   // Here, we will make sure that the shortest young length that
   641   // makes sense fits within the target pause time.
   643   if (predict_will_fit(min_young_length, base_time_ms,
   644                        base_free_regions, target_pause_time_ms)) {
   645     // The shortest young length will fit into the target pause time;
   646     // we'll now check whether the absolute maximum number of young
   647     // regions will fit in the target pause time. If not, we'll do
   648     // a binary search between min_young_length and max_young_length.
   649     if (predict_will_fit(max_young_length, base_time_ms,
   650                          base_free_regions, target_pause_time_ms)) {
   651       // The maximum young length will fit into the target pause time.
   652       // We are done so set min young length to the maximum length (as
   653       // the result is assumed to be returned in min_young_length).
   654       min_young_length = max_young_length;
   655     } else {
   656       // The maximum possible number of young regions will not fit within
   657       // the target pause time so we'll search for the optimal
   658       // length. The loop invariants are:
   659       //
   660       // min_young_length < max_young_length
   661       // min_young_length is known to fit into the target pause time
   662       // max_young_length is known not to fit into the target pause time
   663       //
   664       // Going into the loop we know the above hold as we've just
   665       // checked them. Every time around the loop we check whether
   666       // the middle value between min_young_length and
   667       // max_young_length fits into the target pause time. If it
   668       // does, it becomes the new min. If it doesn't, it becomes
   669       // the new max. This way we maintain the loop invariants.
   671       assert(min_young_length < max_young_length, "invariant");
   672       uint diff = (max_young_length - min_young_length) / 2;
   673       while (diff > 0) {
   674         uint young_length = min_young_length + diff;
   675         if (predict_will_fit(young_length, base_time_ms,
   676                              base_free_regions, target_pause_time_ms)) {
   677           min_young_length = young_length;
   678         } else {
   679           max_young_length = young_length;
   680         }
   681         assert(min_young_length <  max_young_length, "invariant");
   682         diff = (max_young_length - min_young_length) / 2;
   683       }
   684       // The results is min_young_length which, according to the
   685       // loop invariants, should fit within the target pause time.
   687       // These are the post-conditions of the binary search above:
   688       assert(min_young_length < max_young_length,
   689              "otherwise we should have discovered that max_young_length "
   690              "fits into the pause target and not done the binary search");
   691       assert(predict_will_fit(min_young_length, base_time_ms,
   692                               base_free_regions, target_pause_time_ms),
   693              "min_young_length, the result of the binary search, should "
   694              "fit into the pause target");
   695       assert(!predict_will_fit(min_young_length + 1, base_time_ms,
   696                                base_free_regions, target_pause_time_ms),
   697              "min_young_length, the result of the binary search, should be "
   698              "optimal, so no larger length should fit into the pause target");
   699     }
   700   } else {
   701     // Even the minimum length doesn't fit into the pause time
   702     // target, return it as the result nevertheless.
   703   }
   704   return base_min_length + min_young_length;
   705 }
   707 double G1CollectorPolicy::predict_survivor_regions_evac_time() {
   708   double survivor_regions_evac_time = 0.0;
   709   for (HeapRegion * r = _recorded_survivor_head;
   710        r != NULL && r != _recorded_survivor_tail->get_next_young_region();
   711        r = r->get_next_young_region()) {
   712     survivor_regions_evac_time += predict_region_elapsed_time_ms(r, gcs_are_young());
   713   }
   714   return survivor_regions_evac_time;
   715 }
   717 void G1CollectorPolicy::revise_young_list_target_length_if_necessary() {
   718   guarantee( adaptive_young_list_length(), "should not call this otherwise" );
   720   size_t rs_lengths = _g1->young_list()->sampled_rs_lengths();
   721   if (rs_lengths > _rs_lengths_prediction) {
   722     // add 10% to avoid having to recalculate often
   723     size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
   724     update_young_list_target_length(rs_lengths_prediction);
   725   }
   726 }
   730 HeapWord* G1CollectorPolicy::mem_allocate_work(size_t size,
   731                                                bool is_tlab,
   732                                                bool* gc_overhead_limit_was_exceeded) {
   733   guarantee(false, "Not using this policy feature yet.");
   734   return NULL;
   735 }
   737 // This method controls how a collector handles one or more
   738 // of its generations being fully allocated.
   739 HeapWord* G1CollectorPolicy::satisfy_failed_allocation(size_t size,
   740                                                        bool is_tlab) {
   741   guarantee(false, "Not using this policy feature yet.");
   742   return NULL;
   743 }
   746 #ifndef PRODUCT
   747 bool G1CollectorPolicy::verify_young_ages() {
   748   HeapRegion* head = _g1->young_list()->first_region();
   749   return
   750     verify_young_ages(head, _short_lived_surv_rate_group);
   751   // also call verify_young_ages on any additional surv rate groups
   752 }
   754 bool
   755 G1CollectorPolicy::verify_young_ages(HeapRegion* head,
   756                                      SurvRateGroup *surv_rate_group) {
   757   guarantee( surv_rate_group != NULL, "pre-condition" );
   759   const char* name = surv_rate_group->name();
   760   bool ret = true;
   761   int prev_age = -1;
   763   for (HeapRegion* curr = head;
   764        curr != NULL;
   765        curr = curr->get_next_young_region()) {
   766     SurvRateGroup* group = curr->surv_rate_group();
   767     if (group == NULL && !curr->is_survivor()) {
   768       gclog_or_tty->print_cr("## %s: encountered NULL surv_rate_group", name);
   769       ret = false;
   770     }
   772     if (surv_rate_group == group) {
   773       int age = curr->age_in_surv_rate_group();
   775       if (age < 0) {
   776         gclog_or_tty->print_cr("## %s: encountered negative age", name);
   777         ret = false;
   778       }
   780       if (age <= prev_age) {
   781         gclog_or_tty->print_cr("## %s: region ages are not strictly increasing "
   782                                "(%d, %d)", name, age, prev_age);
   783         ret = false;
   784       }
   785       prev_age = age;
   786     }
   787   }
   789   return ret;
   790 }
   791 #endif // PRODUCT
   793 void G1CollectorPolicy::record_full_collection_start() {
   794   _full_collection_start_sec = os::elapsedTime();
   795   record_heap_size_info_at_start(true /* full */);
   796   // Release the future to-space so that it is available for compaction into.
   797   _g1->set_full_collection();
   798 }
   800 void G1CollectorPolicy::record_full_collection_end() {
   801   // Consider this like a collection pause for the purposes of allocation
   802   // since last pause.
   803   double end_sec = os::elapsedTime();
   804   double full_gc_time_sec = end_sec - _full_collection_start_sec;
   805   double full_gc_time_ms = full_gc_time_sec * 1000.0;
   807   _trace_gen1_time_data.record_full_collection(full_gc_time_ms);
   809   update_recent_gc_times(end_sec, full_gc_time_ms);
   811   _g1->clear_full_collection();
   813   // "Nuke" the heuristics that control the young/mixed GC
   814   // transitions and make sure we start with young GCs after the Full GC.
   815   set_gcs_are_young(true);
   816   _last_young_gc = false;
   817   clear_initiate_conc_mark_if_possible();
   818   clear_during_initial_mark_pause();
   819   _in_marking_window = false;
   820   _in_marking_window_im = false;
   822   _short_lived_surv_rate_group->start_adding_regions();
   823   // also call this on any additional surv rate groups
   825   record_survivor_regions(0, NULL, NULL);
   827   _free_regions_at_end_of_collection = _g1->free_regions();
   828   // Reset survivors SurvRateGroup.
   829   _survivor_surv_rate_group->reset();
   830   update_young_list_target_length();
   831   _collectionSetChooser->clear();
   832 }
   834 void G1CollectorPolicy::record_stop_world_start() {
   835   _stop_world_start = os::elapsedTime();
   836 }
   838 void G1CollectorPolicy::record_collection_pause_start(double start_time_sec) {
   839   // We only need to do this here as the policy will only be applied
   840   // to the GC we're about to start. so, no point is calculating this
   841   // every time we calculate / recalculate the target young length.
   842   update_survivors_policy();
   844   assert(_g1->used() == _g1->recalculate_used(),
   845          err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT,
   846                  _g1->used(), _g1->recalculate_used()));
   848   double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
   849   _trace_gen0_time_data.record_start_collection(s_w_t_ms);
   850   _stop_world_start = 0.0;
   852   record_heap_size_info_at_start(false /* full */);
   854   phase_times()->record_cur_collection_start_sec(start_time_sec);
   855   _pending_cards = _g1->pending_card_num();
   857   _collection_set_bytes_used_before = 0;
   858   _bytes_copied_during_gc = 0;
   860   _last_gc_was_young = false;
   862   // do that for any other surv rate groups
   863   _short_lived_surv_rate_group->stop_adding_regions();
   864   _survivors_age_table.clear();
   866   assert( verify_young_ages(), "region age verification" );
   867 }
   869 void G1CollectorPolicy::record_concurrent_mark_init_end(double
   870                                                    mark_init_elapsed_time_ms) {
   871   _during_marking = true;
   872   assert(!initiate_conc_mark_if_possible(), "we should have cleared it by now");
   873   clear_during_initial_mark_pause();
   874   _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
   875 }
   877 void G1CollectorPolicy::record_concurrent_mark_remark_start() {
   878   _mark_remark_start_sec = os::elapsedTime();
   879   _during_marking = false;
   880 }
   882 void G1CollectorPolicy::record_concurrent_mark_remark_end() {
   883   double end_time_sec = os::elapsedTime();
   884   double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
   885   _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
   886   _cur_mark_stop_world_time_ms += elapsed_time_ms;
   887   _prev_collection_pause_end_ms += elapsed_time_ms;
   889   _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec, true);
   890 }
   892 void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
   893   _mark_cleanup_start_sec = os::elapsedTime();
   894 }
   896 void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
   897   _last_young_gc = true;
   898   _in_marking_window = false;
   899 }
   901 void G1CollectorPolicy::record_concurrent_pause() {
   902   if (_stop_world_start > 0.0) {
   903     double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
   904     _trace_gen0_time_data.record_yield_time(yield_ms);
   905   }
   906 }
   908 bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
   909   if (_g1->concurrent_mark()->cmThread()->during_cycle()) {
   910     return false;
   911   }
   913   size_t marking_initiating_used_threshold =
   914     (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
   915   size_t cur_used_bytes = _g1->non_young_capacity_bytes();
   916   size_t alloc_byte_size = alloc_word_size * HeapWordSize;
   918   if ((cur_used_bytes + alloc_byte_size) > marking_initiating_used_threshold) {
   919     if (gcs_are_young() && !_last_young_gc) {
   920       ergo_verbose5(ErgoConcCycles,
   921         "request concurrent cycle initiation",
   922         ergo_format_reason("occupancy higher than threshold")
   923         ergo_format_byte("occupancy")
   924         ergo_format_byte("allocation request")
   925         ergo_format_byte_perc("threshold")
   926         ergo_format_str("source"),
   927         cur_used_bytes,
   928         alloc_byte_size,
   929         marking_initiating_used_threshold,
   930         (double) InitiatingHeapOccupancyPercent,
   931         source);
   932       return true;
   933     } else {
   934       ergo_verbose5(ErgoConcCycles,
   935         "do not request concurrent cycle initiation",
   936         ergo_format_reason("still doing mixed collections")
   937         ergo_format_byte("occupancy")
   938         ergo_format_byte("allocation request")
   939         ergo_format_byte_perc("threshold")
   940         ergo_format_str("source"),
   941         cur_used_bytes,
   942         alloc_byte_size,
   943         marking_initiating_used_threshold,
   944         (double) InitiatingHeapOccupancyPercent,
   945         source);
   946     }
   947   }
   949   return false;
   950 }
   952 // Anything below that is considered to be zero
   953 #define MIN_TIMER_GRANULARITY 0.0000001
   955 void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, EvacuationInfo& evacuation_info) {
   956   double end_time_sec = os::elapsedTime();
   957   assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(),
   958          "otherwise, the subtraction below does not make sense");
   959   size_t rs_size =
   960             _cur_collection_pause_used_regions_at_start - cset_region_length();
   961   size_t cur_used_bytes = _g1->used();
   962   assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
   963   bool last_pause_included_initial_mark = false;
   964   bool update_stats = !_g1->evacuation_failed();
   966 #ifndef PRODUCT
   967   if (G1YoungSurvRateVerbose) {
   968     gclog_or_tty->print_cr("");
   969     _short_lived_surv_rate_group->print();
   970     // do that for any other surv rate groups too
   971   }
   972 #endif // PRODUCT
   974   last_pause_included_initial_mark = during_initial_mark_pause();
   975   if (last_pause_included_initial_mark) {
   976     record_concurrent_mark_init_end(0.0);
   977   } else if (need_to_start_conc_mark("end of GC")) {
   978     // Note: this might have already been set, if during the last
   979     // pause we decided to start a cycle but at the beginning of
   980     // this pause we decided to postpone it. That's OK.
   981     set_initiate_conc_mark_if_possible();
   982   }
   984   _mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0,
   985                           end_time_sec, false);
   987   evacuation_info.set_collectionset_used_before(_collection_set_bytes_used_before);
   988   evacuation_info.set_bytes_copied(_bytes_copied_during_gc);
   990   if (update_stats) {
   991     _trace_gen0_time_data.record_end_collection(pause_time_ms, phase_times());
   992     // this is where we update the allocation rate of the application
   993     double app_time_ms =
   994       (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms);
   995     if (app_time_ms < MIN_TIMER_GRANULARITY) {
   996       // This usually happens due to the timer not having the required
   997       // granularity. Some Linuxes are the usual culprits.
   998       // We'll just set it to something (arbitrarily) small.
   999       app_time_ms = 1.0;
  1001     // We maintain the invariant that all objects allocated by mutator
  1002     // threads will be allocated out of eden regions. So, we can use
  1003     // the eden region number allocated since the previous GC to
  1004     // calculate the application's allocate rate. The only exception
  1005     // to that is humongous objects that are allocated separately. But
  1006     // given that humongous object allocations do not really affect
  1007     // either the pause's duration nor when the next pause will take
  1008     // place we can safely ignore them here.
  1009     uint regions_allocated = eden_cset_region_length();
  1010     double alloc_rate_ms = (double) regions_allocated / app_time_ms;
  1011     _alloc_rate_ms_seq->add(alloc_rate_ms);
  1013     double interval_ms =
  1014       (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;
  1015     update_recent_gc_times(end_time_sec, pause_time_ms);
  1016     _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms;
  1017     if (recent_avg_pause_time_ratio() < 0.0 ||
  1018         (recent_avg_pause_time_ratio() - 1.0 > 0.0)) {
  1019 #ifndef PRODUCT
  1020       // Dump info to allow post-facto debugging
  1021       gclog_or_tty->print_cr("recent_avg_pause_time_ratio() out of bounds");
  1022       gclog_or_tty->print_cr("-------------------------------------------");
  1023       gclog_or_tty->print_cr("Recent GC Times (ms):");
  1024       _recent_gc_times_ms->dump();
  1025       gclog_or_tty->print_cr("(End Time=%3.3f) Recent GC End Times (s):", end_time_sec);
  1026       _recent_prev_end_times_for_all_gcs_sec->dump();
  1027       gclog_or_tty->print_cr("GC = %3.3f, Interval = %3.3f, Ratio = %3.3f",
  1028                              _recent_gc_times_ms->sum(), interval_ms, recent_avg_pause_time_ratio());
  1029       // In debug mode, terminate the JVM if the user wants to debug at this point.
  1030       assert(!G1FailOnFPError, "Debugging data for CR 6898948 has been dumped above");
  1031 #endif  // !PRODUCT
  1032       // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
  1033       // CR 6902692 by redoing the manner in which the ratio is incrementally computed.
  1034       if (_recent_avg_pause_time_ratio < 0.0) {
  1035         _recent_avg_pause_time_ratio = 0.0;
  1036       } else {
  1037         assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
  1038         _recent_avg_pause_time_ratio = 1.0;
  1043   bool new_in_marking_window = _in_marking_window;
  1044   bool new_in_marking_window_im = false;
  1045   if (during_initial_mark_pause()) {
  1046     new_in_marking_window = true;
  1047     new_in_marking_window_im = true;
  1050   if (_last_young_gc) {
  1051     // This is supposed to to be the "last young GC" before we start
  1052     // doing mixed GCs. Here we decide whether to start mixed GCs or not.
  1054     if (!last_pause_included_initial_mark) {
  1055       if (next_gc_should_be_mixed("start mixed GCs",
  1056                                   "do not start mixed GCs")) {
  1057         set_gcs_are_young(false);
  1059     } else {
  1060       ergo_verbose0(ErgoMixedGCs,
  1061                     "do not start mixed GCs",
  1062                     ergo_format_reason("concurrent cycle is about to start"));
  1064     _last_young_gc = false;
  1067   if (!_last_gc_was_young) {
  1068     // This is a mixed GC. Here we decide whether to continue doing
  1069     // mixed GCs or not.
  1071     if (!next_gc_should_be_mixed("continue mixed GCs",
  1072                                  "do not continue mixed GCs")) {
  1073       set_gcs_are_young(true);
  1077   _short_lived_surv_rate_group->start_adding_regions();
  1078   // do that for any other surv rate groupsx
  1080   if (update_stats) {
  1081     double cost_per_card_ms = 0.0;
  1082     if (_pending_cards > 0) {
  1083       cost_per_card_ms = phase_times()->average_last_update_rs_time() / (double) _pending_cards;
  1084       _cost_per_card_ms_seq->add(cost_per_card_ms);
  1087     size_t cards_scanned = _g1->cards_scanned();
  1089     double cost_per_entry_ms = 0.0;
  1090     if (cards_scanned > 10) {
  1091       cost_per_entry_ms = phase_times()->average_last_scan_rs_time() / (double) cards_scanned;
  1092       if (_last_gc_was_young) {
  1093         _cost_per_entry_ms_seq->add(cost_per_entry_ms);
  1094       } else {
  1095         _mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms);
  1099     if (_max_rs_lengths > 0) {
  1100       double cards_per_entry_ratio =
  1101         (double) cards_scanned / (double) _max_rs_lengths;
  1102       if (_last_gc_was_young) {
  1103         _young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
  1104       } else {
  1105         _mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
  1109     // This is defensive. For a while _max_rs_lengths could get
  1110     // smaller than _recorded_rs_lengths which was causing
  1111     // rs_length_diff to get very large and mess up the RSet length
  1112     // predictions. The reason was unsafe concurrent updates to the
  1113     // _inc_cset_recorded_rs_lengths field which the code below guards
  1114     // against (see CR 7118202). This bug has now been fixed (see CR
  1115     // 7119027). However, I'm still worried that
  1116     // _inc_cset_recorded_rs_lengths might still end up somewhat
  1117     // inaccurate. The concurrent refinement thread calculates an
  1118     // RSet's length concurrently with other CR threads updating it
  1119     // which might cause it to calculate the length incorrectly (if,
  1120     // say, it's in mid-coarsening). So I'll leave in the defensive
  1121     // conditional below just in case.
  1122     size_t rs_length_diff = 0;
  1123     if (_max_rs_lengths > _recorded_rs_lengths) {
  1124       rs_length_diff = _max_rs_lengths - _recorded_rs_lengths;
  1126     _rs_length_diff_seq->add((double) rs_length_diff);
  1128     size_t freed_bytes = _heap_used_bytes_before_gc - cur_used_bytes;
  1129     size_t copied_bytes = _collection_set_bytes_used_before - freed_bytes;
  1130     double cost_per_byte_ms = 0.0;
  1132     if (copied_bytes > 0) {
  1133       cost_per_byte_ms = phase_times()->average_last_obj_copy_time() / (double) copied_bytes;
  1134       if (_in_marking_window) {
  1135         _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
  1136       } else {
  1137         _cost_per_byte_ms_seq->add(cost_per_byte_ms);
  1141     double all_other_time_ms = pause_time_ms -
  1142       (phase_times()->average_last_update_rs_time() + phase_times()->average_last_scan_rs_time()
  1143       + phase_times()->average_last_obj_copy_time() + phase_times()->average_last_termination_time());
  1145     double young_other_time_ms = 0.0;
  1146     if (young_cset_region_length() > 0) {
  1147       young_other_time_ms =
  1148         phase_times()->young_cset_choice_time_ms() +
  1149         phase_times()->young_free_cset_time_ms();
  1150       _young_other_cost_per_region_ms_seq->add(young_other_time_ms /
  1151                                           (double) young_cset_region_length());
  1153     double non_young_other_time_ms = 0.0;
  1154     if (old_cset_region_length() > 0) {
  1155       non_young_other_time_ms =
  1156         phase_times()->non_young_cset_choice_time_ms() +
  1157         phase_times()->non_young_free_cset_time_ms();
  1159       _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms /
  1160                                             (double) old_cset_region_length());
  1163     double constant_other_time_ms = all_other_time_ms -
  1164       (young_other_time_ms + non_young_other_time_ms);
  1165     _constant_other_time_ms_seq->add(constant_other_time_ms);
  1167     double survival_ratio = 0.0;
  1168     if (_collection_set_bytes_used_before > 0) {
  1169       survival_ratio = (double) _bytes_copied_during_gc /
  1170                                    (double) _collection_set_bytes_used_before;
  1173     _pending_cards_seq->add((double) _pending_cards);
  1174     _rs_lengths_seq->add((double) _max_rs_lengths);
  1177   _in_marking_window = new_in_marking_window;
  1178   _in_marking_window_im = new_in_marking_window_im;
  1179   _free_regions_at_end_of_collection = _g1->free_regions();
  1180   update_young_list_target_length();
  1182   // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
  1183   double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
  1184   adjust_concurrent_refinement(phase_times()->average_last_update_rs_time(),
  1185                                phase_times()->sum_last_update_rs_processed_buffers(), update_rs_time_goal_ms);
  1187   _collectionSetChooser->verify();
  1190 #define EXT_SIZE_FORMAT "%.1f%s"
  1191 #define EXT_SIZE_PARAMS(bytes)                                  \
  1192   byte_size_in_proper_unit((double)(bytes)),                    \
  1193   proper_unit_for_byte_size((bytes))
  1195 void G1CollectorPolicy::record_heap_size_info_at_start(bool full) {
  1196   YoungList* young_list = _g1->young_list();
  1197   _eden_used_bytes_before_gc = young_list->eden_used_bytes();
  1198   _survivor_used_bytes_before_gc = young_list->survivor_used_bytes();
  1199   _heap_capacity_bytes_before_gc = _g1->capacity();
  1200   _heap_used_bytes_before_gc = _g1->used();
  1201   _cur_collection_pause_used_regions_at_start = _g1->used_regions();
  1203   _eden_capacity_bytes_before_gc =
  1204          (_young_list_target_length * HeapRegion::GrainBytes) - _survivor_used_bytes_before_gc;
  1206   if (full) {
  1207     _metaspace_used_bytes_before_gc = MetaspaceAux::allocated_used_bytes();
  1211 void G1CollectorPolicy::print_heap_transition() {
  1212   _g1->print_size_transition(gclog_or_tty,
  1213                              _heap_used_bytes_before_gc,
  1214                              _g1->used(),
  1215                              _g1->capacity());
  1218 void G1CollectorPolicy::print_detailed_heap_transition(bool full) {
  1219   YoungList* young_list = _g1->young_list();
  1221   size_t eden_used_bytes_after_gc = young_list->eden_used_bytes();
  1222   size_t survivor_used_bytes_after_gc = young_list->survivor_used_bytes();
  1223   size_t heap_used_bytes_after_gc = _g1->used();
  1225   size_t heap_capacity_bytes_after_gc = _g1->capacity();
  1226   size_t eden_capacity_bytes_after_gc =
  1227     (_young_list_target_length * HeapRegion::GrainBytes) - survivor_used_bytes_after_gc;
  1229   gclog_or_tty->print(
  1230     "   [Eden: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT") "
  1231     "Survivors: "EXT_SIZE_FORMAT"->"EXT_SIZE_FORMAT" "
  1232     "Heap: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"
  1233     EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")]",
  1234     EXT_SIZE_PARAMS(_eden_used_bytes_before_gc),
  1235     EXT_SIZE_PARAMS(_eden_capacity_bytes_before_gc),
  1236     EXT_SIZE_PARAMS(eden_used_bytes_after_gc),
  1237     EXT_SIZE_PARAMS(eden_capacity_bytes_after_gc),
  1238     EXT_SIZE_PARAMS(_survivor_used_bytes_before_gc),
  1239     EXT_SIZE_PARAMS(survivor_used_bytes_after_gc),
  1240     EXT_SIZE_PARAMS(_heap_used_bytes_before_gc),
  1241     EXT_SIZE_PARAMS(_heap_capacity_bytes_before_gc),
  1242     EXT_SIZE_PARAMS(heap_used_bytes_after_gc),
  1243     EXT_SIZE_PARAMS(heap_capacity_bytes_after_gc));
  1245   if (full) {
  1246     MetaspaceAux::print_metaspace_change(_metaspace_used_bytes_before_gc);
  1249   gclog_or_tty->cr();
  1252 void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
  1253                                                      double update_rs_processed_buffers,
  1254                                                      double goal_ms) {
  1255   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  1256   ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();
  1258   if (G1UseAdaptiveConcRefinement) {
  1259     const int k_gy = 3, k_gr = 6;
  1260     const double inc_k = 1.1, dec_k = 0.9;
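           // Sketch of the adaptation, assuming the green zone has settled at
           // g = 16 buffers: yellow becomes 16 * 3 = 48 and red 16 * 6 = 96.
           // On an overrun, g shrinks to (int)(16 * 0.9) = 14; with slack and
           // more than g processed buffers it grows to (int)MAX2(16 * 1.1, 17.0) = 17.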
  1262     int g = cg1r->green_zone();
  1263     if (update_rs_time > goal_ms) {
  1264       g = (int)(g * dec_k);  // Can become 0; that's OK, as it means buffers are then processed only by mutators.
  1265     } else {
  1266       if (update_rs_time < goal_ms && update_rs_processed_buffers > g) {
  1267         g = (int)MAX2(g * inc_k, g + 1.0);
  1270     // Change the refinement threads params
  1271     cg1r->set_green_zone(g);
  1272     cg1r->set_yellow_zone(g * k_gy);
  1273     cg1r->set_red_zone(g * k_gr);
  1274     cg1r->reinitialize_threads();
  1276     int processing_threshold_delta = MAX2((int)(cg1r->green_zone() * sigma()), 1);
  1277     int processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta,
  1278                                     cg1r->yellow_zone());
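           // E.g., with green zone 16 and sigma() = 0.5 (the default
           // G1ConfidencePercent of 50), the delta is MAX2(8, 1) = 8, so the
           // threshold becomes MIN2(16 + 8, yellow_zone) = 24 completed buffers.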
  1279     // Change the barrier params
  1280     dcqs.set_process_completed_threshold(processing_threshold);
  1281     dcqs.set_max_completed_queue(cg1r->red_zone());
  1284   int curr_queue_size = dcqs.completed_buffers_num();
  1285   if (curr_queue_size >= cg1r->yellow_zone()) {
  1286     dcqs.set_completed_queue_padding(curr_queue_size);
  1287   } else {
  1288     dcqs.set_completed_queue_padding(0);
  1290   dcqs.notify_if_necessary();
  1293 double
  1294 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
  1295                                                 size_t scanned_cards) {
  1296   return
  1297     predict_rs_update_time_ms(pending_cards) +
  1298     predict_rs_scan_time_ms(scanned_cards) +
  1299     predict_constant_other_time_ms();
  1302 double
  1303 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) {
  1304   size_t rs_length = predict_rs_length_diff();
  1305   size_t card_num;
  1306   if (gcs_are_young()) {
  1307     card_num = predict_young_card_num(rs_length);
  1308   } else {
  1309     card_num = predict_non_young_card_num(rs_length);
  1311   return predict_base_elapsed_time_ms(pending_cards, card_num);
  1314 size_t G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
  1315   size_t bytes_to_copy;
  1316   if (hr->is_marked()) {
  1317     bytes_to_copy = hr->max_live_bytes();
  1318   } else {
  1319     assert(hr->is_young() && hr->age_in_surv_rate_group() != -1, "invariant");
  1320     int age = hr->age_in_surv_rate_group();
  1321     double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
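           // E.g., a young region with 1 MB used and a predicted survival rate
           // of 0.25 for its age contributes an expected 256 KB of copying.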
  1322     bytes_to_copy = (size_t) ((double) hr->used() * yg_surv_rate);
  1324   return bytes_to_copy;
  1327 double
  1328 G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
  1329                                                   bool for_young_gc) {
  1330   size_t rs_length = hr->rem_set()->occupied();
  1331   size_t card_num;
  1333   // Predicting the number of cards is based on which type of GC
  1334   // we're predicting for.
  1335   if (for_young_gc) {
  1336     card_num = predict_young_card_num(rs_length);
  1337   } else {
  1338     card_num = predict_non_young_card_num(rs_length);
  1340   size_t bytes_to_copy = predict_bytes_to_copy(hr);
  1342   double region_elapsed_time_ms =
  1343     predict_rs_scan_time_ms(card_num) +
  1344     predict_object_copy_time_ms(bytes_to_copy);
  1346   // The prediction of the "other" time for this region is based
  1347   // upon the region type and NOT the GC type.
  1348   if (hr->is_young()) {
  1349     region_elapsed_time_ms += predict_young_other_time_ms(1);
  1350   } else {
  1351     region_elapsed_time_ms += predict_non_young_other_time_ms(1);
  1353   return region_elapsed_time_ms;
  1356 void
  1357 G1CollectorPolicy::init_cset_region_lengths(uint eden_cset_region_length,
  1358                                             uint survivor_cset_region_length) {
  1359   _eden_cset_region_length     = eden_cset_region_length;
  1360   _survivor_cset_region_length = survivor_cset_region_length;
  1361   _old_cset_region_length      = 0;
  1364 void G1CollectorPolicy::set_recorded_rs_lengths(size_t rs_lengths) {
  1365   _recorded_rs_lengths = rs_lengths;
  1368 void G1CollectorPolicy::update_recent_gc_times(double end_time_sec,
  1369                                                double elapsed_ms) {
  1370   _recent_gc_times_ms->add(elapsed_ms);
  1371   _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec);
  1372   _prev_collection_pause_end_ms = end_time_sec * 1000.0;
  1375 size_t G1CollectorPolicy::expansion_amount() {
  1376   double recent_gc_overhead = recent_avg_pause_time_ratio() * 100.0;
  1377   double threshold = _gc_overhead_perc;
  1378   if (recent_gc_overhead > threshold) {
  1379     // We will double the existing space, or take
  1380     // G1ExpandByPercentOfAvailable % of the available expansion
  1381     // space, whichever is smaller, bounded below by a minimum
  1382     // expansion (unless that's all that's left).
  1383     const size_t min_expand_bytes = 1*M;
  1384     size_t reserved_bytes = _g1->max_capacity();
  1385     size_t committed_bytes = _g1->capacity();
  1386     size_t uncommitted_bytes = reserved_bytes - committed_bytes;
  1387     size_t expand_bytes;
  1388     size_t expand_bytes_via_pct =
  1389       uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
  1390     expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
  1391     expand_bytes = MAX2(expand_bytes, min_expand_bytes);
  1392     expand_bytes = MIN2(expand_bytes, uncommitted_bytes);
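           // Worked example, assuming the default G1ExpandByPercentOfAvailable
           // of 20: with 1 GB committed out of 2 GB reserved, the percentage
           // term is 20% of the 1 GB still uncommitted, i.e. ~200 MB; that is
           // below both the committed and the uncommitted bound, so we expand by it.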
  1394     ergo_verbose5(ErgoHeapSizing,
  1395                   "attempt heap expansion",
  1396                   ergo_format_reason("recent GC overhead higher than "
  1397                                      "threshold after GC")
  1398                   ergo_format_perc("recent GC overhead")
  1399                   ergo_format_perc("threshold")
  1400                   ergo_format_byte("uncommitted")
  1401                   ergo_format_byte_perc("calculated expansion amount"),
  1402                   recent_gc_overhead, threshold,
  1403                   uncommitted_bytes,
  1404                   expand_bytes_via_pct, (double) G1ExpandByPercentOfAvailable);
  1406     return expand_bytes;
  1407   } else {
  1408     return 0;
  1412 void G1CollectorPolicy::print_tracing_info() const {
  1413   _trace_gen0_time_data.print();
  1414   _trace_gen1_time_data.print();
  1417 void G1CollectorPolicy::print_yg_surv_rate_info() const {
  1418 #ifndef PRODUCT
  1419   _short_lived_surv_rate_group->print_surv_rate_summary();
  1420   // add this call for any other surv rate groups
  1421 #endif // PRODUCT
  1424 uint G1CollectorPolicy::max_regions(int purpose) {
  1425   switch (purpose) {
  1426     case GCAllocForSurvived:
  1427       return _max_survivor_regions;
  1428     case GCAllocForTenured:
  1429       return REGIONS_UNLIMITED;
  1430     default:
  1431       ShouldNotReachHere();
  1432       return REGIONS_UNLIMITED;
  1433   };
  1436 void G1CollectorPolicy::update_max_gc_locker_expansion() {
  1437   uint expansion_region_num = 0;
  1438   if (GCLockerEdenExpansionPercent > 0) {
  1439     double perc = (double) GCLockerEdenExpansionPercent / 100.0;
  1440     double expansion_region_num_d = perc * (double) _young_list_target_length;
  1441     // We use ceiling so that if expansion_region_num_d is > 0.0 (but
  1442     // less than 1.0) we'll get 1.
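           // E.g., a young target of 40 regions with the default
           // GCLockerEdenExpansionPercent of 5 allows ceil(40 * 0.05) = 2
           // extra eden regions while the GC locker is active.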
  1443     expansion_region_num = (uint) ceil(expansion_region_num_d);
  1444   } else {
  1445     assert(expansion_region_num == 0, "sanity");
  1447   _young_list_max_length = _young_list_target_length + expansion_region_num;
  1448   assert(_young_list_target_length <= _young_list_max_length, "post-condition");
  1451 // Calculates survivor space parameters.
  1452 void G1CollectorPolicy::update_survivors_policy() {
  1453   double max_survivor_regions_d =
  1454                  (double) _young_list_target_length / (double) SurvivorRatio;
  1455   // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
  1456   // smaller than 1.0) we'll get 1.
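         // E.g., a young list target of 50 regions with the default
         // SurvivorRatio of 8 yields ceil(50 / 8) = 7 survivor regions at most.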
  1457   _max_survivor_regions = (uint) ceil(max_survivor_regions_d);
  1459   _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
  1460         HeapRegion::GrainWords * _max_survivor_regions);
  1463 bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(
  1464                                                      GCCause::Cause gc_cause) {
  1465   bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
  1466   if (!during_cycle) {
  1467     ergo_verbose1(ErgoConcCycles,
  1468                   "request concurrent cycle initiation",
  1469                   ergo_format_reason("requested by GC cause")
  1470                   ergo_format_str("GC cause"),
  1471                   GCCause::to_string(gc_cause));
  1472     set_initiate_conc_mark_if_possible();
  1473     return true;
  1474   } else {
  1475     ergo_verbose1(ErgoConcCycles,
  1476                   "do not request concurrent cycle initiation",
  1477                   ergo_format_reason("concurrent cycle already in progress")
  1478                   ergo_format_str("GC cause"),
  1479                   GCCause::to_string(gc_cause));
  1480     return false;
  1484 void
  1485 G1CollectorPolicy::decide_on_conc_mark_initiation() {
  1486   // We are about to decide on whether this pause will be an
  1487   // initial-mark pause.
  1489   // First, during_initial_mark_pause() should not be already set. We
  1490   // will set it here if we have to. However, it should be cleared by
  1491   // the end of the pause (it's only set for the duration of an
  1492   // initial-mark pause).
  1493   assert(!during_initial_mark_pause(), "pre-condition");
  1495   if (initiate_conc_mark_if_possible()) {
  1496     // We had noticed on a previous pause that the heap occupancy has
  1497     // gone over the initiating threshold and we should start a
  1498     // concurrent marking cycle. So we might initiate one.
  1500     bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
  1501     if (!during_cycle) {
  1502       // The concurrent marking thread is not "during a cycle", i.e.,
  1503       // it has completed the last one. So we can go ahead and
  1504       // initiate a new cycle.
  1506       set_during_initial_mark_pause();
  1507       // We do not allow mixed GCs during marking.
  1508       if (!gcs_are_young()) {
  1509         set_gcs_are_young(true);
  1510         ergo_verbose0(ErgoMixedGCs,
  1511                       "end mixed GCs",
  1512                       ergo_format_reason("concurrent cycle is about to start"));
  1515       // And we can now clear initiate_conc_mark_if_possible() as
  1516       // we've already acted on it.
  1517       clear_initiate_conc_mark_if_possible();
  1519       ergo_verbose0(ErgoConcCycles,
  1520                   "initiate concurrent cycle",
  1521                   ergo_format_reason("concurrent cycle initiation requested"));
  1522     } else {
  1523       // The concurrent marking thread is still finishing up the
  1524       // previous cycle. If we start one right now the two cycles will
  1525       // overlap. In particular, the concurrent marking thread might
  1526       // be in the process of clearing the next marking bitmap (which
  1527       // we will use for the next cycle if we start one). Starting a
  1528       // cycle now will be bad given that parts of the marking
  1529       // information might get cleared by the marking thread. And we
  1530       // cannot wait for the marking thread to finish the cycle as it
  1531       // periodically yields while clearing the next marking bitmap
  1532       // and, if it's in a yield point, it's waiting for us to
  1533       // finish. So, at this point we will not start a cycle and we'll
  1534       // let the concurrent marking thread complete the last one.
  1535       ergo_verbose0(ErgoConcCycles,
  1536                     "do not initiate concurrent cycle",
  1537                     ergo_format_reason("concurrent cycle already in progress"));
  1542 class KnownGarbageClosure: public HeapRegionClosure {
  1543   G1CollectedHeap* _g1h;
  1544   CollectionSetChooser* _hrSorted;
  1546 public:
  1547   KnownGarbageClosure(CollectionSetChooser* hrSorted) :
  1548     _g1h(G1CollectedHeap::heap()), _hrSorted(hrSorted) { }
  1550   bool doHeapRegion(HeapRegion* r) {
  1551     // We only include humongous regions in collection
  1552     // sets when concurrent mark shows that their contained object is
  1553     // unreachable.
  1555     // Do we have any marking information for this region?
  1556     if (r->is_marked()) {
  1557       // We will skip any region that's currently used as an old GC
  1558       // alloc region (we should not consider those for collection
  1559       // before we fill them up).
  1560       if (_hrSorted->should_add(r) && !_g1h->is_old_gc_alloc_region(r)) {
  1561         _hrSorted->add_region(r);
  1564     return false;
  1566 };
  1568 class ParKnownGarbageHRClosure: public HeapRegionClosure {
  1569   G1CollectedHeap* _g1h;
  1570   CSetChooserParUpdater _cset_updater;
  1572 public:
  1573   ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
  1574                            uint chunk_size) :
  1575     _g1h(G1CollectedHeap::heap()),
  1576     _cset_updater(hrSorted, true /* parallel */, chunk_size) { }
  1578   bool doHeapRegion(HeapRegion* r) {
  1579     // Do we have any marking information for this region?
  1580     if (r->is_marked()) {
  1581       // We will skip any region that's currently used as an old GC
  1582       // alloc region (we should not consider those for collection
  1583       // before we fill them up).
  1584       if (_cset_updater.should_add(r) && !_g1h->is_old_gc_alloc_region(r)) {
  1585         _cset_updater.add_region(r);
  1588     return false;
  1590 };
  1592 class ParKnownGarbageTask: public AbstractGangTask {
  1593   CollectionSetChooser* _hrSorted;
  1594   uint _chunk_size;
  1595   G1CollectedHeap* _g1;
  1596 public:
  1597   ParKnownGarbageTask(CollectionSetChooser* hrSorted, uint chunk_size) :
  1598     AbstractGangTask("ParKnownGarbageTask"),
  1599     _hrSorted(hrSorted), _chunk_size(chunk_size),
  1600     _g1(G1CollectedHeap::heap()) { }
  1602   void work(uint worker_id) {
  1603     ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size);
  1605     // Back to zero for the claim value.
  1606     _g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, worker_id,
  1607                                          _g1->workers()->active_workers(),
  1608                                          HeapRegion::InitialClaimValue);
  1610 };
  1612 void
  1613 G1CollectorPolicy::record_concurrent_mark_cleanup_end(int no_of_gc_threads) {
  1614   _collectionSetChooser->clear();
  1616   uint region_num = _g1->n_regions();
  1617   if (G1CollectedHeap::use_parallel_gc_threads()) {
  1618     const uint OverpartitionFactor = 4;
  1619     uint WorkUnit;
  1620     // The use of MinChunkSize = 8 in the original code
  1621     // causes some assertion failures when the total number of
  1622     // regions is less than 8.  The code here tries to fix that.
  1623     // Should the original code also be fixed?
  1624     if (no_of_gc_threads > 0) {
  1625       const uint MinWorkUnit = MAX2(region_num / no_of_gc_threads, 1U);
  1626       WorkUnit = MAX2(region_num / (no_of_gc_threads * OverpartitionFactor),
  1627                       MinWorkUnit);
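             // E.g., with 1000 regions and 8 workers the overpartitioned chunk
             // would be 1000 / 32 = 31 regions, but MinWorkUnit = 1000 / 8 = 125
             // dominates, so each claimed chunk covers 125 regions.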
  1628     } else {
  1629       assert(no_of_gc_threads > 0,
  1630         "The active gc workers should be greater than 0");
  1631       // In a product build do something reasonable to avoid a crash.
  1632       const uint MinWorkUnit = MAX2(region_num / (uint) ParallelGCThreads, 1U);
  1633       WorkUnit =
  1634         MAX2(region_num / (uint) (ParallelGCThreads * OverpartitionFactor),
  1635              MinWorkUnit);
  1637     _collectionSetChooser->prepare_for_par_region_addition(_g1->n_regions(),
  1638                                                            WorkUnit);
  1639     ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser,
  1640                                             (int) WorkUnit);
  1641     _g1->workers()->run_task(&parKnownGarbageTask);
  1643     assert(_g1->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
  1644            "sanity check");
  1645   } else {
  1646     KnownGarbageClosure knownGarbagecl(_collectionSetChooser);
  1647     _g1->heap_region_iterate(&knownGarbagecl);
  1650   _collectionSetChooser->sort_regions();
  1652   double end_sec = os::elapsedTime();
  1653   double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
  1654   _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
  1655   _cur_mark_stop_world_time_ms += elapsed_time_ms;
  1656   _prev_collection_pause_end_ms += elapsed_time_ms;
  1657   _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_sec, true);
  1660 // Add the heap region at the head of the non-incremental collection set
  1661 void G1CollectorPolicy::add_old_region_to_cset(HeapRegion* hr) {
  1662   assert(_inc_cset_build_state == Active, "Precondition");
  1663   assert(!hr->is_young(), "non-incremental add of young region");
  1665   assert(!hr->in_collection_set(), "should not already be in the CSet");
  1666   hr->set_in_collection_set(true);
  1667   hr->set_next_in_collection_set(_collection_set);
  1668   _collection_set = hr;
  1669   _collection_set_bytes_used_before += hr->used();
  1670   _g1->register_region_with_in_cset_fast_test(hr);
  1671   size_t rs_length = hr->rem_set()->occupied();
  1672   _recorded_rs_lengths += rs_length;
  1673   _old_cset_region_length += 1;
  1676 // Initialize the per-collection-set information
  1677 void G1CollectorPolicy::start_incremental_cset_building() {
  1678   assert(_inc_cset_build_state == Inactive, "Precondition");
  1680   _inc_cset_head = NULL;
  1681   _inc_cset_tail = NULL;
  1682   _inc_cset_bytes_used_before = 0;
  1684   _inc_cset_max_finger = 0;
  1685   _inc_cset_recorded_rs_lengths = 0;
  1686   _inc_cset_recorded_rs_lengths_diffs = 0;
  1687   _inc_cset_predicted_elapsed_time_ms = 0.0;
  1688   _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
  1689   _inc_cset_build_state = Active;
  1692 void G1CollectorPolicy::finalize_incremental_cset_building() {
  1693   assert(_inc_cset_build_state == Active, "Precondition");
  1694   assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
  1696   // The two "main" fields, _inc_cset_recorded_rs_lengths and
  1697   // _inc_cset_predicted_elapsed_time_ms, are updated by the thread
  1698   // that adds a new region to the CSet. Further updates by the
  1699   // concurrent refinement thread that samples the young RSet lengths
  1700   // are accumulated in the *_diffs fields. Here we add the diffs to
  1701   // the "main" fields.
  1703   if (_inc_cset_recorded_rs_lengths_diffs >= 0) {
  1704     _inc_cset_recorded_rs_lengths += _inc_cset_recorded_rs_lengths_diffs;
  1705   } else {
  1706     // This is defensive. In theory the diff should always be positive
  1707     // as RSets can only grow between GCs. However, given that we
  1708     // sample their size concurrently with other threads updating them
  1709     // it's possible that we might get the wrong size back, which
  1710     // could make the calculations somewhat inaccurate.
  1711     size_t diffs = (size_t) (-_inc_cset_recorded_rs_lengths_diffs);
  1712     if (_inc_cset_recorded_rs_lengths >= diffs) {
  1713       _inc_cset_recorded_rs_lengths -= diffs;
  1714     } else {
  1715       _inc_cset_recorded_rs_lengths = 0;
  1718   _inc_cset_predicted_elapsed_time_ms +=
  1719                                      _inc_cset_predicted_elapsed_time_ms_diffs;
  1721   _inc_cset_recorded_rs_lengths_diffs = 0;
  1722   _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
  1725 void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) {
  1726   // This routine is used when:
  1727   // * adding survivor regions to the incremental cset at the end of an
  1728   //   evacuation pause,
  1729   // * adding the current allocation region to the incremental cset
  1730   //   when it is retired, and
  1731   // * updating existing policy information for a region in the
  1732   //   incremental cset via young list RSet sampling.
  1733   // Therefore this routine may be called at a safepoint by the
  1734   // VM thread, or in-between safepoints by mutator threads (when
  1735   // retiring the current allocation region) or a concurrent
  1736   // refine thread (RSet sampling).
  1738   double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
  1739   size_t used_bytes = hr->used();
  1740   _inc_cset_recorded_rs_lengths += rs_length;
  1741   _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
  1742   _inc_cset_bytes_used_before += used_bytes;
  1744   // Cache the values we have added to the aggregated information
  1745   // in the heap region, in case we have to remove this region from
  1746   // the incremental collection set or it is updated by the
  1747   // RSet sampling code.
  1748   hr->set_recorded_rs_length(rs_length);
  1749   hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);
  1752 void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr,
  1753                                                      size_t new_rs_length) {
  1754   // Update the CSet information that is dependent on the new RS length
  1755   assert(hr->is_young(), "Precondition");
  1756   assert(!SafepointSynchronize::is_at_safepoint(),
  1757                                                "should not be at a safepoint");
  1759   // We could have updated _inc_cset_recorded_rs_lengths and
  1760   // _inc_cset_predicted_elapsed_time_ms directly but we'd need to do
  1761   // that atomically, as this code is executed by a concurrent
  1762   // refinement thread, potentially concurrently with a mutator thread
  1763   // allocating a new region and also updating the same fields. To
  1764   // avoid the atomic operations we accumulate these updates on two
  1765   // separate fields (*_diffs) and we'll just add them to the "main"
  1766   // fields at the start of a GC.
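         // E.g., if sampling sees a region's recorded RS length grow from 100
         // to 140 entries, the +40 is accumulated in
         // _inc_cset_recorded_rs_lengths_diffs and folded into the main field
         // by finalize_incremental_cset_building() at the start of the pause.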
  1768   ssize_t old_rs_length = (ssize_t) hr->recorded_rs_length();
  1769   ssize_t rs_lengths_diff = (ssize_t) new_rs_length - old_rs_length;
  1770   _inc_cset_recorded_rs_lengths_diffs += rs_lengths_diff;
  1772   double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
  1773   double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
  1774   double elapsed_ms_diff = new_region_elapsed_time_ms - old_elapsed_time_ms;
  1775   _inc_cset_predicted_elapsed_time_ms_diffs += elapsed_ms_diff;
  1777   hr->set_recorded_rs_length(new_rs_length);
  1778   hr->set_predicted_elapsed_time_ms(new_region_elapsed_time_ms);
  1781 void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) {
  1782   assert(hr->is_young(), "invariant");
  1783   assert(hr->young_index_in_cset() > -1, "should have already been set");
  1784   assert(_inc_cset_build_state == Active, "Precondition");
  1786   // We need to clear and set the cached recorded/cached collection set
  1787   // information in the heap region here (before the region gets added
  1788   // to the collection set). An individual heap region's cached values
  1789   // are calculated, aggregated with the policy collection set info,
  1790   // and cached in the heap region here (initially) and (subsequently)
  1791   // by the Young List sampling code.
  1793   size_t rs_length = hr->rem_set()->occupied();
  1794   add_to_incremental_cset_info(hr, rs_length);
  1796   HeapWord* hr_end = hr->end();
  1797   _inc_cset_max_finger = MAX2(_inc_cset_max_finger, hr_end);
  1799   assert(!hr->in_collection_set(), "invariant");
  1800   hr->set_in_collection_set(true);
  1801   assert(hr->next_in_collection_set() == NULL, "invariant");
  1803   _g1->register_region_with_in_cset_fast_test(hr);
  1806 // Add the region at the RHS of the incremental cset
  1807 void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) {
  1808   // We should only ever be appending survivors at the end of a pause
  1809   assert(hr->is_survivor(), "Logic");
  1811   // Do the 'common' stuff
  1812   add_region_to_incremental_cset_common(hr);
  1814   // Now add the region at the right hand side
  1815   if (_inc_cset_tail == NULL) {
  1816     assert(_inc_cset_head == NULL, "invariant");
  1817     _inc_cset_head = hr;
  1818   } else {
  1819     _inc_cset_tail->set_next_in_collection_set(hr);
  1821   _inc_cset_tail = hr;
  1824 // Add the region to the LHS of the incremental cset
  1825 void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) {
  1826   // Survivors should be added to the RHS at the end of a pause
  1827   assert(!hr->is_survivor(), "Logic");
  1829   // Do the 'common' stuff
  1830   add_region_to_incremental_cset_common(hr);
  1832   // Add the region at the left hand side
  1833   hr->set_next_in_collection_set(_inc_cset_head);
  1834   if (_inc_cset_head == NULL) {
  1835     assert(_inc_cset_tail == NULL, "Invariant");
  1836     _inc_cset_tail = hr;
  1838   _inc_cset_head = hr;
  1841 #ifndef PRODUCT
  1842 void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream* st) {
  1843   assert(list_head == inc_cset_head() || list_head == collection_set(), "must be");
  1845   st->print_cr("\nCollection_set:");
  1846   HeapRegion* csr = list_head;
  1847   while (csr != NULL) {
  1848     HeapRegion* next = csr->next_in_collection_set();
  1849     assert(csr->in_collection_set(), "bad CS");
  1850     st->print_cr("  "HR_FORMAT", P: "PTR_FORMAT", N: "PTR_FORMAT", age: %4d",
  1851                  HR_FORMAT_PARAMS(csr),
  1852                  csr->prev_top_at_mark_start(), csr->next_top_at_mark_start(),
  1853                  csr->age_in_surv_rate_group_cond());
  1854     csr = next;
  1857 #endif // !PRODUCT
  1859 double G1CollectorPolicy::reclaimable_bytes_perc(size_t reclaimable_bytes) {
  1860   // Returns the given amount of reclaimable bytes (that represents
  1861   // the amount of reclaimable space still to be collected) as a
  1862   // percentage of the current heap capacity.
  1863   size_t capacity_bytes = _g1->capacity();
  1864   return (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;
  1867 bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
  1868                                                 const char* false_action_str) {
  1869   CollectionSetChooser* cset_chooser = _collectionSetChooser;
  1870   if (cset_chooser->is_empty()) {
  1871     ergo_verbose0(ErgoMixedGCs,
  1872                   false_action_str,
  1873                   ergo_format_reason("candidate old regions not available"));
  1874     return false;
  1877   // Is the amount of uncollected reclaimable space above G1HeapWastePercent?
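         // E.g., on an 8 GB heap with the default G1HeapWastePercent of 5,
         // mixed GCs continue only while more than ~410 MB remain reclaimable.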
  1878   size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes();
  1879   double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
  1880   double threshold = (double) G1HeapWastePercent;
  1881   if (reclaimable_perc <= threshold) {
  1882     ergo_verbose4(ErgoMixedGCs,
  1883               false_action_str,
  1884               ergo_format_reason("reclaimable percentage not over threshold")
  1885               ergo_format_region("candidate old regions")
  1886               ergo_format_byte_perc("reclaimable")
  1887               ergo_format_perc("threshold"),
  1888               cset_chooser->remaining_regions(),
  1889               reclaimable_bytes,
  1890               reclaimable_perc, threshold);
  1891     return false;
  1894   ergo_verbose4(ErgoMixedGCs,
  1895                 true_action_str,
  1896                 ergo_format_reason("candidate old regions available")
  1897                 ergo_format_region("candidate old regions")
  1898                 ergo_format_byte_perc("reclaimable")
  1899                 ergo_format_perc("threshold"),
  1900                 cset_chooser->remaining_regions(),
  1901                 reclaimable_bytes,
  1902                 reclaimable_perc, threshold);
  1903   return true;
  1906 uint G1CollectorPolicy::calc_min_old_cset_length() {
  1907   // The min old CSet region bound is based on the maximum desired
  1908   // number of mixed GCs after a cycle. I.e., even if some old regions
  1909   // look expensive, we should add them to the CSet anyway to make
  1910   // sure we go through the available old regions in no more than the
  1911   // maximum desired number of mixed GCs.
  1912   //
  1913   // The calculation is based on the number of marked regions we added
  1914   // to the CSet chooser in the first place, not how many remain, so
  1915   // that the result is the same during all mixed GCs that follow a cycle.
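         // E.g., with 100 marked regions added to the chooser and the default
         // G1MixedGCCountTarget of 8, each mixed GC must take at least
         // ceil(100 / 8) = 13 old regions.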
  1917   const size_t region_num = (size_t) _collectionSetChooser->length();
  1918   const size_t gc_num = (size_t) MAX2(G1MixedGCCountTarget, (uintx) 1);
  1919   size_t result = region_num / gc_num;
  1920   // emulate ceiling
  1921   if (result * gc_num < region_num) {
  1922     result += 1;
  1924   return (uint) result;
  1927 uint G1CollectorPolicy::calc_max_old_cset_length() {
  1928   // The max old CSet region bound is based on the threshold expressed
  1929   // as a percentage of the heap size. I.e., it should bound the
  1930   // number of old regions added to the CSet irrespective of how many
  1931   // of them are available.
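         // E.g., a 2000-region heap with the default
         // G1OldCSetRegionThresholdPercent of 10 caps every mixed CSet at
         // 2000 * 10 / 100 = 200 old regions.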
  1933   G1CollectedHeap* g1h = G1CollectedHeap::heap();
  1934   const size_t region_num = g1h->n_regions();
  1935   const size_t perc = (size_t) G1OldCSetRegionThresholdPercent;
  1936   size_t result = region_num * perc / 100;
  1937   // emulate ceiling
  1938   if (100 * result < region_num * perc) {
  1939     result += 1;
  1941   return (uint) result;
  1945 void G1CollectorPolicy::finalize_cset(double target_pause_time_ms, EvacuationInfo& evacuation_info) {
  1946   double young_start_time_sec = os::elapsedTime();
  1948   YoungList* young_list = _g1->young_list();
  1949   finalize_incremental_cset_building();
  1951   guarantee(target_pause_time_ms > 0.0,
  1952             err_msg("target_pause_time_ms = %1.6lf should be positive",
  1953                     target_pause_time_ms));
  1954   guarantee(_collection_set == NULL, "Precondition");
  1956   double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
  1957   double predicted_pause_time_ms = base_time_ms;
  1958   double time_remaining_ms = MAX2(target_pause_time_ms - base_time_ms, 0.0);
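         // E.g., with a 200 ms pause target and a predicted base time of 60 ms,
         // 140 ms remain for adding young and old regions to the CSet.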
  1960   ergo_verbose4(ErgoCSetConstruction | ErgoHigh,
  1961                 "start choosing CSet",
  1962                 ergo_format_size("_pending_cards")
  1963                 ergo_format_ms("predicted base time")
  1964                 ergo_format_ms("remaining time")
  1965                 ergo_format_ms("target pause time"),
  1966                 _pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms);
  1968   _last_gc_was_young = gcs_are_young();
  1970   if (_last_gc_was_young) {
  1971     _trace_gen0_time_data.increment_young_collection_count();
  1972   } else {
  1973     _trace_gen0_time_data.increment_mixed_collection_count();
  1976   // The young list is laid out with the survivor regions from the
  1977   // previous pause appended to the RHS of the young list, i.e.
  1978   //   [Newly Young Regions ++ Survivors from last pause].
  1980   uint survivor_region_length = young_list->survivor_length();
  1981   uint eden_region_length = young_list->length() - survivor_region_length;
  1982   init_cset_region_lengths(eden_region_length, survivor_region_length);
  1984   HeapRegion* hr = young_list->first_survivor_region();
  1985   while (hr != NULL) {
  1986     assert(hr->is_survivor(), "badly formed young list");
  1987     hr->set_young();
  1988     hr = hr->get_next_young_region();
  1991   // Clear the fields that point to the survivor list - they are all young now.
  1992   young_list->clear_survivors();
  1994   _collection_set = _inc_cset_head;
  1995   _collection_set_bytes_used_before = _inc_cset_bytes_used_before;
  1996   time_remaining_ms = MAX2(time_remaining_ms - _inc_cset_predicted_elapsed_time_ms, 0.0);
  1997   predicted_pause_time_ms += _inc_cset_predicted_elapsed_time_ms;
  1999   ergo_verbose3(ErgoCSetConstruction | ErgoHigh,
  2000                 "add young regions to CSet",
  2001                 ergo_format_region("eden")
  2002                 ergo_format_region("survivors")
  2003                 ergo_format_ms("predicted young region time"),
  2004                 eden_region_length, survivor_region_length,
  2005                 _inc_cset_predicted_elapsed_time_ms);
  2007   // The number of recorded young regions is the incremental
  2008   // collection set's current size
  2009   set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);
  2011   double young_end_time_sec = os::elapsedTime();
  2012   phase_times()->record_young_cset_choice_time_ms((young_end_time_sec - young_start_time_sec) * 1000.0);
  2014   // Set the start of the non-young choice time.
  2015   double non_young_start_time_sec = young_end_time_sec;
  2017   if (!gcs_are_young()) {
  2018     CollectionSetChooser* cset_chooser = _collectionSetChooser;
  2019     cset_chooser->verify();
  2020     const uint min_old_cset_length = calc_min_old_cset_length();
  2021     const uint max_old_cset_length = calc_max_old_cset_length();
  2023     uint expensive_region_num = 0;
  2024     bool check_time_remaining = adaptive_young_list_length();
  2026     HeapRegion* hr = cset_chooser->peek();
  2027     while (hr != NULL) {
  2028       if (old_cset_region_length() >= max_old_cset_length) {
  2029         // Added maximum number of old regions to the CSet.
  2030         ergo_verbose2(ErgoCSetConstruction,
  2031                       "finish adding old regions to CSet",
  2032                       ergo_format_reason("old CSet region num reached max")
  2033                       ergo_format_region("old")
  2034                       ergo_format_region("max"),
  2035                       old_cset_region_length(), max_old_cset_length);
  2036         break;
  2040       // Stop adding regions if the remaining reclaimable space is
  2041       // not above G1HeapWastePercent.
  2042       size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes();
  2043       double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
  2044       double threshold = (double) G1HeapWastePercent;
  2045       if (reclaimable_perc <= threshold) {
  2046         // We've added enough old regions that the amount of uncollected
  2047         // reclaimable space is at or below the waste threshold. Stop
  2048         // adding old regions to the CSet.
  2049         ergo_verbose5(ErgoCSetConstruction,
  2050                       "finish adding old regions to CSet",
  2051                       ergo_format_reason("reclaimable percentage not over threshold")
  2052                       ergo_format_region("old")
  2053                       ergo_format_region("max")
  2054                       ergo_format_byte_perc("reclaimable")
  2055                       ergo_format_perc("threshold"),
  2056                       old_cset_region_length(),
  2057                       max_old_cset_length,
  2058                       reclaimable_bytes,
  2059                       reclaimable_perc, threshold);
  2060         break;
  2063       double predicted_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
  2064       if (check_time_remaining) {
  2065         if (predicted_time_ms > time_remaining_ms) {
  2066           // Too expensive for the current CSet.
  2068           if (old_cset_region_length() >= min_old_cset_length) {
  2069             // We have added the minimum number of old regions to the CSet,
  2070             // we are done with this CSet.
  2071             ergo_verbose4(ErgoCSetConstruction,
  2072                           "finish adding old regions to CSet",
  2073                           ergo_format_reason("predicted time is too high")
  2074                           ergo_format_ms("predicted time")
  2075                           ergo_format_ms("remaining time")
  2076                           ergo_format_region("old")
  2077                           ergo_format_region("min"),
  2078                           predicted_time_ms, time_remaining_ms,
  2079                           old_cset_region_length(), min_old_cset_length);
  2080             break;
  2083           // We'll add it anyway given that we haven't reached the
  2084           // minimum number of old regions.
  2085           expensive_region_num += 1;
  2087       } else {
  2088         if (old_cset_region_length() >= min_old_cset_length) {
  2089           // In the non-auto-tuning case, we'll finish adding regions
  2090           // to the CSet if we reach the minimum.
  2091           ergo_verbose2(ErgoCSetConstruction,
  2092                         "finish adding old regions to CSet",
  2093                         ergo_format_reason("old CSet region num reached min")
  2094                         ergo_format_region("old")
  2095                         ergo_format_region("min"),
  2096                         old_cset_region_length(), min_old_cset_length);
  2097           break;
  2101       // We will add this region to the CSet.
  2102       time_remaining_ms = MAX2(time_remaining_ms - predicted_time_ms, 0.0);
  2103       predicted_pause_time_ms += predicted_time_ms;
  2104       cset_chooser->remove_and_move_to_next(hr);
  2105       _g1->old_set_remove(hr);
  2106       add_old_region_to_cset(hr);
  2108       hr = cset_chooser->peek();
  2110     if (hr == NULL) {
  2111       ergo_verbose0(ErgoCSetConstruction,
  2112                     "finish adding old regions to CSet",
  2113                     ergo_format_reason("candidate old regions not available"));
  2116     if (expensive_region_num > 0) {
  2117       // We print the information once here at the end, predicated on
  2118       // whether we added any apparently expensive regions or not, to
  2119       // avoid generating output per region.
  2120       ergo_verbose4(ErgoCSetConstruction,
  2121                     "added expensive regions to CSet",
  2122                     ergo_format_reason("old CSet region num not reached min")
  2123                     ergo_format_region("old")
  2124                     ergo_format_region("expensive")
  2125                     ergo_format_region("min")
  2126                     ergo_format_ms("remaining time"),
  2127                     old_cset_region_length(),
  2128                     expensive_region_num,
  2129                     min_old_cset_length,
  2130                     time_remaining_ms);
  2133     cset_chooser->verify();
  2136   stop_incremental_cset_building();
  2138   ergo_verbose5(ErgoCSetConstruction,
  2139                 "finish choosing CSet",
  2140                 ergo_format_region("eden")
  2141                 ergo_format_region("survivors")
  2142                 ergo_format_region("old")
  2143                 ergo_format_ms("predicted pause time")
  2144                 ergo_format_ms("target pause time"),
  2145                 eden_region_length, survivor_region_length,
  2146                 old_cset_region_length(),
  2147                 predicted_pause_time_ms, target_pause_time_ms);
  2149   double non_young_end_time_sec = os::elapsedTime();
  2150   phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);
  2151   evacuation_info.set_collectionset_regions(cset_region_length());
  2154 void TraceGen0TimeData::record_start_collection(double time_to_stop_the_world_ms) {
  2155   if (TraceGen0Time) {
  2156     _all_stop_world_times_ms.add(time_to_stop_the_world_ms);
  2160 void TraceGen0TimeData::record_yield_time(double yield_time_ms) {
  2161   if (TraceGen0Time) {
  2162     _all_yield_times_ms.add(yield_time_ms);
  2166 void TraceGen0TimeData::record_end_collection(double pause_time_ms, G1GCPhaseTimes* phase_times) {
  2167   if (TraceGen0Time) {
  2168     _total.add(pause_time_ms);
  2169     _other.add(pause_time_ms - phase_times->accounted_time_ms());
  2170     _root_region_scan_wait.add(phase_times->root_region_scan_wait_time_ms());
  2171     _parallel.add(phase_times->cur_collection_par_time_ms());
  2172     _ext_root_scan.add(phase_times->average_last_ext_root_scan_time());
  2173     _satb_filtering.add(phase_times->average_last_satb_filtering_times_ms());
  2174     _update_rs.add(phase_times->average_last_update_rs_time());
  2175     _scan_rs.add(phase_times->average_last_scan_rs_time());
  2176     _obj_copy.add(phase_times->average_last_obj_copy_time());
  2177     _termination.add(phase_times->average_last_termination_time());
  2179     double parallel_known_time = phase_times->average_last_ext_root_scan_time() +
  2180       phase_times->average_last_satb_filtering_times_ms() +
  2181       phase_times->average_last_update_rs_time() +
  2182       phase_times->average_last_scan_rs_time() +
  2183       phase_times->average_last_obj_copy_time() +
  2184       phase_times->average_last_termination_time();
  2186     double parallel_other_time = phase_times->cur_collection_par_time_ms() - parallel_known_time;
  2187     _parallel_other.add(parallel_other_time);
  2188     _clear_ct.add(phase_times->cur_clear_ct_time_ms());
  2192 void TraceGen0TimeData::increment_young_collection_count() {
  2193   if (TraceGen0Time) {
  2194     ++_young_pause_num;
  2198 void TraceGen0TimeData::increment_mixed_collection_count() {
  2199   if (TraceGen0Time) {
  2200     ++_mixed_pause_num;
  2204 void TraceGen0TimeData::print_summary(const char* str,
  2205                                       const NumberSeq* seq) const {
  2206   double sum = seq->sum();
  2207   gclog_or_tty->print_cr("%-27s = %8.2lf s (avg = %8.2lf ms)",
  2208                 str, sum / 1000.0, seq->avg());
  2211 void TraceGen0TimeData::print_summary_sd(const char* str,
  2212                                          const NumberSeq* seq) const {
  2213   print_summary(str, seq);
  2214   gclog_or_tty->print_cr("%+45s = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
  2215                 "(num", seq->num(), seq->sd(), seq->maximum());
  2218 void TraceGen0TimeData::print() const {
  2219   if (!TraceGen0Time) {
  2220     return;
  2223   gclog_or_tty->print_cr("ALL PAUSES");
  2224   print_summary_sd("   Total", &_total);
  2225   gclog_or_tty->print_cr("");
  2226   gclog_or_tty->print_cr("");
  2227   gclog_or_tty->print_cr("   Young GC Pauses: %8d", _young_pause_num);
  2228   gclog_or_tty->print_cr("   Mixed GC Pauses: %8d", _mixed_pause_num);
  2229   gclog_or_tty->print_cr("");
  2231   gclog_or_tty->print_cr("EVACUATION PAUSES");
  2233   if (_young_pause_num == 0 && _mixed_pause_num == 0) {
  2234     gclog_or_tty->print_cr("none");
  2235   } else {
  2236     print_summary_sd("   Evacuation Pauses", &_total);
  2237     print_summary("      Root Region Scan Wait", &_root_region_scan_wait);
  2238     print_summary("      Parallel Time", &_parallel);
  2239     print_summary("         Ext Root Scanning", &_ext_root_scan);
  2240     print_summary("         SATB Filtering", &_satb_filtering);
  2241     print_summary("         Update RS", &_update_rs);
  2242     print_summary("         Scan RS", &_scan_rs);
  2243     print_summary("         Object Copy", &_obj_copy);
  2244     print_summary("         Termination", &_termination);
  2245     print_summary("         Parallel Other", &_parallel_other);
  2246     print_summary("      Clear CT", &_clear_ct);
  2247     print_summary("      Other", &_other);
  2249   gclog_or_tty->print_cr("");
  2251   gclog_or_tty->print_cr("MISC");
  2252   print_summary_sd("   Stop World", &_all_stop_world_times_ms);
  2253   print_summary_sd("   Yields", &_all_yield_times_ms);
  2256 void TraceGen1TimeData::record_full_collection(double full_gc_time_ms) {
  2257   if (TraceGen1Time) {
  2258     _all_full_gc_times.add(full_gc_time_ms);
  2262 void TraceGen1TimeData::print() const {
  2263   if (!TraceGen1Time) {
  2264     return;
  2267   if (_all_full_gc_times.num() > 0) {
  2268     gclog_or_tty->print("\n%4d full_gcs: total time = %8.2f s",
  2269       _all_full_gc_times.num(),
  2270       _all_full_gc_times.sum() / 1000.0);
  2271     gclog_or_tty->print_cr(" (avg = %8.2f ms).", _all_full_gc_times.avg());
  2272     gclog_or_tty->print_cr("                     [std. dev = %8.2f ms, max = %8.2f ms]",
  2273       _all_full_gc_times.sd(),
  2274       _all_full_gc_times.maximum());
