src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp

author:      tonyp
date:        Thu, 08 Sep 2011 05:16:49 -0400
changeset:   3119 4f41766176cf
parent:      3114 20213c8a3c40
child:       3120 af2ab04e0038
permissions: -rw-r--r--

7084509: G1: fix inconsistencies and mistakes in the young list target length calculations
Summary: Fixed inconsistencies and mistakes in the young list target length calculations so that a) the calculated target length is optimal (before, it was not), b) other parameters, such as the max survivor size and the max GC locker eden expansion, are always consistent with the calculated target length (before, they were not always), and c) the resulting target length is always bounded by the desired min and max values (before, it was not).
Reviewed-by: brutisso, johnc
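
The fix turns on the order of the final clamping in update_young_list_target_length() in the listing below: the calculated target length is bounded by the desired max first and by the desired min second, so the min wins if the two bounds clash. A minimal sketch of just that clamping (illustrative names, not code from the changeset):

    static size_t bound_target_length(size_t target_length,
                                      size_t desired_min_length,
                                      size_t desired_max_length) {
      if (target_length > desired_max_length) {
        target_length = desired_max_length;   // max bound applied first
      }
      if (target_length < desired_min_length) {
        target_length = desired_min_length;   // min bound second, so it wins on a clash
      }
      return target_length;
    }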

/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/concurrentG1Refine.hpp"
#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "runtime/arguments.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/debug.hpp"

#define PREDICTIONS_VERBOSE 0

// <NEW PREDICTION>

// Different defaults for different number of GC threads
// They were chosen by running GCOld and SPECjbb on debris with different
//   numbers of GC threads and choosing them based on the results

// all the same
static double rs_length_diff_defaults[] = {
  0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
};

static double cost_per_card_ms_defaults[] = {
  0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
};

// all the same
static double fully_young_cards_per_entry_ratio_defaults[] = {
  1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
};

static double cost_per_entry_ms_defaults[] = {
  0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005
};

static double cost_per_byte_ms_defaults[] = {
  0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009
};

// these should be pretty consistent
static double constant_other_time_ms_defaults[] = {
  5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0
};

static double young_other_cost_per_region_ms_defaults[] = {
  0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1
};

static double non_young_other_cost_per_region_ms_defaults[] = {
  1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
};

// </NEW PREDICTION>

// Help class for avoiding interleaved logging
class LineBuffer: public StackObj {

private:
  static const int BUFFER_LEN = 1024;
  static const int INDENT_CHARS = 3;
  char _buffer[BUFFER_LEN];
  int _indent_level;
  int _cur;

  void vappend(const char* format, va_list ap) {
    int res = vsnprintf(&_buffer[_cur], BUFFER_LEN - _cur, format, ap);
    if (res != -1) {
      _cur += res;
    } else {
      DEBUG_ONLY(warning("buffer too small in LineBuffer");)
      _buffer[BUFFER_LEN -1] = 0;
      _cur = BUFFER_LEN; // vsnprintf above should not add to _buffer if we are called again
    }
  }

public:
  explicit LineBuffer(int indent_level): _indent_level(indent_level), _cur(0) {
    for (; (_cur < BUFFER_LEN && _cur < (_indent_level * INDENT_CHARS)); _cur++) {
      _buffer[_cur] = ' ';
    }
  }

#ifndef PRODUCT
  ~LineBuffer() {
    assert(_cur == _indent_level * INDENT_CHARS, "pending data in buffer - append_and_print_cr() not called?");
  }
#endif

  void append(const char* format, ...) {
    va_list ap;
    va_start(ap, format);
    vappend(format, ap);
    va_end(ap);
  }

  void append_and_print_cr(const char* format, ...) {
    va_list ap;
    va_start(ap, format);
    vappend(format, ap);
    va_end(ap);
    gclog_or_tty->print_cr("%s", _buffer);
    _cur = _indent_level * INDENT_CHARS;
  }
};
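
// Illustrative use of LineBuffer (not from this file): build one indented,
// non-interleaved log line from several append() calls, then emit it whole.
//
//   LineBuffer buf(1);                        // indent one level (3 spaces)
//   buf.append("[%s", "Update RS");
//   buf.append_and_print_cr(" (ms): %5.1lf]", 1.2);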

G1CollectorPolicy::G1CollectorPolicy() :
  _parallel_gc_threads(G1CollectedHeap::use_parallel_gc_threads()
                        ? ParallelGCThreads : 1),

  _n_pauses(0),
  _recent_rs_scan_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _recent_pause_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _recent_rs_sizes(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _all_pause_times_ms(new NumberSeq()),
  _stop_world_start(0.0),
  _all_stop_world_times_ms(new NumberSeq()),
  _all_yield_times_ms(new NumberSeq()),

  _all_mod_union_times_ms(new NumberSeq()),

  _summary(new Summary()),

#ifndef PRODUCT
  _cur_clear_ct_time_ms(0.0),
  _min_clear_cc_time_ms(-1.0),
  _max_clear_cc_time_ms(-1.0),
  _cur_clear_cc_time_ms(0.0),
  _cum_clear_cc_time_ms(0.0),
  _num_cc_clears(0L),
#endif

  _region_num_young(0),
  _region_num_tenured(0),
  _prev_region_num_young(0),
  _prev_region_num_tenured(0),

  _aux_num(10),
  _all_aux_times_ms(new NumberSeq[_aux_num]),
  _cur_aux_start_times_ms(new double[_aux_num]),
  _cur_aux_times_ms(new double[_aux_num]),
  _cur_aux_times_set(new bool[_aux_num]),

  _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),

  // <NEW PREDICTION>

  _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _prev_collection_pause_end_ms(0.0),
  _pending_card_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
  _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _fully_young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  _partially_young_cards_per_entry_ratio_seq(
                                         new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _partially_young_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
  _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _non_young_other_cost_per_region_ms_seq(
                                         new TruncatedSeq(TruncatedSeqLength)),

  _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
  _scanned_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
  _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),

  _pause_time_target_ms((double) MaxGCPauseMillis),

  // </NEW PREDICTION>

  _full_young_gcs(true),
  _full_young_pause_num(0),
  _partial_young_pause_num(0),

  _during_marking(false),
  _in_marking_window(false),
  _in_marking_window_im(false),

  _known_garbage_ratio(0.0),
  _known_garbage_bytes(0),

  _young_gc_eff_seq(new TruncatedSeq(TruncatedSeqLength)),

  _recent_prev_end_times_for_all_gcs_sec(new TruncatedSeq(NumPrevPausesForHeuristics)),

  _recent_CS_bytes_used_before(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _recent_CS_bytes_surviving(new TruncatedSeq(NumPrevPausesForHeuristics)),

  _recent_avg_pause_time_ratio(0.0),
  _num_markings(0),
  _n_marks(0),
  _n_pauses_at_mark_end(0),

  _all_full_gc_times_ms(new NumberSeq()),

  // G1PausesBtwnConcMark defaults to -1
  // so the hack is to do the cast  QQQ FIXME
  _pauses_btwn_concurrent_mark((size_t)G1PausesBtwnConcMark),
  _n_marks_since_last_pause(0),
  _initiate_conc_mark_if_possible(false),
  _during_initial_mark_pause(false),
  _should_revert_to_full_young_gcs(false),
  _last_full_young_gc(false),

  _eden_bytes_before_gc(0),
  _survivor_bytes_before_gc(0),
  _capacity_before_gc(0),

  _prev_collection_pause_used_at_end_bytes(0),

  _collection_set(NULL),
  _collection_set_size(0),
  _collection_set_bytes_used_before(0),

  // Incremental CSet attributes
  _inc_cset_build_state(Inactive),
  _inc_cset_head(NULL),
  _inc_cset_tail(NULL),
  _inc_cset_size(0),
  _inc_cset_young_index(0),
  _inc_cset_bytes_used_before(0),
  _inc_cset_max_finger(NULL),
  _inc_cset_recorded_young_bytes(0),
  _inc_cset_recorded_rs_lengths(0),
  _inc_cset_predicted_elapsed_time_ms(0.0),
  _inc_cset_predicted_bytes_to_copy(0),

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

  _short_lived_surv_rate_group(new SurvRateGroup(this, "Short Lived",
                                                 G1YoungSurvRateNumRegionsSummary)),
  _survivor_surv_rate_group(new SurvRateGroup(this, "Survivor",
                                              G1YoungSurvRateNumRegionsSummary)),
  // add here any more surv rate groups
  _recorded_survivor_regions(0),
  _recorded_survivor_head(NULL),
  _recorded_survivor_tail(NULL),
  _survivors_age_table(true),

  _gc_overhead_perc(0.0) {

  // Set up the region size and associated fields. Given that the
  // policy is created before the heap, we have to set this up here,
  // so it's done as soon as possible.
  HeapRegion::setup_heap_region_size(Arguments::min_heap_size());
  HeapRegionRemSet::setup_remset_size();

  G1ErgoVerbose::initialize();
  if (PrintAdaptiveSizePolicy) {
    // Currently, we only use a single switch for all the heuristics.
    G1ErgoVerbose::set_enabled(true);
    // Given that we don't currently have a verboseness level
    // parameter, we'll hardcode this to high. This can be easily
    // changed in the future.
    G1ErgoVerbose::set_level(ErgoHigh);
  } else {
    G1ErgoVerbose::set_enabled(false);
  }

  // Verify PLAB sizes
  const uint region_size = HeapRegion::GrainWords;
  if (YoungPLABSize > region_size || OldPLABSize > region_size) {
    char buffer[128];
    jio_snprintf(buffer, sizeof(buffer), "%sPLABSize should be at most %u",
                 OldPLABSize > region_size ? "Old" : "Young", region_size);
    vm_exit_during_initialization(buffer);
  }

  _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
  _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;

  _par_last_gc_worker_start_times_ms = new double[_parallel_gc_threads];
  _par_last_ext_root_scan_times_ms = new double[_parallel_gc_threads];
  _par_last_mark_stack_scan_times_ms = new double[_parallel_gc_threads];

  _par_last_update_rs_times_ms = new double[_parallel_gc_threads];
  _par_last_update_rs_processed_buffers = new double[_parallel_gc_threads];

  _par_last_scan_rs_times_ms = new double[_parallel_gc_threads];

  _par_last_obj_copy_times_ms = new double[_parallel_gc_threads];

  _par_last_termination_times_ms = new double[_parallel_gc_threads];
  _par_last_termination_attempts = new double[_parallel_gc_threads];
  _par_last_gc_worker_end_times_ms = new double[_parallel_gc_threads];
  _par_last_gc_worker_times_ms = new double[_parallel_gc_threads];

  // start conservatively
  _expensive_region_limit_ms = 0.5 * (double) MaxGCPauseMillis;

  // <NEW PREDICTION>

  int index;
  if (ParallelGCThreads == 0)
    index = 0;
  else if (ParallelGCThreads > 8)
    index = 7;
  else
    index = ParallelGCThreads - 1;
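  // e.g., ParallelGCThreads == 4 selects index 3; 0 or 1 threads select
  // index 0, and anything above 8 is capped at index 7 (the defaults
  // arrays above have 8 entries).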

  _pending_card_diff_seq->add(0.0);
  _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
  _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
  _fully_young_cards_per_entry_ratio_seq->add(
                            fully_young_cards_per_entry_ratio_defaults[index]);
  _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
  _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
  _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
  _young_other_cost_per_region_ms_seq->add(
                               young_other_cost_per_region_ms_defaults[index]);
  _non_young_other_cost_per_region_ms_seq->add(
                           non_young_other_cost_per_region_ms_defaults[index]);

  // </NEW PREDICTION>

  // Below, we might need to calculate the pause time target based on
  // the pause interval. When we do so we are going to give G1 maximum
  // flexibility and allow it to do pauses when it needs to. So, we'll
  // arrange for the pause interval to be pause time target + 1 to
  // ensure that a) the pause time target is maximized with respect to
  // the pause interval and b) we maintain the invariant that pause
  // time target < pause interval. If the user does not want this
  // maximum flexibility, they will have to set the pause interval
  // explicitly.

  // First make sure that, if either parameter is set, its value is
  // reasonable.
  if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (MaxGCPauseMillis < 1) {
      vm_exit_during_initialization("MaxGCPauseMillis should be "
                                    "greater than 0");
    }
  }
  if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    if (GCPauseIntervalMillis < 1) {
      vm_exit_during_initialization("GCPauseIntervalMillis should be "
                                    "greater than 0");
    }
  }

  // Then, if the pause time target parameter was not set, set it to
  // the default value.
  if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
      // The default pause time target in G1 is 200ms
      FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
    } else {
      // We do not allow the pause interval to be set without the
      // pause time target
      vm_exit_during_initialization("GCPauseIntervalMillis cannot be set "
                                    "without setting MaxGCPauseMillis");
    }
  }

  // Then, if the interval parameter was not set, set it according to
  // the pause time target (this will also deal with the case when the
  // pause time target is the default value).
  if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
  }
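  // e.g., with the default 200 ms pause time target, the pause interval
  // defaults to 201 ms, preserving the pause time target < pause interval
  // invariant described above.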

  // Finally, make sure that the two parameters are consistent.
  if (MaxGCPauseMillis >= GCPauseIntervalMillis) {
    char buffer[256];
    jio_snprintf(buffer, 256,
                 "MaxGCPauseMillis (%u) should be less than "
                 "GCPauseIntervalMillis (%u)",
                 MaxGCPauseMillis, GCPauseIntervalMillis);
    vm_exit_during_initialization(buffer);
  }

  double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
  double time_slice  = (double) GCPauseIntervalMillis / 1000.0;
  _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);
  _sigma = (double) G1ConfidencePercent / 100.0;

  // start conservatively (around 50ms is about right)
  _concurrent_mark_remark_times_ms->add(0.05);
  _concurrent_mark_cleanup_times_ms->add(0.20);
  _tenuring_threshold = MaxTenuringThreshold;
  // _max_survivor_regions will be calculated by
  // update_young_list_target_length() during initialization.
  _max_survivor_regions = 0;

  assert(GCTimeRatio > 0,
         "we should have set it to a default value set_g1_gc_flags() "
         "if a user set it to 0");
  _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));

  uintx reserve_perc = G1ReservePercent;
  // Put an artificial ceiling on this so that it's not set to a silly value.
  if (reserve_perc > 50) {
    reserve_perc = 50;
    warning("G1ReservePercent is set to a value that is too large, "
            "it's been updated to %u", reserve_perc);
  }
  _reserve_factor = (double) reserve_perc / 100.0;
  // This will be set in calculate_reserve() when the heap is expanded
  // for the first time during initialization.
  _reserve_regions = 0;

  initialize_all();
}

// Increment "i", mod "len"
static void inc_mod(int& i, int len) {
  i++; if (i == len) i = 0;
}

void G1CollectorPolicy::initialize_flags() {
  set_min_alignment(HeapRegion::GrainBytes);
  set_max_alignment(GenRemSet::max_alignment_constraint(rem_set_name()));
  if (SurvivorRatio < 1) {
    vm_exit_during_initialization("Invalid survivor ratio specified");
  }
  CollectorPolicy::initialize_flags();
}

// The easiest way to deal with the parsing of the NewSize /
// MaxNewSize / etc. parameters is to re-use the code in the
// TwoGenerationCollectorPolicy class. This is similar to what
// ParallelScavenge does with its GenerationSizer class (see
// ParallelScavengeHeap::initialize()). We might change this in the
// future, but it's a good start.
class G1YoungGenSizer : public TwoGenerationCollectorPolicy {
  size_t size_to_region_num(size_t byte_size) {
    return MAX2((size_t) 1, byte_size / HeapRegion::GrainBytes);
  }

public:
  G1YoungGenSizer() {
    initialize_flags();
    initialize_size_info();
  }

  size_t min_young_region_num() {
    return size_to_region_num(_min_gen0_size);
  }
  size_t initial_young_region_num() {
    return size_to_region_num(_initial_gen0_size);
  }
  size_t max_young_region_num() {
    return size_to_region_num(_max_gen0_size);
  }
};

void G1CollectorPolicy::init() {
  // Set aside an initial future to_space.
  _g1 = G1CollectedHeap::heap();

  assert(Heap_lock->owned_by_self(), "Locking discipline.");

  initialize_gc_policy_counters();

  G1YoungGenSizer sizer;
  size_t initial_region_num = sizer.initial_young_region_num();

  if (UseAdaptiveSizePolicy) {
    set_adaptive_young_list_length(true);
    _young_list_fixed_length = 0;
  } else {
    set_adaptive_young_list_length(false);
    _young_list_fixed_length = initial_region_num;
  }
  _free_regions_at_end_of_collection = _g1->free_regions();
  update_young_list_target_length();

  // We may immediately start allocating regions and placing them on the
  // collection set list. Initialize the per-collection set info
  start_incremental_cset_building();
}

// Create the jstat counters for the policy.
void G1CollectorPolicy::initialize_gc_policy_counters() {
  _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3);
}

bool G1CollectorPolicy::predict_will_fit(size_t young_length,
                                         double base_time_ms,
                                         size_t base_free_regions,
                                         double target_pause_time_ms) {
  if (young_length >= base_free_regions) {
    // end condition 1: not enough space for the young regions
    return false;
  }

  double accum_surv_rate = accum_yg_surv_rate_pred((int)(young_length - 1));
  size_t bytes_to_copy =
               (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
  double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
  double young_other_time_ms = predict_young_other_time_ms(young_length);
  double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms;
  if (pause_time_ms > target_pause_time_ms) {
    // end condition 2: prediction is over the target pause time
    return false;
  }

  size_t free_bytes =
                  (base_free_regions - young_length) * HeapRegion::GrainBytes;
  if ((2.0 * sigma()) * (double) bytes_to_copy > (double) free_bytes) {
    // end condition 3: out-of-space (conservatively!)
    return false;
  }

  // success!
  return true;
}
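
// Note: the binary search in calculate_young_list_target_length() below
// relies on this predicate being monotone in young_length: a longer young
// list can only add to the predicted copy and young-other times and shrink
// the remaining free space, so once a length fails, all larger lengths fail.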

void G1CollectorPolicy::calculate_reserve(size_t all_regions) {
  double reserve_regions_d = (double) all_regions * _reserve_factor;
  // We use ceiling so that if reserve_regions_d is > 0.0 (but
  // smaller than 1.0) we'll get 1.
  _reserve_regions = (size_t) ceil(reserve_regions_d);
}

size_t G1CollectorPolicy::calculate_young_list_desired_min_length(
                                                     size_t base_min_length) {
  size_t desired_min_length = 0;
  if (adaptive_young_list_length()) {
    if (_alloc_rate_ms_seq->num() > 3) {
      double now_sec = os::elapsedTime();
      double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
      double alloc_rate_ms = predict_alloc_rate_ms();
      desired_min_length = (size_t) ceil(alloc_rate_ms * when_ms);
    } else {
      // otherwise we don't have enough info to make the prediction
    }
  }
  // Here, we might want to also take into account any additional
  // constraints (i.e., user-defined minimum bound). Currently, we don't.
  return base_min_length + desired_min_length;
}

size_t G1CollectorPolicy::calculate_young_list_desired_max_length() {
  // Here, we might want to also take into account any additional
  // constraints (i.e., user-defined maximum bound). Currently, we
  // effectively don't set this bound.
  return _g1->n_regions();
}

void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
  if (rs_lengths == (size_t) -1) {
    // if it's set to the default value (-1), we should predict it;
    // otherwise, use the given value.
    rs_lengths = (size_t) get_new_prediction(_rs_lengths_seq);
  }

  // Calculate the absolute and desired min bounds.

  // This is how many young regions we already have (currently: the survivors).
  size_t base_min_length = recorded_survivor_regions();
  // This is the absolute minimum young length, which ensures that we
  // can allocate one eden region in the worst-case.
  size_t absolute_min_length = base_min_length + 1;
  size_t desired_min_length =
                     calculate_young_list_desired_min_length(base_min_length);
  if (desired_min_length < absolute_min_length) {
    desired_min_length = absolute_min_length;
  }

  // Calculate the absolute and desired max bounds.

  // We will try our best not to "eat" into the reserve.
  size_t absolute_max_length = 0;
  if (_free_regions_at_end_of_collection > _reserve_regions) {
    absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
  }
  size_t desired_max_length = calculate_young_list_desired_max_length();
  if (desired_max_length > absolute_max_length) {
    desired_max_length = absolute_max_length;
  }

  size_t young_list_target_length = 0;
  if (adaptive_young_list_length()) {
    if (full_young_gcs()) {
      young_list_target_length =
                        calculate_young_list_target_length(rs_lengths,
                                                           base_min_length,
                                                           desired_min_length,
                                                           desired_max_length);
      _rs_lengths_prediction = rs_lengths;
    } else {
      // Don't calculate anything and let the code below bound it to
      // the desired_min_length, i.e., do the next GC as soon as
      // possible to maximize how many old regions we can add to it.
    }
  } else {
    if (full_young_gcs()) {
      young_list_target_length = _young_list_fixed_length;
    } else {
      // A bit arbitrary: during partially-young GCs we allocate half
      // the young regions to try to add old regions to the CSet.
      young_list_target_length = _young_list_fixed_length / 2;
      // We choose to accept that we might go under the desired min
      // length given that we intentionally ask for a smaller young gen.
      desired_min_length = absolute_min_length;
    }
  }

  // Make sure we don't go over the desired max length, nor under the
  // desired min length. In case they clash, desired_min_length wins
  // which is why that test is second.
  if (young_list_target_length > desired_max_length) {
    young_list_target_length = desired_max_length;
  }
  if (young_list_target_length < desired_min_length) {
    young_list_target_length = desired_min_length;
  }

  assert(young_list_target_length > recorded_survivor_regions(),
         "we should be able to allocate at least one eden region");
  assert(young_list_target_length >= absolute_min_length, "post-condition");
  _young_list_target_length = young_list_target_length;

  update_max_gc_locker_expansion();
}

size_t
G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
                                                   size_t base_min_length,
                                                   size_t desired_min_length,
                                                   size_t desired_max_length) {
  assert(adaptive_young_list_length(), "pre-condition");
  assert(full_young_gcs(), "only call this for fully-young GCs");

  // In case some edge-condition makes the desired max length too small...
  if (desired_max_length <= desired_min_length) {
    return desired_min_length;
  }

  // We'll adjust min_young_length and max_young_length not to include
  // the already allocated young regions (i.e., so they reflect the
  // min and max eden regions we'll allocate). The base_min_length
  // will be reflected in the predictions by the
  // survivor_regions_evac_time prediction.
  assert(desired_min_length > base_min_length, "invariant");
  size_t min_young_length = desired_min_length - base_min_length;
  assert(desired_max_length > base_min_length, "invariant");
  size_t max_young_length = desired_max_length - base_min_length;

  double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
  double survivor_regions_evac_time = predict_survivor_regions_evac_time();
  size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq);
  size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();
  size_t scanned_cards = predict_young_card_num(adj_rs_lengths);
  double base_time_ms =
    predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
    survivor_regions_evac_time;
  size_t available_free_regions = _free_regions_at_end_of_collection;
  size_t base_free_regions = 0;
  if (available_free_regions > _reserve_regions) {
    base_free_regions = available_free_regions - _reserve_regions;
  }

  // Here, we will make sure that the shortest young length that
  // makes sense fits within the target pause time.

  if (predict_will_fit(min_young_length, base_time_ms,
                       base_free_regions, target_pause_time_ms)) {
    // The shortest young length will fit into the target pause time;
    // we'll now check whether the absolute maximum number of young
    // regions will fit in the target pause time. If not, we'll do
    // a binary search between min_young_length and max_young_length.
    if (predict_will_fit(max_young_length, base_time_ms,
                         base_free_regions, target_pause_time_ms)) {
      // The maximum young length will fit into the target pause time.
      // We are done so set min young length to the maximum length (as
      // the result is assumed to be returned in min_young_length).
      min_young_length = max_young_length;
    } else {
      // The maximum possible number of young regions will not fit within
      // the target pause time so we'll search for the optimal
      // length. The loop invariants are:
      //
      // min_young_length < max_young_length
      // min_young_length is known to fit into the target pause time
      // max_young_length is known not to fit into the target pause time
      //
      // Going into the loop we know the above hold as we've just
      // checked them. Every time around the loop we check whether
      // the middle value between min_young_length and
      // max_young_length fits into the target pause time. If it
      // does, it becomes the new min. If it doesn't, it becomes
      // the new max. This way we maintain the loop invariants.

      assert(min_young_length < max_young_length, "invariant");
      size_t diff = (max_young_length - min_young_length) / 2;
      while (diff > 0) {
        size_t young_length = min_young_length + diff;
        if (predict_will_fit(young_length, base_time_ms,
                             base_free_regions, target_pause_time_ms)) {
          min_young_length = young_length;
        } else {
          max_young_length = young_length;
        }
        assert(min_young_length <  max_young_length, "invariant");
        diff = (max_young_length - min_young_length) / 2;
      }
      // The result is min_young_length which, according to the
      // loop invariants, should fit within the target pause time.

      // These are the post-conditions of the binary search above:
      assert(min_young_length < max_young_length,
             "otherwise we should have discovered that max_young_length "
             "fits into the pause target and not done the binary search");
      assert(predict_will_fit(min_young_length, base_time_ms,
                              base_free_regions, target_pause_time_ms),
             "min_young_length, the result of the binary search, should "
             "fit into the pause target");
      assert(!predict_will_fit(min_young_length + 1, base_time_ms,
                               base_free_regions, target_pause_time_ms),
             "min_young_length, the result of the binary search, should be "
             "optimal, so no larger length should fit into the pause target");
    }
  } else {
    // Even the minimum length doesn't fit into the pause time
    // target, return it as the result nevertheless.
  }
  return base_min_length + min_young_length;
}

double G1CollectorPolicy::predict_survivor_regions_evac_time() {
  double survivor_regions_evac_time = 0.0;
  for (HeapRegion * r = _recorded_survivor_head;
       r != NULL && r != _recorded_survivor_tail->get_next_young_region();
       r = r->get_next_young_region()) {
    survivor_regions_evac_time += predict_region_elapsed_time_ms(r, true);
  }
  return survivor_regions_evac_time;
}

void G1CollectorPolicy::revise_young_list_target_length_if_necessary() {
  guarantee( adaptive_young_list_length(), "should not call this otherwise" );

  size_t rs_lengths = _g1->young_list()->sampled_rs_lengths();
  if (rs_lengths > _rs_lengths_prediction) {
    // add 10% to avoid having to recalculate often
    size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
    update_young_list_target_length(rs_lengths_prediction);
  }
}

HeapWord* G1CollectorPolicy::mem_allocate_work(size_t size,
                                               bool is_tlab,
                                               bool* gc_overhead_limit_was_exceeded) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}

// This method controls how a collector handles one or more
// of its generations being fully allocated.
HeapWord* G1CollectorPolicy::satisfy_failed_allocation(size_t size,
                                                       bool is_tlab) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}

#ifndef PRODUCT
bool G1CollectorPolicy::verify_young_ages() {
  HeapRegion* head = _g1->young_list()->first_region();
  return
    verify_young_ages(head, _short_lived_surv_rate_group);
  // also call verify_young_ages on any additional surv rate groups
}

bool
G1CollectorPolicy::verify_young_ages(HeapRegion* head,
                                     SurvRateGroup *surv_rate_group) {
  guarantee( surv_rate_group != NULL, "pre-condition" );

  const char* name = surv_rate_group->name();
  bool ret = true;
  int prev_age = -1;

  for (HeapRegion* curr = head;
       curr != NULL;
       curr = curr->get_next_young_region()) {
    SurvRateGroup* group = curr->surv_rate_group();
    if (group == NULL && !curr->is_survivor()) {
      gclog_or_tty->print_cr("## %s: encountered NULL surv_rate_group", name);
      ret = false;
    }

    if (surv_rate_group == group) {
      int age = curr->age_in_surv_rate_group();

      if (age < 0) {
        gclog_or_tty->print_cr("## %s: encountered negative age", name);
        ret = false;
      }

      if (age <= prev_age) {
        gclog_or_tty->print_cr("## %s: region ages are not strictly increasing "
                               "(%d, %d)", name, age, prev_age);
        ret = false;
      }
      prev_age = age;
    }
  }

  return ret;
}
#endif // PRODUCT

void G1CollectorPolicy::record_full_collection_start() {
  _cur_collection_start_sec = os::elapsedTime();
  // Release the future to-space so that it is available for compaction into.
  _g1->set_full_collection();
}

void G1CollectorPolicy::record_full_collection_end() {
  // Consider this like a collection pause for the purposes of allocation
  // since last pause.
  double end_sec = os::elapsedTime();
  double full_gc_time_sec = end_sec - _cur_collection_start_sec;
  double full_gc_time_ms = full_gc_time_sec * 1000.0;

  _all_full_gc_times_ms->add(full_gc_time_ms);

  update_recent_gc_times(end_sec, full_gc_time_ms);

  _g1->clear_full_collection();

  // "Nuke" the heuristics that control the fully/partially young GC
  // transitions and make sure we start with fully young GCs after the
  // Full GC.
  set_full_young_gcs(true);
  _last_full_young_gc = false;
  _should_revert_to_full_young_gcs = false;
  clear_initiate_conc_mark_if_possible();
  clear_during_initial_mark_pause();
  _known_garbage_bytes = 0;
  _known_garbage_ratio = 0.0;
  _in_marking_window = false;
  _in_marking_window_im = false;

  _short_lived_surv_rate_group->start_adding_regions();
  // also call this on any additional surv rate groups

  record_survivor_regions(0, NULL, NULL);

  _prev_region_num_young   = _region_num_young;
  _prev_region_num_tenured = _region_num_tenured;

  _free_regions_at_end_of_collection = _g1->free_regions();
  // Reset survivors SurvRateGroup.
  _survivor_surv_rate_group->reset();
  update_young_list_target_length();
}

void G1CollectorPolicy::record_stop_world_start() {
  _stop_world_start = os::elapsedTime();
}

void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
                                                      size_t start_used) {
  if (PrintGCDetails) {
    gclog_or_tty->stamp(PrintGCTimeStamps);
    gclog_or_tty->print("[GC pause");
    gclog_or_tty->print(" (%s)", full_young_gcs() ? "young" : "partial");
  }

  // We only need to do this here as the policy will only be applied
  // to the GC we're about to start, so there is no point in calculating
  // this every time we calculate / recalculate the target young length.
  update_survivors_policy();

  assert(_g1->used() == _g1->recalculate_used(),
         err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT,
                 _g1->used(), _g1->recalculate_used()));

  double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
  _all_stop_world_times_ms->add(s_w_t_ms);
  _stop_world_start = 0.0;

  _cur_collection_start_sec = start_time_sec;
  _cur_collection_pause_used_at_start_bytes = start_used;
  _cur_collection_pause_used_regions_at_start = _g1->used_regions();
  _pending_cards = _g1->pending_card_num();
  _max_pending_cards = _g1->max_pending_card_num();

  _bytes_in_collection_set_before_gc = 0;
  _bytes_copied_during_gc = 0;

  YoungList* young_list = _g1->young_list();
  _eden_bytes_before_gc = young_list->eden_used_bytes();
  _survivor_bytes_before_gc = young_list->survivor_used_bytes();
  _capacity_before_gc = _g1->capacity();

#ifdef DEBUG
  // initialise these to something well known so that we can spot
  // if they are not set properly

  for (int i = 0; i < _parallel_gc_threads; ++i) {
    _par_last_gc_worker_start_times_ms[i] = -1234.0;
    _par_last_ext_root_scan_times_ms[i] = -1234.0;
    _par_last_mark_stack_scan_times_ms[i] = -1234.0;
    _par_last_update_rs_times_ms[i] = -1234.0;
    _par_last_update_rs_processed_buffers[i] = -1234.0;
    _par_last_scan_rs_times_ms[i] = -1234.0;
    _par_last_obj_copy_times_ms[i] = -1234.0;
    _par_last_termination_times_ms[i] = -1234.0;
    _par_last_termination_attempts[i] = -1234.0;
    _par_last_gc_worker_end_times_ms[i] = -1234.0;
    _par_last_gc_worker_times_ms[i] = -1234.0;
  }
#endif

  for (int i = 0; i < _aux_num; ++i) {
    _cur_aux_times_ms[i] = 0.0;
    _cur_aux_times_set[i] = false;
  }

  _satb_drain_time_set = false;
  _last_satb_drain_processed_buffers = -1;

  _last_young_gc_full = false;

  // do that for any other surv rate groups
  _short_lived_surv_rate_group->stop_adding_regions();
  _survivors_age_table.clear();

  assert( verify_young_ages(), "region age verification" );
}

void G1CollectorPolicy::record_mark_closure_time(double mark_closure_time_ms) {
  _mark_closure_time_ms = mark_closure_time_ms;
}

void G1CollectorPolicy::record_concurrent_mark_init_end(double
                                                   mark_init_elapsed_time_ms) {
  _during_marking = true;
  assert(!initiate_conc_mark_if_possible(), "we should have cleared it by now");
  clear_during_initial_mark_pause();
  _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
}

void G1CollectorPolicy::record_concurrent_mark_remark_start() {
  _mark_remark_start_sec = os::elapsedTime();
  _during_marking = false;
}

void G1CollectorPolicy::record_concurrent_mark_remark_end() {
  double end_time_sec = os::elapsedTime();
  double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
  _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
  _cur_mark_stop_world_time_ms += elapsed_time_ms;
  _prev_collection_pause_end_ms += elapsed_time_ms;

  _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec, true);
}

void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
  _mark_cleanup_start_sec = os::elapsedTime();
}

void
G1CollectorPolicy::record_concurrent_mark_cleanup_end(size_t freed_bytes,
                                                      size_t max_live_bytes) {
  record_concurrent_mark_cleanup_end_work1(freed_bytes, max_live_bytes);
  record_concurrent_mark_cleanup_end_work2();
}

void
G1CollectorPolicy::
record_concurrent_mark_cleanup_end_work1(size_t freed_bytes,
                                         size_t max_live_bytes) {
  if (_n_marks < 2) {
    _n_marks++;
  }
}

// The important thing about this is that it includes "os::elapsedTime".
void G1CollectorPolicy::record_concurrent_mark_cleanup_end_work2() {
  double end_time_sec = os::elapsedTime();
  double elapsed_time_ms = (end_time_sec - _mark_cleanup_start_sec)*1000.0;
  _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
  _cur_mark_stop_world_time_ms += elapsed_time_ms;
  _prev_collection_pause_end_ms += elapsed_time_ms;

  _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_time_sec, true);

  _num_markings++;
  _n_pauses_at_mark_end = _n_pauses;
  _n_marks_since_last_pause++;
}

void
G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
  _should_revert_to_full_young_gcs = false;
  _last_full_young_gc = true;
  _in_marking_window = false;
}

void G1CollectorPolicy::record_concurrent_pause() {
  if (_stop_world_start > 0.0) {
    double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
    _all_yield_times_ms->add(yield_ms);
  }
}

void G1CollectorPolicy::record_concurrent_pause_end() {
}
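
// Sums n consecutive entries of sum_arr, starting at index start and
// wrapping modulo N, i.e., sum_arr is treated as a circular buffer of
// length N.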
template<class T>
T sum_of(T* sum_arr, int start, int n, int N) {
  T sum = (T)0;
  for (int i = 0; i < n; i++) {
    int j = (start + i) % N;
    sum += sum_arr[j];
  }
  return sum;
}

void G1CollectorPolicy::print_par_stats(int level,
                                        const char* str,
                                        double* data) {
  double min = data[0], max = data[0];
  double total = 0.0;
  LineBuffer buf(level);
  buf.append("[%s (ms):", str);
  for (uint i = 0; i < ParallelGCThreads; ++i) {
    double val = data[i];
    if (val < min)
      min = val;
    if (val > max)
      max = val;
    total += val;
    buf.append("  %3.1lf", val);
  }
  buf.append_and_print_cr("");
  double avg = total / (double) ParallelGCThreads;
  buf.append_and_print_cr(" Avg: %5.1lf, Min: %5.1lf, Max: %5.1lf, Diff: %5.1lf]",
    avg, min, max, max - min);
}

void G1CollectorPolicy::print_par_sizes(int level,
                                        const char* str,
                                        double* data) {
  double min = data[0], max = data[0];
  double total = 0.0;
  LineBuffer buf(level);
  buf.append("[%s :", str);
  for (uint i = 0; i < ParallelGCThreads; ++i) {
    double val = data[i];
    if (val < min)
      min = val;
    if (val > max)
      max = val;
    total += val;
    buf.append(" %d", (int) val);
  }
  buf.append_and_print_cr("");
  double avg = total / (double) ParallelGCThreads;
  buf.append_and_print_cr(" Sum: %d, Avg: %d, Min: %d, Max: %d, Diff: %d]",
    (int)total, (int)avg, (int)min, (int)max, (int)max - (int)min);
}

void G1CollectorPolicy::print_stats (int level,
                                     const char* str,
                                     double value) {
  LineBuffer(level).append_and_print_cr("[%s: %5.1lf ms]", str, value);
}

void G1CollectorPolicy::print_stats (int level,
                                     const char* str,
                                     int value) {
  LineBuffer(level).append_and_print_cr("[%s: %d]", str, value);
}

double G1CollectorPolicy::avg_value (double* data) {
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    double ret = 0.0;
    for (uint i = 0; i < ParallelGCThreads; ++i)
      ret += data[i];
    return ret / (double) ParallelGCThreads;
  } else {
    return data[0];
  }
}

double G1CollectorPolicy::max_value (double* data) {
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    double ret = data[0];
    for (uint i = 1; i < ParallelGCThreads; ++i)
      if (data[i] > ret)
        ret = data[i];
    return ret;
  } else {
    return data[0];
  }
}

double G1CollectorPolicy::sum_of_values (double* data) {
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    double sum = 0.0;
    for (uint i = 0; i < ParallelGCThreads; i++)
      sum += data[i];
    return sum;
  } else {
    return data[0];
  }
}

double G1CollectorPolicy::max_sum (double* data1,
                                   double* data2) {
  double ret = data1[0] + data2[0];

  if (G1CollectedHeap::use_parallel_gc_threads()) {
    for (uint i = 1; i < ParallelGCThreads; ++i) {
      double data = data1[i] + data2[i];
      if (data > ret)
        ret = data;
    }
  }
  return ret;
}

// Anything below that is considered to be zero
#define MIN_TIMER_GRANULARITY 0.0000001
  1160 void G1CollectorPolicy::record_collection_pause_end() {
  1161   double end_time_sec = os::elapsedTime();
  1162   double elapsed_ms = _last_pause_time_ms;
  1163   bool parallel = G1CollectedHeap::use_parallel_gc_threads();
  1164   size_t rs_size =
  1165     _cur_collection_pause_used_regions_at_start - collection_set_size();
  1166   size_t cur_used_bytes = _g1->used();
  1167   assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
  1168   bool last_pause_included_initial_mark = false;
  1169   bool update_stats = !_g1->evacuation_failed();
  1171 #ifndef PRODUCT
  1172   if (G1YoungSurvRateVerbose) {
  1173     gclog_or_tty->print_cr("");
  1174     _short_lived_surv_rate_group->print();
  1175     // do that for any other surv rate groups too
  1177 #endif // PRODUCT
  1179   last_pause_included_initial_mark = during_initial_mark_pause();
  1180   if (last_pause_included_initial_mark)
  1181     record_concurrent_mark_init_end(0.0);
  1183   size_t marking_initiating_used_threshold =
  1184     (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
  1186   if (!_g1->mark_in_progress() && !_last_full_young_gc) {
  1187     assert(!last_pause_included_initial_mark, "invariant");
  1188     if (cur_used_bytes > marking_initiating_used_threshold) {
  1189       if (cur_used_bytes > _prev_collection_pause_used_at_end_bytes) {
  1190         assert(!during_initial_mark_pause(), "we should not see this here");
  1192         ergo_verbose3(ErgoConcCycles,
  1193                       "request concurrent cycle initiation",
  1194                       ergo_format_reason("occupancy higher than threshold")
  1195                       ergo_format_byte("occupancy")
  1196                       ergo_format_byte_perc("threshold"),
  1197                       cur_used_bytes,
  1198                       marking_initiating_used_threshold,
  1199                       (double) InitiatingHeapOccupancyPercent);
  1201         // Note: this might have already been set, if during the last
  1202         // pause we decided to start a cycle but at the beginning of
  1203         // this pause we decided to postpone it. That's OK.
  1204         set_initiate_conc_mark_if_possible();
  1205       } else {
  1206         ergo_verbose2(ErgoConcCycles,
  1207                   "do not request concurrent cycle initiation",
  1208                   ergo_format_reason("occupancy lower than previous occupancy")
  1209                   ergo_format_byte("occupancy")
  1210                   ergo_format_byte("previous occupancy"),
  1211                   cur_used_bytes,
  1212                   _prev_collection_pause_used_at_end_bytes);
  1217   _prev_collection_pause_used_at_end_bytes = cur_used_bytes;
  1219   _mmu_tracker->add_pause(end_time_sec - elapsed_ms/1000.0,
  1220                           end_time_sec, false);
  1222   guarantee(_cur_collection_pause_used_regions_at_start >=
  1223             collection_set_size(),
  1224             "Negative RS size?");
  1226   // This assert is exempted when we're doing parallel collection pauses,
  1227   // because the fragmentation caused by the parallel GC allocation buffers
  1228   // can lead to more memory being used during collection than was used
  1229   // before. Best leave this out until the fragmentation problem is fixed.
  1230   // Pauses in which evacuation failed can also lead to negative
  1231   // collections, since no space is reclaimed from a region containing an
  1232   // object whose evacuation failed.
  1233   // Further, we're now always doing parallel collection.  But I'm still
  1234   // leaving this here as a placeholder for a more precise assertion later.
  1235   // (DLD, 10/05.)
  1236   assert((true || parallel) // Always using GC LABs now.
  1237          || _g1->evacuation_failed()
  1238          || _cur_collection_pause_used_at_start_bytes >= cur_used_bytes,
  1239          "Negative collection");
  1241   size_t freed_bytes =
  1242     _cur_collection_pause_used_at_start_bytes - cur_used_bytes;
  1243   size_t surviving_bytes = _collection_set_bytes_used_before - freed_bytes;
  1245   double survival_fraction =
  1246     (double)surviving_bytes/
  1247     (double)_collection_set_bytes_used_before;
  1249   _n_pauses++;
  1251   double ext_root_scan_time = avg_value(_par_last_ext_root_scan_times_ms);
  1252   double mark_stack_scan_time = avg_value(_par_last_mark_stack_scan_times_ms);
  1253   double update_rs_time = avg_value(_par_last_update_rs_times_ms);
  1254   double update_rs_processed_buffers =
  1255     sum_of_values(_par_last_update_rs_processed_buffers);
  1256   double scan_rs_time = avg_value(_par_last_scan_rs_times_ms);
  1257   double obj_copy_time = avg_value(_par_last_obj_copy_times_ms);
  1258   double termination_time = avg_value(_par_last_termination_times_ms);
  1260   double parallel_known_time = update_rs_time +
  1261                                ext_root_scan_time +
  1262                                mark_stack_scan_time +
  1263                                scan_rs_time +
  1264                                obj_copy_time +
  1265                                termination_time;
  1267   double parallel_other_time = _cur_collection_par_time_ms - parallel_known_time;
  1269   PauseSummary* summary = _summary;
  1271   if (update_stats) {
  1272     _recent_rs_scan_times_ms->add(scan_rs_time);
  1273     _recent_pause_times_ms->add(elapsed_ms);
  1274     _recent_rs_sizes->add(rs_size);
  1276     MainBodySummary* body_summary = summary->main_body_summary();
  1277     guarantee(body_summary != NULL, "should not be null!");
  1279     if (_satb_drain_time_set)
  1280       body_summary->record_satb_drain_time_ms(_cur_satb_drain_time_ms);
  1281     else
  1282       body_summary->record_satb_drain_time_ms(0.0);
  1284     body_summary->record_ext_root_scan_time_ms(ext_root_scan_time);
  1285     body_summary->record_mark_stack_scan_time_ms(mark_stack_scan_time);
  1286     body_summary->record_update_rs_time_ms(update_rs_time);
  1287     body_summary->record_scan_rs_time_ms(scan_rs_time);
  1288     body_summary->record_obj_copy_time_ms(obj_copy_time);
  1289     if (parallel) {
  1290       body_summary->record_parallel_time_ms(_cur_collection_par_time_ms);
  1291       body_summary->record_clear_ct_time_ms(_cur_clear_ct_time_ms);
  1292       body_summary->record_termination_time_ms(termination_time);
  1293       body_summary->record_parallel_other_time_ms(parallel_other_time);
  1295     body_summary->record_mark_closure_time_ms(_mark_closure_time_ms);
  1297     // We exempt parallel collection from this check because Alloc Buffer
  1298     // fragmentation can produce negative collections.  Same with evac
  1299     // failure.
  1300     // Further, we're now always doing parallel collection.  But I'm still
  1301     // leaving this here as a placeholder for a more precise assertion later.
  1302     // (DLD, 10/05.
  1303     assert((true || parallel)
  1304            || _g1->evacuation_failed()
  1305            || surviving_bytes <= _collection_set_bytes_used_before,
  1306            "Or else negative collection!");
  1307     _recent_CS_bytes_used_before->add(_collection_set_bytes_used_before);
  1308     _recent_CS_bytes_surviving->add(surviving_bytes);
  1310     // this is where we update the allocation rate of the application
  1311     double app_time_ms =
  1312       (_cur_collection_start_sec * 1000.0 - _prev_collection_pause_end_ms);
  1313     if (app_time_ms < MIN_TIMER_GRANULARITY) {
  1314       // This usually happens due to the timer not having the required
  1315       // granularity. Some Linuxes are the usual culprits.
  1316       // We'll just set it to something (arbitrarily) small.
  1317       app_time_ms = 1.0;
  1319     size_t regions_allocated =
  1320       (_region_num_young - _prev_region_num_young) +
  1321       (_region_num_tenured - _prev_region_num_tenured);
  1322     double alloc_rate_ms = (double) regions_allocated / app_time_ms;
  1323     _alloc_rate_ms_seq->add(alloc_rate_ms);
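           // The allocation rate is tracked as regions per millisecond of
           // mutator (non-GC) time; the prediction code roughly multiplies it
           // back by an expected mutator interval when sizing the young list.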
  1324     _prev_region_num_young   = _region_num_young;
  1325     _prev_region_num_tenured = _region_num_tenured;
  1327     double interval_ms =
  1328       (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;
  1329     update_recent_gc_times(end_time_sec, elapsed_ms);
  1330     _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms;
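           // Hypothetical numbers: if the tracked GC times sum to 120ms over
           // a 2400ms interval, the ratio is 120 / 2400 = 0.05, i.e. roughly
           // 5% of recent wall-clock time was spent in pauses.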
  1331     if (recent_avg_pause_time_ratio() < 0.0 ||
  1332         (recent_avg_pause_time_ratio() - 1.0 > 0.0)) {
  1333 #ifndef PRODUCT
  1334       // Dump info to allow post-facto debugging
  1335       gclog_or_tty->print_cr("recent_avg_pause_time_ratio() out of bounds");
  1336       gclog_or_tty->print_cr("-------------------------------------------");
  1337       gclog_or_tty->print_cr("Recent GC Times (ms):");
  1338       _recent_gc_times_ms->dump();
  1339       gclog_or_tty->print_cr("(End Time=%3.3f) Recent GC End Times (s):", end_time_sec);
  1340       _recent_prev_end_times_for_all_gcs_sec->dump();
  1341       gclog_or_tty->print_cr("GC = %3.3f, Interval = %3.3f, Ratio = %3.3f",
  1342                              _recent_gc_times_ms->sum(), interval_ms, recent_avg_pause_time_ratio());
  1343       // In debug mode, terminate the JVM if the user wants to debug at this point.
  1344       assert(!G1FailOnFPError, "Debugging data for CR 6898948 has been dumped above");
  1345 #endif  // !PRODUCT
  1346       // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
  1347       // CR 6902692 by redoing the manner in which the ratio is incrementally computed.
  1348       if (_recent_avg_pause_time_ratio < 0.0) {
  1349         _recent_avg_pause_time_ratio = 0.0;
  1350       } else {
  1351         assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
  1352         _recent_avg_pause_time_ratio = 1.0;
  1357   if (G1PolicyVerbose > 1) {
  1358     gclog_or_tty->print_cr("   Recording collection pause(%d)", _n_pauses);
  1361   if (G1PolicyVerbose > 1) {
  1362     gclog_or_tty->print_cr("      ET: %10.6f ms           (avg: %10.6f ms)\n"
  1363                            "       ET-RS:  %10.6f ms      (avg: %10.6f ms)\n"
  1364                            "      |RS|: " SIZE_FORMAT,
  1365                            elapsed_ms, recent_avg_time_for_pauses_ms(),
  1366                            scan_rs_time, recent_avg_time_for_rs_scan_ms(),
  1367                            rs_size);
  1369     gclog_or_tty->print_cr("       Used at start: " SIZE_FORMAT"K"
  1370                            "       At end " SIZE_FORMAT "K\n"
  1371                            "       garbage      : " SIZE_FORMAT "K"
  1372                            "       of     " SIZE_FORMAT "K\n"
  1373                            "       survival     : %6.2f%%  (%6.2f%% avg)",
  1374                            _cur_collection_pause_used_at_start_bytes/K,
  1375                            _g1->used()/K, freed_bytes/K,
  1376                            _collection_set_bytes_used_before/K,
  1377                            survival_fraction*100.0,
  1378                            recent_avg_survival_fraction()*100.0);
  1379     gclog_or_tty->print_cr("       Recent %% gc pause time: %6.2f",
  1380                            recent_avg_pause_time_ratio() * 100.0);
  1383   double other_time_ms = elapsed_ms;
  1385   if (_satb_drain_time_set) {
  1386     other_time_ms -= _cur_satb_drain_time_ms;
  1389   if (parallel) {
  1390     other_time_ms -= _cur_collection_par_time_ms + _cur_clear_ct_time_ms;
  1391   } else {
  1392     other_time_ms -=
  1393       update_rs_time +
  1394       ext_root_scan_time + mark_stack_scan_time +
  1395       scan_rs_time + obj_copy_time;
  1398   if (PrintGCDetails) {
  1399     gclog_or_tty->print_cr("%s, %1.8lf secs]",
  1400                            (last_pause_included_initial_mark) ? " (initial-mark)" : "",
  1401                            elapsed_ms / 1000.0);
  1403     if (_satb_drain_time_set) {
  1404       print_stats(1, "SATB Drain Time", _cur_satb_drain_time_ms);
  1406     if (_last_satb_drain_processed_buffers >= 0) {
  1407       print_stats(2, "Processed Buffers", _last_satb_drain_processed_buffers);
  1409     if (parallel) {
  1410       print_stats(1, "Parallel Time", _cur_collection_par_time_ms);
  1411       print_par_stats(2, "GC Worker Start Time", _par_last_gc_worker_start_times_ms);
  1412       print_par_stats(2, "Update RS", _par_last_update_rs_times_ms);
  1413       print_par_sizes(3, "Processed Buffers", _par_last_update_rs_processed_buffers);
  1414       print_par_stats(2, "Ext Root Scanning", _par_last_ext_root_scan_times_ms);
  1415       print_par_stats(2, "Mark Stack Scanning", _par_last_mark_stack_scan_times_ms);
  1416       print_par_stats(2, "Scan RS", _par_last_scan_rs_times_ms);
  1417       print_par_stats(2, "Object Copy", _par_last_obj_copy_times_ms);
  1418       print_par_stats(2, "Termination", _par_last_termination_times_ms);
  1419       print_par_sizes(3, "Termination Attempts", _par_last_termination_attempts);
  1420       print_par_stats(2, "GC Worker End Time", _par_last_gc_worker_end_times_ms);
  1422       for (int i = 0; i < _parallel_gc_threads; i++) {
  1423         _par_last_gc_worker_times_ms[i] = _par_last_gc_worker_end_times_ms[i] - _par_last_gc_worker_start_times_ms[i];
  1425       print_par_stats(2, "GC Worker Times", _par_last_gc_worker_times_ms);
  1427       print_stats(2, "Parallel Other", parallel_other_time);
  1428       print_stats(1, "Clear CT", _cur_clear_ct_time_ms);
  1429     } else {
  1430       print_stats(1, "Update RS", update_rs_time);
  1431       print_stats(2, "Processed Buffers",
  1432                   (int)update_rs_processed_buffers);
  1433       print_stats(1, "Ext Root Scanning", ext_root_scan_time);
  1434       print_stats(1, "Mark Stack Scanning", mark_stack_scan_time);
  1435       print_stats(1, "Scan RS", scan_rs_time);
  1436       print_stats(1, "Object Copying", obj_copy_time);
  1438 #ifndef PRODUCT
  1439     print_stats(1, "Cur Clear CC", _cur_clear_cc_time_ms);
  1440     print_stats(1, "Cum Clear CC", _cum_clear_cc_time_ms);
  1441     print_stats(1, "Min Clear CC", _min_clear_cc_time_ms);
  1442     print_stats(1, "Max Clear CC", _max_clear_cc_time_ms);
  1443     if (_num_cc_clears > 0) {
  1444       print_stats(1, "Avg Clear CC", _cum_clear_cc_time_ms / ((double)_num_cc_clears));
  1446 #endif
  1447     print_stats(1, "Other", other_time_ms);
  1448     print_stats(2, "Choose CSet", _recorded_young_cset_choice_time_ms);
  1450     for (int i = 0; i < _aux_num; ++i) {
  1451       if (_cur_aux_times_set[i]) {
  1452         char buffer[96];
  1453         sprintf(buffer, "Aux%d", i);
  1454         print_stats(1, buffer, _cur_aux_times_ms[i]);
  1459   _all_pause_times_ms->add(elapsed_ms);
  1460   if (update_stats) {
  1461     summary->record_total_time_ms(elapsed_ms);
  1462     summary->record_other_time_ms(other_time_ms);
  1464   for (int i = 0; i < _aux_num; ++i)
  1465     if (_cur_aux_times_set[i])
  1466       _all_aux_times_ms[i].add(_cur_aux_times_ms[i]);
  1468   // Reset marks-between-pauses counter.
  1469   _n_marks_since_last_pause = 0;
  1471   // Update the efficiency-since-mark vars.
  1472   double proc_ms = elapsed_ms * (double) _parallel_gc_threads;
  1473   if (elapsed_ms < MIN_TIMER_GRANULARITY) {
  1474     // This usually happens due to the timer not having the required
  1475     // granularity. Some Linuxes are the usual culprits.
  1476     // We'll just set it to something (arbitrarily) small.
  1477     proc_ms = 1.0;
  1479   double cur_efficiency = (double) freed_bytes / proc_ms;
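         // GC "efficiency" is bytes reclaimed per millisecond of GC processor
         // time (elapsed time scaled by the worker count). It is compared
         // below against the predicted fully-young efficiency when deciding
         // whether partially-young GCs are still paying off.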
  1481   bool new_in_marking_window = _in_marking_window;
  1482   bool new_in_marking_window_im = false;
  1483   if (during_initial_mark_pause()) {
  1484     new_in_marking_window = true;
  1485     new_in_marking_window_im = true;
  1488   if (_last_full_young_gc) {
  1489     ergo_verbose2(ErgoPartiallyYoungGCs,
  1490                   "start partially-young GCs",
  1491                   ergo_format_byte_perc("known garbage"),
  1492                   _known_garbage_bytes, _known_garbage_ratio * 100.0);
  1493     set_full_young_gcs(false);
  1494     _last_full_young_gc = false;
  1497   if (!_last_young_gc_full) {
  1498     if (_should_revert_to_full_young_gcs) {
  1499       ergo_verbose2(ErgoPartiallyYoungGCs,
  1500                     "end partially-young GCs",
  1501                     ergo_format_reason("partially-young GCs end requested")
  1502                     ergo_format_byte_perc("known garbage"),
  1503                     _known_garbage_bytes, _known_garbage_ratio * 100.0);
  1504       set_full_young_gcs(true);
  1505     } else if (_known_garbage_ratio < 0.05) {
  1506       ergo_verbose3(ErgoPartiallyYoungGCs,
  1507                "end partially-young GCs",
  1508                ergo_format_reason("known garbage percent lower than threshold")
  1509                ergo_format_byte_perc("known garbage")
  1510                ergo_format_perc("threshold"),
  1511                _known_garbage_bytes, _known_garbage_ratio * 100.0,
  1512                0.05 * 100.0);
  1513       set_full_young_gcs(true);
  1514     } else if (adaptive_young_list_length() &&
  1515               (get_gc_eff_factor() * cur_efficiency < predict_young_gc_eff())) {
  1516       ergo_verbose5(ErgoPartiallyYoungGCs,
  1517                     "end partially-young GCs",
  1518                     ergo_format_reason("current GC efficiency lower than "
  1519                                        "predicted fully-young GC efficiency")
  1520                     ergo_format_double("GC efficiency factor")
  1521                     ergo_format_double("current GC efficiency")
  1522                     ergo_format_double("predicted fully-young GC efficiency")
  1523                     ergo_format_byte_perc("known garbage"),
  1524                     get_gc_eff_factor(), cur_efficiency,
  1525                     predict_young_gc_eff(),
  1526                     _known_garbage_bytes, _known_garbage_ratio * 100.0);
  1527       set_full_young_gcs(true);
  1530   _should_revert_to_full_young_gcs = false;
  1532   if (_last_young_gc_full && !_during_marking) {
  1533     _young_gc_eff_seq->add(cur_efficiency);
  1536   _short_lived_surv_rate_group->start_adding_regions();
  1537   // do that for any other surv rate groups
  1539   // <NEW PREDICTION>
  1541   if (update_stats) {
  1542     double pause_time_ms = elapsed_ms;
  1544     size_t diff = 0;
  1545     if (_max_pending_cards >= _pending_cards)
  1546       diff = _max_pending_cards - _pending_cards;
  1547     _pending_card_diff_seq->add((double) diff);
  1549     double cost_per_card_ms = 0.0;
  1550     if (_pending_cards > 0) {
  1551       cost_per_card_ms = update_rs_time / (double) _pending_cards;
  1552       _cost_per_card_ms_seq->add(cost_per_card_ms);
  1555     size_t cards_scanned = _g1->cards_scanned();
  1557     double cost_per_entry_ms = 0.0;
  1558     if (cards_scanned > 10) {
  1559       cost_per_entry_ms = scan_rs_time / (double) cards_scanned;
  1560       if (_last_young_gc_full)
  1561         _cost_per_entry_ms_seq->add(cost_per_entry_ms);
  1562       else
  1563         _partially_young_cost_per_entry_ms_seq->add(cost_per_entry_ms);
  1566     if (_max_rs_lengths > 0) {
  1567       double cards_per_entry_ratio =
  1568         (double) cards_scanned / (double) _max_rs_lengths;
  1569       if (_last_young_gc_full)
  1570         _fully_young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
  1571       else
  1572         _partially_young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
  1575     // Both operands are unsigned, so guard the subtraction against underflow.
  1576     if (_max_rs_lengths >= _recorded_rs_lengths)
  1577       _rs_length_diff_seq->add((double) (_max_rs_lengths - _recorded_rs_lengths));
  1579     size_t copied_bytes = surviving_bytes;
  1580     double cost_per_byte_ms = 0.0;
  1581     if (copied_bytes > 0) {
  1582       cost_per_byte_ms = obj_copy_time / (double) copied_bytes;
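             // Object copying tends to be slower while concurrent marking is
             // active, so the cost-per-byte observations are kept in separate
             // sequences for the two regimes.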
  1583       if (_in_marking_window)
  1584         _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
  1585       else
  1586         _cost_per_byte_ms_seq->add(cost_per_byte_ms);
  1589     double all_other_time_ms = pause_time_ms -
  1590       (update_rs_time + scan_rs_time + obj_copy_time +
  1591        _mark_closure_time_ms + termination_time);
  1593     double young_other_time_ms = 0.0;
  1594     if (_recorded_young_regions > 0) {
  1595       young_other_time_ms =
  1596         _recorded_young_cset_choice_time_ms +
  1597         _recorded_young_free_cset_time_ms;
  1598       _young_other_cost_per_region_ms_seq->add(young_other_time_ms /
  1599                                              (double) _recorded_young_regions);
  1601     double non_young_other_time_ms = 0.0;
  1602     if (_recorded_non_young_regions > 0) {
  1603       non_young_other_time_ms =
  1604         _recorded_non_young_cset_choice_time_ms +
  1605         _recorded_non_young_free_cset_time_ms;
  1607       _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms /
  1608                                          (double) _recorded_non_young_regions);
  1611     double constant_other_time_ms = all_other_time_ms -
  1612       (young_other_time_ms + non_young_other_time_ms);
  1613     _constant_other_time_ms_seq->add(constant_other_time_ms);
  1615     double survival_ratio = 0.0;
  1616     if (_bytes_in_collection_set_before_gc > 0) {
  1617       survival_ratio = (double) _bytes_copied_during_gc /
  1618                                    (double) _bytes_in_collection_set_before_gc;
  1621     _pending_cards_seq->add((double) _pending_cards);
  1622     _scanned_cards_seq->add((double) cards_scanned);
  1623     _rs_lengths_seq->add((double) _max_rs_lengths);
  1625     double expensive_region_limit_ms =
  1626       (double) MaxGCPauseMillis - predict_constant_other_time_ms();
  1627     if (expensive_region_limit_ms < 0.0) {
  1628       // this means that the other time was predicted to be longer
  1629       // than the max pause time
  1630       expensive_region_limit_ms = (double) MaxGCPauseMillis;
  1632     _expensive_region_limit_ms = expensive_region_limit_ms;
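           // Hypothetical example: with MaxGCPauseMillis at 200 and a
           // predicted constant other time of 20ms, a region predicted to
           // cost more than 180ms is considered too expensive for a partial
           // collection.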
  1634     if (PREDICTIONS_VERBOSE) {
  1635       gclog_or_tty->print_cr("");
  1636       gclog_or_tty->print_cr("PREDICTIONS %1.4lf %d "
  1637                     "REGIONS %d %d %d "
  1638                     "PENDING_CARDS %d %d "
  1639                     "CARDS_SCANNED %d %d "
  1640                     "RS_LENGTHS %d %d "
  1641                     "RS_UPDATE %1.6lf %1.6lf RS_SCAN %1.6lf %1.6lf "
  1642                     "SURVIVAL_RATIO %1.6lf %1.6lf "
  1643                     "OBJECT_COPY %1.6lf %1.6lf OTHER_CONSTANT %1.6lf %1.6lf "
  1644                     "OTHER_YOUNG %1.6lf %1.6lf "
  1645                     "OTHER_NON_YOUNG %1.6lf %1.6lf "
  1646                     "VTIME_DIFF %1.6lf TERMINATION %1.6lf "
  1647                     "ELAPSED %1.6lf %1.6lf ",
  1648                     _cur_collection_start_sec,
  1649                     (!_last_young_gc_full) ? 2 :
  1650                     (last_pause_included_initial_mark) ? 1 : 0,
  1651                     _recorded_region_num,
  1652                     _recorded_young_regions,
  1653                     _recorded_non_young_regions,
  1654                     _predicted_pending_cards, _pending_cards,
  1655                     _predicted_cards_scanned, cards_scanned,
  1656                     _predicted_rs_lengths, _max_rs_lengths,
  1657                     _predicted_rs_update_time_ms, update_rs_time,
  1658                     _predicted_rs_scan_time_ms, scan_rs_time,
  1659                     _predicted_survival_ratio, survival_ratio,
  1660                     _predicted_object_copy_time_ms, obj_copy_time,
  1661                     _predicted_constant_other_time_ms, constant_other_time_ms,
  1662                     _predicted_young_other_time_ms, young_other_time_ms,
  1663                     _predicted_non_young_other_time_ms,
  1664                     non_young_other_time_ms,
  1665                     _vtime_diff_ms, termination_time,
  1666                     _predicted_pause_time_ms, elapsed_ms);
  1669     if (G1PolicyVerbose > 0) {
  1670       gclog_or_tty->print_cr("Pause Time, predicted: %1.4lfms (predicted %s), actual: %1.4lfms",
  1671                     _predicted_pause_time_ms,
  1672                     (_within_target) ? "within" : "outside",
  1673                     elapsed_ms);
  1678   _in_marking_window = new_in_marking_window;
  1679   _in_marking_window_im = new_in_marking_window_im;
  1680   _free_regions_at_end_of_collection = _g1->free_regions();
  1681   update_young_list_target_length();
  1683   // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
  1684   double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
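         // For example, assuming a 0.2s max GC time from the MMU tracker and
         // the default G1RSetUpdatingPauseTimePercent of 10, the goal is
         // 0.2 * 1000 * 10 / 100 = 20ms of RS updating per pause.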
  1685   adjust_concurrent_refinement(update_rs_time, update_rs_processed_buffers, update_rs_time_goal_ms);
  1686   // </NEW PREDICTION>
  1689 #define EXT_SIZE_FORMAT "%d%s"
  1690 #define EXT_SIZE_PARAMS(bytes)                                  \
  1691   byte_size_in_proper_unit((bytes)),                            \
  1692   proper_unit_for_byte_size((bytes))
  1694 void G1CollectorPolicy::print_heap_transition() {
  1695   if (PrintGCDetails) {
  1696     YoungList* young_list = _g1->young_list();
  1697     size_t eden_bytes = young_list->eden_used_bytes();
  1698     size_t survivor_bytes = young_list->survivor_used_bytes();
  1699     size_t used_before_gc = _cur_collection_pause_used_at_start_bytes;
  1700     size_t used = _g1->used();
  1701     size_t capacity = _g1->capacity();
  1703     gclog_or_tty->print_cr(
  1704          "   [Eden: "EXT_SIZE_FORMAT"->"EXT_SIZE_FORMAT" "
  1705              "Survivors: "EXT_SIZE_FORMAT"->"EXT_SIZE_FORMAT" "
  1706              "Heap: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"
  1707                      EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")]",
  1708              EXT_SIZE_PARAMS(_eden_bytes_before_gc),
  1709                EXT_SIZE_PARAMS(eden_bytes),
  1710              EXT_SIZE_PARAMS(_survivor_bytes_before_gc),
  1711                EXT_SIZE_PARAMS(survivor_bytes),
  1712              EXT_SIZE_PARAMS(used_before_gc),
  1713              EXT_SIZE_PARAMS(_capacity_before_gc),
  1714                EXT_SIZE_PARAMS(used),
  1715                EXT_SIZE_PARAMS(capacity));
  1716   } else if (PrintGC) {
  1717     _g1->print_size_transition(gclog_or_tty,
  1718                                _cur_collection_pause_used_at_start_bytes,
  1719                                _g1->used(), _g1->capacity());
  1723 // <NEW PREDICTION>
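       // A sketch of the adaptive refinement model implemented below: the
       // green zone is the number of completed update-log buffers that the
       // concurrent refinement threads leave unprocessed for the GC's Update
       // RS phase; yellow and red are fixed multiples (3x and 6x) of green.
       // If the last Update RS phase overran its goal, green shrinks by ~10%;
       // if it came in under goal with buffers to spare, green grows by ~10%
       // (and by at least one buffer).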
  1725 void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
  1726                                                      double update_rs_processed_buffers,
  1727                                                      double goal_ms) {
  1728   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  1729   ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();
  1731   if (G1UseAdaptiveConcRefinement) {
  1732     const int k_gy = 3, k_gr = 6;
  1733     const double inc_k = 1.1, dec_k = 0.9;
  1735     int g = cg1r->green_zone();
  1736     if (update_rs_time > goal_ms) {
  1737       g = (int)(g * dec_k);  // Can become 0; that's OK, as it means mutator-only processing.
  1738     } else {
  1739       if (update_rs_time < goal_ms && update_rs_processed_buffers > g) {
  1740         g = (int)MAX2(g * inc_k, g + 1.0);
  1743     // Change the refinement threads params
  1744     cg1r->set_green_zone(g);
  1745     cg1r->set_yellow_zone(g * k_gy);
  1746     cg1r->set_red_zone(g * k_gr);
  1747     cg1r->reinitialize_threads();
  1749     int processing_threshold_delta = MAX2((int)(cg1r->green_zone() * sigma()), 1);
  1750     int processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta,
  1751                                     cg1r->yellow_zone());
  1752     // Change the barrier params
  1753     dcqs.set_process_completed_threshold(processing_threshold);
  1754     dcqs.set_max_completed_queue(cg1r->red_zone());
  1757   int curr_queue_size = dcqs.completed_buffers_num();
  1758   if (curr_queue_size >= cg1r->yellow_zone()) {
  1759     dcqs.set_completed_queue_padding(curr_queue_size);
  1760   } else {
  1761     dcqs.set_completed_queue_padding(0);
  1763   dcqs.notify_if_necessary();
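       // The predictors below share a simple linear cost model, roughly:
       //   pause_ms ~= rs_update(pending_cards) + rs_scan(card_num)
       //             + obj_copy(bytes_to_copy) + per_region_other(region_num)
       //             + constant_other
       // with each term derived from the sampled sequences recorded at the
       // end of every pause.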
  1766 double
  1767 G1CollectorPolicy::
  1768 predict_young_collection_elapsed_time_ms(size_t adjustment) {
  1769   guarantee(adjustment == 0 || adjustment == 1, "invariant");
  1771   G1CollectedHeap* g1h = G1CollectedHeap::heap();
  1772   size_t young_num = g1h->young_list()->length();
  1773   if (young_num == 0)
  1774     return 0.0;
  1776   young_num += adjustment;
  1777   size_t pending_cards = predict_pending_cards();
  1778   size_t rs_lengths = g1h->young_list()->sampled_rs_lengths() +
  1779                       predict_rs_length_diff();
  1780   size_t card_num;
  1781   if (full_young_gcs())
  1782     card_num = predict_young_card_num(rs_lengths);
  1783   else
  1784     card_num = predict_non_young_card_num(rs_lengths);
  1785   size_t young_byte_size = young_num * HeapRegion::GrainBytes;
  1786   double accum_yg_surv_rate =
  1787     _short_lived_surv_rate_group->accum_surv_rate(adjustment);
  1789   size_t bytes_to_copy =
  1790     (size_t) (accum_yg_surv_rate * (double) HeapRegion::GrainBytes);
  1792   return
  1793     predict_rs_update_time_ms(pending_cards) +
  1794     predict_rs_scan_time_ms(card_num) +
  1795     predict_object_copy_time_ms(bytes_to_copy) +
  1796     predict_young_other_time_ms(young_num) +
  1797     predict_constant_other_time_ms();
  1800 double
  1801 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) {
  1802   size_t rs_length = predict_rs_length_diff();
  1803   size_t card_num;
  1804   if (full_young_gcs())
  1805     card_num = predict_young_card_num(rs_length);
  1806   else
  1807     card_num = predict_non_young_card_num(rs_length);
  1808   return predict_base_elapsed_time_ms(pending_cards, card_num);
  1811 double
  1812 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
  1813                                                 size_t scanned_cards) {
  1814   return
  1815     predict_rs_update_time_ms(pending_cards) +
  1816     predict_rs_scan_time_ms(scanned_cards) +
  1817     predict_constant_other_time_ms();
  1820 double
  1821 G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
  1822                                                   bool young) {
  1823   size_t rs_length = hr->rem_set()->occupied();
  1824   size_t card_num;
  1825   if (full_young_gcs())
  1826     card_num = predict_young_card_num(rs_length);
  1827   else
  1828     card_num = predict_non_young_card_num(rs_length);
  1829   size_t bytes_to_copy = predict_bytes_to_copy(hr);
  1831   double region_elapsed_time_ms =
  1832     predict_rs_scan_time_ms(card_num) +
  1833     predict_object_copy_time_ms(bytes_to_copy);
  1835   if (young)
  1836     region_elapsed_time_ms += predict_young_other_time_ms(1);
  1837   else
  1838     region_elapsed_time_ms += predict_non_young_other_time_ms(1);
  1840   return region_elapsed_time_ms;
  1843 size_t
  1844 G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
  1845   size_t bytes_to_copy;
  1846   if (hr->is_marked())
  1847     bytes_to_copy = hr->max_live_bytes();
  1848   else {
  1849     guarantee(hr->is_young() && hr->age_in_surv_rate_group() != -1,
  1850               "invariant");
  1851     int age = hr->age_in_surv_rate_group();
  1852     double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
  1853     bytes_to_copy = (size_t) ((double) hr->used() * yg_surv_rate);
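           // e.g. (hypothetical numbers): a young region with 1M of used
           // bytes and a predicted survival rate of 0.4 yields an estimated
           // 400K bytes to copy.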
  1856   return bytes_to_copy;
  1859 void
  1860 G1CollectorPolicy::start_recording_regions() {
  1861   _recorded_rs_lengths            = 0;
  1862   _recorded_young_regions         = 0;
  1863   _recorded_non_young_regions     = 0;
  1865 #if PREDICTIONS_VERBOSE
  1866   _recorded_marked_bytes          = 0;
  1867   _recorded_young_bytes           = 0;
  1868   _predicted_bytes_to_copy        = 0;
  1869   _predicted_rs_lengths           = 0;
  1870   _predicted_cards_scanned        = 0;
  1871 #endif // PREDICTIONS_VERBOSE
  1874 void
  1875 G1CollectorPolicy::record_cset_region_info(HeapRegion* hr, bool young) {
  1876 #if PREDICTIONS_VERBOSE
  1877   if (!young) {
  1878     _recorded_marked_bytes += hr->max_live_bytes();
  1880   _predicted_bytes_to_copy += predict_bytes_to_copy(hr);
  1881 #endif // PREDICTIONS_VERBOSE
  1883   size_t rs_length = hr->rem_set()->occupied();
  1884   _recorded_rs_lengths += rs_length;
  1887 void
  1888 G1CollectorPolicy::record_non_young_cset_region(HeapRegion* hr) {
  1889   assert(!hr->is_young(), "should not call this");
  1890   ++_recorded_non_young_regions;
  1891   record_cset_region_info(hr, false);
  1894 void
  1895 G1CollectorPolicy::set_recorded_young_regions(size_t n_regions) {
  1896   _recorded_young_regions = n_regions;
  1899 void G1CollectorPolicy::set_recorded_young_bytes(size_t bytes) {
  1900 #if PREDICTIONS_VERBOSE
  1901   _recorded_young_bytes = bytes;
  1902 #endif // PREDICTIONS_VERBOSE
  1905 void G1CollectorPolicy::set_recorded_rs_lengths(size_t rs_lengths) {
  1906   _recorded_rs_lengths = rs_lengths;
  1909 void G1CollectorPolicy::set_predicted_bytes_to_copy(size_t bytes) {
  1910   _predicted_bytes_to_copy = bytes;
  1913 void
  1914 G1CollectorPolicy::end_recording_regions() {
  1915   // The _predicted_pause_time_ms field is referenced in code
  1916   // not under PREDICTIONS_VERBOSE. Let's initialize it.
  1917   _predicted_pause_time_ms = -1.0;
  1919 #if PREDICTIONS_VERBOSE
  1920   _predicted_pending_cards = predict_pending_cards();
  1921   _predicted_rs_lengths = _recorded_rs_lengths + predict_rs_length_diff();
  1922   if (full_young_gcs())
  1923     _predicted_cards_scanned += predict_young_card_num(_predicted_rs_lengths);
  1924   else
  1925     _predicted_cards_scanned +=
  1926       predict_non_young_card_num(_predicted_rs_lengths);
  1927   _recorded_region_num = _recorded_young_regions + _recorded_non_young_regions;
  1929   _predicted_rs_update_time_ms =
  1930     predict_rs_update_time_ms(_g1->pending_card_num());
  1931   _predicted_rs_scan_time_ms =
  1932     predict_rs_scan_time_ms(_predicted_cards_scanned);
  1933   _predicted_object_copy_time_ms =
  1934     predict_object_copy_time_ms(_predicted_bytes_to_copy);
  1935   _predicted_constant_other_time_ms =
  1936     predict_constant_other_time_ms();
  1937   _predicted_young_other_time_ms =
  1938     predict_young_other_time_ms(_recorded_young_regions);
  1939   _predicted_non_young_other_time_ms =
  1940     predict_non_young_other_time_ms(_recorded_non_young_regions);
  1942   _predicted_pause_time_ms =
  1943     _predicted_rs_update_time_ms +
  1944     _predicted_rs_scan_time_ms +
  1945     _predicted_object_copy_time_ms +
  1946     _predicted_constant_other_time_ms +
  1947     _predicted_young_other_time_ms +
  1948     _predicted_non_young_other_time_ms;
  1949 #endif // PREDICTIONS_VERBOSE
  1952 void G1CollectorPolicy::check_if_region_is_too_expensive(double
  1953                                                            predicted_time_ms) {
  1954   // I don't think we need to do this when in young GC mode since
  1955   // marking will be initiated next time we hit the soft limit anyway...
  1956   if (predicted_time_ms > _expensive_region_limit_ms) {
  1957     ergo_verbose2(ErgoPartiallyYoungGCs,
  1958               "request partially-young GCs end",
  1959               ergo_format_reason("predicted region time higher than threshold")
  1960               ergo_format_ms("predicted region time")
  1961               ergo_format_ms("threshold"),
  1962               predicted_time_ms, _expensive_region_limit_ms);
  1963     // no point in doing another partial one
  1964     _should_revert_to_full_young_gcs = true;
  1968 // </NEW PREDICTION>
  1971 void G1CollectorPolicy::update_recent_gc_times(double end_time_sec,
  1972                                                double elapsed_ms) {
  1973   _recent_gc_times_ms->add(elapsed_ms);
  1974   _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec);
  1975   _prev_collection_pause_end_ms = end_time_sec * 1000.0;
  1978 double G1CollectorPolicy::recent_avg_time_for_pauses_ms() {
  1979   if (_recent_pause_times_ms->num() == 0) {
  1980     return (double) MaxGCPauseMillis;
  1982   return _recent_pause_times_ms->avg();
  1985 double G1CollectorPolicy::recent_avg_time_for_rs_scan_ms() {
  1986   if (_recent_rs_scan_times_ms->num() == 0) {
  1987     return (double)MaxGCPauseMillis/3.0;
  1989   return _recent_rs_scan_times_ms->avg();
  1992 int G1CollectorPolicy::number_of_recent_gcs() {
  1993   assert(_recent_rs_scan_times_ms->num() ==
  1994          _recent_pause_times_ms->num(), "Sequence out of sync");
  1995   assert(_recent_pause_times_ms->num() ==
  1996          _recent_CS_bytes_used_before->num(), "Sequence out of sync");
  1997   assert(_recent_CS_bytes_used_before->num() ==
  1998          _recent_CS_bytes_surviving->num(), "Sequence out of sync");
  2000   return _recent_pause_times_ms->num();
  2003 double G1CollectorPolicy::recent_avg_survival_fraction() {
  2004   return recent_avg_survival_fraction_work(_recent_CS_bytes_surviving,
  2005                                            _recent_CS_bytes_used_before);
  2008 double G1CollectorPolicy::last_survival_fraction() {
  2009   return last_survival_fraction_work(_recent_CS_bytes_surviving,
  2010                                      _recent_CS_bytes_used_before);
  2013 double
  2014 G1CollectorPolicy::recent_avg_survival_fraction_work(TruncatedSeq* surviving,
  2015                                                      TruncatedSeq* before) {
  2016   assert(surviving->num() == before->num(), "Sequence out of sync");
  2017   if (before->sum() > 0.0) {
  2018       double recent_survival_rate = surviving->sum() / before->sum();
  2019       // We exempt parallel collection from this check because Alloc Buffer
  2020       // fragmentation can produce negative collections.
  2021       // Further, we're now always doing parallel collection.  But I'm still
  2022       // leaving this here as a placeholder for a more precise assertion later.
  2023       // (DLD, 10/05.)
  2024       assert((true || G1CollectedHeap::use_parallel_gc_threads()) ||
  2025              _g1->evacuation_failed() ||
  2026              recent_survival_rate <= 1.0, "Or bad frac");
  2027       return recent_survival_rate;
  2028   } else {
  2029     return 1.0; // Be conservative.
  2033 double
  2034 G1CollectorPolicy::last_survival_fraction_work(TruncatedSeq* surviving,
  2035                                                TruncatedSeq* before) {
  2036   assert(surviving->num() == before->num(), "Sequence out of sync");
  2037   if (surviving->num() > 0 && before->last() > 0.0) {
  2038     double last_survival_rate = surviving->last() / before->last();
  2039     // We exempt parallel collection from this check because Alloc Buffer
  2040     // fragmentation can produce negative collections.
  2041     // Further, we're now always doing parallel collection.  But I'm still
  2042     // leaving this here as a placeholder for a more precise assertion later.
  2043     // (DLD, 10/05.)
  2044     assert((true || G1CollectedHeap::use_parallel_gc_threads()) ||
  2045            last_survival_rate <= 1.0, "Or bad frac");
  2046     return last_survival_rate;
  2047   } else {
  2048     return 1.0;
  2052 static const int survival_min_obs = 5;
  2053 static double survival_min_obs_limits[] = { 0.9, 0.7, 0.5, 0.3, 0.1 };
  2054 static const double min_survival_rate = 0.1;
  2056 double
  2057 G1CollectorPolicy::conservative_avg_survival_fraction_work(double avg,
  2058                                                            double latest) {
  2059   double res = avg;
  2060   if (number_of_recent_gcs() < survival_min_obs) {
  2061     res = MAX2(res, survival_min_obs_limits[number_of_recent_gcs()]);
  2063   res = MAX2(res, latest);
  2064   res = MAX2(res, min_survival_rate);
  2065   // In the parallel case, LAB fragmentation can produce "negative
  2066   // collections"; so can evac failure.  Cap at 1.0
  2067   res = MIN2(res, 1.0);
  2068   return res;
  2071 size_t G1CollectorPolicy::expansion_amount() {
  2072   double recent_gc_overhead = recent_avg_pause_time_ratio() * 100.0;
  2073   double threshold = _gc_overhead_perc;
  2074   if (recent_gc_overhead > threshold) {
  2075     // We will double the existing space, or take
  2076     // G1ExpandByPercentOfAvailable % of the available expansion
  2077     // space, whichever is smaller, bounded below by a minimum
  2078     // expansion (unless that's all that's left).
  2079     const size_t min_expand_bytes = 1*M;
  2080     size_t reserved_bytes = _g1->max_capacity();
  2081     size_t committed_bytes = _g1->capacity();
  2082     size_t uncommitted_bytes = reserved_bytes - committed_bytes;
  2083     size_t expand_bytes;
  2084     size_t expand_bytes_via_pct =
  2085       uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
  2086     expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
  2087     expand_bytes = MAX2(expand_bytes, min_expand_bytes);
  2088     expand_bytes = MIN2(expand_bytes, uncommitted_bytes);
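           // Hypothetical example: with 512M committed out of 1G reserved and
           // G1ExpandByPercentOfAvailable at its default of 20, the attempt
           // is 20% of the 512M uncommitted (~102M), bounded by the doubling
           // cap (512M), the 1M floor, and the uncommitted space itself.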
  2090     ergo_verbose5(ErgoHeapSizing,
  2091                   "attempt heap expansion",
  2092                   ergo_format_reason("recent GC overhead higher than "
  2093                                      "threshold after GC")
  2094                   ergo_format_perc("recent GC overhead")
  2095                   ergo_format_perc("threshold")
  2096                   ergo_format_byte("uncommitted")
  2097                   ergo_format_byte_perc("calculated expansion amount"),
  2098                   recent_gc_overhead, threshold,
  2099                   uncommitted_bytes,
  2100                   expand_bytes_via_pct, (double) G1ExpandByPercentOfAvailable);
  2102     return expand_bytes;
  2103   } else {
  2104     return 0;
  2108 void G1CollectorPolicy::note_start_of_mark_thread() {
  2109   _mark_thread_startup_sec = os::elapsedTime();
  2112 class CountCSClosure: public HeapRegionClosure {
  2113   G1CollectorPolicy* _g1_policy;
  2114 public:
  2115   CountCSClosure(G1CollectorPolicy* g1_policy) :
  2116     _g1_policy(g1_policy) {}
  2117   bool doHeapRegion(HeapRegion* r) {
  2118     _g1_policy->_bytes_in_collection_set_before_gc += r->used();
  2119     return false;
  2121 };
  2123 void G1CollectorPolicy::count_CS_bytes_used() {
  2124   CountCSClosure cs_closure(this);
  2125   _g1->collection_set_iterate(&cs_closure);
  2128 void G1CollectorPolicy::print_summary (int level,
  2129                                        const char* str,
  2130                                        NumberSeq* seq) const {
  2131   double sum = seq->sum();
  2132   LineBuffer(level + 1).append_and_print_cr("%-24s = %8.2lf s (avg = %8.2lf ms)",
  2133                 str, sum / 1000.0, seq->avg());
  2136 void G1CollectorPolicy::print_summary_sd (int level,
  2137                                           const char* str,
  2138                                           NumberSeq* seq) const {
  2139   print_summary(level, str, seq);
  2140   LineBuffer(level + 6).append_and_print_cr("(num = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
  2141                 seq->num(), seq->sd(), seq->maximum());
  2144 void G1CollectorPolicy::check_other_times(int level,
  2145                                         NumberSeq* other_times_ms,
  2146                                         NumberSeq* calc_other_times_ms) const {
  2147   bool should_print = false;
  2148   LineBuffer buf(level + 2);
  2150   double max_sum = MAX2(fabs(other_times_ms->sum()),
  2151                         fabs(calc_other_times_ms->sum()));
  2152   double min_sum = MIN2(fabs(other_times_ms->sum()),
  2153                         fabs(calc_other_times_ms->sum()));
  2154   double sum_ratio = max_sum / min_sum;
  2155   if (sum_ratio > 1.1) {
  2156     should_print = true;
  2157     buf.append_and_print_cr("### CALCULATED OTHER SUM DOESN'T MATCH RECORDED ###");
  2160   double max_avg = MAX2(fabs(other_times_ms->avg()),
  2161                         fabs(calc_other_times_ms->avg()));
  2162   double min_avg = MIN2(fabs(other_times_ms->avg()),
  2163                         fabs(calc_other_times_ms->avg()));
  2164   double avg_ratio = max_avg / min_avg;
  2165   if (avg_ratio > 1.1) {
  2166     should_print = true;
  2167     buf.append_and_print_cr("### CALCULATED OTHER AVG DOESN'T MATCH RECORDED ###");
  2170   if (other_times_ms->sum() < -0.01) {
  2171     buf.append_and_print_cr("### RECORDED OTHER SUM IS NEGATIVE ###");
  2174   if (other_times_ms->avg() < -0.01) {
  2175     buf.append_and_print_cr("### RECORDED OTHER AVG IS NEGATIVE ###");
  2178   if (calc_other_times_ms->sum() < -0.01) {
  2179     should_print = true;
  2180     buf.append_and_print_cr("### CALCULATED OTHER SUM IS NEGATIVE ###");
  2183   if (calc_other_times_ms->avg() < -0.01) {
  2184     should_print = true;
  2185     buf.append_and_print_cr("### CALCULATED OTHER AVG IS NEGATIVE ###");
  2188   if (should_print)
  2189     print_summary(level, "Other(Calc)", calc_other_times_ms);
  2192 void G1CollectorPolicy::print_summary(PauseSummary* summary) const {
  2193   bool parallel = G1CollectedHeap::use_parallel_gc_threads();
  2194   MainBodySummary*    body_summary = summary->main_body_summary();
  2195   if (summary->get_total_seq()->num() > 0) {
  2196     print_summary_sd(0, "Evacuation Pauses", summary->get_total_seq());
  2197     if (body_summary != NULL) {
  2198       print_summary(1, "SATB Drain", body_summary->get_satb_drain_seq());
  2199       if (parallel) {
  2200         print_summary(1, "Parallel Time", body_summary->get_parallel_seq());
  2201         print_summary(2, "Update RS", body_summary->get_update_rs_seq());
  2202         print_summary(2, "Ext Root Scanning",
  2203                       body_summary->get_ext_root_scan_seq());
  2204         print_summary(2, "Mark Stack Scanning",
  2205                       body_summary->get_mark_stack_scan_seq());
  2206         print_summary(2, "Scan RS", body_summary->get_scan_rs_seq());
  2207         print_summary(2, "Object Copy", body_summary->get_obj_copy_seq());
  2208         print_summary(2, "Termination", body_summary->get_termination_seq());
  2209         print_summary(2, "Other", body_summary->get_parallel_other_seq());
  2211           NumberSeq* other_parts[] = {
  2212             body_summary->get_update_rs_seq(),
  2213             body_summary->get_ext_root_scan_seq(),
  2214             body_summary->get_mark_stack_scan_seq(),
  2215             body_summary->get_scan_rs_seq(),
  2216             body_summary->get_obj_copy_seq(),
  2217             body_summary->get_termination_seq()
  2218           };
  2219           NumberSeq calc_other_times_ms(body_summary->get_parallel_seq(),
  2220                                         6, other_parts);
  2221           check_other_times(2, body_summary->get_parallel_other_seq(),
  2222                             &calc_other_times_ms);
  2224         print_summary(1, "Mark Closure", body_summary->get_mark_closure_seq());
  2225         print_summary(1, "Clear CT", body_summary->get_clear_ct_seq());
  2226       } else {
  2227         print_summary(1, "Update RS", body_summary->get_update_rs_seq());
  2228         print_summary(1, "Ext Root Scanning",
  2229                       body_summary->get_ext_root_scan_seq());
  2230         print_summary(1, "Mark Stack Scanning",
  2231                       body_summary->get_mark_stack_scan_seq());
  2232         print_summary(1, "Scan RS", body_summary->get_scan_rs_seq());
  2233         print_summary(1, "Object Copy", body_summary->get_obj_copy_seq());
  2236     print_summary(1, "Other", summary->get_other_seq());
  2238       if (body_summary != NULL) {
  2239         NumberSeq calc_other_times_ms;
  2240         if (parallel) {
  2241           // parallel
  2242           NumberSeq* other_parts[] = {
  2243             body_summary->get_satb_drain_seq(),
  2244             body_summary->get_parallel_seq(),
  2245             body_summary->get_clear_ct_seq()
  2246           };
  2247           calc_other_times_ms = NumberSeq(summary->get_total_seq(),
  2248                                                 3, other_parts);
  2249         } else {
  2250           // serial
  2251           NumberSeq* other_parts[] = {
  2252             body_summary->get_satb_drain_seq(),
  2253             body_summary->get_update_rs_seq(),
  2254             body_summary->get_ext_root_scan_seq(),
  2255             body_summary->get_mark_stack_scan_seq(),
  2256             body_summary->get_scan_rs_seq(),
  2257             body_summary->get_obj_copy_seq()
  2258           };
  2259           calc_other_times_ms = NumberSeq(summary->get_total_seq(),
  2260                                                 6, other_parts);
  2262         check_other_times(1,  summary->get_other_seq(), &calc_other_times_ms);
  2265   } else {
  2266     LineBuffer(1).append_and_print_cr("none");
  2268   LineBuffer(0).append_and_print_cr("");
  2271 void G1CollectorPolicy::print_tracing_info() const {
  2272   if (TraceGen0Time) {
  2273     gclog_or_tty->print_cr("ALL PAUSES");
  2274     print_summary_sd(0, "Total", _all_pause_times_ms);
  2275     gclog_or_tty->print_cr("");
  2276     gclog_or_tty->print_cr("");
  2277     gclog_or_tty->print_cr("   Full Young GC Pauses:    %8d", _full_young_pause_num);
  2278     gclog_or_tty->print_cr("   Partial Young GC Pauses: %8d", _partial_young_pause_num);
  2279     gclog_or_tty->print_cr("");
  2281     gclog_or_tty->print_cr("EVACUATION PAUSES");
  2282     print_summary(_summary);
  2284     gclog_or_tty->print_cr("MISC");
  2285     print_summary_sd(0, "Stop World", _all_stop_world_times_ms);
  2286     print_summary_sd(0, "Yields", _all_yield_times_ms);
  2287     for (int i = 0; i < _aux_num; ++i) {
  2288       if (_all_aux_times_ms[i].num() > 0) {
  2289         char buffer[96];
  2290         sprintf(buffer, "Aux%d", i);
  2291         print_summary_sd(0, buffer, &_all_aux_times_ms[i]);
  2295     size_t all_region_num = _region_num_young + _region_num_tenured;
  2296     gclog_or_tty->print_cr("   New Regions %8d, Young %8d (%6.2lf%%), "
  2297                "Tenured %8d (%6.2lf%%)",
  2298                all_region_num,
  2299                _region_num_young,
  2300                (double) _region_num_young / (double) all_region_num * 100.0,
  2301                _region_num_tenured,
  2302                (double) _region_num_tenured / (double) all_region_num * 100.0);
  2304   if (TraceGen1Time) {
  2305     if (_all_full_gc_times_ms->num() > 0) {
  2306       gclog_or_tty->print("\n%4d full_gcs: total time = %8.2f s",
  2307                  _all_full_gc_times_ms->num(),
  2308                  _all_full_gc_times_ms->sum() / 1000.0);
  2309       gclog_or_tty->print_cr(" (avg = %8.2fms).", _all_full_gc_times_ms->avg());
  2310       gclog_or_tty->print_cr("                     [std. dev = %8.2f ms, max = %8.2f ms]",
  2311                     _all_full_gc_times_ms->sd(),
  2312                     _all_full_gc_times_ms->maximum());
  2317 void G1CollectorPolicy::print_yg_surv_rate_info() const {
  2318 #ifndef PRODUCT
  2319   _short_lived_surv_rate_group->print_surv_rate_summary();
  2320   // add this call for any other surv rate groups
  2321 #endif // PRODUCT
  2324 void G1CollectorPolicy::update_region_num(bool young) {
  2325   if (young) {
  2326     ++_region_num_young;
  2327   } else {
  2328     ++_region_num_tenured;
  2332 #ifndef PRODUCT
  2333 // for debugging, bit of a hack...
  2334 static char*
  2335 region_num_to_mbs(int length) {
  2336   static char buffer[64];
  2337   double bytes = (double) (length * HeapRegion::GrainBytes);
  2338   double mbs = bytes / (double) (1024 * 1024);
  2339   sprintf(buffer, "%7.2lfMB", mbs);
  2340   return buffer;
  2342 #endif // PRODUCT
  2344 size_t G1CollectorPolicy::max_regions(int purpose) {
  2345   switch (purpose) {
  2346     case GCAllocForSurvived:
  2347       return _max_survivor_regions;
  2348     case GCAllocForTenured:
  2349       return REGIONS_UNLIMITED;
  2350     default:
  2351       ShouldNotReachHere();
  2352       return REGIONS_UNLIMITED;
  2353   }
  2356 void G1CollectorPolicy::update_max_gc_locker_expansion() {
  2357   size_t expansion_region_num = 0;
  2358   if (GCLockerEdenExpansionPercent > 0) {
  2359     double perc = (double) GCLockerEdenExpansionPercent / 100.0;
  2360     double expansion_region_num_d = perc * (double) _young_list_target_length;
  2361     // We use ceiling so that if expansion_region_num_d is > 0.0 (but
  2362     // less than 1.0) we'll get 1.
  2363     expansion_region_num = (size_t) ceil(expansion_region_num_d);
  2364   } else {
  2365     assert(expansion_region_num == 0, "sanity");
  2367   _young_list_max_length = _young_list_target_length + expansion_region_num;
  2368   assert(_young_list_target_length <= _young_list_max_length, "post-condition");
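         // e.g., assuming the default GCLockerEdenExpansionPercent of 5: a
         // target length of 100 regions lets the GC locker extend eden by
         // ceil(5.0) = 5 regions, for a max length of 105.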
  2371 // Calculates survivor space parameters.
  2372 void G1CollectorPolicy::update_survivors_policy() {
  2373   double max_survivor_regions_d =
  2374                  (double) _young_list_target_length / (double) SurvivorRatio;
  2375   // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
  2376   // smaller than 1.0) we'll get 1.
  2377   _max_survivor_regions = (size_t) ceil(max_survivor_regions_d);
  2379   _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
  2380         HeapRegion::GrainWords * _max_survivor_regions);
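         // e.g. (hypothetical): a young target of 60 regions with a
         // SurvivorRatio of 8 caps survivor space at ceil(60 / 8) = 8
         // regions; the tenuring threshold is then recomputed against the
         // corresponding byte limit.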
  2383 #ifndef PRODUCT
  2384 class HRSortIndexIsOKClosure: public HeapRegionClosure {
  2385   CollectionSetChooser* _chooser;
  2386 public:
  2387   HRSortIndexIsOKClosure(CollectionSetChooser* chooser) :
  2388     _chooser(chooser) {}
  2390   bool doHeapRegion(HeapRegion* r) {
  2391     if (!r->continuesHumongous()) {
  2392       assert(_chooser->regionProperlyOrdered(r), "Ought to be.");
  2394     return false;
  2396 };
  2398 bool G1CollectorPolicy_BestRegionsFirst::assertMarkedBytesDataOK() {
  2399   HRSortIndexIsOKClosure cl(_collectionSetChooser);
  2400   _g1->heap_region_iterate(&cl);
  2401   return true;
  2403 #endif
  2405 bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(
  2406                                                      GCCause::Cause gc_cause) {
  2407   bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
  2408   if (!during_cycle) {
  2409     ergo_verbose1(ErgoConcCycles,
  2410                   "request concurrent cycle initiation",
  2411                   ergo_format_reason("requested by GC cause")
  2412                   ergo_format_str("GC cause"),
  2413                   GCCause::to_string(gc_cause));
  2414     set_initiate_conc_mark_if_possible();
  2415     return true;
  2416   } else {
  2417     ergo_verbose1(ErgoConcCycles,
  2418                   "do not request concurrent cycle initiation",
  2419                   ergo_format_reason("concurrent cycle already in progress")
  2420                   ergo_format_str("GC cause"),
  2421                   GCCause::to_string(gc_cause));
  2422     return false;
  2426 void
  2427 G1CollectorPolicy::decide_on_conc_mark_initiation() {
  2428   // We are about to decide on whether this pause will be an
  2429   // initial-mark pause.
  2431   // First, during_initial_mark_pause() should not be already set. We
  2432   // will set it here if we have to. However, it should be cleared by
  2433   // the end of the pause (it's only set for the duration of an
  2434   // initial-mark pause).
  2435   assert(!during_initial_mark_pause(), "pre-condition");
  2437   if (initiate_conc_mark_if_possible()) {
  2438     // We had noticed on a previous pause that the heap occupancy has
  2439     // gone over the initiating threshold and we should start a
  2440     // concurrent marking cycle. So we might initiate one.
  2442     bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
  2443     if (!during_cycle) {
  2444       // The concurrent marking thread is not "during a cycle", i.e.,
  2445       // it has completed the last one. So we can go ahead and
  2446       // initiate a new cycle.
  2448       set_during_initial_mark_pause();
  2450       // And we can now clear initiate_conc_mark_if_possible() as
  2451       // we've already acted on it.
  2452       clear_initiate_conc_mark_if_possible();
  2454       ergo_verbose0(ErgoConcCycles,
  2455                   "initiate concurrent cycle",
  2456                   ergo_format_reason("concurrent cycle initiation requested"));
  2457     } else {
  2458       // The concurrent marking thread is still finishing up the
  2459       // previous cycle. If we start one right now the two cycles
  2460       // overlap. In particular, the concurrent marking thread might
  2461       // be in the process of clearing the next marking bitmap (which
  2462       // we will use for the next cycle if we start one). Starting a
  2463       // cycle now will be bad given that parts of the marking
  2464       // information might get cleared by the marking thread. And we
  2465       // cannot wait for the marking thread to finish the cycle as it
  2466       // periodically yields while clearing the next marking bitmap
  2467       // and, if it's in a yield point, it's waiting for us to
  2468       // finish. So, at this point we will not start a cycle and we'll
  2469       // let the concurrent marking thread complete the last one.
  2470       ergo_verbose0(ErgoConcCycles,
  2471                     "do not initiate concurrent cycle",
  2472                     ergo_format_reason("concurrent cycle already in progress"));
  2477 void
  2478 G1CollectorPolicy_BestRegionsFirst::
  2479 record_collection_pause_start(double start_time_sec, size_t start_used) {
  2480   G1CollectorPolicy::record_collection_pause_start(start_time_sec, start_used);
  2483 class KnownGarbageClosure: public HeapRegionClosure {
  2484   CollectionSetChooser* _hrSorted;
  2486 public:
  2487   KnownGarbageClosure(CollectionSetChooser* hrSorted) :
  2488     _hrSorted(hrSorted)
  2489   {}
  2491   bool doHeapRegion(HeapRegion* r) {
  2492     // We only include humongous regions in collection
  2493     // sets when concurrent mark shows that their contained object is
  2494     // unreachable.
  2496     // Do we have any marking information for this region?
  2497     if (r->is_marked()) {
  2498       // We don't include humongous regions in collection
  2499       // sets because we collect them immediately at the end of a marking
  2500       // cycle.  We also don't include young regions because we *must*
  2501       // include them in the next collection pause.
  2502       if (!r->isHumongous() && !r->is_young()) {
  2503         _hrSorted->addMarkedHeapRegion(r);
  2506     return false;
  2508 };
  2510 class ParKnownGarbageHRClosure: public HeapRegionClosure {
  2511   CollectionSetChooser* _hrSorted;
  2512   jint _marked_regions_added;
  2513   jint _chunk_size;
  2514   jint _cur_chunk_idx;
  2515   jint _cur_chunk_end; // Cur chunk [_cur_chunk_idx, _cur_chunk_end)
  2516   int _worker;
  2517   int _invokes;
  2519   void get_new_chunk() {
  2520     _cur_chunk_idx = _hrSorted->getParMarkedHeapRegionChunk(_chunk_size);
  2521     _cur_chunk_end = _cur_chunk_idx + _chunk_size;
  2523   void add_region(HeapRegion* r) {
  2524     if (_cur_chunk_idx == _cur_chunk_end) {
  2525       get_new_chunk();
  2527     assert(_cur_chunk_idx < _cur_chunk_end, "postcondition");
  2528     _hrSorted->setMarkedHeapRegion(_cur_chunk_idx, r);
  2529     _marked_regions_added++;
  2530     _cur_chunk_idx++;
  2533 public:
  2534   ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
  2535                            jint chunk_size,
  2536                            int worker) :
  2537     _hrSorted(hrSorted), _chunk_size(chunk_size), _worker(worker),
  2538     _marked_regions_added(0), _cur_chunk_idx(0), _cur_chunk_end(0),
  2539     _invokes(0)
  2540   {}
  2542   bool doHeapRegion(HeapRegion* r) {
  2543     // We only include humongous regions in collection
  2544     // sets when concurrent mark shows that their contained object is
  2545     // unreachable.
  2546     _invokes++;
  2548     // Do we have any marking information for this region?
  2549     if (r->is_marked()) {
  2550       // We don't include humongous regions in collection
  2551       // sets because we collect them immediately at the end of a marking
  2552       // cycle.
  2553       // We also do not include young regions in collection sets
  2554       if (!r->isHumongous() && !r->is_young()) {
  2555         add_region(r);
  2558     return false;
  2560   jint marked_regions_added() { return _marked_regions_added; }
  2561   int invokes() { return _invokes; }
  2562 };
  2564 class ParKnownGarbageTask: public AbstractGangTask {
  2565   CollectionSetChooser* _hrSorted;
  2566   jint _chunk_size;
  2567   G1CollectedHeap* _g1;
  2568 public:
  2569   ParKnownGarbageTask(CollectionSetChooser* hrSorted, jint chunk_size) :
  2570     AbstractGangTask("ParKnownGarbageTask"),
  2571     _hrSorted(hrSorted), _chunk_size(chunk_size),
  2572     _g1(G1CollectedHeap::heap())
  2573   {}
  2575   void work(int i) {
  2576     ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size, i);
  2577     // Back to zero for the claim value.
  2578     _g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, i,
  2579                                          HeapRegion::InitialClaimValue);
  2580     jint regions_added = parKnownGarbageCl.marked_regions_added();
  2581     _hrSorted->incNumMarkedHeapRegions(regions_added);
  2582     if (G1PrintParCleanupStats) {
  2583       gclog_or_tty->print_cr("     Thread %d called %d times, added %d regions to list.",
  2584                  i, parKnownGarbageCl.invokes(), regions_added);
  2587 };
  2589 void
  2590 G1CollectorPolicy_BestRegionsFirst::
  2591 record_concurrent_mark_cleanup_end(size_t freed_bytes,
  2592                                    size_t max_live_bytes) {
  2593   double start;
  2594   if (G1PrintParCleanupStats) start = os::elapsedTime();
  2595   record_concurrent_mark_cleanup_end_work1(freed_bytes, max_live_bytes);
  2597   _collectionSetChooser->clearMarkedHeapRegions();
  2598   double clear_marked_end;
  2599   if (G1PrintParCleanupStats) {
  2600     clear_marked_end = os::elapsedTime();
  2601     gclog_or_tty->print_cr("  clear marked regions + work1: %8.3f ms.",
  2602                   (clear_marked_end - start)*1000.0);
  2604   if (G1CollectedHeap::use_parallel_gc_threads()) {
  2605     const size_t OverpartitionFactor = 4;
  2606     const size_t MinWorkUnit = 8;
  2607     const size_t WorkUnit =
  2608       MAX2(_g1->n_regions() / (ParallelGCThreads * OverpartitionFactor),
  2609            MinWorkUnit);
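           // e.g. (hypothetical): 2048 regions and ParallelGCThreads=8 give
           // MAX2(2048 / (8 * 4), 8) = 64 regions per claimed chunk.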
  2610     _collectionSetChooser->prepareForAddMarkedHeapRegionsPar(_g1->n_regions(),
  2611                                                              WorkUnit);
  2612     ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser,
  2613                                             (int) WorkUnit);
  2614     _g1->workers()->run_task(&parKnownGarbageTask);
  2616     assert(_g1->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
  2617            "sanity check");
  2618   } else {
  2619     KnownGarbageClosure knownGarbagecl(_collectionSetChooser);
  2620     _g1->heap_region_iterate(&knownGarbagecl);
  2622   double known_garbage_end;
  2623   if (G1PrintParCleanupStats) {
  2624     known_garbage_end = os::elapsedTime();
  2625     gclog_or_tty->print_cr("  compute known garbage: %8.3f ms.",
  2626                   (known_garbage_end - clear_marked_end)*1000.0);
  2628   _collectionSetChooser->sortMarkedHeapRegions();
  2629   double sort_end;
  2630   if (G1PrintParCleanupStats) {
  2631     sort_end = os::elapsedTime();
  2632     gclog_or_tty->print_cr("  sorting: %8.3f ms.",
  2633                   (sort_end - known_garbage_end)*1000.0);
  2636   record_concurrent_mark_cleanup_end_work2();
  2637   double work2_end;
  2638   if (G1PrintParCleanupStats) {
  2639     work2_end = os::elapsedTime();
  2640     gclog_or_tty->print_cr("  work2: %8.3f ms.",
  2641                   (work2_end - sort_end)*1000.0);
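
// Illustrative arithmetic for the WorkUnit sizing above, with
// hypothetical values (not taken from the source). With
// n_regions() = 2048, ParallelGCThreads = 8 and OverpartitionFactor = 4:
//   WorkUnit = MAX2(2048 / (8 * 4), 8) = MAX2(64, 8) = 64
// i.e. the region space is split into roughly 32 chunks for 8 workers,
// so a worker that draws cheap chunks simply claims more of them,
// smoothing out load imbalance. On a very small heap (say 100 regions),
// 100 / 32 = 3 would fall below the floor and MinWorkUnit = 8 is used
// instead.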

// Add the heap region at the head of the non-incremental collection set
void G1CollectorPolicy::
add_to_collection_set(HeapRegion* hr) {
  assert(_inc_cset_build_state == Active, "Precondition");
  assert(!hr->is_young(), "non-incremental add of young region");

  if (_g1->mark_in_progress())
    _g1->concurrent_mark()->registerCSetRegion(hr);

  assert(!hr->in_collection_set(), "should not already be in the CSet");
  hr->set_in_collection_set(true);
  hr->set_next_in_collection_set(_collection_set);
  _collection_set = hr;
  _collection_set_size++;
  _collection_set_bytes_used_before += hr->used();
  _g1->register_region_with_in_cset_fast_test(hr);
}

// Initialize the per-collection-set information
void G1CollectorPolicy::start_incremental_cset_building() {
  assert(_inc_cset_build_state == Inactive, "Precondition");

  _inc_cset_head = NULL;
  _inc_cset_tail = NULL;
  _inc_cset_size = 0;
  _inc_cset_bytes_used_before = 0;

  _inc_cset_young_index = 0;

  _inc_cset_max_finger = 0;
  _inc_cset_recorded_young_bytes = 0;
  _inc_cset_recorded_rs_lengths = 0;
  _inc_cset_predicted_elapsed_time_ms = 0;
  _inc_cset_predicted_bytes_to_copy = 0;
  _inc_cset_build_state = Active;
}
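
// The incremental cset API is gated by this simple build state, as the
// preconditions in this file assert:
//   Inactive --start_incremental_cset_building()--> Active
//   Active   --stop_incremental_cset_building()---> Inactive
// Regions may only be added, removed, or updated while the state is
// Active.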

void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) {
  // This routine is used when:
  // * adding survivor regions to the incremental cset at the end of an
  //   evacuation pause,
  // * adding the current allocation region to the incremental cset
  //   when it is retired, and
  // * updating existing policy information for a region in the
  //   incremental cset via young list RSet sampling.
  // Therefore this routine may be called at a safepoint by the
  // VM thread, or in-between safepoints by mutator threads (when
  // retiring the current allocation region) or a concurrent
  // refine thread (RSet sampling).

  double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, true);
  size_t used_bytes = hr->used();

  _inc_cset_recorded_rs_lengths += rs_length;
  _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;

  _inc_cset_bytes_used_before += used_bytes;

  // Cache the values we have added to the aggregated information
  // in the heap region in case we have to remove this region from
  // the incremental collection set, or it is updated by the
  // rset sampling code
  hr->set_recorded_rs_length(rs_length);
  hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);

#if PREDICTIONS_VERBOSE
  size_t bytes_to_copy = predict_bytes_to_copy(hr);
  _inc_cset_predicted_bytes_to_copy += bytes_to_copy;

  // Record the number of bytes used in this region
  _inc_cset_recorded_young_bytes += used_bytes;

  // Cache the values we have added to the aggregated information
  // in the heap region in case we have to remove this region from
  // the incremental collection set, or it is updated by the
  // rset sampling code
  hr->set_predicted_bytes_to_copy(bytes_to_copy);
#endif // PREDICTIONS_VERBOSE
}
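
// The per-region caching above keeps the aggregate counters equal to the
// sum of the cached per-region values, which is what makes exact removal
// (below) possible. A minimal sketch of a verification pass over the
// incremental cset -- a hypothetical helper, not part of this file:
#if 0
static void verify_inc_cset_aggregates(HeapRegion* inc_cset_head,
                                       size_t recorded_rs_lengths,
                                       double predicted_elapsed_time_ms) {
  size_t rs_length_sum = 0;
  double elapsed_sum_ms = 0.0;
  for (HeapRegion* hr = inc_cset_head; hr != NULL;
       hr = hr->next_in_collection_set()) {
    rs_length_sum += hr->recorded_rs_length();
    elapsed_sum_ms += hr->predicted_elapsed_time_ms();
  }
  assert(rs_length_sum == recorded_rs_lengths,
         "per-region cache out of sync with aggregate");
  assert(fabs(elapsed_sum_ms - predicted_elapsed_time_ms) < 0.001,
         "per-region cache out of sync with aggregate");
}
#endif // illustrative sketch only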

void G1CollectorPolicy::remove_from_incremental_cset_info(HeapRegion* hr) {
  // This routine is currently only called as part of the updating of
  // existing policy information for regions in the incremental cset that
  // is performed by the concurrent refine thread(s) as part of young list
  // RSet sampling. Therefore we should not be at a safepoint.

  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(hr->is_young(), "it should be");

  size_t used_bytes = hr->used();
  size_t old_rs_length = hr->recorded_rs_length();
  double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();

  // Subtract the old recorded/predicted policy information for
  // the given heap region from the collection set info.
  _inc_cset_recorded_rs_lengths -= old_rs_length;
  _inc_cset_predicted_elapsed_time_ms -= old_elapsed_time_ms;

  _inc_cset_bytes_used_before -= used_bytes;

  // Clear the values cached in the heap region
  hr->set_recorded_rs_length(0);
  hr->set_predicted_elapsed_time_ms(0);

#if PREDICTIONS_VERBOSE
  size_t old_predicted_bytes_to_copy = hr->predicted_bytes_to_copy();
  _inc_cset_predicted_bytes_to_copy -= old_predicted_bytes_to_copy;

  // Subtract the number of bytes used in this region
  _inc_cset_recorded_young_bytes -= used_bytes;

  // Clear the values cached in the heap region
  hr->set_predicted_bytes_to_copy(0);
#endif // PREDICTIONS_VERBOSE
}

void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length) {
  // Update the collection set information that is dependent on the new RS length
  assert(hr->is_young(), "Precondition");

  remove_from_incremental_cset_info(hr);
  add_to_incremental_cset_info(hr, new_rs_length);
}
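
// A hedged sketch of how the young list RSet sampling mentioned above
// might drive this update -- a hypothetical loop for illustration only;
// the real sampling code lives with the concurrent refinement threads,
// not in this file:
#if 0
static void sample_young_rs_lengths(G1CollectorPolicy* policy,
                                    YoungList* young_list) {
  for (HeapRegion* hr = young_list->first_region(); hr != NULL;
       hr = hr->get_next_young_region()) {
    // Re-read the region's current remembered set size and swap it in
    // for the stale cached contribution (remove-then-add, as above).
    size_t new_rs_length = hr->rem_set()->occupied();
    policy->update_incremental_cset_info(hr, new_rs_length);
  }
}
#endif // illustrative sketch only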

void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) {
  assert(hr->is_young(), "invariant");
  assert(hr->young_index_in_cset() == -1, "invariant");
  assert(_inc_cset_build_state == Active, "Precondition");

  // We need to clear and set the cached recorded/predicted collection set
  // information in the heap region here (before the region gets added
  // to the collection set). An individual heap region's cached values
  // are calculated, aggregated with the policy collection set info,
  // and cached in the heap region here (initially) and (subsequently)
  // by the Young List sampling code.

  size_t rs_length = hr->rem_set()->occupied();
  add_to_incremental_cset_info(hr, rs_length);

  HeapWord* hr_end = hr->end();
  _inc_cset_max_finger = MAX2(_inc_cset_max_finger, hr_end);

  assert(!hr->in_collection_set(), "invariant");
  hr->set_in_collection_set(true);
  assert(hr->next_in_collection_set() == NULL, "invariant");

  _inc_cset_size++;
  _g1->register_region_with_in_cset_fast_test(hr);

  hr->set_young_index_in_cset((int) _inc_cset_young_index);
  ++_inc_cset_young_index;
}

// Add the region at the RHS of the incremental cset
void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) {
  // We should only ever be appending survivors at the end of a pause
  assert(hr->is_survivor(), "Logic");

  // Do the 'common' stuff
  add_region_to_incremental_cset_common(hr);

  // Now add the region at the right hand side
  if (_inc_cset_tail == NULL) {
    assert(_inc_cset_head == NULL, "invariant");
    _inc_cset_head = hr;
  } else {
    _inc_cset_tail->set_next_in_collection_set(hr);
  }
  _inc_cset_tail = hr;
}

// Add the region to the LHS of the incremental cset
void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) {
  // Survivors should be added to the RHS at the end of a pause
  assert(!hr->is_survivor(), "Logic");

  // Do the 'common' stuff
  add_region_to_incremental_cset_common(hr);

  // Add the region at the left hand side
  hr->set_next_in_collection_set(_inc_cset_head);
  if (_inc_cset_head == NULL) {
    assert(_inc_cset_tail == NULL, "Invariant");
    _inc_cset_tail = hr;
  }
  _inc_cset_head = hr;
}
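
// A worked trace of the two insertion paths above, with hypothetical
// regions: eden regions E1 and E2 are retired in that order, then
// survivor S1 is appended at the end of a pause:
//   add_region_to_incremental_cset_lhs(E1)  ->  E1
//   add_region_to_incremental_cset_lhs(E2)  ->  E2 -> E1
//   add_region_to_incremental_cset_rhs(S1)  ->  E2 -> E1 -> S1
// which yields the [Newly Young Regions ++ Survivors] layout that
// choose_collection_set() below relies on.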

#ifndef PRODUCT
void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream* st) {
  assert(list_head == inc_cset_head() || list_head == collection_set(), "must be");

  st->print_cr("\nCollection_set:");
  HeapRegion* csr = list_head;
  while (csr != NULL) {
    HeapRegion* next = csr->next_in_collection_set();
    assert(csr->in_collection_set(), "bad CS");
    st->print_cr("  [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, "
                 "age: %4d, y: %d, surv: %d",
                 csr->bottom(), csr->end(),
                 csr->top(),
                 csr->prev_top_at_mark_start(),
                 csr->next_top_at_mark_start(),
                 csr->top_at_conc_mark_count(),
                 csr->age_in_surv_rate_group_cond(),
                 csr->is_young(),
                 csr->is_survivor());
    csr = next;
  }
}
#endif // !PRODUCT

void
G1CollectorPolicy_BestRegionsFirst::choose_collection_set(
                                                  double target_pause_time_ms) {
  // Set this here - in case we're not doing young collections.
  double non_young_start_time_sec = os::elapsedTime();

  YoungList* young_list = _g1->young_list();

  start_recording_regions();

  guarantee(target_pause_time_ms > 0.0,
            err_msg("target_pause_time_ms = %1.6lf should be positive",
                    target_pause_time_ms));
  guarantee(_collection_set == NULL, "Precondition");

  double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
  double predicted_pause_time_ms = base_time_ms;

  double time_remaining_ms = target_pause_time_ms - base_time_ms;

  ergo_verbose3(ErgoCSetConstruction | ErgoHigh,
                "start choosing CSet",
                ergo_format_ms("predicted base time")
                ergo_format_ms("remaining time")
                ergo_format_ms("target pause time"),
                base_time_ms, time_remaining_ms, target_pause_time_ms);

  // the 10% and 50% values are arbitrary...
  double threshold = 0.10 * target_pause_time_ms;
  if (time_remaining_ms < threshold) {
    double prev_time_remaining_ms = time_remaining_ms;
    time_remaining_ms = 0.50 * target_pause_time_ms;
    _within_target = false;
    ergo_verbose3(ErgoCSetConstruction,
                  "adjust remaining time",
                  ergo_format_reason("remaining time lower than threshold")
                  ergo_format_ms("remaining time")
                  ergo_format_ms("threshold")
                  ergo_format_ms("adjusted remaining time"),
                  prev_time_remaining_ms, threshold, time_remaining_ms);
  } else {
    _within_target = true;
  }
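
  // A worked example of the adjustment above, with hypothetical numbers:
  // target_pause_time_ms = 200 and a predicted base time of 190 leave
  // time_remaining_ms = 10, below the 10% threshold of 20. The remaining
  // time is then reset to 50% of the target (100 ms) so that CSet
  // selection is not starved by an expensive base cost, at the price of
  // knowingly exceeding the pause goal (hence _within_target = false).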

  size_t expansion_bytes = _g1->expansion_regions() * HeapRegion::GrainBytes;

  HeapRegion* hr;
  double young_start_time_sec = os::elapsedTime();

  _collection_set_bytes_used_before = 0;
  _collection_set_size = 0;
  _young_cset_length = 0;
  _last_young_gc_full = full_young_gcs() ? true : false;

  if (_last_young_gc_full) {
    ++_full_young_pause_num;
  } else {
    ++_partial_young_pause_num;
  }

  // The young list is laid out so that the survivor regions from the
  // previous pause are appended to the RHS of the young list, i.e.
  //   [Newly Young Regions ++ Survivors from last pause].

  size_t survivor_region_num = young_list->survivor_length();
  size_t eden_region_num = young_list->length() - survivor_region_num;
  size_t old_region_num = 0;
  hr = young_list->first_survivor_region();
  while (hr != NULL) {
    assert(hr->is_survivor(), "badly formed young list");
    hr->set_young();
    hr = hr->get_next_young_region();
  }

  // Clear the fields that point to the survivor list - they are all young now.
  young_list->clear_survivors();

  if (_g1->mark_in_progress())
    _g1->concurrent_mark()->register_collection_set_finger(_inc_cset_max_finger);

  _young_cset_length = _inc_cset_young_index;
  _collection_set = _inc_cset_head;
  _collection_set_size = _inc_cset_size;
  _collection_set_bytes_used_before = _inc_cset_bytes_used_before;
  time_remaining_ms -= _inc_cset_predicted_elapsed_time_ms;
  predicted_pause_time_ms += _inc_cset_predicted_elapsed_time_ms;

  ergo_verbose3(ErgoCSetConstruction | ErgoHigh,
                "add young regions to CSet",
                ergo_format_region("eden")
                ergo_format_region("survivors")
                ergo_format_ms("predicted young region time"),
                eden_region_num, survivor_region_num,
                _inc_cset_predicted_elapsed_time_ms);

  // The number of recorded young regions is the incremental
  // collection set's current size
  set_recorded_young_regions(_inc_cset_size);
  set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);
  set_recorded_young_bytes(_inc_cset_recorded_young_bytes);
#if PREDICTIONS_VERBOSE
  set_predicted_bytes_to_copy(_inc_cset_predicted_bytes_to_copy);
#endif // PREDICTIONS_VERBOSE

  assert(_inc_cset_size == young_list->length(), "Invariant");

  double young_end_time_sec = os::elapsedTime();
  _recorded_young_cset_choice_time_ms =
    (young_end_time_sec - young_start_time_sec) * 1000.0;

  // We are doing young collections so reset this.
  non_young_start_time_sec = young_end_time_sec;

  if (!full_young_gcs()) {
    bool should_continue = true;
    NumberSeq seq;
    double avg_prediction = 100000000000000000.0; // something very large

    size_t prev_collection_set_size = _collection_set_size;
    double prev_predicted_pause_time_ms = predicted_pause_time_ms;
    do {
      hr = _collectionSetChooser->getNextMarkedRegion(time_remaining_ms,
                                                      avg_prediction);
      if (hr != NULL) {
        double predicted_time_ms = predict_region_elapsed_time_ms(hr, false);
        time_remaining_ms -= predicted_time_ms;
        predicted_pause_time_ms += predicted_time_ms;
        add_to_collection_set(hr);
        record_non_young_cset_region(hr);
        seq.add(predicted_time_ms);
        avg_prediction = seq.avg() + seq.sd();
      }

      should_continue = true;
      if (hr == NULL) {
        // No need for an ergo verbose message here,
        // getNextMarkedRegion() does this when it returns NULL.
        should_continue = false;
      } else {
        if (adaptive_young_list_length()) {
          if (time_remaining_ms < 0.0) {
            ergo_verbose1(ErgoCSetConstruction,
                          "stop adding old regions to CSet",
                          ergo_format_reason("remaining time is lower than 0")
                          ergo_format_ms("remaining time"),
                          time_remaining_ms);
            should_continue = false;
          }
        } else {
          if (_collection_set_size < _young_list_fixed_length) {
            ergo_verbose2(ErgoCSetConstruction,
                          "stop adding old regions to CSet",
                          ergo_format_reason("CSet length lower than target")
                          ergo_format_region("CSet")
                          ergo_format_region("young target"),
                          _collection_set_size, _young_list_fixed_length);
            should_continue = false;
          }
        }
      }
    } while (should_continue);
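
    // A worked illustration of the outlier guard above, with
    // hypothetical numbers: suppose three old regions predicted at
    // 2.0, 2.2 and 2.4 ms have been accepted so far; then seq.avg() is
    // 2.2, seq.sd() is about 0.16, and avg_prediction becomes about
    // 2.36 ms. The chooser (getNextMarkedRegion(), not shown in this
    // file) uses that value together with time_remaining_ms to decide
    // whether the next candidate region is still worth taking, so
    // candidates far above the typical per-region cost stop the loop
    // once the pause budget runs out.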

    if (!adaptive_young_list_length() &&
        _collection_set_size < _young_list_fixed_length) {
      ergo_verbose2(ErgoCSetConstruction,
                    "request partially-young GCs end",
                    ergo_format_reason("CSet length lower than target")
                    ergo_format_region("CSet")
                    ergo_format_region("young target"),
                    _collection_set_size, _young_list_fixed_length);
      _should_revert_to_full_young_gcs = true;
    }

    old_region_num = _collection_set_size - prev_collection_set_size;

    ergo_verbose2(ErgoCSetConstruction | ErgoHigh,
                  "add old regions to CSet",
                  ergo_format_region("old")
                  ergo_format_ms("predicted old region time"),
                  old_region_num,
                  predicted_pause_time_ms - prev_predicted_pause_time_ms);
  }

  stop_incremental_cset_building();

  count_CS_bytes_used();

  end_recording_regions();

  ergo_verbose5(ErgoCSetConstruction,
                "finish choosing CSet",
                ergo_format_region("eden")
                ergo_format_region("survivors")
                ergo_format_region("old")
                ergo_format_ms("predicted pause time")
                ergo_format_ms("target pause time"),
                eden_region_num, survivor_region_num, old_region_num,
                predicted_pause_time_ms, target_pause_time_ms);

  double non_young_end_time_sec = os::elapsedTime();
  _recorded_non_young_cset_choice_time_ms =
    (non_young_end_time_sec - non_young_start_time_sec) * 1000.0;
}

void G1CollectorPolicy_BestRegionsFirst::record_full_collection_end() {
  G1CollectorPolicy::record_full_collection_end();
  _collectionSetChooser->updateAfterFullCollection();
}

void G1CollectorPolicy_BestRegionsFirst::
record_collection_pause_end() {
  G1CollectorPolicy::record_collection_pause_end();
  assert(assertMarkedBytesDataOK(), "Marked regions not OK at pause end.");
}