src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp

author:      brutisso
date:        Wed, 26 Oct 2011 08:44:53 +0200
changeset:   3221:dbfcbecbb2dc
parent:      3219:c6a6e936dc68
child:       3268:8aae2050e83e
permissions: -rw-r--r--

7102191: G1: assert(_min_desired_young_length <= initial_region_num) failed: Initial young gen size too small
Summary: initial_region_num actually not needed.
Reviewed-by: tonyp, johnc

/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "gc_implementation/g1/concurrentG1Refine.hpp"
#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "runtime/arguments.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/debug.hpp"

#define PREDICTIONS_VERBOSE 0

// <NEW PREDICTION>

// Different defaults for different number of GC threads
// They were chosen by running GCOld and SPECjbb on debris with different
//   numbers of GC threads and choosing them based on the results

// all the same
static double rs_length_diff_defaults[] = {
  0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
};

static double cost_per_card_ms_defaults[] = {
  0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
};

// all the same
static double fully_young_cards_per_entry_ratio_defaults[] = {
  1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
};

static double cost_per_entry_ms_defaults[] = {
  0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005
};

static double cost_per_byte_ms_defaults[] = {
  0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009
};

// these should be pretty consistent
static double constant_other_time_ms_defaults[] = {
  5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0
};

static double young_other_cost_per_region_ms_defaults[] = {
  0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1
};

static double non_young_other_cost_per_region_ms_defaults[] = {
  1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
};

// </NEW PREDICTION>
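
// (Illustrative note) Each of the arrays above has eight entries, one per
// GC thread count; the constructor below picks the entry matching the
// current ParallelGCThreads setting (capped at index 7).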
// Helper class for avoiding interleaved logging
class LineBuffer: public StackObj {

private:
  static const int BUFFER_LEN = 1024;
  static const int INDENT_CHARS = 3;
  char _buffer[BUFFER_LEN];
  int _indent_level;
  int _cur;

  void vappend(const char* format, va_list ap) {
    int res = vsnprintf(&_buffer[_cur], BUFFER_LEN - _cur, format, ap);
    if (res != -1) {
      _cur += res;
    } else {
      DEBUG_ONLY(warning("buffer too small in LineBuffer");)
      _buffer[BUFFER_LEN - 1] = 0;
      _cur = BUFFER_LEN; // vsnprintf above should not add to _buffer if we are called again
    }
  }

public:
  explicit LineBuffer(int indent_level): _indent_level(indent_level), _cur(0) {
    for (; (_cur < BUFFER_LEN && _cur < (_indent_level * INDENT_CHARS)); _cur++) {
      _buffer[_cur] = ' ';
    }
  }

#ifndef PRODUCT
  ~LineBuffer() {
    assert(_cur == _indent_level * INDENT_CHARS, "pending data in buffer - append_and_print_cr() not called?");
  }
#endif

  void append(const char* format, ...) {
    va_list ap;
    va_start(ap, format);
    vappend(format, ap);
    va_end(ap);
  }

  void append_and_print_cr(const char* format, ...) {
    va_list ap;
    va_start(ap, format);
    vappend(format, ap);
    va_end(ap);
    gclog_or_tty->print_cr("%s", _buffer);
    _cur = _indent_level * INDENT_CHARS;
  }
};
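
// Illustrative usage sketch (not from the original source): assemble a
// full log line locally, then emit it with a single print_cr() call so
// output from concurrently logging threads cannot interleave mid-line:
//
//   LineBuffer buf(1);                     // indent by one level
//   buf.append("[%s", "GC Worker Times");
//   buf.append_and_print_cr(" (ms): %5.1lf]", 12.3);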
G1CollectorPolicy::G1CollectorPolicy() :
  _parallel_gc_threads(G1CollectedHeap::use_parallel_gc_threads()
                        ? ParallelGCThreads : 1),

  _n_pauses(0),
  _recent_rs_scan_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _recent_pause_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _recent_rs_sizes(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _all_pause_times_ms(new NumberSeq()),
  _stop_world_start(0.0),
  _all_stop_world_times_ms(new NumberSeq()),
  _all_yield_times_ms(new NumberSeq()),
  _using_new_ratio_calculations(false),

  _all_mod_union_times_ms(new NumberSeq()),

  _summary(new Summary()),

  _cur_clear_ct_time_ms(0.0),

  _cur_ref_proc_time_ms(0.0),
  _cur_ref_enq_time_ms(0.0),

#ifndef PRODUCT
  _min_clear_cc_time_ms(-1.0),
  _max_clear_cc_time_ms(-1.0),
  _cur_clear_cc_time_ms(0.0),
  _cum_clear_cc_time_ms(0.0),
  _num_cc_clears(0L),
#endif

  _region_num_young(0),
  _region_num_tenured(0),
  _prev_region_num_young(0),
  _prev_region_num_tenured(0),

  _aux_num(10),
  _all_aux_times_ms(new NumberSeq[_aux_num]),
  _cur_aux_start_times_ms(new double[_aux_num]),
  _cur_aux_times_ms(new double[_aux_num]),
  _cur_aux_times_set(new bool[_aux_num]),

  _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),

  // <NEW PREDICTION>

  _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _prev_collection_pause_end_ms(0.0),
  _pending_card_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
  _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _fully_young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  _partially_young_cards_per_entry_ratio_seq(
                                         new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _partially_young_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
  _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _non_young_other_cost_per_region_ms_seq(
                                         new TruncatedSeq(TruncatedSeqLength)),

  _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
  _scanned_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
  _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),

  _pause_time_target_ms((double) MaxGCPauseMillis),

  // </NEW PREDICTION>

  _full_young_gcs(true),
  _full_young_pause_num(0),
  _partial_young_pause_num(0),

  _during_marking(false),
  _in_marking_window(false),
  _in_marking_window_im(false),

  _known_garbage_ratio(0.0),
  _known_garbage_bytes(0),

  _young_gc_eff_seq(new TruncatedSeq(TruncatedSeqLength)),

  _recent_prev_end_times_for_all_gcs_sec(new TruncatedSeq(NumPrevPausesForHeuristics)),

  _recent_CS_bytes_used_before(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _recent_CS_bytes_surviving(new TruncatedSeq(NumPrevPausesForHeuristics)),

  _recent_avg_pause_time_ratio(0.0),

  _all_full_gc_times_ms(new NumberSeq()),

  // G1PausesBtwnConcMark defaults to -1
  // so the hack is to do the cast  QQQ FIXME
  _pauses_btwn_concurrent_mark((size_t)G1PausesBtwnConcMark),
  _initiate_conc_mark_if_possible(false),
  _during_initial_mark_pause(false),
  _should_revert_to_full_young_gcs(false),
  _last_full_young_gc(false),

  _eden_bytes_before_gc(0),
  _survivor_bytes_before_gc(0),
  _capacity_before_gc(0),

  _prev_collection_pause_used_at_end_bytes(0),

  _collection_set(NULL),
  _collection_set_size(0),
  _collection_set_bytes_used_before(0),

  // Incremental CSet attributes
  _inc_cset_build_state(Inactive),
  _inc_cset_head(NULL),
  _inc_cset_tail(NULL),
  _inc_cset_size(0),
  _inc_cset_young_index(0),
  _inc_cset_bytes_used_before(0),
  _inc_cset_max_finger(NULL),
  _inc_cset_recorded_young_bytes(0),
  _inc_cset_recorded_rs_lengths(0),
  _inc_cset_predicted_elapsed_time_ms(0.0),
  _inc_cset_predicted_bytes_to_copy(0),

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

  _short_lived_surv_rate_group(new SurvRateGroup(this, "Short Lived",
                                                 G1YoungSurvRateNumRegionsSummary)),
  _survivor_surv_rate_group(new SurvRateGroup(this, "Survivor",
                                              G1YoungSurvRateNumRegionsSummary)),
  // add here any more surv rate groups
  _recorded_survivor_regions(0),
  _recorded_survivor_head(NULL),
  _recorded_survivor_tail(NULL),
  _survivors_age_table(true),

  _gc_overhead_perc(0.0) {

  // Set up the region size and associated fields. Given that the
  // policy is created before the heap, we have to set this up here,
  // so it's done as soon as possible.
  HeapRegion::setup_heap_region_size(Arguments::min_heap_size());
  HeapRegionRemSet::setup_remset_size();

  G1ErgoVerbose::initialize();
  if (PrintAdaptiveSizePolicy) {
    // Currently, we only use a single switch for all the heuristics.
    G1ErgoVerbose::set_enabled(true);
    // Given that we don't currently have a verboseness level
    // parameter, we'll hardcode this to high. This can be easily
    // changed in the future.
    G1ErgoVerbose::set_level(ErgoHigh);
  } else {
    G1ErgoVerbose::set_enabled(false);
  }

  // Verify PLAB sizes
  const size_t region_size = HeapRegion::GrainWords;
  if (YoungPLABSize > region_size || OldPLABSize > region_size) {
    char buffer[128];
    jio_snprintf(buffer, sizeof(buffer), "%sPLABSize should be at most "SIZE_FORMAT,
                 OldPLABSize > region_size ? "Old" : "Young", region_size);
    vm_exit_during_initialization(buffer);
  }

  _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
  _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;

  _par_last_gc_worker_start_times_ms = new double[_parallel_gc_threads];
  _par_last_ext_root_scan_times_ms = new double[_parallel_gc_threads];
  _par_last_mark_stack_scan_times_ms = new double[_parallel_gc_threads];

  _par_last_update_rs_times_ms = new double[_parallel_gc_threads];
  _par_last_update_rs_processed_buffers = new double[_parallel_gc_threads];

  _par_last_scan_rs_times_ms = new double[_parallel_gc_threads];

  _par_last_obj_copy_times_ms = new double[_parallel_gc_threads];

  _par_last_termination_times_ms = new double[_parallel_gc_threads];
  _par_last_termination_attempts = new double[_parallel_gc_threads];
  _par_last_gc_worker_end_times_ms = new double[_parallel_gc_threads];
  _par_last_gc_worker_times_ms = new double[_parallel_gc_threads];
  _par_last_gc_worker_other_times_ms = new double[_parallel_gc_threads];

  // start conservatively
  _expensive_region_limit_ms = 0.5 * (double) MaxGCPauseMillis;

  // <NEW PREDICTION>

  int index;
  if (ParallelGCThreads == 0)
    index = 0;
  else if (ParallelGCThreads > 8)
    index = 7;
  else
    index = ParallelGCThreads - 1;
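
  // For example (illustrative): ParallelGCThreads == 4 selects index 3 in
  // the per-thread-count default arrays above; any value above 8 reuses
  // the entry tuned for 8 threads.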

  _pending_card_diff_seq->add(0.0);
  _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
  _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
  _fully_young_cards_per_entry_ratio_seq->add(
                            fully_young_cards_per_entry_ratio_defaults[index]);
  _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
  _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
  _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
  _young_other_cost_per_region_ms_seq->add(
                               young_other_cost_per_region_ms_defaults[index]);
  _non_young_other_cost_per_region_ms_seq->add(
                           non_young_other_cost_per_region_ms_defaults[index]);

  // </NEW PREDICTION>

  // Below, we might need to calculate the pause time target based on
  // the pause interval. When we do so we are going to give G1 maximum
  // flexibility and allow it to do pauses when it needs to. So, we'll
  // arrange for the pause interval to be pause time target + 1 to
  // ensure that a) the pause time target is maximized with respect to
  // the pause interval and b) we maintain the invariant that pause
  // time target < pause interval. If the user does not want this
  // maximum flexibility, they will have to set the pause interval
  // explicitly.

  // First make sure that, if either parameter is set, its value is
  // reasonable.
  if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (MaxGCPauseMillis < 1) {
      vm_exit_during_initialization("MaxGCPauseMillis should be "
                                    "greater than 0");
    }
  }
  if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    if (GCPauseIntervalMillis < 1) {
      vm_exit_during_initialization("GCPauseIntervalMillis should be "
                                    "greater than 0");
    }
  }

  // Then, if the pause time target parameter was not set, set it to
  // the default value.
  if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
      // The default pause time target in G1 is 200ms
      FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
    } else {
      // We do not allow the pause interval to be set without the
      // pause time target
      vm_exit_during_initialization("GCPauseIntervalMillis cannot be set "
                                    "without setting MaxGCPauseMillis");
    }
  }

  // Then, if the interval parameter was not set, set it according to
  // the pause time target (this will also deal with the case when the
  // pause time target is the default value).
  if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
  }
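
  // Worked example (illustrative): with neither flag set, the target
  // becomes 200 ms and the interval 201 ms, so G1 is free to pause at
  // almost any time while still honoring target < interval.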

  // Finally, make sure that the two parameters are consistent.
  if (MaxGCPauseMillis >= GCPauseIntervalMillis) {
    char buffer[256];
    jio_snprintf(buffer, 256,
                 "MaxGCPauseMillis (%u) should be less than "
                 "GCPauseIntervalMillis (%u)",
                 MaxGCPauseMillis, GCPauseIntervalMillis);
    vm_exit_during_initialization(buffer);
  }

  double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
  double time_slice  = (double) GCPauseIntervalMillis / 1000.0;
  _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);
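  // (Illustrative note) The MMU (minimum mutator utilization) tracker
  // created above expresses the pause goal as: at most max_gc_time
  // seconds of GC within any sliding window of time_slice seconds.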
  _sigma = (double) G1ConfidencePercent / 100.0;

  // start conservatively (around 50ms is about right)
  _concurrent_mark_remark_times_ms->add(0.05);
  _concurrent_mark_cleanup_times_ms->add(0.20);
  _tenuring_threshold = MaxTenuringThreshold;
  // _max_survivor_regions will be calculated by
  // update_young_list_target_length() during initialization.
  _max_survivor_regions = 0;

  assert(GCTimeRatio > 0,
         "we should have set it to a default value in set_g1_gc_flags() "
         "if a user set it to 0");
  _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));
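  // For example (illustrative): GCTimeRatio == 9 yields
  // 100 * (1 / (1 + 9)) == 10, i.e. a target GC overhead of 10%.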

  uintx reserve_perc = G1ReservePercent;
  // Put an artificial ceiling on this so that it's not set to a silly value.
  if (reserve_perc > 50) {
    reserve_perc = 50;
    warning("G1ReservePercent is set to a value that is too large, "
            "it's been updated to %u", reserve_perc);
  }
  _reserve_factor = (double) reserve_perc / 100.0;
  // This will be set when the heap is expanded
  // for the first time during initialization.
  _reserve_regions = 0;

  initialize_all();
  _collectionSetChooser = new CollectionSetChooser();
}

// Increment "i", mod "len"
static void inc_mod(int& i, int len) {
  i++; if (i == len) i = 0;
}

void G1CollectorPolicy::initialize_flags() {
  set_min_alignment(HeapRegion::GrainBytes);
  set_max_alignment(GenRemSet::max_alignment_constraint(rem_set_name()));
  if (SurvivorRatio < 1) {
    vm_exit_during_initialization("Invalid survivor ratio specified");
  }
  CollectorPolicy::initialize_flags();
}

// The easiest way to deal with the parsing of the NewSize /
// MaxNewSize / etc. parameters is to re-use the code in the
// TwoGenerationCollectorPolicy class. This is similar to what
// ParallelScavenge does with its GenerationSizer class (see
// ParallelScavengeHeap::initialize()). We might change this in the
// future, but it's a good start.
class G1YoungGenSizer : public TwoGenerationCollectorPolicy {
private:
  size_t size_to_region_num(size_t byte_size) {
    return MAX2((size_t) 1, byte_size / HeapRegion::GrainBytes);
  }

public:
  G1YoungGenSizer() {
    initialize_flags();
    initialize_size_info();
  }
  size_t min_young_region_num() {
    return size_to_region_num(_min_gen0_size);
  }
  size_t initial_young_region_num() {
    return size_to_region_num(_initial_gen0_size);
  }
  size_t max_young_region_num() {
    return size_to_region_num(_max_gen0_size);
  }
};

void G1CollectorPolicy::update_young_list_size_using_newratio(size_t number_of_heap_regions) {
  assert(number_of_heap_regions > 0, "Heap must be initialized");
  size_t young_size = number_of_heap_regions / (NewRatio + 1);
  _min_desired_young_length = young_size;
  _max_desired_young_length = young_size;
}
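
// For example (illustrative): with -XX:NewRatio=2 and a 300-region heap,
// young_size = 300 / (2 + 1) = 100 regions, used as both the min and max
// desired young length (i.e., a fixed young gen size).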

void G1CollectorPolicy::init() {
  // Set aside an initial future to_space.
  _g1 = G1CollectedHeap::heap();

  assert(Heap_lock->owned_by_self(), "Locking discipline.");

  initialize_gc_policy_counters();

  G1YoungGenSizer sizer;
  _min_desired_young_length = sizer.min_young_region_num();
  _max_desired_young_length = sizer.max_young_region_num();

  if (FLAG_IS_CMDLINE(NewRatio)) {
    if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
      warning("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
    } else {
      // Treat NewRatio as a fixed size that is only recalculated when the heap size changes
      update_young_list_size_using_newratio(_g1->n_regions());
      _using_new_ratio_calculations = true;
    }
  }

  assert(_min_desired_young_length <= _max_desired_young_length, "Invalid min/max young gen size values");

  set_adaptive_young_list_length(_min_desired_young_length < _max_desired_young_length);
  if (adaptive_young_list_length()) {
    _young_list_fixed_length = 0;
  } else {
    assert(_min_desired_young_length == _max_desired_young_length, "Min and max young size differ");
    _young_list_fixed_length = _min_desired_young_length;
  }
  _free_regions_at_end_of_collection = _g1->free_regions();
  update_young_list_target_length();
  _prev_eden_capacity = _young_list_target_length * HeapRegion::GrainBytes;

  // We may immediately start allocating regions and placing them on the
  // collection set list. Initialize the per-collection set info
  start_incremental_cset_building();
}

// Create the jstat counters for the policy.
void G1CollectorPolicy::initialize_gc_policy_counters() {
  _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3);
}

bool G1CollectorPolicy::predict_will_fit(size_t young_length,
                                         double base_time_ms,
                                         size_t base_free_regions,
                                         double target_pause_time_ms) {
  if (young_length >= base_free_regions) {
    // end condition 1: not enough space for the young regions
    return false;
  }

  double accum_surv_rate = accum_yg_surv_rate_pred((int)(young_length - 1));
  size_t bytes_to_copy =
               (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
  double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
  double young_other_time_ms = predict_young_other_time_ms(young_length);
  double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms;
  if (pause_time_ms > target_pause_time_ms) {
    // end condition 2: prediction is over the target pause time
    return false;
  }

  size_t free_bytes =
                  (base_free_regions - young_length) * HeapRegion::GrainBytes;
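  // (Illustrative note) The 2.0 * sigma() factor below widens the
  // predicted copy volume by a confidence margin derived from
  // G1ConfidencePercent, so a young length is only accepted when even a
  // pessimistic estimate of the surviving bytes fits in the free space.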
  if ((2.0 * sigma()) * (double) bytes_to_copy > (double) free_bytes) {
    // end condition 3: out-of-space (conservatively!)
    return false;
  }

  // success!
  return true;
}

void G1CollectorPolicy::record_new_heap_size(size_t new_number_of_regions) {
  // re-calculate the necessary reserve
  double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
  // We use ceiling so that if reserve_regions_d is > 0.0 (but
  // smaller than 1.0) we'll get 1.
  _reserve_regions = (size_t) ceil(reserve_regions_d);

  if (_using_new_ratio_calculations) {
    // -XX:NewRatio was specified so we need to update the
    // young gen length when the heap size has changed.
    update_young_list_size_using_newratio(new_number_of_regions);
  }
}

size_t G1CollectorPolicy::calculate_young_list_desired_min_length(
                                                     size_t base_min_length) {
  size_t desired_min_length = 0;
  if (adaptive_young_list_length()) {
    if (_alloc_rate_ms_seq->num() > 3) {
      double now_sec = os::elapsedTime();
      double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
      double alloc_rate_ms = predict_alloc_rate_ms();
      desired_min_length = (size_t) ceil(alloc_rate_ms * when_ms);
    } else {
      // otherwise we don't have enough info to make the prediction
    }
  }
  desired_min_length += base_min_length;
  // make sure we don't go below any user-defined minimum bound
  return MAX2(_min_desired_young_length, desired_min_length);
}

size_t G1CollectorPolicy::calculate_young_list_desired_max_length() {
  // Here, we might want to also take into account any additional
  // constraints (i.e., user-defined minimum bound). Currently, we
  // effectively don't set this bound.
  return _max_desired_young_length;
}

void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
  if (rs_lengths == (size_t) -1) {
    // if it's set to the default value (-1), we should predict it;
    // otherwise, use the given value.
    rs_lengths = (size_t) get_new_prediction(_rs_lengths_seq);
  }

  // Calculate the absolute and desired min bounds.

  // This is how many young regions we already have (currently: the survivors).
  size_t base_min_length = recorded_survivor_regions();
  // This is the absolute minimum young length, which ensures that we
  // can allocate one eden region in the worst-case.
  size_t absolute_min_length = base_min_length + 1;
  size_t desired_min_length =
                     calculate_young_list_desired_min_length(base_min_length);
  if (desired_min_length < absolute_min_length) {
    desired_min_length = absolute_min_length;
  }

  // Calculate the absolute and desired max bounds.

  // We will try our best not to "eat" into the reserve.
  size_t absolute_max_length = 0;
  if (_free_regions_at_end_of_collection > _reserve_regions) {
    absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
  }
  size_t desired_max_length = calculate_young_list_desired_max_length();
  if (desired_max_length > absolute_max_length) {
    desired_max_length = absolute_max_length;
  }

  size_t young_list_target_length = 0;
  if (adaptive_young_list_length()) {
    if (full_young_gcs()) {
      young_list_target_length =
                        calculate_young_list_target_length(rs_lengths,
                                                           base_min_length,
                                                           desired_min_length,
                                                           desired_max_length);
      _rs_lengths_prediction = rs_lengths;
    } else {
      // Don't calculate anything and let the code below bound it to
      // the desired_min_length, i.e., do the next GC as soon as
      // possible to maximize how many old regions we can add to it.
    }
  } else {
    if (full_young_gcs()) {
      young_list_target_length = _young_list_fixed_length;
    } else {
      // A bit arbitrary: during partially-young GCs we allocate half
      // the young regions to try to add old regions to the CSet.
      young_list_target_length = _young_list_fixed_length / 2;
      // We choose to accept that we might go under the desired min
      // length given that we intentionally ask for a smaller young gen.
      desired_min_length = absolute_min_length;
    }
  }

  // Make sure we don't go over the desired max length, nor under the
  // desired min length. In case they clash, desired_min_length wins
  // which is why that test is second.
  if (young_list_target_length > desired_max_length) {
    young_list_target_length = desired_max_length;
  }
  if (young_list_target_length < desired_min_length) {
    young_list_target_length = desired_min_length;
  }

  assert(young_list_target_length > recorded_survivor_regions(),
         "we should be able to allocate at least one eden region");
  assert(young_list_target_length >= absolute_min_length, "post-condition");
  _young_list_target_length = young_list_target_length;

  update_max_gc_locker_expansion();
}

size_t
G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
                                                   size_t base_min_length,
                                                   size_t desired_min_length,
                                                   size_t desired_max_length) {
  assert(adaptive_young_list_length(), "pre-condition");
  assert(full_young_gcs(), "only call this for fully-young GCs");

  // In case some edge-condition makes the desired max length too small...
  if (desired_max_length <= desired_min_length) {
    return desired_min_length;
  }

  // We'll adjust min_young_length and max_young_length not to include
  // the already allocated young regions (i.e., so they reflect the
  // min and max eden regions we'll allocate). The base_min_length
  // will be reflected in the predictions by the
  // survivor_regions_evac_time prediction.
  assert(desired_min_length > base_min_length, "invariant");
  size_t min_young_length = desired_min_length - base_min_length;
  assert(desired_max_length > base_min_length, "invariant");
  size_t max_young_length = desired_max_length - base_min_length;

  double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
  double survivor_regions_evac_time = predict_survivor_regions_evac_time();
  size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq);
  size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();
  size_t scanned_cards = predict_young_card_num(adj_rs_lengths);
  double base_time_ms =
    predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
    survivor_regions_evac_time;
  size_t available_free_regions = _free_regions_at_end_of_collection;
  size_t base_free_regions = 0;
  if (available_free_regions > _reserve_regions) {
    base_free_regions = available_free_regions - _reserve_regions;
  }

  // Here, we will make sure that the shortest young length that
  // makes sense fits within the target pause time.

  if (predict_will_fit(min_young_length, base_time_ms,
                       base_free_regions, target_pause_time_ms)) {
    // The shortest young length will fit into the target pause time;
    // we'll now check whether the absolute maximum number of young
    // regions will fit in the target pause time. If not, we'll do
    // a binary search between min_young_length and max_young_length.
    if (predict_will_fit(max_young_length, base_time_ms,
                         base_free_regions, target_pause_time_ms)) {
      // The maximum young length will fit into the target pause time.
      // We are done so set min young length to the maximum length (as
      // the result is assumed to be returned in min_young_length).
      min_young_length = max_young_length;
    } else {
      // The maximum possible number of young regions will not fit within
      // the target pause time so we'll search for the optimal
      // length. The loop invariants are:
      //
      // min_young_length < max_young_length
      // min_young_length is known to fit into the target pause time
      // max_young_length is known not to fit into the target pause time
      //
      // Going into the loop we know the above hold as we've just
      // checked them. Every time around the loop we check whether
      // the middle value between min_young_length and
      // max_young_length fits into the target pause time. If it
      // does, it becomes the new min. If it doesn't, it becomes
      // the new max. This way we maintain the loop invariants.

      assert(min_young_length < max_young_length, "invariant");
      size_t diff = (max_young_length - min_young_length) / 2;
      while (diff > 0) {
        size_t young_length = min_young_length + diff;
        if (predict_will_fit(young_length, base_time_ms,
                             base_free_regions, target_pause_time_ms)) {
          min_young_length = young_length;
        } else {
          max_young_length = young_length;
        }
        assert(min_young_length < max_young_length, "invariant");
        diff = (max_young_length - min_young_length) / 2;
      }
      // The result is min_young_length which, according to the
      // loop invariants, should fit within the target pause time.

      // These are the post-conditions of the binary search above:
      assert(min_young_length < max_young_length,
             "otherwise we should have discovered that max_young_length "
             "fits into the pause target and not done the binary search");
      assert(predict_will_fit(min_young_length, base_time_ms,
                              base_free_regions, target_pause_time_ms),
             "min_young_length, the result of the binary search, should "
             "fit into the pause target");
      assert(!predict_will_fit(min_young_length + 1, base_time_ms,
                               base_free_regions, target_pause_time_ms),
             "min_young_length, the result of the binary search, should be "
             "optimal, so no larger length should fit into the pause target");
    }
  } else {
    // Even the minimum length doesn't fit into the pause time
    // target, return it as the result nevertheless.
  }
  return base_min_length + min_young_length;
}

double G1CollectorPolicy::predict_survivor_regions_evac_time() {
  double survivor_regions_evac_time = 0.0;
  for (HeapRegion* r = _recorded_survivor_head;
       r != NULL && r != _recorded_survivor_tail->get_next_young_region();
       r = r->get_next_young_region()) {
    survivor_regions_evac_time += predict_region_elapsed_time_ms(r, true);
  }
  return survivor_regions_evac_time;
}

void G1CollectorPolicy::revise_young_list_target_length_if_necessary() {
  guarantee(adaptive_young_list_length(), "should not call this otherwise");

  size_t rs_lengths = _g1->young_list()->sampled_rs_lengths();
  if (rs_lengths > _rs_lengths_prediction) {
    // add 10% to avoid having to recalculate often
    size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
    update_young_list_target_length(rs_lengths_prediction);
  }
}

HeapWord* G1CollectorPolicy::mem_allocate_work(size_t size,
                                               bool is_tlab,
                                               bool* gc_overhead_limit_was_exceeded) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}

// This method controls how a collector handles one or more
// of its generations being fully allocated.
HeapWord* G1CollectorPolicy::satisfy_failed_allocation(size_t size,
                                                       bool is_tlab) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}

#ifndef PRODUCT
bool G1CollectorPolicy::verify_young_ages() {
  HeapRegion* head = _g1->young_list()->first_region();
  return
    verify_young_ages(head, _short_lived_surv_rate_group);
  // also call verify_young_ages on any additional surv rate groups
}

bool
G1CollectorPolicy::verify_young_ages(HeapRegion* head,
                                     SurvRateGroup *surv_rate_group) {
  guarantee(surv_rate_group != NULL, "pre-condition");

  const char* name = surv_rate_group->name();
  bool ret = true;
  int prev_age = -1;

  for (HeapRegion* curr = head;
       curr != NULL;
       curr = curr->get_next_young_region()) {
    SurvRateGroup* group = curr->surv_rate_group();
    if (group == NULL && !curr->is_survivor()) {
      gclog_or_tty->print_cr("## %s: encountered NULL surv_rate_group", name);
      ret = false;
    }

    if (surv_rate_group == group) {
      int age = curr->age_in_surv_rate_group();

      if (age < 0) {
        gclog_or_tty->print_cr("## %s: encountered negative age", name);
        ret = false;
      }

      if (age <= prev_age) {
        gclog_or_tty->print_cr("## %s: region ages are not strictly increasing "
                               "(%d, %d)", name, age, prev_age);
        ret = false;
      }
      prev_age = age;
    }
  }

  return ret;
}
#endif // PRODUCT

void G1CollectorPolicy::record_full_collection_start() {
  _cur_collection_start_sec = os::elapsedTime();
  // Release the future to-space so that it is available for compaction into.
  _g1->set_full_collection();
}

void G1CollectorPolicy::record_full_collection_end() {
  // Consider this like a collection pause for the purposes of allocation
  // since last pause.
  double end_sec = os::elapsedTime();
  double full_gc_time_sec = end_sec - _cur_collection_start_sec;
  double full_gc_time_ms = full_gc_time_sec * 1000.0;

  _all_full_gc_times_ms->add(full_gc_time_ms);

  update_recent_gc_times(end_sec, full_gc_time_ms);

  _g1->clear_full_collection();

  // "Nuke" the heuristics that control the fully/partially young GC
  // transitions and make sure we start with fully young GCs after the
  // Full GC.
  set_full_young_gcs(true);
  _last_full_young_gc = false;
  _should_revert_to_full_young_gcs = false;
  clear_initiate_conc_mark_if_possible();
  clear_during_initial_mark_pause();
  _known_garbage_bytes = 0;
  _known_garbage_ratio = 0.0;
  _in_marking_window = false;
  _in_marking_window_im = false;

  _short_lived_surv_rate_group->start_adding_regions();
  // also call this on any additional surv rate groups

  record_survivor_regions(0, NULL, NULL);

  _prev_region_num_young   = _region_num_young;
  _prev_region_num_tenured = _region_num_tenured;

  _free_regions_at_end_of_collection = _g1->free_regions();
  // Reset survivors SurvRateGroup.
  _survivor_surv_rate_group->reset();
  update_young_list_target_length();
  _collectionSetChooser->updateAfterFullCollection();
}

void G1CollectorPolicy::record_stop_world_start() {
  _stop_world_start = os::elapsedTime();
}

void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
                                                      size_t start_used) {
  if (PrintGCDetails) {
    gclog_or_tty->stamp(PrintGCTimeStamps);
    gclog_or_tty->print("[GC pause");
    gclog_or_tty->print(" (%s)", full_young_gcs() ? "young" : "partial");
  }

  // We only need to do this here as the policy will only be applied
  // to the GC we're about to start, so there's no point in calculating
  // this every time we calculate / recalculate the target young length.
  update_survivors_policy();

  assert(_g1->used() == _g1->recalculate_used(),
         err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT,
                 _g1->used(), _g1->recalculate_used()));

  double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
  _all_stop_world_times_ms->add(s_w_t_ms);
  _stop_world_start = 0.0;

  _cur_collection_start_sec = start_time_sec;
  _cur_collection_pause_used_at_start_bytes = start_used;
  _cur_collection_pause_used_regions_at_start = _g1->used_regions();
  _pending_cards = _g1->pending_card_num();
  _max_pending_cards = _g1->max_pending_card_num();

  _bytes_in_collection_set_before_gc = 0;
  _bytes_copied_during_gc = 0;

  YoungList* young_list = _g1->young_list();
  _eden_bytes_before_gc = young_list->eden_used_bytes();
  _survivor_bytes_before_gc = young_list->survivor_used_bytes();
  _capacity_before_gc = _g1->capacity();

#ifdef DEBUG
  // initialise these to something well known so that we can spot
  // if they are not set properly

  for (int i = 0; i < _parallel_gc_threads; ++i) {
    _par_last_gc_worker_start_times_ms[i] = -1234.0;
    _par_last_ext_root_scan_times_ms[i] = -1234.0;
    _par_last_mark_stack_scan_times_ms[i] = -1234.0;
    _par_last_update_rs_times_ms[i] = -1234.0;
    _par_last_update_rs_processed_buffers[i] = -1234.0;
    _par_last_scan_rs_times_ms[i] = -1234.0;
    _par_last_obj_copy_times_ms[i] = -1234.0;
    _par_last_termination_times_ms[i] = -1234.0;
    _par_last_termination_attempts[i] = -1234.0;
    _par_last_gc_worker_end_times_ms[i] = -1234.0;
    _par_last_gc_worker_times_ms[i] = -1234.0;
    _par_last_gc_worker_other_times_ms[i] = -1234.0;
  }
#endif

  for (int i = 0; i < _aux_num; ++i) {
    _cur_aux_times_ms[i] = 0.0;
    _cur_aux_times_set[i] = false;
  }

  // These are initialized to zero here and they are set during
  // the evacuation pause if marking is in progress.
  _cur_satb_drain_time_ms = 0.0;
  _last_satb_drain_processed_buffers = 0;

  _last_young_gc_full = false;

  // do that for any other surv rate groups
  _short_lived_surv_rate_group->stop_adding_regions();
  _survivors_age_table.clear();

  assert(verify_young_ages(), "region age verification");
}

void G1CollectorPolicy::record_mark_closure_time(double mark_closure_time_ms) {
  _mark_closure_time_ms = mark_closure_time_ms;
}

void G1CollectorPolicy::record_concurrent_mark_init_end(double
                                                   mark_init_elapsed_time_ms) {
  _during_marking = true;
  assert(!initiate_conc_mark_if_possible(), "we should have cleared it by now");
  clear_during_initial_mark_pause();
  _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
}

void G1CollectorPolicy::record_concurrent_mark_remark_start() {
  _mark_remark_start_sec = os::elapsedTime();
  _during_marking = false;
}

void G1CollectorPolicy::record_concurrent_mark_remark_end() {
  double end_time_sec = os::elapsedTime();
  double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec) * 1000.0;
  _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
  _cur_mark_stop_world_time_ms += elapsed_time_ms;
  _prev_collection_pause_end_ms += elapsed_time_ms;

  _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec, true);
}

void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
  _mark_cleanup_start_sec = os::elapsedTime();
}

void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
  _should_revert_to_full_young_gcs = false;
  _last_full_young_gc = true;
  _in_marking_window = false;
}

void G1CollectorPolicy::record_concurrent_pause() {
  if (_stop_world_start > 0.0) {
    double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
    _all_yield_times_ms->add(yield_ms);
  }
}

void G1CollectorPolicy::record_concurrent_pause_end() {
}
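
// (Illustrative note) sum_of() adds up n consecutive entries of a
// circular buffer of capacity N, starting at index `start` and wrapping
// around with modulo arithmetic.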
  1046 template<class T>
  1047 T sum_of(T* sum_arr, int start, int n, int N) {
  1048   T sum = (T)0;
  1049   for (int i = 0; i < n; i++) {
  1050     int j = (start + i) % N;
  1051     sum += sum_arr[j];
  1053   return sum;
  1056 void G1CollectorPolicy::print_par_stats(int level,
  1057                                         const char* str,
  1058                                         double* data) {
  1059   double min = data[0], max = data[0];
  1060   double total = 0.0;
  1061   LineBuffer buf(level);
  1062   buf.append("[%s (ms):", str);
  1063   for (uint i = 0; i < ParallelGCThreads; ++i) {
  1064     double val = data[i];
  1065     if (val < min)
  1066       min = val;
  1067     if (val > max)
  1068       max = val;
  1069     total += val;
  1070     buf.append("  %3.1lf", val);
  1072   buf.append_and_print_cr("");
  1073   double avg = total / (double) ParallelGCThreads;
  1074   buf.append_and_print_cr(" Avg: %5.1lf, Min: %5.1lf, Max: %5.1lf, Diff: %5.1lf]",
  1075     avg, min, max, max - min);
  1078 void G1CollectorPolicy::print_par_sizes(int level,
  1079                                         const char* str,
  1080                                         double* data) {
  1081   double min = data[0], max = data[0];
  1082   double total = 0.0;
  1083   LineBuffer buf(level);
  1084   buf.append("[%s :", str);
  1085   for (uint i = 0; i < ParallelGCThreads; ++i) {
  1086     double val = data[i];
  1087     if (val < min)
  1088       min = val;
  1089     if (val > max)
  1090       max = val;
  1091     total += val;
  1092     buf.append(" %d", (int) val);
  1094   buf.append_and_print_cr("");
  1095   double avg = total / (double) ParallelGCThreads;
  1096   buf.append_and_print_cr(" Sum: %d, Avg: %d, Min: %d, Max: %d, Diff: %d]",
  1097     (int)total, (int)avg, (int)min, (int)max, (int)max - (int)min);
  1100 void G1CollectorPolicy::print_stats(int level,
  1101                                     const char* str,
  1102                                     double value) {
  1103   LineBuffer(level).append_and_print_cr("[%s: %5.1lf ms]", str, value);
  1106 void G1CollectorPolicy::print_stats(int level,
  1107                                     const char* str,
  1108                                     int value) {
  1109   LineBuffer(level).append_and_print_cr("[%s: %d]", str, value);
  1112 double G1CollectorPolicy::avg_value(double* data) {
  1113   if (G1CollectedHeap::use_parallel_gc_threads()) {
  1114     double ret = 0.0;
  1115     for (uint i = 0; i < ParallelGCThreads; ++i) {
  1116       ret += data[i];
  1118     return ret / (double) ParallelGCThreads;
  1119   } else {
  1120     return data[0];
  1124 double G1CollectorPolicy::max_value(double* data) {
  1125   if (G1CollectedHeap::use_parallel_gc_threads()) {
  1126     double ret = data[0];
  1127     for (uint i = 1; i < ParallelGCThreads; ++i) {
  1128       if (data[i] > ret) {
  1129         ret = data[i];
  1132     return ret;
  1133   } else {
  1134     return data[0];
  1138 double G1CollectorPolicy::sum_of_values(double* data) {
  1139   if (G1CollectedHeap::use_parallel_gc_threads()) {
  1140     double sum = 0.0;
  1141     for (uint i = 0; i < ParallelGCThreads; i++) {
  1142       sum += data[i];
  1144     return sum;
  1145   } else {
  1146     return data[0];
  1150 double G1CollectorPolicy::max_sum(double* data1, double* data2) {
  1151   double ret = data1[0] + data2[0];
  1153   if (G1CollectedHeap::use_parallel_gc_threads()) {
  1154     for (uint i = 1; i < ParallelGCThreads; ++i) {
  1155       double data = data1[i] + data2[i];
  1156       if (data > ret) {
  1157         ret = data;
  1161   return ret;
  1164 // Anything below that is considered to be zero
  1165 #define MIN_TIMER_GRANULARITY 0.0000001
  1167 void G1CollectorPolicy::record_collection_pause_end() {
  1168   double end_time_sec = os::elapsedTime();
  1169   double elapsed_ms = _last_pause_time_ms;
  1170   bool parallel = G1CollectedHeap::use_parallel_gc_threads();
  1171   size_t rs_size =
  1172     _cur_collection_pause_used_regions_at_start - collection_set_size();
  1173   size_t cur_used_bytes = _g1->used();
  1174   assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
  1175   bool last_pause_included_initial_mark = false;
  1176   bool update_stats = !_g1->evacuation_failed();
  1178 #ifndef PRODUCT
  1179   if (G1YoungSurvRateVerbose) {
  1180     gclog_or_tty->print_cr("");
  1181     _short_lived_surv_rate_group->print();
  1182     // do that for any other surv rate groups too
  1184 #endif // PRODUCT
  1186   last_pause_included_initial_mark = during_initial_mark_pause();
  1187   if (last_pause_included_initial_mark)
  1188     record_concurrent_mark_init_end(0.0);
  1190   size_t marking_initiating_used_threshold =
  1191     (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
  1193   if (!_g1->mark_in_progress() && !_last_full_young_gc) {
  1194     assert(!last_pause_included_initial_mark, "invariant");
  1195     if (cur_used_bytes > marking_initiating_used_threshold) {
  1196       if (cur_used_bytes > _prev_collection_pause_used_at_end_bytes) {
  1197         assert(!during_initial_mark_pause(), "we should not see this here");
  1199         ergo_verbose3(ErgoConcCycles,
  1200                       "request concurrent cycle initiation",
  1201                       ergo_format_reason("occupancy higher than threshold")
  1202                       ergo_format_byte("occupancy")
  1203                       ergo_format_byte_perc("threshold"),
  1204                       cur_used_bytes,
  1205                       marking_initiating_used_threshold,
  1206                       (double) InitiatingHeapOccupancyPercent);
  1208         // Note: this might have already been set, if during the last
  1209         // pause we decided to start a cycle but at the beginning of
  1210         // this pause we decided to postpone it. That's OK.
  1211         set_initiate_conc_mark_if_possible();
  1212       } else {
  1213         ergo_verbose2(ErgoConcCycles,
  1214                   "do not request concurrent cycle initiation",
  1215                   ergo_format_reason("occupancy lower than previous occupancy")
  1216                   ergo_format_byte("occupancy")
  1217                   ergo_format_byte("previous occupancy"),
  1218                   cur_used_bytes,
  1219                   _prev_collection_pause_used_at_end_bytes);
  1224   _prev_collection_pause_used_at_end_bytes = cur_used_bytes;
  1226   _mmu_tracker->add_pause(end_time_sec - elapsed_ms/1000.0,
  1227                           end_time_sec, false);
  1229   guarantee(_cur_collection_pause_used_regions_at_start >=
  1230             collection_set_size(),
  1231             "Negative RS size?");
  1233   // This assert is exempted when we're doing parallel collection pauses,
  1234   // because the fragmentation caused by the parallel GC allocation buffers
  1235   // can lead to more memory being used during collection than was used
  1236   // before. Best leave this out until the fragmentation problem is fixed.
  1237   // Pauses in which evacuation failed can also lead to negative
  1238   // collections, since no space is reclaimed from a region containing an
  1239   // object whose evacuation failed.
  1240   // Further, we're now always doing parallel collection.  But I'm still
  1241   // leaving this here as a placeholder for a more precise assertion later.
  1242   // (DLD, 10/05.)
  1243   assert((true || parallel) // Always using GC LABs now.
  1244          || _g1->evacuation_failed()
  1245          || _cur_collection_pause_used_at_start_bytes >= cur_used_bytes,
  1246          "Negative collection");
  1248   size_t freed_bytes =
  1249     _cur_collection_pause_used_at_start_bytes - cur_used_bytes;
  1250   size_t surviving_bytes = _collection_set_bytes_used_before - freed_bytes;
  1252   double survival_fraction =
  1253     (double)surviving_bytes/
  1254     (double)_collection_set_bytes_used_before;
  1256   _n_pauses++;
  1258   // These values are used to update the summary information that is
  1259   // displayed when TraceGen0Time is enabled, and are output as part
  1260   // of the PrintGCDetails output, in the non-parallel case.
  1262   double ext_root_scan_time = avg_value(_par_last_ext_root_scan_times_ms);
  1263   double mark_stack_scan_time = avg_value(_par_last_mark_stack_scan_times_ms);
  1264   double update_rs_time = avg_value(_par_last_update_rs_times_ms);
  1265   double update_rs_processed_buffers =
  1266     sum_of_values(_par_last_update_rs_processed_buffers);
  1267   double scan_rs_time = avg_value(_par_last_scan_rs_times_ms);
  1268   double obj_copy_time = avg_value(_par_last_obj_copy_times_ms);
  1269   double termination_time = avg_value(_par_last_termination_times_ms);
  1271   double known_time = ext_root_scan_time +
  1272                       mark_stack_scan_time +
  1273                       update_rs_time +
  1274                       scan_rs_time +
  1275                       obj_copy_time;
  1277   double other_time_ms = elapsed_ms;
  1279   // Subtract the SATB drain time. It's initialized to zero at the
  1280   // start of the pause and is updated during the pause if marking
  1281   // is in progress.
  1282   other_time_ms -= _cur_satb_drain_time_ms;
  1284   if (parallel) {
  1285     other_time_ms -= _cur_collection_par_time_ms;
  1286   } else {
  1287     other_time_ms -= known_time;
  1290   // Subtract the time taken to clean the card table from the
  1291   // current value of "other time"
  1292   other_time_ms -= _cur_clear_ct_time_ms;
  1294   // TraceGen0Time and TraceGen1Time summary info updating.
  1295   _all_pause_times_ms->add(elapsed_ms);
  1297   if (update_stats) {
  1298     _recent_rs_scan_times_ms->add(scan_rs_time);
  1299     _recent_pause_times_ms->add(elapsed_ms);
  1300     _recent_rs_sizes->add(rs_size);
  1302     _summary->record_total_time_ms(elapsed_ms);
  1303     _summary->record_other_time_ms(other_time_ms);
  1305     MainBodySummary* body_summary = _summary->main_body_summary();
  1306     assert(body_summary != NULL, "should not be null!");
  1308     // This will be non-zero iff marking is currently in progress (i.e.
  1309     // _g1->mark_in_progress() == true) and the currrent pause was not
  1310     // an initial mark pause. Since the body_summary items are NumberSeqs,
  1311     // however, they have to be consistent and updated in lock-step with
  1312     // each other. Therefore we unconditionally record the SATB drain
  1313     // time - even if it's zero.
  1314     body_summary->record_satb_drain_time_ms(_cur_satb_drain_time_ms);
  1316     body_summary->record_ext_root_scan_time_ms(ext_root_scan_time);
  1317     body_summary->record_mark_stack_scan_time_ms(mark_stack_scan_time);
  1318     body_summary->record_update_rs_time_ms(update_rs_time);
  1319     body_summary->record_scan_rs_time_ms(scan_rs_time);
  1320     body_summary->record_obj_copy_time_ms(obj_copy_time);
  1322     if (parallel) {
  1323       body_summary->record_parallel_time_ms(_cur_collection_par_time_ms);
  1324       body_summary->record_termination_time_ms(termination_time);
  1326       double parallel_known_time = known_time + termination_time;
  1327       double parallel_other_time = _cur_collection_par_time_ms - parallel_known_time;
  1328       body_summary->record_parallel_other_time_ms(parallel_other_time);
  1329     }
  1331     body_summary->record_mark_closure_time_ms(_mark_closure_time_ms);
  1332     body_summary->record_clear_ct_time_ms(_cur_clear_ct_time_ms);
  1334     // We exempt parallel collection from this check because Alloc Buffer
  1335     // fragmentation can produce negative collections.  Same with evac
  1336     // failure.
  1337     // Further, we're now always doing parallel collection.  But I'm still
  1338     // leaving this here as a placeholder for a more precise assertion later.
  1339     // (DLD, 10/05.)
  1340     assert((true || parallel)
  1341            || _g1->evacuation_failed()
  1342            || surviving_bytes <= _collection_set_bytes_used_before,
  1343            "Or else negative collection!");
  1345     _recent_CS_bytes_used_before->add(_collection_set_bytes_used_before);
  1346     _recent_CS_bytes_surviving->add(surviving_bytes);
  1348     // this is where we update the allocation rate of the application
  1349     double app_time_ms =
  1350       (_cur_collection_start_sec * 1000.0 - _prev_collection_pause_end_ms);
  1351     if (app_time_ms < MIN_TIMER_GRANULARITY) {
  1352       // This usually happens due to the timer not having the required
  1353       // granularity. Some Linuxes are the usual culprits.
  1354       // We'll just set it to something (arbitrarily) small.
  1355       app_time_ms = 1.0;
  1356     }
  1357     size_t regions_allocated =
  1358       (_region_num_young - _prev_region_num_young) +
  1359       (_region_num_tenured - _prev_region_num_tenured);
  1360     double alloc_rate_ms = (double) regions_allocated / app_time_ms;
  1361     _alloc_rate_ms_seq->add(alloc_rate_ms);
  1362     _prev_region_num_young   = _region_num_young;
  1363     _prev_region_num_tenured = _region_num_tenured;
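    // Illustrative sketch of the rate computed above (the helper name is
    // hypothetical): regions allocated per millisecond of mutator time,
    // i.e. the time between the previous pause end and this pause start.
    //
    //   static double alloc_rate_regions_per_ms(size_t regions_allocated,
    //                                           double mutator_time_ms) {
    //     if (mutator_time_ms < 1.0) mutator_time_ms = 1.0; // granularity floor
    //     return (double) regions_allocated / mutator_time_ms;
    //   }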
  1365     double interval_ms =
  1366       (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;
  1367     update_recent_gc_times(end_time_sec, elapsed_ms);
  1368     _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms;
  1369     if (recent_avg_pause_time_ratio() < 0.0 ||
  1370         (recent_avg_pause_time_ratio() - 1.0 > 0.0)) {
  1371 #ifndef PRODUCT
  1372       // Dump info to allow post-facto debugging
  1373       gclog_or_tty->print_cr("recent_avg_pause_time_ratio() out of bounds");
  1374       gclog_or_tty->print_cr("-------------------------------------------");
  1375       gclog_or_tty->print_cr("Recent GC Times (ms):");
  1376       _recent_gc_times_ms->dump();
  1377       gclog_or_tty->print_cr("(End Time=%3.3f) Recent GC End Times (s):", end_time_sec);
  1378       _recent_prev_end_times_for_all_gcs_sec->dump();
  1379       gclog_or_tty->print_cr("GC = %3.3f, Interval = %3.3f, Ratio = %3.3f",
  1380                              _recent_gc_times_ms->sum(), interval_ms, recent_avg_pause_time_ratio());
  1381       // In debug mode, terminate the JVM if the user wants to debug at this point.
  1382       assert(!G1FailOnFPError, "Debugging data for CR 6898948 has been dumped above");
  1383 #endif  // !PRODUCT
  1384       // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
  1385       // CR 6902692 by redoing the manner in which the ratio is incrementally computed.
  1386       if (_recent_avg_pause_time_ratio < 0.0) {
  1387         _recent_avg_pause_time_ratio = 0.0;
  1388       } else {
  1389         assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
  1390         _recent_avg_pause_time_ratio = 1.0;
  1391       }
  1392     }
  1393   }
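  // In other words (a restatement of the computation above, not extra
  // policy):
  //
  //   _recent_avg_pause_time_ratio =
  //       sum(recent pause times) / (end_time - oldest recorded GC end)
  //
  // which is the fraction of recent wall-clock time spent in GC pauses and
  // should always lie in [0.0, 1.0]; the clipping above only papers over
  // timing anomalies until CR 6902692 reworks the incremental computation.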
  1395   for (int i = 0; i < _aux_num; ++i) {
  1396     if (_cur_aux_times_set[i]) {
  1397       _all_aux_times_ms[i].add(_cur_aux_times_ms[i]);
  1398     }
  1399   }
  1402   if (G1PolicyVerbose > 1) {
  1403     gclog_or_tty->print_cr("   Recording collection pause(%d)", _n_pauses);
  1404   }
  1406   if (G1PolicyVerbose > 1) {
  1407     gclog_or_tty->print_cr("      ET: %10.6f ms           (avg: %10.6f ms)\n"
  1408                            "       ET-RS:  %10.6f ms      (avg: %10.6f ms)\n"
  1409                            "      |RS|: " SIZE_FORMAT,
  1410                            elapsed_ms, recent_avg_time_for_pauses_ms(),
  1411                            scan_rs_time, recent_avg_time_for_rs_scan_ms(),
  1412                            rs_size);
  1414     gclog_or_tty->print_cr("       Used at start: " SIZE_FORMAT"K"
  1415                            "       At end " SIZE_FORMAT "K\n"
  1416                            "       garbage      : " SIZE_FORMAT "K"
  1417                            "       of     " SIZE_FORMAT "K\n"
  1418                            "       survival     : %6.2f%%  (%6.2f%% avg)",
  1419                            _cur_collection_pause_used_at_start_bytes/K,
  1420                            _g1->used()/K, freed_bytes/K,
  1421                            _collection_set_bytes_used_before/K,
  1422                            survival_fraction*100.0,
  1423                            recent_avg_survival_fraction()*100.0);
  1424     gclog_or_tty->print_cr("       Recent %% gc pause time: %6.2f",
  1425                            recent_avg_pause_time_ratio() * 100.0);
  1426   }
  1428   // PrintGCDetails output
  1429   if (PrintGCDetails) {
  1430     bool print_marking_info =
  1431       _g1->mark_in_progress() && !last_pause_included_initial_mark;
  1433     gclog_or_tty->print_cr("%s, %1.8lf secs]",
  1434                            (last_pause_included_initial_mark) ? " (initial-mark)" : "",
  1435                            elapsed_ms / 1000.0);
  1437     if (print_marking_info) {
  1438       print_stats(1, "SATB Drain Time", _cur_satb_drain_time_ms);
  1439       print_stats(2, "Processed Buffers", _last_satb_drain_processed_buffers);
  1440     }
  1442     if (parallel) {
  1443       print_stats(1, "Parallel Time", _cur_collection_par_time_ms);
  1444       print_par_stats(2, "GC Worker Start", _par_last_gc_worker_start_times_ms);
  1445       print_par_stats(2, "Ext Root Scanning", _par_last_ext_root_scan_times_ms);
  1446       if (print_marking_info) {
  1447         print_par_stats(2, "Mark Stack Scanning", _par_last_mark_stack_scan_times_ms);
  1448       }
  1449       print_par_stats(2, "Update RS", _par_last_update_rs_times_ms);
  1450       print_par_sizes(3, "Processed Buffers", _par_last_update_rs_processed_buffers);
  1451       print_par_stats(2, "Scan RS", _par_last_scan_rs_times_ms);
  1452       print_par_stats(2, "Object Copy", _par_last_obj_copy_times_ms);
  1453       print_par_stats(2, "Termination", _par_last_termination_times_ms);
  1454       print_par_sizes(3, "Termination Attempts", _par_last_termination_attempts);
  1455       print_par_stats(2, "GC Worker End", _par_last_gc_worker_end_times_ms);
  1457       for (int i = 0; i < _parallel_gc_threads; i++) {
  1458         _par_last_gc_worker_times_ms[i] = _par_last_gc_worker_end_times_ms[i] - _par_last_gc_worker_start_times_ms[i];
  1460         double worker_known_time = _par_last_ext_root_scan_times_ms[i] +
  1461                                    _par_last_mark_stack_scan_times_ms[i] +
  1462                                    _par_last_update_rs_times_ms[i] +
  1463                                    _par_last_scan_rs_times_ms[i] +
  1464                                    _par_last_obj_copy_times_ms[i] +
  1465                                    _par_last_termination_times_ms[i];
  1467         _par_last_gc_worker_other_times_ms[i] = _cur_collection_par_time_ms - worker_known_time;
  1468       }
  1469       print_par_stats(2, "GC Worker", _par_last_gc_worker_times_ms);
  1470       print_par_stats(2, "GC Worker Other", _par_last_gc_worker_other_times_ms);
  1471     } else {
  1472       print_stats(1, "Ext Root Scanning", ext_root_scan_time);
  1473       if (print_marking_info) {
  1474         print_stats(1, "Mark Stack Scanning", mark_stack_scan_time);
  1475       }
  1476       print_stats(1, "Update RS", update_rs_time);
  1477       print_stats(2, "Processed Buffers", (int)update_rs_processed_buffers);
  1478       print_stats(1, "Scan RS", scan_rs_time);
  1479       print_stats(1, "Object Copying", obj_copy_time);
  1480     }
  1481     print_stats(1, "Clear CT", _cur_clear_ct_time_ms);
  1482 #ifndef PRODUCT
  1483     print_stats(1, "Cur Clear CC", _cur_clear_cc_time_ms);
  1484     print_stats(1, "Cum Clear CC", _cum_clear_cc_time_ms);
  1485     print_stats(1, "Min Clear CC", _min_clear_cc_time_ms);
  1486     print_stats(1, "Max Clear CC", _max_clear_cc_time_ms);
  1487     if (_num_cc_clears > 0) {
  1488       print_stats(1, "Avg Clear CC", _cum_clear_cc_time_ms / ((double)_num_cc_clears));
  1489     }
  1490 #endif
  1491     print_stats(1, "Other", other_time_ms);
  1492     print_stats(2, "Choose CSet", _recorded_young_cset_choice_time_ms);
  1493     print_stats(2, "Ref Proc", _cur_ref_proc_time_ms);
  1494     print_stats(2, "Ref Enq", _cur_ref_enq_time_ms);
  1496     for (int i = 0; i < _aux_num; ++i) {
  1497       if (_cur_aux_times_set[i]) {
  1498         char buffer[96];
  1499         sprintf(buffer, "Aux%d", i);
  1500         print_stats(1, buffer, _cur_aux_times_ms[i]);
  1501       }
  1502     }
  1503   }
  1505   // Update the efficiency-since-mark vars.
  1506   double proc_ms = elapsed_ms * (double) _parallel_gc_threads;
  1507   if (elapsed_ms < MIN_TIMER_GRANULARITY) {
  1508     // This usually happens due to the timer not having the required
  1509     // granularity. Some Linuxes are the usual culprits.
  1510     // We'll just set it to something (arbitrarily) small.
  1511     proc_ms = 1.0;
  1512   }
  1513   double cur_efficiency = (double) freed_bytes / proc_ms;
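  // GC efficiency, as used by the policy below, is bytes reclaimed per
  // millisecond of processor time spent in the pause. A standalone sketch
  // of the arithmetic above (the helper name is illustrative):
  //
  //   static double gc_efficiency(size_t freed_bytes, double pause_ms,
  //                               int gc_threads) {
  //     double proc_ms = pause_ms * (double) gc_threads;
  //     if (pause_ms < 1.0 /* cf. MIN_TIMER_GRANULARITY */) proc_ms = 1.0;
  //     return (double) freed_bytes / proc_ms;
  //   }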
  1515   bool new_in_marking_window = _in_marking_window;
  1516   bool new_in_marking_window_im = false;
  1517   if (during_initial_mark_pause()) {
  1518     new_in_marking_window = true;
  1519     new_in_marking_window_im = true;
  1520   }
  1522   if (_last_full_young_gc) {
  1523     if (!last_pause_included_initial_mark) {
  1524       ergo_verbose2(ErgoPartiallyYoungGCs,
  1525                     "start partially-young GCs",
  1526                     ergo_format_byte_perc("known garbage"),
  1527                     _known_garbage_bytes, _known_garbage_ratio * 100.0);
  1528       set_full_young_gcs(false);
  1529     } else {
  1530       ergo_verbose0(ErgoPartiallyYoungGCs,
  1531                     "do not start partially-young GCs",
  1532                     ergo_format_reason("concurrent cycle is about to start"));
  1533     }
  1534     _last_full_young_gc = false;
  1535   }
  1537   if ( !_last_young_gc_full ) {
  1538     if (_should_revert_to_full_young_gcs) {
  1539       ergo_verbose2(ErgoPartiallyYoungGCs,
  1540                     "end partially-young GCs",
  1541                     ergo_format_reason("partially-young GCs end requested")
  1542                     ergo_format_byte_perc("known garbage"),
  1543                     _known_garbage_bytes, _known_garbage_ratio * 100.0);
  1544       set_full_young_gcs(true);
  1545     } else if (_known_garbage_ratio < 0.05) {
  1546       ergo_verbose3(ErgoPartiallyYoungGCs,
  1547                "end partially-young GCs",
  1548                ergo_format_reason("known garbage percent lower than threshold")
  1549                ergo_format_byte_perc("known garbage")
  1550                ergo_format_perc("threshold"),
  1551                _known_garbage_bytes, _known_garbage_ratio * 100.0,
  1552                0.05 * 100.0);
  1553       set_full_young_gcs(true);
  1554     } else if (adaptive_young_list_length() &&
  1555               (get_gc_eff_factor() * cur_efficiency < predict_young_gc_eff())) {
  1556       ergo_verbose5(ErgoPartiallyYoungGCs,
  1557                     "end partially-young GCs",
  1558                     ergo_format_reason("current GC efficiency lower than "
  1559                                        "predicted fully-young GC efficiency")
  1560                     ergo_format_double("GC efficiency factor")
  1561                     ergo_format_double("current GC efficiency")
  1562                     ergo_format_double("predicted fully-young GC efficiency")
  1563                     ergo_format_byte_perc("known garbage"),
  1564                     get_gc_eff_factor(), cur_efficiency,
  1565                     predict_young_gc_eff(),
  1566                     _known_garbage_bytes, _known_garbage_ratio * 100.0);
  1567       set_full_young_gcs(true);
  1568     }
  1569   }
  1570   _should_revert_to_full_young_gcs = false;
  1572   if (_last_young_gc_full && !_during_marking) {
  1573     _young_gc_eff_seq->add(cur_efficiency);
  1574   }
  1576   _short_lived_surv_rate_group->start_adding_regions();
  1577   // do that for any other surv rate groups
  1579   // <NEW PREDICTION>
  1581   if (update_stats) {
  1582     double pause_time_ms = elapsed_ms;
  1584     size_t diff = 0;
  1585     if (_max_pending_cards >= _pending_cards)
  1586       diff = _max_pending_cards - _pending_cards;
  1587     _pending_card_diff_seq->add((double) diff);
  1589     double cost_per_card_ms = 0.0;
  1590     if (_pending_cards > 0) {
  1591       cost_per_card_ms = update_rs_time / (double) _pending_cards;
  1592       _cost_per_card_ms_seq->add(cost_per_card_ms);
  1593     }
  1595     size_t cards_scanned = _g1->cards_scanned();
  1597     double cost_per_entry_ms = 0.0;
  1598     if (cards_scanned > 10) {
  1599       cost_per_entry_ms = scan_rs_time / (double) cards_scanned;
  1600       if (_last_young_gc_full)
  1601         _cost_per_entry_ms_seq->add(cost_per_entry_ms);
  1602       else
  1603         _partially_young_cost_per_entry_ms_seq->add(cost_per_entry_ms);
  1604     }
  1606     if (_max_rs_lengths > 0) {
  1607       double cards_per_entry_ratio =
  1608         (double) cards_scanned / (double) _max_rs_lengths;
  1609       if (_last_young_gc_full)
  1610         _fully_young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
  1611       else
  1612         _partially_young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
  1613     }
  1615     size_t rs_length_diff = _max_rs_lengths - _recorded_rs_lengths;
  1616     if (rs_length_diff >= 0)
  1617       _rs_length_diff_seq->add((double) rs_length_diff);
  1619     size_t copied_bytes = surviving_bytes;
  1620     double cost_per_byte_ms = 0.0;
  1621     if (copied_bytes > 0) {
  1622       cost_per_byte_ms = obj_copy_time / (double) copied_bytes;
  1623       if (_in_marking_window)
  1624         _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
  1625       else
  1626         _cost_per_byte_ms_seq->add(cost_per_byte_ms);
  1627     }
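    // Taken together, the updates above maintain one sample per pause for
    // the linear cost model used by the predictions (a restatement, not
    // extra code):
    //
    //   cost_per_card  = update_rs_time / pending_cards     [RS update]
    //   cost_per_entry = scan_rs_time   / cards_scanned     [RS scan]
    //   cost_per_byte  = obj_copy_time  / bytes_copied      [evacuation]
    //
    // with separate sequences for fully- vs. partially-young pauses and
    // for copying inside vs. outside a marking window.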
  1629     double all_other_time_ms = pause_time_ms -
  1630       (update_rs_time + scan_rs_time + obj_copy_time +
  1631        _mark_closure_time_ms + termination_time);
  1633     double young_other_time_ms = 0.0;
  1634     if (_recorded_young_regions > 0) {
  1635       young_other_time_ms =
  1636         _recorded_young_cset_choice_time_ms +
  1637         _recorded_young_free_cset_time_ms;
  1638       _young_other_cost_per_region_ms_seq->add(young_other_time_ms /
  1639                                              (double) _recorded_young_regions);
  1640     }
  1641     double non_young_other_time_ms = 0.0;
  1642     if (_recorded_non_young_regions > 0) {
  1643       non_young_other_time_ms =
  1644         _recorded_non_young_cset_choice_time_ms +
  1645         _recorded_non_young_free_cset_time_ms;
  1647       _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms /
  1648                                          (double) _recorded_non_young_regions);
  1649     }
  1651     double constant_other_time_ms = all_other_time_ms -
  1652       (young_other_time_ms + non_young_other_time_ms);
  1653     _constant_other_time_ms_seq->add(constant_other_time_ms);
  1655     double survival_ratio = 0.0;
  1656     if (_bytes_in_collection_set_before_gc > 0) {
  1657       survival_ratio = (double) _bytes_copied_during_gc /
  1658                                    (double) _bytes_in_collection_set_before_gc;
  1659     }
  1661     _pending_cards_seq->add((double) _pending_cards);
  1662     _scanned_cards_seq->add((double) cards_scanned);
  1663     _rs_lengths_seq->add((double) _max_rs_lengths);
  1665     double expensive_region_limit_ms =
  1666       (double) MaxGCPauseMillis - predict_constant_other_time_ms();
  1667     if (expensive_region_limit_ms < 0.0) {
  1668       // this means that the other time was predicted to be longer
  1669       // than the max pause time
  1670       expensive_region_limit_ms = (double) MaxGCPauseMillis;
  1671     }
  1672     _expensive_region_limit_ms = expensive_region_limit_ms;
  1674     if (PREDICTIONS_VERBOSE) {
  1675       gclog_or_tty->print_cr("");
  1676       gclog_or_tty->print_cr("PREDICTIONS %1.4lf %d "
  1677                     "REGIONS %d %d %d "
  1678                     "PENDING_CARDS %d %d "
  1679                     "CARDS_SCANNED %d %d "
  1680                     "RS_LENGTHS %d %d "
  1681                     "RS_UPDATE %1.6lf %1.6lf RS_SCAN %1.6lf %1.6lf "
  1682                     "SURVIVAL_RATIO %1.6lf %1.6lf "
  1683                     "OBJECT_COPY %1.6lf %1.6lf OTHER_CONSTANT %1.6lf %1.6lf "
  1684                     "OTHER_YOUNG %1.6lf %1.6lf "
  1685                     "OTHER_NON_YOUNG %1.6lf %1.6lf "
  1686                     "VTIME_DIFF %1.6lf TERMINATION %1.6lf "
  1687                     "ELAPSED %1.6lf %1.6lf ",
  1688                     _cur_collection_start_sec,
  1689                     (!_last_young_gc_full) ? 2 :
  1690                     (last_pause_included_initial_mark) ? 1 : 0,
  1691                     _recorded_region_num,
  1692                     _recorded_young_regions,
  1693                     _recorded_non_young_regions,
  1694                     _predicted_pending_cards, _pending_cards,
  1695                     _predicted_cards_scanned, cards_scanned,
  1696                     _predicted_rs_lengths, _max_rs_lengths,
  1697                     _predicted_rs_update_time_ms, update_rs_time,
  1698                     _predicted_rs_scan_time_ms, scan_rs_time,
  1699                     _predicted_survival_ratio, survival_ratio,
  1700                     _predicted_object_copy_time_ms, obj_copy_time,
  1701                     _predicted_constant_other_time_ms, constant_other_time_ms,
  1702                     _predicted_young_other_time_ms, young_other_time_ms,
  1703                     _predicted_non_young_other_time_ms,
  1704                     non_young_other_time_ms,
  1705                     _vtime_diff_ms, termination_time,
  1706                     _predicted_pause_time_ms, elapsed_ms);
  1707     }
  1709     if (G1PolicyVerbose > 0) {
  1710       gclog_or_tty->print_cr("Pause Time, predicted: %1.4lfms (predicted %s), actual: %1.4lfms",
  1711                     _predicted_pause_time_ms,
  1712                     (_within_target) ? "within" : "outside",
  1713                     elapsed_ms);
  1714     }
  1715   }
  1718   _in_marking_window = new_in_marking_window;
  1719   _in_marking_window_im = new_in_marking_window_im;
  1720   _free_regions_at_end_of_collection = _g1->free_regions();
  1721   update_young_list_target_length();
  1723   // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
  1724   double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
  1725   adjust_concurrent_refinement(update_rs_time, update_rs_processed_buffers, update_rs_time_goal_ms);
  1726   // </NEW PREDICTION>
  1728   assert(assertMarkedBytesDataOK(), "Marked regions not OK at pause end.");
  1729 }
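// Illustrative sketch, not part of the original file: the sequences
// recorded above feed a linear pause-time model of the shape used by
// predict_base_elapsed_time_ms() further down. All names here are
// hypothetical; the helper exists only to make the model explicit.
static double predicted_base_pause_ms_sketch(double avg_cost_per_card_ms,
                                             size_t pending_cards,
                                             double avg_cost_per_entry_ms,
                                             size_t scanned_cards,
                                             double avg_constant_other_ms) {
  return avg_cost_per_card_ms  * (double) pending_cards +  // RS update
         avg_cost_per_entry_ms * (double) scanned_cards +  // RS scan
         avg_constant_other_ms;                            // fixed per-pause overhead
}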
  1731 #define EXT_SIZE_FORMAT "%d%s"
  1732 #define EXT_SIZE_PARAMS(bytes)                                  \
  1733   byte_size_in_proper_unit((bytes)),                            \
  1734   proper_unit_for_byte_size((bytes))
  1736 void G1CollectorPolicy::print_heap_transition() {
  1737   if (PrintGCDetails) {
  1738     YoungList* young_list = _g1->young_list();
  1739     size_t eden_bytes = young_list->eden_used_bytes();
  1740     size_t survivor_bytes = young_list->survivor_used_bytes();
  1741     size_t used_before_gc = _cur_collection_pause_used_at_start_bytes;
  1742     size_t used = _g1->used();
  1743     size_t capacity = _g1->capacity();
  1744     size_t eden_capacity =
  1745       (_young_list_target_length * HeapRegion::GrainBytes) - survivor_bytes;
  1747     gclog_or_tty->print_cr(
  1748       "   [Eden: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT") "
  1749       "Survivors: "EXT_SIZE_FORMAT"->"EXT_SIZE_FORMAT" "
  1750       "Heap: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"
  1751       EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")]",
  1752       EXT_SIZE_PARAMS(_eden_bytes_before_gc),
  1753       EXT_SIZE_PARAMS(_prev_eden_capacity),
  1754       EXT_SIZE_PARAMS(eden_bytes),
  1755       EXT_SIZE_PARAMS(eden_capacity),
  1756       EXT_SIZE_PARAMS(_survivor_bytes_before_gc),
  1757       EXT_SIZE_PARAMS(survivor_bytes),
  1758       EXT_SIZE_PARAMS(used_before_gc),
  1759       EXT_SIZE_PARAMS(_capacity_before_gc),
  1760       EXT_SIZE_PARAMS(used),
  1761       EXT_SIZE_PARAMS(capacity));
  1763     _prev_eden_capacity = eden_capacity;
  1764   } else if (PrintGC) {
  1765     _g1->print_size_transition(gclog_or_tty,
  1766                                _cur_collection_pause_used_at_start_bytes,
  1767                                _g1->used(), _g1->capacity());
  1768   }
  1769 }
  1771 // <NEW PREDICTION>
  1773 void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
  1774                                                      double update_rs_processed_buffers,
  1775                                                      double goal_ms) {
  1776   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  1777   ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();
  1779   if (G1UseAdaptiveConcRefinement) {
  1780     const int k_gy = 3, k_gr = 6;
  1781     const double inc_k = 1.1, dec_k = 0.9;
  1783     int g = cg1r->green_zone();
  1784     if (update_rs_time > goal_ms) {
  1785       g = (int)(g * dec_k);  // Can become 0, that's OK. That would mean a mutator-only processing.
  1786     } else {
  1787       if (update_rs_time < goal_ms && update_rs_processed_buffers > g) {
  1788         g = (int)MAX2(g * inc_k, g + 1.0);
  1789       }
  1790     }
  1791     // Change the refinement threads params
  1792     cg1r->set_green_zone(g);
  1793     cg1r->set_yellow_zone(g * k_gy);
  1794     cg1r->set_red_zone(g * k_gr);
  1795     cg1r->reinitialize_threads();
  1797     int processing_threshold_delta = MAX2((int)(cg1r->green_zone() * sigma()), 1);
  1798     int processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta,
  1799                                     cg1r->yellow_zone());
  1800     // Change the barrier params
  1801     dcqs.set_process_completed_threshold(processing_threshold);
  1802     dcqs.set_max_completed_queue(cg1r->red_zone());
  1803   }
  1805   int curr_queue_size = dcqs.completed_buffers_num();
  1806   if (curr_queue_size >= cg1r->yellow_zone()) {
  1807     dcqs.set_completed_queue_padding(curr_queue_size);
  1808   } else {
  1809     dcqs.set_completed_queue_padding(0);
  1810   }
  1811   dcqs.notify_if_necessary();
  1812 }
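// A minimal standalone sketch of the zone adaptation above, assuming the
// same constants (dec_k = 0.9, inc_k = 1.1, yellow = 3 * green,
// red = 6 * green). Names are illustrative; the real code also resizes
// the refinement threads and the write-barrier thresholds.
static void adapt_refinement_zones_sketch(double update_rs_ms,
                                          double processed_buffers,
                                          double goal_ms,
                                          int& green, int& yellow, int& red) {
  if (update_rs_ms > goal_ms) {
    green = (int) (green * 0.9);             // over budget: shrink (0 is legal)
  } else if (processed_buffers > (double) green) {
    double grown = green * 1.1;
    green = (int) (grown > green + 1.0 ? grown : green + 1.0); // grow by at least 1
  }
  yellow = green * 3;
  red    = green * 6;
}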
  1814 double
  1815 G1CollectorPolicy::
  1816 predict_young_collection_elapsed_time_ms(size_t adjustment) {
  1817   guarantee( adjustment == 0 || adjustment == 1, "invariant" );
  1819   G1CollectedHeap* g1h = G1CollectedHeap::heap();
  1820   size_t young_num = g1h->young_list()->length();
  1821   if (young_num == 0)
  1822     return 0.0;
  1824   young_num += adjustment;
  1825   size_t pending_cards = predict_pending_cards();
  1826   size_t rs_lengths = g1h->young_list()->sampled_rs_lengths() +
  1827                       predict_rs_length_diff();
  1828   size_t card_num;
  1829   if (full_young_gcs())
  1830     card_num = predict_young_card_num(rs_lengths);
  1831   else
  1832     card_num = predict_non_young_card_num(rs_lengths);
  1833   size_t young_byte_size = young_num * HeapRegion::GrainBytes;
  1834   double accum_yg_surv_rate =
  1835     _short_lived_surv_rate_group->accum_surv_rate(adjustment);
  1837   size_t bytes_to_copy =
  1838     (size_t) (accum_yg_surv_rate * (double) HeapRegion::GrainBytes);
  1840   return
  1841     predict_rs_update_time_ms(pending_cards) +
  1842     predict_rs_scan_time_ms(card_num) +
  1843     predict_object_copy_time_ms(bytes_to_copy) +
  1844     predict_young_other_time_ms(young_num) +
  1845     predict_constant_other_time_ms();
  1846 }
  1848 double
  1849 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) {
  1850   size_t rs_length = predict_rs_length_diff();
  1851   size_t card_num;
  1852   if (full_young_gcs())
  1853     card_num = predict_young_card_num(rs_length);
  1854   else
  1855     card_num = predict_non_young_card_num(rs_length);
  1856   return predict_base_elapsed_time_ms(pending_cards, card_num);
  1857 }
  1859 double
  1860 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
  1861                                                 size_t scanned_cards) {
  1862   return
  1863     predict_rs_update_time_ms(pending_cards) +
  1864     predict_rs_scan_time_ms(scanned_cards) +
  1865     predict_constant_other_time_ms();
  1866 }
  1868 double
  1869 G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
  1870                                                   bool young) {
  1871   size_t rs_length = hr->rem_set()->occupied();
  1872   size_t card_num;
  1873   if (full_young_gcs())
  1874     card_num = predict_young_card_num(rs_length);
  1875   else
  1876     card_num = predict_non_young_card_num(rs_length);
  1877   size_t bytes_to_copy = predict_bytes_to_copy(hr);
  1879   double region_elapsed_time_ms =
  1880     predict_rs_scan_time_ms(card_num) +
  1881     predict_object_copy_time_ms(bytes_to_copy);
  1883   if (young)
  1884     region_elapsed_time_ms += predict_young_other_time_ms(1);
  1885   else
  1886     region_elapsed_time_ms += predict_non_young_other_time_ms(1);
  1888   return region_elapsed_time_ms;
  1889 }
  1891 size_t
  1892 G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
  1893   size_t bytes_to_copy;
  1894   if (hr->is_marked())
  1895     bytes_to_copy = hr->max_live_bytes();
  1896   else {
  1897     guarantee( hr->is_young() && hr->age_in_surv_rate_group() != -1,
  1898                "invariant" );
  1899     int age = hr->age_in_surv_rate_group();
  1900     double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
  1901     bytes_to_copy = (size_t) ((double) hr->used() * yg_surv_rate);
  1902   }
  1904   return bytes_to_copy;
  1905 }
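// Sketch of the copy-cost input computed above (an illustrative helper
// with plain types, not part of the original file): a marked region is
// assumed to evacuate its known live bytes, while a young region scales
// its occupancy by the predicted survival rate for its age.
static size_t predict_bytes_to_copy_sketch(bool is_marked, size_t max_live_bytes,
                                           size_t used_bytes, double yg_surv_rate) {
  if (is_marked) {
    return max_live_bytes;                               // liveness known from marking
  }
  return (size_t) ((double) used_bytes * yg_surv_rate);  // young: survival estimate
}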
  1907 void
  1908 G1CollectorPolicy::start_recording_regions() {
  1909   _recorded_rs_lengths            = 0;
  1910   _recorded_young_regions         = 0;
  1911   _recorded_non_young_regions     = 0;
  1913 #if PREDICTIONS_VERBOSE
  1914   _recorded_marked_bytes          = 0;
  1915   _recorded_young_bytes           = 0;
  1916   _predicted_bytes_to_copy        = 0;
  1917   _predicted_rs_lengths           = 0;
  1918   _predicted_cards_scanned        = 0;
  1919 #endif // PREDICTIONS_VERBOSE
  1920 }
  1922 void
  1923 G1CollectorPolicy::record_cset_region_info(HeapRegion* hr, bool young) {
  1924 #if PREDICTIONS_VERBOSE
  1925   if (!young) {
  1926     _recorded_marked_bytes += hr->max_live_bytes();
  1927   }
  1928   _predicted_bytes_to_copy += predict_bytes_to_copy(hr);
  1929 #endif // PREDICTIONS_VERBOSE
  1931   size_t rs_length = hr->rem_set()->occupied();
  1932   _recorded_rs_lengths += rs_length;
  1933 }
  1935 void
  1936 G1CollectorPolicy::record_non_young_cset_region(HeapRegion* hr) {
  1937   assert(!hr->is_young(), "should not call this");
  1938   ++_recorded_non_young_regions;
  1939   record_cset_region_info(hr, false);
  1940 }
  1942 void
  1943 G1CollectorPolicy::set_recorded_young_regions(size_t n_regions) {
  1944   _recorded_young_regions = n_regions;
  1945 }
  1947 void G1CollectorPolicy::set_recorded_young_bytes(size_t bytes) {
  1948 #if PREDICTIONS_VERBOSE
  1949   _recorded_young_bytes = bytes;
  1950 #endif // PREDICTIONS_VERBOSE
  1951 }
  1953 void G1CollectorPolicy::set_recorded_rs_lengths(size_t rs_lengths) {
  1954   _recorded_rs_lengths = rs_lengths;
  1955 }
  1957 void G1CollectorPolicy::set_predicted_bytes_to_copy(size_t bytes) {
  1958   _predicted_bytes_to_copy = bytes;
  1959 }
  1961 void
  1962 G1CollectorPolicy::end_recording_regions() {
  1963   // The _predicted_pause_time_ms field is referenced in code
  1964   // not under PREDICTIONS_VERBOSE. Let's initialize it.
  1965   _predicted_pause_time_ms = -1.0;
  1967 #if PREDICTIONS_VERBOSE
  1968   _predicted_pending_cards = predict_pending_cards();
  1969   _predicted_rs_lengths = _recorded_rs_lengths + predict_rs_length_diff();
  1970   if (full_young_gcs())
  1971     _predicted_cards_scanned += predict_young_card_num(_predicted_rs_lengths);
  1972   else
  1973     _predicted_cards_scanned +=
  1974       predict_non_young_card_num(_predicted_rs_lengths);
  1975   _recorded_region_num = _recorded_young_regions + _recorded_non_young_regions;
  1977   _predicted_rs_update_time_ms =
  1978     predict_rs_update_time_ms(_g1->pending_card_num());
  1979   _predicted_rs_scan_time_ms =
  1980     predict_rs_scan_time_ms(_predicted_cards_scanned);
  1981   _predicted_object_copy_time_ms =
  1982     predict_object_copy_time_ms(_predicted_bytes_to_copy);
  1983   _predicted_constant_other_time_ms =
  1984     predict_constant_other_time_ms();
  1985   _predicted_young_other_time_ms =
  1986     predict_young_other_time_ms(_recorded_young_regions);
  1987   _predicted_non_young_other_time_ms =
  1988     predict_non_young_other_time_ms(_recorded_non_young_regions);
  1990   _predicted_pause_time_ms =
  1991     _predicted_rs_update_time_ms +
  1992     _predicted_rs_scan_time_ms +
  1993     _predicted_object_copy_time_ms +
  1994     _predicted_constant_other_time_ms +
  1995     _predicted_young_other_time_ms +
  1996     _predicted_non_young_other_time_ms;
  1997 #endif // PREDICTIONS_VERBOSE
  1998 }
  2000 void G1CollectorPolicy::check_if_region_is_too_expensive(double
  2001                                                            predicted_time_ms) {
  2002   // I don't think we need to do this when in young GC mode since
  2003   // marking will be initiated next time we hit the soft limit anyway...
  2004   if (predicted_time_ms > _expensive_region_limit_ms) {
  2005     ergo_verbose2(ErgoPartiallyYoungGCs,
  2006               "request partially-young GCs end",
  2007               ergo_format_reason("predicted region time higher than threshold")
  2008               ergo_format_ms("predicted region time")
  2009               ergo_format_ms("threshold"),
  2010               predicted_time_ms, _expensive_region_limit_ms);
  2011     // no point in doing another partial one
  2012     _should_revert_to_full_young_gcs = true;
  2013   }
  2014 }
  2016 // </NEW PREDICTION>
  2019 void G1CollectorPolicy::update_recent_gc_times(double end_time_sec,
  2020                                                double elapsed_ms) {
  2021   _recent_gc_times_ms->add(elapsed_ms);
  2022   _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec);
  2023   _prev_collection_pause_end_ms = end_time_sec * 1000.0;
  2024 }
  2026 double G1CollectorPolicy::recent_avg_time_for_pauses_ms() {
  2027   if (_recent_pause_times_ms->num() == 0) {
  2028     return (double) MaxGCPauseMillis;
  2029   }
  2030   return _recent_pause_times_ms->avg();
  2031 }
  2033 double G1CollectorPolicy::recent_avg_time_for_rs_scan_ms() {
  2034   if (_recent_rs_scan_times_ms->num() == 0) {
  2035     return (double)MaxGCPauseMillis/3.0;
  2036   }
  2037   return _recent_rs_scan_times_ms->avg();
  2038 }
  2040 int G1CollectorPolicy::number_of_recent_gcs() {
  2041   assert(_recent_rs_scan_times_ms->num() ==
  2042          _recent_pause_times_ms->num(), "Sequence out of sync");
  2043   assert(_recent_pause_times_ms->num() ==
  2044          _recent_CS_bytes_used_before->num(), "Sequence out of sync");
  2045   assert(_recent_CS_bytes_used_before->num() ==
  2046          _recent_CS_bytes_surviving->num(), "Sequence out of sync");
  2048   return _recent_pause_times_ms->num();
  2049 }
  2051 double G1CollectorPolicy::recent_avg_survival_fraction() {
  2052   return recent_avg_survival_fraction_work(_recent_CS_bytes_surviving,
  2053                                            _recent_CS_bytes_used_before);
  2054 }
  2056 double G1CollectorPolicy::last_survival_fraction() {
  2057   return last_survival_fraction_work(_recent_CS_bytes_surviving,
  2058                                      _recent_CS_bytes_used_before);
  2059 }
  2061 double
  2062 G1CollectorPolicy::recent_avg_survival_fraction_work(TruncatedSeq* surviving,
  2063                                                      TruncatedSeq* before) {
  2064   assert(surviving->num() == before->num(), "Sequence out of sync");
  2065   if (before->sum() > 0.0) {
  2066       double recent_survival_rate = surviving->sum() / before->sum();
  2067       // We exempt parallel collection from this check because Alloc Buffer
  2068       // fragmentation can produce negative collections.
  2069       // Further, we're now always doing parallel collection.  But I'm still
  2070       // leaving this here as a placeholder for a more precise assertion later.
  2071       // (DLD, 10/05.)
  2072       assert((true || G1CollectedHeap::use_parallel_gc_threads()) ||
  2073              _g1->evacuation_failed() ||
  2074              recent_survival_rate <= 1.0, "Or bad frac");
  2075       return recent_survival_rate;
  2076   } else {
  2077     return 1.0; // Be conservative.
  2078   }
  2079 }
  2081 double
  2082 G1CollectorPolicy::last_survival_fraction_work(TruncatedSeq* surviving,
  2083                                                TruncatedSeq* before) {
  2084   assert(surviving->num() == before->num(), "Sequence out of sync");
  2085   if (surviving->num() > 0 && before->last() > 0.0) {
  2086     double last_survival_rate = surviving->last() / before->last();
  2087     // We exempt parallel collection from this check because Alloc Buffer
  2088     // fragmentation can produce negative collections.
  2089     // Further, we're now always doing parallel collection.  But I'm still
  2090     // leaving this here as a placeholder for a more precise assertion later.
  2091     // (DLD, 10/05.)
  2092     assert((true || G1CollectedHeap::use_parallel_gc_threads()) ||
  2093            last_survival_rate <= 1.0, "Or bad frac");
  2094     return last_survival_rate;
  2095   } else {
  2096     return 1.0;
  2097   }
  2098 }
  2100 static const int survival_min_obs = 5;
  2101 static double survival_min_obs_limits[] = { 0.9, 0.7, 0.5, 0.3, 0.1 };
  2102 static const double min_survival_rate = 0.1;
  2104 double
  2105 G1CollectorPolicy::conservative_avg_survival_fraction_work(double avg,
  2106                                                            double latest) {
  2107   double res = avg;
  2108   if (number_of_recent_gcs() < survival_min_obs) {
  2109     res = MAX2(res, survival_min_obs_limits[number_of_recent_gcs()]);
  2110   }
  2111   res = MAX2(res, latest);
  2112   res = MAX2(res, min_survival_rate);
  2113   // In the parallel case, LAB fragmentation can produce "negative
  2114   // collections"; so can evac failure.  Cap at 1.0
  2115   res = MIN2(res, 1.0);
  2116   return res;
  2117 }
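// The clamping above as a standalone sketch, using the statics defined
// just before this function; the parameter names are illustrative and
// this helper is not part of the original file.
static double conservative_survival_sketch(double avg, double latest, int recent_gcs) {
  double res = avg;
  if (recent_gcs < survival_min_obs) {
    // Too few observations: assume at least the tabulated survival rate.
    double limit = survival_min_obs_limits[recent_gcs];
    if (limit > res) res = limit;
  }
  if (latest > res) res = latest;                        // never below the last sample
  if (min_survival_rate > res) res = min_survival_rate;  // 10% floor
  return res > 1.0 ? 1.0 : res;                          // cap "negative collections"
}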
  2119 size_t G1CollectorPolicy::expansion_amount() {
  2120   double recent_gc_overhead = recent_avg_pause_time_ratio() * 100.0;
  2121   double threshold = _gc_overhead_perc;
  2122   if (recent_gc_overhead > threshold) {
  2123     // We will double the existing space, or take
  2124     // G1ExpandByPercentOfAvailable % of the available expansion
  2125     // space, whichever is smaller, bounded below by a minimum
  2126     // expansion (unless that's all that's left.)
  2127     const size_t min_expand_bytes = 1*M;
  2128     size_t reserved_bytes = _g1->max_capacity();
  2129     size_t committed_bytes = _g1->capacity();
  2130     size_t uncommitted_bytes = reserved_bytes - committed_bytes;
  2131     size_t expand_bytes;
  2132     size_t expand_bytes_via_pct =
  2133       uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
  2134     expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
  2135     expand_bytes = MAX2(expand_bytes, min_expand_bytes);
  2136     expand_bytes = MIN2(expand_bytes, uncommitted_bytes);
  2138     ergo_verbose5(ErgoHeapSizing,
  2139                   "attempt heap expansion",
  2140                   ergo_format_reason("recent GC overhead higher than "
  2141                                      "threshold after GC")
  2142                   ergo_format_perc("recent GC overhead")
  2143                   ergo_format_perc("threshold")
  2144                   ergo_format_byte("uncommitted")
  2145                   ergo_format_byte_perc("calculated expansion amount"),
  2146                   recent_gc_overhead, threshold,
  2147                   uncommitted_bytes,
  2148                   expand_bytes_via_pct, (double) G1ExpandByPercentOfAvailable);
  2150     return expand_bytes;
  2151   } else {
  2152     return 0;
  2153   }
  2154 }
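// Worked example of the sizing above (made-up numbers; assumes the
// default G1ExpandByPercentOfAvailable of 20): with 1024M reserved and
// 512M committed, uncommitted space is 512M and the percentage path
// yields ~102M, so
//
//   expand = min(max(min(102M, 512M), 1M), 512M) = ~102M
//
// i.e. 20% of what is still available, floored at the 1M minimum and
// capped at the uncommitted space.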
  2156 class CountCSClosure: public HeapRegionClosure {
  2157   G1CollectorPolicy* _g1_policy;
  2158 public:
  2159   CountCSClosure(G1CollectorPolicy* g1_policy) :
  2160     _g1_policy(g1_policy) {}
  2161   bool doHeapRegion(HeapRegion* r) {
  2162     _g1_policy->_bytes_in_collection_set_before_gc += r->used();
  2163     return false;
  2164   }
  2165 };
  2167 void G1CollectorPolicy::count_CS_bytes_used() {
  2168   CountCSClosure cs_closure(this);
  2169   _g1->collection_set_iterate(&cs_closure);
  2170 }
  2172 void G1CollectorPolicy::print_summary(int level,
  2173                                       const char* str,
  2174                                       NumberSeq* seq) const {
  2175   double sum = seq->sum();
  2176   LineBuffer(level + 1).append_and_print_cr("%-24s = %8.2lf s (avg = %8.2lf ms)",
  2177                 str, sum / 1000.0, seq->avg());
  2178 }
  2180 void G1CollectorPolicy::print_summary_sd(int level,
  2181                                          const char* str,
  2182                                          NumberSeq* seq) const {
  2183   print_summary(level, str, seq);
  2184   LineBuffer(level + 6).append_and_print_cr("(num = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
  2185                 seq->num(), seq->sd(), seq->maximum());
  2186 }
  2188 void G1CollectorPolicy::check_other_times(int level,
  2189                                         NumberSeq* other_times_ms,
  2190                                         NumberSeq* calc_other_times_ms) const {
  2191   bool should_print = false;
  2192   LineBuffer buf(level + 2);
  2194   double max_sum = MAX2(fabs(other_times_ms->sum()),
  2195                         fabs(calc_other_times_ms->sum()));
  2196   double min_sum = MIN2(fabs(other_times_ms->sum()),
  2197                         fabs(calc_other_times_ms->sum()));
  2198   double sum_ratio = max_sum / min_sum;
  2199   if (sum_ratio > 1.1) {
  2200     should_print = true;
  2201     buf.append_and_print_cr("## CALCULATED OTHER SUM DOESN'T MATCH RECORDED ###");
  2202   }
  2204   double max_avg = MAX2(fabs(other_times_ms->avg()),
  2205                         fabs(calc_other_times_ms->avg()));
  2206   double min_avg = MIN2(fabs(other_times_ms->avg()),
  2207                         fabs(calc_other_times_ms->avg()));
  2208   double avg_ratio = max_avg / min_avg;
  2209   if (avg_ratio > 1.1) {
  2210     should_print = true;
  2211     buf.append_and_print_cr("## CALCULATED OTHER AVG DOESN'T MATCH RECORDED ###");
  2212   }
  2214   if (other_times_ms->sum() < -0.01) {
  2215     buf.append_and_print_cr("## RECORDED OTHER SUM IS NEGATIVE ###");
  2216   }
  2218   if (other_times_ms->avg() < -0.01) {
  2219     buf.append_and_print_cr("## RECORDED OTHER AVG IS NEGATIVE ###");
  2220   }
  2222   if (calc_other_times_ms->sum() < -0.01) {
  2223     should_print = true;
  2224     buf.append_and_print_cr("## CALCULATED OTHER SUM IS NEGATIVE ###");
  2225   }
  2227   if (calc_other_times_ms->avg() < -0.01) {
  2228     should_print = true;
  2229     buf.append_and_print_cr("## CALCULATED OTHER AVG IS NEGATIVE ###");
  2230   }
  2232   if (should_print)
  2233     print_summary(level, "Other(Calc)", calc_other_times_ms);
  2234 }
  2236 void G1CollectorPolicy::print_summary(PauseSummary* summary) const {
  2237   bool parallel = G1CollectedHeap::use_parallel_gc_threads();
  2238   MainBodySummary*    body_summary = summary->main_body_summary();
  2239   if (summary->get_total_seq()->num() > 0) {
  2240     print_summary_sd(0, "Evacuation Pauses", summary->get_total_seq());
  2241     if (body_summary != NULL) {
  2242       print_summary(1, "SATB Drain", body_summary->get_satb_drain_seq());
  2243       if (parallel) {
  2244         print_summary(1, "Parallel Time", body_summary->get_parallel_seq());
  2245         print_summary(2, "Ext Root Scanning", body_summary->get_ext_root_scan_seq());
  2246         print_summary(2, "Mark Stack Scanning", body_summary->get_mark_stack_scan_seq());
  2247         print_summary(2, "Update RS", body_summary->get_update_rs_seq());
  2248         print_summary(2, "Scan RS", body_summary->get_scan_rs_seq());
  2249         print_summary(2, "Object Copy", body_summary->get_obj_copy_seq());
  2250         print_summary(2, "Termination", body_summary->get_termination_seq());
  2251         print_summary(2, "Parallel Other", body_summary->get_parallel_other_seq());
  2252         {
  2253           NumberSeq* other_parts[] = {
  2254             body_summary->get_ext_root_scan_seq(),
  2255             body_summary->get_mark_stack_scan_seq(),
  2256             body_summary->get_update_rs_seq(),
  2257             body_summary->get_scan_rs_seq(),
  2258             body_summary->get_obj_copy_seq(),
  2259             body_summary->get_termination_seq()
  2260           };
  2261           NumberSeq calc_other_times_ms(body_summary->get_parallel_seq(),
  2262                                         6, other_parts);
  2263           check_other_times(2, body_summary->get_parallel_other_seq(),
  2264                             &calc_other_times_ms);
  2265         }
  2266       } else {
  2267         print_summary(1, "Ext Root Scanning", body_summary->get_ext_root_scan_seq());
  2268         print_summary(1, "Mark Stack Scanning", body_summary->get_mark_stack_scan_seq());
  2269         print_summary(1, "Update RS", body_summary->get_update_rs_seq());
  2270         print_summary(1, "Scan RS", body_summary->get_scan_rs_seq());
  2271         print_summary(1, "Object Copy", body_summary->get_obj_copy_seq());
  2272       }
  2273     }
  2274     print_summary(1, "Mark Closure", body_summary->get_mark_closure_seq());
  2275     print_summary(1, "Clear CT", body_summary->get_clear_ct_seq());
  2276     print_summary(1, "Other", summary->get_other_seq());
  2277     {
  2278       if (body_summary != NULL) {
  2279         NumberSeq calc_other_times_ms;
  2280         if (parallel) {
  2281           // parallel
  2282           NumberSeq* other_parts[] = {
  2283             body_summary->get_satb_drain_seq(),
  2284             body_summary->get_parallel_seq(),
  2285             body_summary->get_clear_ct_seq()
  2286           };
  2287           calc_other_times_ms = NumberSeq(summary->get_total_seq(),
  2288                                                 3, other_parts);
  2289         } else {
  2290           // serial
  2291           NumberSeq* other_parts[] = {
  2292             body_summary->get_satb_drain_seq(),
  2293             body_summary->get_update_rs_seq(),
  2294             body_summary->get_ext_root_scan_seq(),
  2295             body_summary->get_mark_stack_scan_seq(),
  2296             body_summary->get_scan_rs_seq(),
  2297             body_summary->get_obj_copy_seq()
  2298           };
  2299           calc_other_times_ms = NumberSeq(summary->get_total_seq(),
  2300                                                 6, other_parts);
  2302         check_other_times(1,  summary->get_other_seq(), &calc_other_times_ms);
  2303       }
  2304     }
  2305   } else {
  2306     LineBuffer(1).append_and_print_cr("none");
  2307   }
  2308   LineBuffer(0).append_and_print_cr("");
  2309 }
  2311 void G1CollectorPolicy::print_tracing_info() const {
  2312   if (TraceGen0Time) {
  2313     gclog_or_tty->print_cr("ALL PAUSES");
  2314     print_summary_sd(0, "Total", _all_pause_times_ms);
  2315     gclog_or_tty->print_cr("");
  2316     gclog_or_tty->print_cr("");
  2317     gclog_or_tty->print_cr("   Full Young GC Pauses:    %8d", _full_young_pause_num);
  2318     gclog_or_tty->print_cr("   Partial Young GC Pauses: %8d", _partial_young_pause_num);
  2319     gclog_or_tty->print_cr("");
  2321     gclog_or_tty->print_cr("EVACUATION PAUSES");
  2322     print_summary(_summary);
  2324     gclog_or_tty->print_cr("MISC");
  2325     print_summary_sd(0, "Stop World", _all_stop_world_times_ms);
  2326     print_summary_sd(0, "Yields", _all_yield_times_ms);
  2327     for (int i = 0; i < _aux_num; ++i) {
  2328       if (_all_aux_times_ms[i].num() > 0) {
  2329         char buffer[96];
  2330         sprintf(buffer, "Aux%d", i);
  2331         print_summary_sd(0, buffer, &_all_aux_times_ms[i]);
  2332       }
  2333     }
  2335     size_t all_region_num = _region_num_young + _region_num_tenured;
  2336     gclog_or_tty->print_cr("   New Regions %8d, Young %8d (%6.2lf%%), "
  2337                "Tenured %8d (%6.2lf%%)",
  2338                all_region_num,
  2339                _region_num_young,
  2340                (double) _region_num_young / (double) all_region_num * 100.0,
  2341                _region_num_tenured,
  2342                (double) _region_num_tenured / (double) all_region_num * 100.0);
  2343   }
  2344   if (TraceGen1Time) {
  2345     if (_all_full_gc_times_ms->num() > 0) {
  2346       gclog_or_tty->print("\n%4d full_gcs: total time = %8.2f s",
  2347                  _all_full_gc_times_ms->num(),
  2348                  _all_full_gc_times_ms->sum() / 1000.0);
  2349       gclog_or_tty->print_cr(" (avg = %8.2fms).", _all_full_gc_times_ms->avg());
  2350       gclog_or_tty->print_cr("                     [std. dev = %8.2f ms, max = %8.2f ms]",
  2351                     _all_full_gc_times_ms->sd(),
  2352                     _all_full_gc_times_ms->maximum());
  2353     }
  2354   }
  2355 }
  2357 void G1CollectorPolicy::print_yg_surv_rate_info() const {
  2358 #ifndef PRODUCT
  2359   _short_lived_surv_rate_group->print_surv_rate_summary();
  2360   // add this call for any other surv rate groups
  2361 #endif // PRODUCT
  2362 }
  2364 void G1CollectorPolicy::update_region_num(bool young) {
  2365   if (young) {
  2366     ++_region_num_young;
  2367   } else {
  2368     ++_region_num_tenured;
  2369   }
  2370 }
  2372 #ifndef PRODUCT
  2373 // for debugging, bit of a hack...
  2374 static char*
  2375 region_num_to_mbs(int length) {
  2376   static char buffer[64];
  2377   double bytes = (double) (length * HeapRegion::GrainBytes);
  2378   double mbs = bytes / (double) (1024 * 1024);
  2379   sprintf(buffer, "%7.2lfMB", mbs);
  2380   return buffer;
  2381 }
  2382 #endif // PRODUCT
  2384 size_t G1CollectorPolicy::max_regions(int purpose) {
  2385   switch (purpose) {
  2386     case GCAllocForSurvived:
  2387       return _max_survivor_regions;
  2388     case GCAllocForTenured:
  2389       return REGIONS_UNLIMITED;
  2390     default:
  2391       ShouldNotReachHere();
  2392       return REGIONS_UNLIMITED;
  2393   };
  2394 }
  2396 void G1CollectorPolicy::update_max_gc_locker_expansion() {
  2397   size_t expansion_region_num = 0;
  2398   if (GCLockerEdenExpansionPercent > 0) {
  2399     double perc = (double) GCLockerEdenExpansionPercent / 100.0;
  2400     double expansion_region_num_d = perc * (double) _young_list_target_length;
  2401     // We use ceiling so that if expansion_region_num_d is > 0.0 (but
  2402     // less than 1.0) we'll get 1.
  2403     expansion_region_num = (size_t) ceil(expansion_region_num_d);
  2404   } else {
  2405     assert(expansion_region_num == 0, "sanity");
  2406   }
  2407   _young_list_max_length = _young_list_target_length + expansion_region_num;
  2408   assert(_young_list_target_length <= _young_list_max_length, "post-condition");
  2409 }
  2411 // Calculates survivor space parameters.
  2412 void G1CollectorPolicy::update_survivors_policy() {
  2413   double max_survivor_regions_d =
  2414                  (double) _young_list_target_length / (double) SurvivorRatio;
  2415   // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
  2416   // smaller than 1.0) we'll get 1.
  2417   _max_survivor_regions = (size_t) ceil(max_survivor_regions_d);
  2419   _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
  2420         HeapRegion::GrainWords * _max_survivor_regions);
  2421 }
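// Sketch of the survivor sizing above (illustrative helper, plain math,
// not part of the original file): the young target length is divided by
// SurvivorRatio and rounded up, so any non-zero target yields at least
// one survivor region. For example, a target of 25 regions with ratio 8
// gives ceil(3.125) = 4 regions.
static size_t max_survivor_regions_sketch(size_t young_target_length,
                                          size_t survivor_ratio) {
  double d = (double) young_target_length / (double) survivor_ratio;
  return (size_t) ceil(d);
}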
  2423 #ifndef PRODUCT
  2424 class HRSortIndexIsOKClosure: public HeapRegionClosure {
  2425   CollectionSetChooser* _chooser;
  2426 public:
  2427   HRSortIndexIsOKClosure(CollectionSetChooser* chooser) :
  2428     _chooser(chooser) {}
  2430   bool doHeapRegion(HeapRegion* r) {
  2431     if (!r->continuesHumongous()) {
  2432       assert(_chooser->regionProperlyOrdered(r), "Ought to be.");
  2433     }
  2434     return false;
  2435   }
  2436 };
  2438 bool G1CollectorPolicy::assertMarkedBytesDataOK() {
  2439   HRSortIndexIsOKClosure cl(_collectionSetChooser);
  2440   _g1->heap_region_iterate(&cl);
  2441   return true;
  2442 }
  2443 #endif
  2445 bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(
  2446                                                      GCCause::Cause gc_cause) {
  2447   bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
  2448   if (!during_cycle) {
  2449     ergo_verbose1(ErgoConcCycles,
  2450                   "request concurrent cycle initiation",
  2451                   ergo_format_reason("requested by GC cause")
  2452                   ergo_format_str("GC cause"),
  2453                   GCCause::to_string(gc_cause));
  2454     set_initiate_conc_mark_if_possible();
  2455     return true;
  2456   } else {
  2457     ergo_verbose1(ErgoConcCycles,
  2458                   "do not request concurrent cycle initiation",
  2459                   ergo_format_reason("concurrent cycle already in progress")
  2460                   ergo_format_str("GC cause"),
  2461                   GCCause::to_string(gc_cause));
  2462     return false;
  2463   }
  2464 }
  2466 void
  2467 G1CollectorPolicy::decide_on_conc_mark_initiation() {
  2468   // We are about to decide on whether this pause will be an
  2469   // initial-mark pause.
  2471   // First, during_initial_mark_pause() should not be already set. We
  2472   // will set it here if we have to. However, it should be cleared by
  2473   // the end of the pause (it's only set for the duration of an
  2474   // initial-mark pause).
  2475   assert(!during_initial_mark_pause(), "pre-condition");
  2477   if (initiate_conc_mark_if_possible()) {
  2478     // We had noticed on a previous pause that the heap occupancy has
  2479     // gone over the initiating threshold and we should start a
  2480     // concurrent marking cycle. So we might initiate one.
  2482     bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
  2483     if (!during_cycle) {
  2484       // The concurrent marking thread is not "during a cycle", i.e.,
  2485       // it has completed the last one. So we can go ahead and
  2486       // initiate a new cycle.
  2488       set_during_initial_mark_pause();
  2489       // We do not allow non-full young GCs during marking.
  2490       if (!full_young_gcs()) {
  2491         set_full_young_gcs(true);
  2492         ergo_verbose0(ErgoPartiallyYoungGCs,
  2493                       "end partially-young GCs",
  2494                       ergo_format_reason("concurrent cycle is about to start"));
  2497       // And we can now clear initiate_conc_mark_if_possible() as
  2498       // we've already acted on it.
  2499       clear_initiate_conc_mark_if_possible();
  2501       ergo_verbose0(ErgoConcCycles,
  2502                   "initiate concurrent cycle",
  2503                   ergo_format_reason("concurrent cycle initiation requested"));
  2504     } else {
  2505       // The concurrent marking thread is still finishing up the
  2506       // previous cycle. If we start one right now the two cycles
  2507       // overlap. In particular, the concurrent marking thread might
  2508       // be in the process of clearing the next marking bitmap (which
  2509       // we will use for the next cycle if we start one). Starting a
  2510       // cycle now will be bad given that parts of the marking
  2511       // information might get cleared by the marking thread. And we
  2512       // cannot wait for the marking thread to finish the cycle as it
  2513       // periodically yields while clearing the next marking bitmap
  2514       // and, if it's in a yield point, it's waiting for us to
  2515       // finish. So, at this point we will not start a cycle and we'll
  2516       // let the concurrent marking thread complete the last one.
  2517       ergo_verbose0(ErgoConcCycles,
  2518                     "do not initiate concurrent cycle",
  2519                     ergo_format_reason("concurrent cycle already in progress"));
  2520     }
  2521   }
  2522 }
  2524 class KnownGarbageClosure: public HeapRegionClosure {
  2525   CollectionSetChooser* _hrSorted;
  2527 public:
  2528   KnownGarbageClosure(CollectionSetChooser* hrSorted) :
  2529     _hrSorted(hrSorted)
  2530   {}
  2532   bool doHeapRegion(HeapRegion* r) {
  2533     // We only include humongous regions in collection
  2534     // sets when concurrent mark shows that their contained object is
  2535     // unreachable.
  2537     // Do we have any marking information for this region?
  2538     if (r->is_marked()) {
  2539       // We don't include humongous regions in collection
  2540       // sets because we collect them immediately at the end of a marking
  2541       // cycle.  We also don't include young regions because we *must*
  2542       // include them in the next collection pause.
  2543       if (!r->isHumongous() && !r->is_young()) {
  2544         _hrSorted->addMarkedHeapRegion(r);
  2545       }
  2546     }
  2547     return false;
  2548   }
  2549 };
  2551 class ParKnownGarbageHRClosure: public HeapRegionClosure {
  2552   CollectionSetChooser* _hrSorted;
  2553   jint _marked_regions_added;
  2554   jint _chunk_size;
  2555   jint _cur_chunk_idx;
  2556   jint _cur_chunk_end; // Cur chunk [_cur_chunk_idx, _cur_chunk_end)
  2557   int _worker;
  2558   int _invokes;
  2560   void get_new_chunk() {
  2561     _cur_chunk_idx = _hrSorted->getParMarkedHeapRegionChunk(_chunk_size);
  2562     _cur_chunk_end = _cur_chunk_idx + _chunk_size;
  2563   }
  2564   void add_region(HeapRegion* r) {
  2565     if (_cur_chunk_idx == _cur_chunk_end) {
  2566       get_new_chunk();
  2567     }
  2568     assert(_cur_chunk_idx < _cur_chunk_end, "postcondition");
  2569     _hrSorted->setMarkedHeapRegion(_cur_chunk_idx, r);
  2570     _marked_regions_added++;
  2571     _cur_chunk_idx++;
  2572   }
  2574 public:
  2575   ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
  2576                            jint chunk_size,
  2577                            int worker) :
  2578     _hrSorted(hrSorted), _chunk_size(chunk_size), _worker(worker),
  2579     _marked_regions_added(0), _cur_chunk_idx(0), _cur_chunk_end(0),
  2580     _invokes(0)
  2581   {}
  2583   bool doHeapRegion(HeapRegion* r) {
  2584     // We only include humongous regions in collection
  2585     // sets when concurrent mark shows that their contained object is
  2586     // unreachable.
  2587     _invokes++;
  2589     // Do we have any marking information for this region?
  2590     if (r->is_marked()) {
  2591       // We don't include humongous regions in collection
  2592       // sets because we collect them immediately at the end of a marking
  2593       // cycle.
  2594       // We also do not include young regions in collection sets
  2595       if (!r->isHumongous() && !r->is_young()) {
  2596         add_region(r);
  2597       }
  2598     }
  2599     return false;
  2600   }
  2601   jint marked_regions_added() { return _marked_regions_added; }
  2602   int invokes() { return _invokes; }
  2603 };

class ParKnownGarbageTask: public AbstractGangTask {
  CollectionSetChooser* _hrSorted;
  jint _chunk_size;
  G1CollectedHeap* _g1;
public:
  ParKnownGarbageTask(CollectionSetChooser* hrSorted, jint chunk_size) :
    AbstractGangTask("ParKnownGarbageTask"),
    _hrSorted(hrSorted), _chunk_size(chunk_size),
    _g1(G1CollectedHeap::heap())
  {}

  void work(int i) {
    ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size, i);
    // Back to zero for the claim value.
    _g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, i,
                                         HeapRegion::InitialClaimValue);
    jint regions_added = parKnownGarbageCl.marked_regions_added();
    _hrSorted->incNumMarkedHeapRegions(regions_added);
    if (G1PrintParCleanupStats) {
      gclog_or_tty->print_cr("     Thread %d called %d times, added %d regions to list.",
                 i, parKnownGarbageCl.invokes(), regions_added);
    }
  }
};
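
// The chunked iteration divides the region array among the workers; each
// worker presumably claims individual regions by atomically installing the
// given claim value, so every region is visited by exactly one worker.
// Passing InitialClaimValue here also resets the per-region claim values
// (hence the "Back to zero" comment and the sanity assert in the caller).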

void
G1CollectorPolicy::record_concurrent_mark_cleanup_end() {
  double start_sec;
  if (G1PrintParCleanupStats) {
    start_sec = os::elapsedTime();
  }

  _collectionSetChooser->clearMarkedHeapRegions();
  double clear_marked_end_sec;
  if (G1PrintParCleanupStats) {
    clear_marked_end_sec = os::elapsedTime();
    gclog_or_tty->print_cr("  clear marked regions: %8.3f ms.",
                           (clear_marked_end_sec - start_sec) * 1000.0);
  }

  if (G1CollectedHeap::use_parallel_gc_threads()) {
    const size_t OverpartitionFactor = 4;
    const size_t MinWorkUnit = 8;
    const size_t WorkUnit =
      MAX2(_g1->n_regions() / (ParallelGCThreads * OverpartitionFactor),
           MinWorkUnit);
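
    // Illustrative numbers (not from this file): with 2048 regions and
    // ParallelGCThreads == 8, WorkUnit is MAX2(2048 / (8 * 4), 8) == 64,
    // i.e. roughly four chunks per thread on average, which balances load
    // without excessive chunk-claiming traffic.
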
    _collectionSetChooser->prepareForAddMarkedHeapRegionsPar(_g1->n_regions(),
                                                             WorkUnit);
    ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser,
                                            (int) WorkUnit);
    _g1->workers()->run_task(&parKnownGarbageTask);

    assert(_g1->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
           "sanity check");
  } else {
    KnownGarbageClosure knownGarbagecl(_collectionSetChooser);
    _g1->heap_region_iterate(&knownGarbagecl);
  }

  double known_garbage_end_sec;
  if (G1PrintParCleanupStats) {
    known_garbage_end_sec = os::elapsedTime();
    gclog_or_tty->print_cr("  compute known garbage: %8.3f ms.",
                      (known_garbage_end_sec - clear_marked_end_sec) * 1000.0);
  }

  _collectionSetChooser->sortMarkedHeapRegions();
  double end_sec = os::elapsedTime();
  if (G1PrintParCleanupStats) {
    gclog_or_tty->print_cr("  sorting: %8.3f ms.",
                           (end_sec - known_garbage_end_sec) * 1000.0);
  }

  double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
  _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
  _cur_mark_stop_world_time_ms += elapsed_time_ms;
  _prev_collection_pause_end_ms += elapsed_time_ms;
  _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_sec, true);
}

// Add the heap region at the head of the non-incremental collection set
void G1CollectorPolicy::
add_to_collection_set(HeapRegion* hr) {
  assert(_inc_cset_build_state == Active, "Precondition");
  assert(!hr->is_young(), "non-incremental add of young region");

  if (_g1->mark_in_progress())
    _g1->concurrent_mark()->registerCSetRegion(hr);

  assert(!hr->in_collection_set(), "should not already be in the CSet");
  hr->set_in_collection_set(true);
  hr->set_next_in_collection_set(_collection_set);
  _collection_set = hr;
  _collection_set_size++;
  _collection_set_bytes_used_before += hr->used();
  _g1->register_region_with_in_cset_fast_test(hr);
}
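
// The non-incremental collection set is a singly linked list threaded
// through the regions themselves via next_in_collection_set();
// add_to_collection_set() pushes at the head, so regions appear in
// reverse order of addition.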

// Initialize the per-collection-set information
void G1CollectorPolicy::start_incremental_cset_building() {
  assert(_inc_cset_build_state == Inactive, "Precondition");

  _inc_cset_head = NULL;
  _inc_cset_tail = NULL;
  _inc_cset_size = 0;
  _inc_cset_bytes_used_before = 0;

  _inc_cset_young_index = 0;

  _inc_cset_max_finger = 0;
  _inc_cset_recorded_young_bytes = 0;
  _inc_cset_recorded_rs_lengths = 0;
  _inc_cset_predicted_elapsed_time_ms = 0;
  _inc_cset_predicted_bytes_to_copy = 0;
  _inc_cset_build_state = Active;
}

void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) {
  // This routine is used when:
  // * adding survivor regions to the incremental cset at the end of an
  //   evacuation pause,
  // * adding the current allocation region to the incremental cset
  //   when it is retired, and
  // * updating existing policy information for a region in the
  //   incremental cset via young list RSet sampling.
  // Therefore this routine may be called at a safepoint by the
  // VM thread, or in-between safepoints by mutator threads (when
  // retiring the current allocation region) or a concurrent
  // refine thread (RSet sampling).

  double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, true);
  size_t used_bytes = hr->used();

  _inc_cset_recorded_rs_lengths += rs_length;
  _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;

  _inc_cset_bytes_used_before += used_bytes;

  // Cache the values we have added to the aggregated information
  // in the heap region, in case we have to remove this region from
  // the incremental collection set, or it is updated by the
  // RSet sampling code.
  hr->set_recorded_rs_length(rs_length);
  hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);

#if PREDICTIONS_VERBOSE
  size_t bytes_to_copy = predict_bytes_to_copy(hr);
  _inc_cset_predicted_bytes_to_copy += bytes_to_copy;

  // Record the number of bytes used in this region.
  _inc_cset_recorded_young_bytes += used_bytes;

  // Cache the values we have added to the aggregated information
  // in the heap region, in case we have to remove this region from
  // the incremental collection set, or it is updated by the
  // RSet sampling code.
  hr->set_predicted_bytes_to_copy(bytes_to_copy);
#endif // PREDICTIONS_VERBOSE
}

void G1CollectorPolicy::remove_from_incremental_cset_info(HeapRegion* hr) {
  // This routine is currently only called as part of the updating of
  // existing policy information for regions in the incremental cset that
  // is performed by the concurrent refine thread(s) as part of young list
  // RSet sampling. Therefore we should not be at a safepoint.

  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(hr->is_young(), "it should be");

  size_t used_bytes = hr->used();
  size_t old_rs_length = hr->recorded_rs_length();
  double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();

  // Subtract the old recorded/predicted policy information for
  // the given heap region from the collection set info.
  _inc_cset_recorded_rs_lengths -= old_rs_length;
  _inc_cset_predicted_elapsed_time_ms -= old_elapsed_time_ms;

  _inc_cset_bytes_used_before -= used_bytes;

  // Clear the values cached in the heap region.
  hr->set_recorded_rs_length(0);
  hr->set_predicted_elapsed_time_ms(0);

#if PREDICTIONS_VERBOSE
  size_t old_predicted_bytes_to_copy = hr->predicted_bytes_to_copy();
  _inc_cset_predicted_bytes_to_copy -= old_predicted_bytes_to_copy;

  // Subtract the number of bytes used in this region.
  _inc_cset_recorded_young_bytes -= used_bytes;

  // Clear the values cached in the heap region.
  hr->set_predicted_bytes_to_copy(0);
#endif // PREDICTIONS_VERBOSE
}

void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length) {
  // Update the collection set information that is dependent on the new RS length.
  assert(hr->is_young(), "Precondition");

  remove_from_incremental_cset_info(hr);
  add_to_incremental_cset_info(hr, new_rs_length);
}
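
// For example (the entry counts here are purely illustrative): if RSet
// sampling observes that a region's remembered set has grown from 100 to
// 180 entries, the old cached prediction is first subtracted from the
// aggregate totals and a fresh prediction based on 180 entries is added,
// keeping the policy totals consistent with the per-region cached values.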

void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) {
  assert(hr->is_young(), "invariant");
  assert(hr->young_index_in_cset() == -1, "invariant");
  assert(_inc_cset_build_state == Active, "Precondition");

  // We need to clear and set the cached recorded/cached collection set
  // information in the heap region here (before the region gets added
  // to the collection set). An individual heap region's cached values
  // are calculated, aggregated with the policy collection set info,
  // and cached in the heap region here (initially) and (subsequently)
  // by the Young List sampling code.

  size_t rs_length = hr->rem_set()->occupied();
  add_to_incremental_cset_info(hr, rs_length);

  HeapWord* hr_end = hr->end();
  _inc_cset_max_finger = MAX2(_inc_cset_max_finger, hr_end);

  assert(!hr->in_collection_set(), "invariant");
  hr->set_in_collection_set(true);
  assert(hr->next_in_collection_set() == NULL, "invariant");

  _inc_cset_size++;
  _g1->register_region_with_in_cset_fast_test(hr);

  hr->set_young_index_in_cset((int) _inc_cset_young_index);
  ++_inc_cset_young_index;
}
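
// _inc_cset_max_finger records the highest end() of any region added to
// the incremental cset so far; when marking is in progress it is handed
// to the concurrent marker (see register_collection_set_finger() in
// choose_collection_set() below), presumably so that marking can account
// for cset regions its own finger has already passed.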

// Add the region at the RHS of the incremental cset
void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) {
  // We should only ever be appending survivors at the end of a pause.
  assert(hr->is_survivor(), "Logic");

  // Do the 'common' stuff.
  add_region_to_incremental_cset_common(hr);

  // Now add the region at the right hand side.
  if (_inc_cset_tail == NULL) {
    assert(_inc_cset_head == NULL, "invariant");
    _inc_cset_head = hr;
  } else {
    _inc_cset_tail->set_next_in_collection_set(hr);
  }
  _inc_cset_tail = hr;
}

// Add the region to the LHS of the incremental cset
void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) {
  // Survivors should be added to the RHS at the end of a pause.
  assert(!hr->is_survivor(), "Logic");

  // Do the 'common' stuff.
  add_region_to_incremental_cset_common(hr);

  // Add the region at the left hand side.
  hr->set_next_in_collection_set(_inc_cset_head);
  if (_inc_cset_head == NULL) {
    assert(_inc_cset_tail == NULL, "Invariant");
    _inc_cset_tail = hr;
  }
  _inc_cset_head = hr;
}

#ifndef PRODUCT
void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream* st) {
  assert(list_head == inc_cset_head() || list_head == collection_set(), "must be");

  st->print_cr("\nCollection_set:");
  HeapRegion* csr = list_head;
  while (csr != NULL) {
    HeapRegion* next = csr->next_in_collection_set();
    assert(csr->in_collection_set(), "bad CS");
    st->print_cr("  [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, "
                 "age: %4d, y: %d, surv: %d",
                        csr->bottom(), csr->end(),
                        csr->top(),
                        csr->prev_top_at_mark_start(),
                        csr->next_top_at_mark_start(),
                        csr->top_at_conc_mark_count(),
                        csr->age_in_surv_rate_group_cond(),
                        csr->is_young(),
                        csr->is_survivor());
    csr = next;
  }
}
#endif // !PRODUCT

void G1CollectorPolicy::choose_collection_set(double target_pause_time_ms) {
  // Set this here - in case we're not doing young collections.
  double non_young_start_time_sec = os::elapsedTime();

  YoungList* young_list = _g1->young_list();

  start_recording_regions();

  guarantee(target_pause_time_ms > 0.0,
            err_msg("target_pause_time_ms = %1.6lf should be positive",
                    target_pause_time_ms));
  guarantee(_collection_set == NULL, "Precondition");

  double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
  double predicted_pause_time_ms = base_time_ms;

  double time_remaining_ms = target_pause_time_ms - base_time_ms;

  ergo_verbose3(ErgoCSetConstruction | ErgoHigh,
                "start choosing CSet",
                ergo_format_ms("predicted base time")
                ergo_format_ms("remaining time")
                ergo_format_ms("target pause time"),
                base_time_ms, time_remaining_ms, target_pause_time_ms);

  // The 10% and 50% values are arbitrary...
  double threshold = 0.10 * target_pause_time_ms;
  if (time_remaining_ms < threshold) {
    double prev_time_remaining_ms = time_remaining_ms;
    time_remaining_ms = 0.50 * target_pause_time_ms;
    _within_target = false;
    ergo_verbose3(ErgoCSetConstruction,
                  "adjust remaining time",
                  ergo_format_reason("remaining time lower than threshold")
                  ergo_format_ms("remaining time")
                  ergo_format_ms("threshold")
                  ergo_format_ms("adjusted remaining time"),
                  prev_time_remaining_ms, threshold, time_remaining_ms);
  } else {
    _within_target = true;
  }
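
  // Illustrative numbers (not from this file): with a 200 ms target and a
  // predicted base time of 185 ms, only 15 ms would remain, below the
  // 20 ms (10%) threshold, so the budget is reset to 100 ms (50%) and the
  // pause is flagged as not within target.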

  size_t expansion_bytes = _g1->expansion_regions() * HeapRegion::GrainBytes;

  HeapRegion* hr;
  double young_start_time_sec = os::elapsedTime();

  _collection_set_bytes_used_before = 0;
  _collection_set_size = 0;
  _young_cset_length = 0;
  _last_young_gc_full = full_young_gcs();

  if (_last_young_gc_full) {
    ++_full_young_pause_num;
  } else {
    ++_partial_young_pause_num;
  }

  // The young list is laid out so that the survivor regions from the
  // previous pause are appended to the RHS of the young list, i.e.
  //   [Newly Young Regions ++ Survivors from last pause].

  size_t survivor_region_num = young_list->survivor_length();
  size_t eden_region_num = young_list->length() - survivor_region_num;
  size_t old_region_num = 0;
  hr = young_list->first_survivor_region();
  while (hr != NULL) {
    assert(hr->is_survivor(), "badly formed young list");
    hr->set_young();
    hr = hr->get_next_young_region();
  }

  // Clear the fields that point to the survivor list - they are all young now.
  young_list->clear_survivors();

  if (_g1->mark_in_progress())
    _g1->concurrent_mark()->register_collection_set_finger(_inc_cset_max_finger);

  _young_cset_length = _inc_cset_young_index;
  _collection_set = _inc_cset_head;
  _collection_set_size = _inc_cset_size;
  _collection_set_bytes_used_before = _inc_cset_bytes_used_before;
  time_remaining_ms -= _inc_cset_predicted_elapsed_time_ms;
  predicted_pause_time_ms += _inc_cset_predicted_elapsed_time_ms;

  ergo_verbose3(ErgoCSetConstruction | ErgoHigh,
                "add young regions to CSet",
                ergo_format_region("eden")
                ergo_format_region("survivors")
                ergo_format_ms("predicted young region time"),
                eden_region_num, survivor_region_num,
                _inc_cset_predicted_elapsed_time_ms);

  // The number of recorded young regions is the incremental
  // collection set's current size.
  set_recorded_young_regions(_inc_cset_size);
  set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);
  set_recorded_young_bytes(_inc_cset_recorded_young_bytes);
#if PREDICTIONS_VERBOSE
  set_predicted_bytes_to_copy(_inc_cset_predicted_bytes_to_copy);
#endif // PREDICTIONS_VERBOSE

  assert(_inc_cset_size == young_list->length(), "Invariant");

  double young_end_time_sec = os::elapsedTime();
  _recorded_young_cset_choice_time_ms =
    (young_end_time_sec - young_start_time_sec) * 1000.0;

  // We are doing young collections so reset this.
  non_young_start_time_sec = young_end_time_sec;
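
  // Note that the entire young list was taken into the collection set
  // unconditionally above - young regions must be collected in the next
  // pause - so the remaining time budget only limits how many non-young
  // (old) regions are added below.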

  if (!full_young_gcs()) {
    bool should_continue = true;
    NumberSeq seq;
    double avg_prediction = 100000000000000000.0; // something very large

    size_t prev_collection_set_size = _collection_set_size;
    double prev_predicted_pause_time_ms = predicted_pause_time_ms;
    do {
      hr = _collectionSetChooser->getNextMarkedRegion(time_remaining_ms,
                                                      avg_prediction);
      if (hr != NULL) {
        double predicted_time_ms = predict_region_elapsed_time_ms(hr, false);
        time_remaining_ms -= predicted_time_ms;
        predicted_pause_time_ms += predicted_time_ms;
        add_to_collection_set(hr);
        record_non_young_cset_region(hr);
        seq.add(predicted_time_ms);
        avg_prediction = seq.avg() + seq.sd();
      }

      should_continue = true;
      if (hr == NULL) {
        // No need for an ergo verbose message here,
        // getNextMarkedRegion() does this when it returns NULL.
        should_continue = false;
      } else {
        if (adaptive_young_list_length()) {
          if (time_remaining_ms < 0.0) {
            ergo_verbose1(ErgoCSetConstruction,
                          "stop adding old regions to CSet",
                          ergo_format_reason("remaining time is lower than 0")
                          ergo_format_ms("remaining time"),
                          time_remaining_ms);
            should_continue = false;
          }
        } else {
          if (_collection_set_size >= _young_list_fixed_length) {
            ergo_verbose2(ErgoCSetConstruction,
                          "stop adding old regions to CSet",
                          ergo_format_reason("CSet length reached target")
                          ergo_format_region("CSet")
                          ergo_format_region("young target"),
                          _collection_set_size, _young_list_fixed_length);
            should_continue = false;
          }
        }
      }
    } while (should_continue);
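
    // avg_prediction tracks the running mean plus one standard deviation
    // of the per-region time predictions added so far. It starts out
    // effectively infinite so the first candidate region is always
    // considered; thereafter the chooser can presumably use it to reject
    // regions whose predicted cost is out of line with the ones already
    // taken.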

    if (!adaptive_young_list_length() &&
        _collection_set_size < _young_list_fixed_length) {
      ergo_verbose2(ErgoCSetConstruction,
                    "request partially-young GCs end",
                    ergo_format_reason("CSet length lower than target")
                    ergo_format_region("CSet")
                    ergo_format_region("young target"),
                    _collection_set_size, _young_list_fixed_length);
      _should_revert_to_full_young_gcs = true;
    }

    old_region_num = _collection_set_size - prev_collection_set_size;

    ergo_verbose2(ErgoCSetConstruction | ErgoHigh,
                  "add old regions to CSet",
                  ergo_format_region("old")
                  ergo_format_ms("predicted old region time"),
                  old_region_num,
                  predicted_pause_time_ms - prev_predicted_pause_time_ms);
  }

  stop_incremental_cset_building();

  count_CS_bytes_used();

  end_recording_regions();

  ergo_verbose5(ErgoCSetConstruction,
                "finish choosing CSet",
                ergo_format_region("eden")
                ergo_format_region("survivors")
                ergo_format_region("old")
                ergo_format_ms("predicted pause time")
                ergo_format_ms("target pause time"),
                eden_region_num, survivor_region_num, old_region_num,
                predicted_pause_time_ms, target_pause_time_ms);

  double non_young_end_time_sec = os::elapsedTime();
  _recorded_non_young_cset_choice_time_ms =
    (non_young_end_time_sec - non_young_start_time_sec) * 1000.0;
}
