src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp

author:      johnc
date:        Thu, 22 Sep 2011 10:57:37 -0700
changeset:   3175 4dfb2df418f2
parent:      3172 d912b598c6c3
child:       3178 273b46400613
permissions: -rw-r--r--

6484982: G1: process references during evacuation pauses
Summary: G1 now uses two reference processors - one is used by concurrent marking and the other is used by STW GCs (both full and incremental evacuation pauses). In an evacuation pause, the reference processor is embedded into the closures used to scan objects. Doing so causes reference objects to be 'discovered' by the reference processor. At the end of the evacuation pause, these discovered reference objects are processed - preserving (and copying) referent objects (and their reachable graphs) as appropriate.
Reviewed-by: ysr, jwilhelm, brutisso, stefank, tonyp

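Below is a minimal, self-contained sketch of the discovery scheme the summary describes. The types here (RefProcessor, CopyClosure, Obj) are hypothetical stand-ins, not the actual HotSpot classes; the point is only that the closure doing the copying has the pause's reference processor embedded in it, hands each reference object it encounters to that processor, and the processor then walks its discovered list at the end of the pause.

// Hypothetical stand-ins for the HotSpot types; illustration only.
#include <vector>
#include <cstdio>

struct Obj { bool is_reference; Obj* referent; };

// Plays the role of the STW reference processor: it remembers the
// reference objects "discovered" while the copy closures run.
struct RefProcessor {
  std::vector<Obj*> discovered;
  void discover(Obj* ref) { discovered.push_back(ref); }
  // At the end of the pause, visit (here: just copy) each referent
  // and its reachable graph as appropriate.
  template <typename CopyFn>
  void process(CopyFn copy) {
    for (Obj* ref : discovered) {
      if (ref->referent != nullptr) copy(ref->referent);
    }
    discovered.clear();
  }
};

// Plays the role of a scan/copy closure with the reference
// processor embedded in it, as the summary above describes.
struct CopyClosure {
  RefProcessor* rp;
  void do_object(Obj* obj) {
    if (obj->is_reference) rp->discover(obj);
    // ... normal evacuation/copying work would happen here ...
  }
};

int main() {
  RefProcessor stw_rp;               // the STW reference processor
  CopyClosure cl{&stw_rp};           // processor embedded in the closure
  Obj referent{false, nullptr};
  Obj ref{true, &referent};
  cl.do_object(&ref);                // discovery during the pause
  stw_rp.process([](Obj* o) { std::printf("copied referent %p\n", (void*)o); });
}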
/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "gc_implementation/g1/concurrentG1Refine.hpp"
#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "runtime/arguments.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/debug.hpp"
#define PREDICTIONS_VERBOSE 0

// <NEW PREDICTION>

// Different defaults for different number of GC threads
// They were chosen by running GCOld and SPECjbb on debris with different
//   numbers of GC threads and choosing them based on the results

// all the same
static double rs_length_diff_defaults[] = {
  0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
};

static double cost_per_card_ms_defaults[] = {
  0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
};

// all the same
static double fully_young_cards_per_entry_ratio_defaults[] = {
  1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
};

static double cost_per_entry_ms_defaults[] = {
  0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005
};

static double cost_per_byte_ms_defaults[] = {
  0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009
};

// these should be pretty consistent
static double constant_other_time_ms_defaults[] = {
  5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0
};

static double young_other_cost_per_region_ms_defaults[] = {
  0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1
};

static double non_young_other_cost_per_region_ms_defaults[] = {
  1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
};

// </NEW PREDICTION>
// Help class for avoiding interleaved logging
class LineBuffer: public StackObj {

private:
  static const int BUFFER_LEN = 1024;
  static const int INDENT_CHARS = 3;
  char _buffer[BUFFER_LEN];
  int _indent_level;
  int _cur;

  void vappend(const char* format, va_list ap) {
    int res = vsnprintf(&_buffer[_cur], BUFFER_LEN - _cur, format, ap);
    if (res != -1) {
      _cur += res;
    } else {
      DEBUG_ONLY(warning("buffer too small in LineBuffer");)
      _buffer[BUFFER_LEN - 1] = 0;
      _cur = BUFFER_LEN; // vsnprintf above should not add to _buffer if we are called again
    }
  }

public:
  explicit LineBuffer(int indent_level): _indent_level(indent_level), _cur(0) {
    for (; (_cur < BUFFER_LEN && _cur < (_indent_level * INDENT_CHARS)); _cur++) {
      _buffer[_cur] = ' ';
    }
  }

#ifndef PRODUCT
  ~LineBuffer() {
    assert(_cur == _indent_level * INDENT_CHARS, "pending data in buffer - append_and_print_cr() not called?");
  }
#endif

  void append(const char* format, ...) {
    va_list ap;
    va_start(ap, format);
    vappend(format, ap);
    va_end(ap);
  }

  void append_and_print_cr(const char* format, ...) {
    va_list ap;
    va_start(ap, format);
    vappend(format, ap);
    va_end(ap);
    gclog_or_tty->print_cr("%s", _buffer);
    _cur = _indent_level * INDENT_CHARS;
  }
};
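// Illustrative usage (print_par_stats() below builds its lines this way):
//   LineBuffer buf(1);
//   buf.append("[%s (ms):", "GC Worker Start");
//   buf.append("  %3.1lf", 12.3);
//   buf.append_and_print_cr("]");
// The whole line reaches gclog_or_tty in a single print_cr() call, so
// lines from concurrent printers cannot interleave mid-line.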
G1CollectorPolicy::G1CollectorPolicy() :
  _parallel_gc_threads(G1CollectedHeap::use_parallel_gc_threads()
                        ? ParallelGCThreads : 1),

  _n_pauses(0),
  _recent_rs_scan_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _recent_pause_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _recent_rs_sizes(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _all_pause_times_ms(new NumberSeq()),
  _stop_world_start(0.0),
  _all_stop_world_times_ms(new NumberSeq()),
  _all_yield_times_ms(new NumberSeq()),
  _using_new_ratio_calculations(false),

  _all_mod_union_times_ms(new NumberSeq()),

  _summary(new Summary()),

  _cur_clear_ct_time_ms(0.0),

  _cur_ref_proc_time_ms(0.0),
  _cur_ref_enq_time_ms(0.0),

#ifndef PRODUCT
  _min_clear_cc_time_ms(-1.0),
  _max_clear_cc_time_ms(-1.0),
  _cur_clear_cc_time_ms(0.0),
  _cum_clear_cc_time_ms(0.0),
  _num_cc_clears(0L),
#endif

  _region_num_young(0),
  _region_num_tenured(0),
  _prev_region_num_young(0),
  _prev_region_num_tenured(0),

  _aux_num(10),
  _all_aux_times_ms(new NumberSeq[_aux_num]),
  _cur_aux_start_times_ms(new double[_aux_num]),
  _cur_aux_times_ms(new double[_aux_num]),
  _cur_aux_times_set(new bool[_aux_num]),

  _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),

  // <NEW PREDICTION>

  _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _prev_collection_pause_end_ms(0.0),
  _pending_card_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
  _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _fully_young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  _partially_young_cards_per_entry_ratio_seq(
                                         new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _partially_young_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
  _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _non_young_other_cost_per_region_ms_seq(
                                         new TruncatedSeq(TruncatedSeqLength)),

  _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
  _scanned_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
  _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),

  _pause_time_target_ms((double) MaxGCPauseMillis),

  // </NEW PREDICTION>

  _full_young_gcs(true),
  _full_young_pause_num(0),
  _partial_young_pause_num(0),

  _during_marking(false),
  _in_marking_window(false),
  _in_marking_window_im(false),

  _known_garbage_ratio(0.0),
  _known_garbage_bytes(0),

  _young_gc_eff_seq(new TruncatedSeq(TruncatedSeqLength)),

  _recent_prev_end_times_for_all_gcs_sec(new TruncatedSeq(NumPrevPausesForHeuristics)),

  _recent_CS_bytes_used_before(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _recent_CS_bytes_surviving(new TruncatedSeq(NumPrevPausesForHeuristics)),

  _recent_avg_pause_time_ratio(0.0),
  _num_markings(0),
  _n_marks(0),
  _n_pauses_at_mark_end(0),

  _all_full_gc_times_ms(new NumberSeq()),

  // G1PausesBtwnConcMark defaults to -1
  // so the hack is to do the cast  QQQ FIXME
  _pauses_btwn_concurrent_mark((size_t)G1PausesBtwnConcMark),
  _n_marks_since_last_pause(0),
  _initiate_conc_mark_if_possible(false),
  _during_initial_mark_pause(false),
  _should_revert_to_full_young_gcs(false),
  _last_full_young_gc(false),

  _eden_bytes_before_gc(0),
  _survivor_bytes_before_gc(0),
  _capacity_before_gc(0),

  _prev_collection_pause_used_at_end_bytes(0),

  _collection_set(NULL),
  _collection_set_size(0),
  _collection_set_bytes_used_before(0),

  // Incremental CSet attributes
  _inc_cset_build_state(Inactive),
  _inc_cset_head(NULL),
  _inc_cset_tail(NULL),
  _inc_cset_size(0),
  _inc_cset_young_index(0),
  _inc_cset_bytes_used_before(0),
  _inc_cset_max_finger(NULL),
  _inc_cset_recorded_young_bytes(0),
  _inc_cset_recorded_rs_lengths(0),
  _inc_cset_predicted_elapsed_time_ms(0.0),
  _inc_cset_predicted_bytes_to_copy(0),

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

  _short_lived_surv_rate_group(new SurvRateGroup(this, "Short Lived",
                                                 G1YoungSurvRateNumRegionsSummary)),
  _survivor_surv_rate_group(new SurvRateGroup(this, "Survivor",
                                              G1YoungSurvRateNumRegionsSummary)),
  // add here any more surv rate groups
  _recorded_survivor_regions(0),
  _recorded_survivor_head(NULL),
  _recorded_survivor_tail(NULL),
  _survivors_age_table(true),

  _gc_overhead_perc(0.0) {
  // Set up the region size and associated fields. Given that the
  // policy is created before the heap, we have to set this up here,
  // so it's done as soon as possible.
  HeapRegion::setup_heap_region_size(Arguments::min_heap_size());
  HeapRegionRemSet::setup_remset_size();

  G1ErgoVerbose::initialize();
  if (PrintAdaptiveSizePolicy) {
    // Currently, we only use a single switch for all the heuristics.
    G1ErgoVerbose::set_enabled(true);
    // Given that we don't currently have a verboseness level
    // parameter, we'll hardcode this to high. This can be easily
    // changed in the future.
    G1ErgoVerbose::set_level(ErgoHigh);
  } else {
    G1ErgoVerbose::set_enabled(false);
  }

  // Verify PLAB sizes
  const uint region_size = HeapRegion::GrainWords;
  if (YoungPLABSize > region_size || OldPLABSize > region_size) {
    char buffer[128];
    jio_snprintf(buffer, sizeof(buffer), "%sPLABSize should be at most %u",
                 OldPLABSize > region_size ? "Old" : "Young", region_size);
    vm_exit_during_initialization(buffer);
  }

  _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
  _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;

  _par_last_gc_worker_start_times_ms = new double[_parallel_gc_threads];
  _par_last_ext_root_scan_times_ms = new double[_parallel_gc_threads];
  _par_last_mark_stack_scan_times_ms = new double[_parallel_gc_threads];

  _par_last_update_rs_times_ms = new double[_parallel_gc_threads];
  _par_last_update_rs_processed_buffers = new double[_parallel_gc_threads];

  _par_last_scan_rs_times_ms = new double[_parallel_gc_threads];

  _par_last_obj_copy_times_ms = new double[_parallel_gc_threads];

  _par_last_termination_times_ms = new double[_parallel_gc_threads];
  _par_last_termination_attempts = new double[_parallel_gc_threads];
  _par_last_gc_worker_end_times_ms = new double[_parallel_gc_threads];
  _par_last_gc_worker_times_ms = new double[_parallel_gc_threads];

  // start conservatively
  _expensive_region_limit_ms = 0.5 * (double) MaxGCPauseMillis;

  // <NEW PREDICTION>

  int index;
  if (ParallelGCThreads == 0)
    index = 0;
  else if (ParallelGCThreads > 8)
    index = 7;
  else
    index = ParallelGCThreads - 1;
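  // e.g., ParallelGCThreads == 4 selects index 3; more than 8 threads
  // clamps to index 7, the last entry of the 8-element default tables.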
  _pending_card_diff_seq->add(0.0);
  _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
  _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
  _fully_young_cards_per_entry_ratio_seq->add(
                            fully_young_cards_per_entry_ratio_defaults[index]);
  _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
  _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
  _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
  _young_other_cost_per_region_ms_seq->add(
                               young_other_cost_per_region_ms_defaults[index]);
  _non_young_other_cost_per_region_ms_seq->add(
                           non_young_other_cost_per_region_ms_defaults[index]);

  // </NEW PREDICTION>
  // Below, we might need to calculate the pause time target based on
  // the pause interval. When we do so we are going to give G1 maximum
  // flexibility and allow it to do pauses when it needs to. So, we'll
  // arrange for the pause interval to be pause time target + 1 to
  // ensure that a) the pause time target is maximized with respect to
  // the pause interval and b) we maintain the invariant that pause
  // time target < pause interval. If the user does not want this
  // maximum flexibility, they will have to set the pause interval
  // explicitly.

  // First make sure that, if either parameter is set, its value is
  // reasonable.
  if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (MaxGCPauseMillis < 1) {
      vm_exit_during_initialization("MaxGCPauseMillis should be "
                                    "greater than 0");
    }
  }
  if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    if (GCPauseIntervalMillis < 1) {
      vm_exit_during_initialization("GCPauseIntervalMillis should be "
                                    "greater than 0");
    }
  }

  // Then, if the pause time target parameter was not set, set it to
  // the default value.
  if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
      // The default pause time target in G1 is 200ms
      FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
    } else {
      // We do not allow the pause interval to be set without the
      // pause time target
      vm_exit_during_initialization("GCPauseIntervalMillis cannot be set "
                                    "without setting MaxGCPauseMillis");
    }
  }

  // Then, if the interval parameter was not set, set it according to
  // the pause time target (this will also deal with the case when the
  // pause time target is the default value).
  if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
  }
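  // Example: with both flags at their defaults, the code above yields
  // MaxGCPauseMillis = 200 and GCPauseIntervalMillis = 201, satisfying
  // the pause time target < pause interval invariant.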
  // Finally, make sure that the two parameters are consistent.
  if (MaxGCPauseMillis >= GCPauseIntervalMillis) {
    char buffer[256];
    jio_snprintf(buffer, 256,
                 "MaxGCPauseMillis (%u) should be less than "
                 "GCPauseIntervalMillis (%u)",
                 MaxGCPauseMillis, GCPauseIntervalMillis);
    vm_exit_during_initialization(buffer);
  }

  double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
  double time_slice  = (double) GCPauseIntervalMillis / 1000.0;
  _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);
  _sigma = (double) G1ConfidencePercent / 100.0;

  // start conservatively (around 50ms is about right)
  _concurrent_mark_remark_times_ms->add(0.05);
  _concurrent_mark_cleanup_times_ms->add(0.20);
  _tenuring_threshold = MaxTenuringThreshold;
  // _max_survivor_regions will be calculated by
  // update_young_list_target_length() during initialization.
  _max_survivor_regions = 0;

  assert(GCTimeRatio > 0,
         "we should have set it to a default value in set_g1_gc_flags() "
         "if a user set it to 0");
  _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));
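  // e.g., the G1 default GCTimeRatio of 9 gives 100 * (1 / (1 + 9)) = 10,
  // i.e., a target GC overhead of 10% of total time.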
  uintx reserve_perc = G1ReservePercent;
  // Put an artificial ceiling on this so that it's not set to a silly value.
  if (reserve_perc > 50) {
    reserve_perc = 50;
    warning("G1ReservePercent is set to a value that is too large, "
            "it's been updated to %u", reserve_perc);
  }
  _reserve_factor = (double) reserve_perc / 100.0;
  // This will be set when the heap is expanded
  // for the first time during initialization.
  _reserve_regions = 0;

  initialize_all();
}

// Increment "i", mod "len"
static void inc_mod(int& i, int len) {
  i++; if (i == len) i = 0;
}
void G1CollectorPolicy::initialize_flags() {
  set_min_alignment(HeapRegion::GrainBytes);
  set_max_alignment(GenRemSet::max_alignment_constraint(rem_set_name()));
  if (SurvivorRatio < 1) {
    vm_exit_during_initialization("Invalid survivor ratio specified");
  }
  CollectorPolicy::initialize_flags();
}

// The easiest way to deal with the parsing of the NewSize /
// MaxNewSize / etc. parameters is to re-use the code in the
// TwoGenerationCollectorPolicy class. This is similar to what
// ParallelScavenge does with its GenerationSizer class (see
// ParallelScavengeHeap::initialize()). We might change this in the
// future, but it's a good start.
class G1YoungGenSizer : public TwoGenerationCollectorPolicy {
private:
  size_t size_to_region_num(size_t byte_size) {
    return MAX2((size_t) 1, byte_size / HeapRegion::GrainBytes);
  }
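  // e.g., with 1 MB regions a 64 MB gen0 size maps to 64 regions, and
  // anything smaller than one region rounds up to 1 via MAX2.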
public:
  G1YoungGenSizer() {
    initialize_flags();
    initialize_size_info();
  }
  size_t min_young_region_num() {
    return size_to_region_num(_min_gen0_size);
  }
  size_t initial_young_region_num() {
    return size_to_region_num(_initial_gen0_size);
  }
  size_t max_young_region_num() {
    return size_to_region_num(_max_gen0_size);
  }
};

void G1CollectorPolicy::update_young_list_size_using_newratio(size_t number_of_heap_regions) {
  assert(number_of_heap_regions > 0, "Heap must be initialized");
  size_t young_size = number_of_heap_regions / (NewRatio + 1);
  _min_desired_young_length = young_size;
  _max_desired_young_length = young_size;
}
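// Example: -XX:NewRatio=2 on a 300-region heap fixes both bounds at
// 300 / (2 + 1) = 100 young regions.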
void G1CollectorPolicy::init() {
  // Set aside an initial future to_space.
  _g1 = G1CollectedHeap::heap();

  assert(Heap_lock->owned_by_self(), "Locking discipline.");

  initialize_gc_policy_counters();

  G1YoungGenSizer sizer;
  size_t initial_region_num = sizer.initial_young_region_num();
  _min_desired_young_length = sizer.min_young_region_num();
  _max_desired_young_length = sizer.max_young_region_num();

  if (FLAG_IS_CMDLINE(NewRatio)) {
    if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
      warning("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
    } else {
      // Treat NewRatio as a fixed size that is only recalculated when the heap size changes
      update_young_list_size_using_newratio(_g1->n_regions());
      _using_new_ratio_calculations = true;
    }
  }

  // GenCollectorPolicy guarantees that min <= initial <= max.
  // Asserting here just to state that we rely on this property.
  assert(_min_desired_young_length <= _max_desired_young_length, "Invalid min/max young gen size values");
  assert(initial_region_num <= _max_desired_young_length, "Initial young gen size too large");
  assert(_min_desired_young_length <= initial_region_num, "Initial young gen size too small");

  set_adaptive_young_list_length(_min_desired_young_length < _max_desired_young_length);
  if (adaptive_young_list_length()) {
    _young_list_fixed_length = 0;
  } else {
    _young_list_fixed_length = initial_region_num;
  }
  _free_regions_at_end_of_collection = _g1->free_regions();
  update_young_list_target_length();
  _prev_eden_capacity = _young_list_target_length * HeapRegion::GrainBytes;

  // We may immediately start allocating regions and placing them on the
  // collection set list. Initialize the per-collection set info
  start_incremental_cset_building();
}

// Create the jstat counters for the policy.
void G1CollectorPolicy::initialize_gc_policy_counters() {
  _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3);
}
bool G1CollectorPolicy::predict_will_fit(size_t young_length,
                                         double base_time_ms,
                                         size_t base_free_regions,
                                         double target_pause_time_ms) {
  if (young_length >= base_free_regions) {
    // end condition 1: not enough space for the young regions
    return false;
  }

  double accum_surv_rate = accum_yg_surv_rate_pred((int)(young_length - 1));
  size_t bytes_to_copy =
               (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
  double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
  double young_other_time_ms = predict_young_other_time_ms(young_length);
  double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms;
  if (pause_time_ms > target_pause_time_ms) {
    // end condition 2: prediction is over the target pause time
    return false;
  }

  size_t free_bytes =
                  (base_free_regions - young_length) * HeapRegion::GrainBytes;
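  // The check below requires free space of at least (2 * sigma()) times
  // the predicted copy volume, as head-room against prediction error.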
  if ((2.0 * sigma()) * (double) bytes_to_copy > (double) free_bytes) {
    // end condition 3: out-of-space (conservatively!)
    return false;
  }

  // success!
  return true;
}
void G1CollectorPolicy::record_new_heap_size(size_t new_number_of_regions) {
  // re-calculate the necessary reserve
  double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
  // We use ceiling so that if reserve_regions_d is > 0.0 (but
  // smaller than 1.0) we'll get 1.
  _reserve_regions = (size_t) ceil(reserve_regions_d);

  if (_using_new_ratio_calculations) {
    // -XX:NewRatio was specified so we need to update the
    // young gen length when the heap size has changed.
    update_young_list_size_using_newratio(new_number_of_regions);
  }
}
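// Example: a reserve factor of 0.10 on a 57-region heap gives
// ceil(5.7) = 6 reserve regions; even a fraction of a region rounds up to 1.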
size_t G1CollectorPolicy::calculate_young_list_desired_min_length(
                                                     size_t base_min_length) {
  size_t desired_min_length = 0;
  if (adaptive_young_list_length()) {
    if (_alloc_rate_ms_seq->num() > 3) {
      double now_sec = os::elapsedTime();
      double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
      double alloc_rate_ms = predict_alloc_rate_ms();
      desired_min_length = (size_t) ceil(alloc_rate_ms * when_ms);
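      // e.g., a predicted allocation rate of 0.1 regions/ms with the
      // next allowed GC 150 ms away yields ceil(15.0) = 15 regions.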
   597     } else {
   598       // otherwise we don't have enough info to make the prediction
   599     }
   600   }
   601   desired_min_length += base_min_length;
   602   // make sure we don't go below any user-defined minimum bound
   603   return MAX2(_min_desired_young_length, desired_min_length);
   604 }
   606 size_t G1CollectorPolicy::calculate_young_list_desired_max_length() {
   607   // Here, we might want to also take into account any additional
   608   // constraints (i.e., user-defined minimum bound). Currently, we
   609   // effectively don't set this bound.
   610   return _max_desired_young_length;
   611 }
void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
  if (rs_lengths == (size_t) -1) {
    // if it's set to the default value (-1), we should predict it;
    // otherwise, use the given value.
    rs_lengths = (size_t) get_new_prediction(_rs_lengths_seq);
  }

  // Calculate the absolute and desired min bounds.

  // This is how many young regions we already have (currently: the survivors).
  size_t base_min_length = recorded_survivor_regions();
  // This is the absolute minimum young length, which ensures that we
  // can allocate one eden region in the worst-case.
  size_t absolute_min_length = base_min_length + 1;
  size_t desired_min_length =
                     calculate_young_list_desired_min_length(base_min_length);
  if (desired_min_length < absolute_min_length) {
    desired_min_length = absolute_min_length;
  }

  // Calculate the absolute and desired max bounds.

  // We will try our best not to "eat" into the reserve.
  size_t absolute_max_length = 0;
  if (_free_regions_at_end_of_collection > _reserve_regions) {
    absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
  }
  size_t desired_max_length = calculate_young_list_desired_max_length();
  if (desired_max_length > absolute_max_length) {
    desired_max_length = absolute_max_length;
  }

  size_t young_list_target_length = 0;
  if (adaptive_young_list_length()) {
    if (full_young_gcs()) {
      young_list_target_length =
                        calculate_young_list_target_length(rs_lengths,
                                                           base_min_length,
                                                           desired_min_length,
                                                           desired_max_length);
      _rs_lengths_prediction = rs_lengths;
    } else {
      // Don't calculate anything and let the code below bound it to
      // the desired_min_length, i.e., do the next GC as soon as
      // possible to maximize how many old regions we can add to it.
    }
  } else {
    if (full_young_gcs()) {
      young_list_target_length = _young_list_fixed_length;
    } else {
      // A bit arbitrary: during partially-young GCs we allocate half
      // the young regions to try to add old regions to the CSet.
      young_list_target_length = _young_list_fixed_length / 2;
      // We choose to accept that we might go under the desired min
      // length given that we intentionally ask for a smaller young gen.
      desired_min_length = absolute_min_length;
    }
  }

  // Make sure we don't go over the desired max length, nor under the
  // desired min length. In case they clash, desired_min_length wins
  // which is why that test is second.
  if (young_list_target_length > desired_max_length) {
    young_list_target_length = desired_max_length;
  }
  if (young_list_target_length < desired_min_length) {
    young_list_target_length = desired_min_length;
  }

  assert(young_list_target_length > recorded_survivor_regions(),
         "we should be able to allocate at least one eden region");
  assert(young_list_target_length >= absolute_min_length, "post-condition");
  _young_list_target_length = young_list_target_length;

  update_max_gc_locker_expansion();
}
size_t
G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
                                                   size_t base_min_length,
                                                   size_t desired_min_length,
                                                   size_t desired_max_length) {
  assert(adaptive_young_list_length(), "pre-condition");
  assert(full_young_gcs(), "only call this for fully-young GCs");

  // In case some edge-condition makes the desired max length too small...
  if (desired_max_length <= desired_min_length) {
    return desired_min_length;
  }

  // We'll adjust min_young_length and max_young_length not to include
  // the already allocated young regions (i.e., so they reflect the
  // min and max eden regions we'll allocate). The base_min_length
  // will be reflected in the predictions by the
  // survivor_regions_evac_time prediction.
  assert(desired_min_length > base_min_length, "invariant");
  size_t min_young_length = desired_min_length - base_min_length;
  assert(desired_max_length > base_min_length, "invariant");
  size_t max_young_length = desired_max_length - base_min_length;

  double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
  double survivor_regions_evac_time = predict_survivor_regions_evac_time();
  size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq);
  size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();
  size_t scanned_cards = predict_young_card_num(adj_rs_lengths);
  double base_time_ms =
    predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
    survivor_regions_evac_time;
  size_t available_free_regions = _free_regions_at_end_of_collection;
  size_t base_free_regions = 0;
  if (available_free_regions > _reserve_regions) {
    base_free_regions = available_free_regions - _reserve_regions;
  }

  // Here, we will make sure that the shortest young length that
  // makes sense fits within the target pause time.

  if (predict_will_fit(min_young_length, base_time_ms,
                       base_free_regions, target_pause_time_ms)) {
    // The shortest young length will fit into the target pause time;
    // we'll now check whether the absolute maximum number of young
    // regions will fit in the target pause time. If not, we'll do
    // a binary search between min_young_length and max_young_length.
    if (predict_will_fit(max_young_length, base_time_ms,
                         base_free_regions, target_pause_time_ms)) {
      // The maximum young length will fit into the target pause time.
      // We are done so set min young length to the maximum length (as
      // the result is assumed to be returned in min_young_length).
      min_young_length = max_young_length;
    } else {
      // The maximum possible number of young regions will not fit within
      // the target pause time so we'll search for the optimal
      // length. The loop invariants are:
      //
      // min_young_length < max_young_length
      // min_young_length is known to fit into the target pause time
      // max_young_length is known not to fit into the target pause time
      //
      // Going into the loop we know the above hold as we've just
      // checked them. Every time around the loop we check whether
      // the middle value between min_young_length and
      // max_young_length fits into the target pause time. If it
      // does, it becomes the new min. If it doesn't, it becomes
      // the new max. This way we maintain the loop invariants.
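      //
      // Illustrative trace: with min = 10 (fits) and max = 50 (does
      // not), diff starts at 20 and we probe 30; if 30 and then 40
      // fit but 45 does not, the bounds narrow to 40/45 and probing
      // continues until diff reaches 0.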
      assert(min_young_length < max_young_length, "invariant");
      size_t diff = (max_young_length - min_young_length) / 2;
      while (diff > 0) {
        size_t young_length = min_young_length + diff;
        if (predict_will_fit(young_length, base_time_ms,
                             base_free_regions, target_pause_time_ms)) {
          min_young_length = young_length;
        } else {
          max_young_length = young_length;
        }
        assert(min_young_length <  max_young_length, "invariant");
        diff = (max_young_length - min_young_length) / 2;
      }
      // The result is min_young_length which, according to the
      // loop invariants, should fit within the target pause time.

      // These are the post-conditions of the binary search above:
      assert(min_young_length < max_young_length,
             "otherwise we should have discovered that max_young_length "
             "fits into the pause target and not done the binary search");
      assert(predict_will_fit(min_young_length, base_time_ms,
                              base_free_regions, target_pause_time_ms),
             "min_young_length, the result of the binary search, should "
             "fit into the pause target");
      assert(!predict_will_fit(min_young_length + 1, base_time_ms,
                               base_free_regions, target_pause_time_ms),
             "min_young_length, the result of the binary search, should be "
             "optimal, so no larger length should fit into the pause target");
    }
  } else {
    // Even the minimum length doesn't fit into the pause time
    // target, return it as the result nevertheless.
  }
  return base_min_length + min_young_length;
}
double G1CollectorPolicy::predict_survivor_regions_evac_time() {
  double survivor_regions_evac_time = 0.0;
  for (HeapRegion * r = _recorded_survivor_head;
       r != NULL && r != _recorded_survivor_tail->get_next_young_region();
       r = r->get_next_young_region()) {
    survivor_regions_evac_time += predict_region_elapsed_time_ms(r, true);
  }
  return survivor_regions_evac_time;
}

void G1CollectorPolicy::revise_young_list_target_length_if_necessary() {
  guarantee( adaptive_young_list_length(), "should not call this otherwise" );

  size_t rs_lengths = _g1->young_list()->sampled_rs_lengths();
  if (rs_lengths > _rs_lengths_prediction) {
    // add 10% to avoid having to recalculate often
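    // (the "* 1100 / 1000" below scales by 1.1 using integer arithmetic)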
    size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
    update_young_list_target_length(rs_lengths_prediction);
  }
}

HeapWord* G1CollectorPolicy::mem_allocate_work(size_t size,
                                               bool is_tlab,
                                               bool* gc_overhead_limit_was_exceeded) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}

// This method controls how a collector handles one or more
// of its generations being fully allocated.
HeapWord* G1CollectorPolicy::satisfy_failed_allocation(size_t size,
                                                       bool is_tlab) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}
#ifndef PRODUCT
bool G1CollectorPolicy::verify_young_ages() {
  HeapRegion* head = _g1->young_list()->first_region();
  return
    verify_young_ages(head, _short_lived_surv_rate_group);
  // also call verify_young_ages on any additional surv rate groups
}

bool
G1CollectorPolicy::verify_young_ages(HeapRegion* head,
                                     SurvRateGroup *surv_rate_group) {
  guarantee( surv_rate_group != NULL, "pre-condition" );

  const char* name = surv_rate_group->name();
  bool ret = true;
  int prev_age = -1;

  for (HeapRegion* curr = head;
       curr != NULL;
       curr = curr->get_next_young_region()) {
    SurvRateGroup* group = curr->surv_rate_group();
    if (group == NULL && !curr->is_survivor()) {
      gclog_or_tty->print_cr("## %s: encountered NULL surv_rate_group", name);
      ret = false;
    }

    if (surv_rate_group == group) {
      int age = curr->age_in_surv_rate_group();

      if (age < 0) {
        gclog_or_tty->print_cr("## %s: encountered negative age", name);
        ret = false;
      }

      if (age <= prev_age) {
        gclog_or_tty->print_cr("## %s: region ages are not strictly increasing "
                               "(%d, %d)", name, age, prev_age);
        ret = false;
      }
      prev_age = age;
    }
  }

  return ret;
}
#endif // PRODUCT
void G1CollectorPolicy::record_full_collection_start() {
  _cur_collection_start_sec = os::elapsedTime();
  // Release the future to-space so that it is available for compaction into.
  _g1->set_full_collection();
}

void G1CollectorPolicy::record_full_collection_end() {
  // Consider this like a collection pause for the purposes of allocation
  // since last pause.
  double end_sec = os::elapsedTime();
  double full_gc_time_sec = end_sec - _cur_collection_start_sec;
  double full_gc_time_ms = full_gc_time_sec * 1000.0;

  _all_full_gc_times_ms->add(full_gc_time_ms);

  update_recent_gc_times(end_sec, full_gc_time_ms);

  _g1->clear_full_collection();

  // "Nuke" the heuristics that control the fully/partially young GC
  // transitions and make sure we start with fully young GCs after the
  // Full GC.
  set_full_young_gcs(true);
  _last_full_young_gc = false;
  _should_revert_to_full_young_gcs = false;
  clear_initiate_conc_mark_if_possible();
  clear_during_initial_mark_pause();
  _known_garbage_bytes = 0;
  _known_garbage_ratio = 0.0;
  _in_marking_window = false;
  _in_marking_window_im = false;

  _short_lived_surv_rate_group->start_adding_regions();
  // also call this on any additional surv rate groups

  record_survivor_regions(0, NULL, NULL);

  _prev_region_num_young   = _region_num_young;
  _prev_region_num_tenured = _region_num_tenured;

  _free_regions_at_end_of_collection = _g1->free_regions();
  // Reset survivors SurvRateGroup.
  _survivor_surv_rate_group->reset();
  update_young_list_target_length();
}
void G1CollectorPolicy::record_stop_world_start() {
  _stop_world_start = os::elapsedTime();
}

void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
                                                      size_t start_used) {
  if (PrintGCDetails) {
    gclog_or_tty->stamp(PrintGCTimeStamps);
    gclog_or_tty->print("[GC pause");
    gclog_or_tty->print(" (%s)", full_young_gcs() ? "young" : "partial");
  }

  // We only need to do this here as the policy will only be applied
  // to the GC we're about to start, so there is no point in calculating
  // this every time we calculate / recalculate the target young length.
  update_survivors_policy();

  assert(_g1->used() == _g1->recalculate_used(),
         err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT,
                 _g1->used(), _g1->recalculate_used()));

  double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
  _all_stop_world_times_ms->add(s_w_t_ms);
  _stop_world_start = 0.0;

  _cur_collection_start_sec = start_time_sec;
  _cur_collection_pause_used_at_start_bytes = start_used;
  _cur_collection_pause_used_regions_at_start = _g1->used_regions();
  _pending_cards = _g1->pending_card_num();
  _max_pending_cards = _g1->max_pending_card_num();

  _bytes_in_collection_set_before_gc = 0;
  _bytes_copied_during_gc = 0;

  YoungList* young_list = _g1->young_list();
  _eden_bytes_before_gc = young_list->eden_used_bytes();
  _survivor_bytes_before_gc = young_list->survivor_used_bytes();
  _capacity_before_gc = _g1->capacity();

#ifdef DEBUG
  // initialise these to something well known so that we can spot
  // if they are not set properly

  for (int i = 0; i < _parallel_gc_threads; ++i) {
    _par_last_gc_worker_start_times_ms[i] = -1234.0;
    _par_last_ext_root_scan_times_ms[i] = -1234.0;
    _par_last_mark_stack_scan_times_ms[i] = -1234.0;
    _par_last_update_rs_times_ms[i] = -1234.0;
    _par_last_update_rs_processed_buffers[i] = -1234.0;
    _par_last_scan_rs_times_ms[i] = -1234.0;
    _par_last_obj_copy_times_ms[i] = -1234.0;
    _par_last_termination_times_ms[i] = -1234.0;
    _par_last_termination_attempts[i] = -1234.0;
    _par_last_gc_worker_end_times_ms[i] = -1234.0;
    _par_last_gc_worker_times_ms[i] = -1234.0;
  }
#endif

  for (int i = 0; i < _aux_num; ++i) {
    _cur_aux_times_ms[i] = 0.0;
    _cur_aux_times_set[i] = false;
  }

  _satb_drain_time_set = false;
  _last_satb_drain_processed_buffers = -1;

  _last_young_gc_full = false;

  // do that for any other surv rate groups
  _short_lived_surv_rate_group->stop_adding_regions();
  _survivors_age_table.clear();

  assert( verify_young_ages(), "region age verification" );
}
void G1CollectorPolicy::record_mark_closure_time(double mark_closure_time_ms) {
  _mark_closure_time_ms = mark_closure_time_ms;
}

void G1CollectorPolicy::record_concurrent_mark_init_end(double
                                                   mark_init_elapsed_time_ms) {
  _during_marking = true;
  assert(!initiate_conc_mark_if_possible(), "we should have cleared it by now");
  clear_during_initial_mark_pause();
  _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
}

void G1CollectorPolicy::record_concurrent_mark_remark_start() {
  _mark_remark_start_sec = os::elapsedTime();
  _during_marking = false;
}

void G1CollectorPolicy::record_concurrent_mark_remark_end() {
  double end_time_sec = os::elapsedTime();
  double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
  _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
  _cur_mark_stop_world_time_ms += elapsed_time_ms;
  _prev_collection_pause_end_ms += elapsed_time_ms;

  _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec, true);
}

void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
  _mark_cleanup_start_sec = os::elapsedTime();
}

void
G1CollectorPolicy::record_concurrent_mark_cleanup_end(size_t freed_bytes,
                                                      size_t max_live_bytes) {
  record_concurrent_mark_cleanup_end_work1(freed_bytes, max_live_bytes);
  record_concurrent_mark_cleanup_end_work2();
}

void
G1CollectorPolicy::
record_concurrent_mark_cleanup_end_work1(size_t freed_bytes,
                                         size_t max_live_bytes) {
  if (_n_marks < 2) {
    _n_marks++;
  }
}

// The important thing about this is that it includes "os::elapsedTime".
void G1CollectorPolicy::record_concurrent_mark_cleanup_end_work2() {
  double end_time_sec = os::elapsedTime();
  double elapsed_time_ms = (end_time_sec - _mark_cleanup_start_sec)*1000.0;
  _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
  _cur_mark_stop_world_time_ms += elapsed_time_ms;
  _prev_collection_pause_end_ms += elapsed_time_ms;

  _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_time_sec, true);

  _num_markings++;
  _n_pauses_at_mark_end = _n_pauses;
  _n_marks_since_last_pause++;
}

void
G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
  _should_revert_to_full_young_gcs = false;
  _last_full_young_gc = true;
  _in_marking_window = false;
}

void G1CollectorPolicy::record_concurrent_pause() {
  if (_stop_world_start > 0.0) {
    double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
    _all_yield_times_ms->add(yield_ms);
  }
}

void G1CollectorPolicy::record_concurrent_pause_end() {
}

template<class T>
T sum_of(T* sum_arr, int start, int n, int N) {
  T sum = (T)0;
  for (int i = 0; i < n; i++) {
    int j = (start + i) % N;
    sum += sum_arr[j];
  }
  return sum;
}
void G1CollectorPolicy::print_par_stats(int level,
                                        const char* str,
                                        double* data) {
  double min = data[0], max = data[0];
  double total = 0.0;
  LineBuffer buf(level);
  buf.append("[%s (ms):", str);
  for (uint i = 0; i < ParallelGCThreads; ++i) {
    double val = data[i];
    if (val < min)
      min = val;
    if (val > max)
      max = val;
    total += val;
    buf.append("  %3.1lf", val);
  }
  buf.append_and_print_cr("");
  double avg = total / (double) ParallelGCThreads;
  buf.append_and_print_cr(" Avg: %5.1lf, Min: %5.1lf, Max: %5.1lf, Diff: %5.1lf]",
    avg, min, max, max - min);
}

void G1CollectorPolicy::print_par_sizes(int level,
                                        const char* str,
                                        double* data) {
  double min = data[0], max = data[0];
  double total = 0.0;
  LineBuffer buf(level);
  buf.append("[%s :", str);
  for (uint i = 0; i < ParallelGCThreads; ++i) {
    double val = data[i];
    if (val < min)
      min = val;
    if (val > max)
      max = val;
    total += val;
    buf.append(" %d", (int) val);
  }
  buf.append_and_print_cr("");
  double avg = total / (double) ParallelGCThreads;
  buf.append_and_print_cr(" Sum: %d, Avg: %d, Min: %d, Max: %d, Diff: %d]",
    (int)total, (int)avg, (int)min, (int)max, (int)max - (int)min);
}

void G1CollectorPolicy::print_stats (int level,
                                     const char* str,
                                     double value) {
  LineBuffer(level).append_and_print_cr("[%s: %5.1lf ms]", str, value);
}

void G1CollectorPolicy::print_stats (int level,
                                     const char* str,
                                     int value) {
  LineBuffer(level).append_and_print_cr("[%s: %d]", str, value);
}

double G1CollectorPolicy::avg_value (double* data) {
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    double ret = 0.0;
    for (uint i = 0; i < ParallelGCThreads; ++i)
      ret += data[i];
    return ret / (double) ParallelGCThreads;
  } else {
    return data[0];
  }
}

double G1CollectorPolicy::max_value (double* data) {
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    double ret = data[0];
    for (uint i = 1; i < ParallelGCThreads; ++i)
      if (data[i] > ret)
        ret = data[i];
    return ret;
  } else {
    return data[0];
  }
}

double G1CollectorPolicy::sum_of_values (double* data) {
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    double sum = 0.0;
    for (uint i = 0; i < ParallelGCThreads; i++)
      sum += data[i];
    return sum;
  } else {
    return data[0];
  }
}

double G1CollectorPolicy::max_sum (double* data1,
                                   double* data2) {
  double ret = data1[0] + data2[0];

  if (G1CollectedHeap::use_parallel_gc_threads()) {
    for (uint i = 1; i < ParallelGCThreads; ++i) {
      double data = data1[i] + data2[i];
      if (data > ret)
        ret = data;
    }
  }
  return ret;
}

// Anything below that is considered to be zero
#define MIN_TIMER_GRANULARITY 0.0000001
  1197 void G1CollectorPolicy::record_collection_pause_end() {
  1198   double end_time_sec = os::elapsedTime();
  1199   double elapsed_ms = _last_pause_time_ms;
  1200   bool parallel = G1CollectedHeap::use_parallel_gc_threads();
  1201   size_t rs_size =
  1202     _cur_collection_pause_used_regions_at_start - collection_set_size();
  1203   size_t cur_used_bytes = _g1->used();
  1204   assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
  1205   bool last_pause_included_initial_mark = false;
  1206   bool update_stats = !_g1->evacuation_failed();
  1208 #ifndef PRODUCT
  1209   if (G1YoungSurvRateVerbose) {
  1210     gclog_or_tty->print_cr("");
  1211     _short_lived_surv_rate_group->print();
  1212     // do that for any other surv rate groups too
  1214 #endif // PRODUCT
  1216   last_pause_included_initial_mark = during_initial_mark_pause();
  1217   if (last_pause_included_initial_mark)
  1218     record_concurrent_mark_init_end(0.0);
  1220   size_t marking_initiating_used_threshold =
  1221     (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
  1223   if (!_g1->mark_in_progress() && !_last_full_young_gc) {
  1224     assert(!last_pause_included_initial_mark, "invariant");
  1225     if (cur_used_bytes > marking_initiating_used_threshold) {
  1226       if (cur_used_bytes > _prev_collection_pause_used_at_end_bytes) {
  1227         assert(!during_initial_mark_pause(), "we should not see this here");
  1229         ergo_verbose3(ErgoConcCycles,
  1230                       "request concurrent cycle initiation",
  1231                       ergo_format_reason("occupancy higher than threshold")
  1232                       ergo_format_byte("occupancy")
  1233                       ergo_format_byte_perc("threshold"),
  1234                       cur_used_bytes,
  1235                       marking_initiating_used_threshold,
  1236                       (double) InitiatingHeapOccupancyPercent);
  1238         // Note: this might have already been set, if during the last
  1239         // pause we decided to start a cycle but at the beginning of
  1240         // this pause we decided to postpone it. That's OK.
  1241         set_initiate_conc_mark_if_possible();
  1242       } else {
  1243         ergo_verbose2(ErgoConcCycles,
  1244                   "do not request concurrent cycle initiation",
  1245                   ergo_format_reason("occupancy lower than previous occupancy")
  1246                   ergo_format_byte("occupancy")
  1247                   ergo_format_byte("previous occupancy"),
  1248                   cur_used_bytes,
  1249                   _prev_collection_pause_used_at_end_bytes);
  1254   _prev_collection_pause_used_at_end_bytes = cur_used_bytes;
  1256   _mmu_tracker->add_pause(end_time_sec - elapsed_ms/1000.0,
  1257                           end_time_sec, false);
  1259   guarantee(_cur_collection_pause_used_regions_at_start >=
  1260             collection_set_size(),
  1261             "Negative RS size?");
  1263   // This assert is exempted when we're doing parallel collection pauses,
  1264   // because the fragmentation caused by the parallel GC allocation buffers
  1265   // can lead to more memory being used during collection than was used
  1266   // before. Best leave this out until the fragmentation problem is fixed.
  1267   // Pauses in which evacuation failed can also lead to negative
  1268   // collections, since no space is reclaimed from a region containing an
  1269   // object whose evacuation failed.
  1270   // Further, we're now always doing parallel collection.  But I'm still
  1271   // leaving this here as a placeholder for a more precise assertion later.
  1272   // (DLD, 10/05.)
  1273   assert((true || parallel) // Always using GC LABs now.
  1274          || _g1->evacuation_failed()
  1275          || _cur_collection_pause_used_at_start_bytes >= cur_used_bytes,
  1276          "Negative collection");
  1278   size_t freed_bytes =
  1279     _cur_collection_pause_used_at_start_bytes - cur_used_bytes;
  1280   size_t surviving_bytes = _collection_set_bytes_used_before - freed_bytes;
  1282   double survival_fraction =
  1283     (double)surviving_bytes/
  1284     (double)_collection_set_bytes_used_before;
  1286   _n_pauses++;
  1288   double ext_root_scan_time = avg_value(_par_last_ext_root_scan_times_ms);
  1289   double mark_stack_scan_time = avg_value(_par_last_mark_stack_scan_times_ms);
  1290   double update_rs_time = avg_value(_par_last_update_rs_times_ms);
  1291   double update_rs_processed_buffers =
  1292     sum_of_values(_par_last_update_rs_processed_buffers);
  1293   double scan_rs_time = avg_value(_par_last_scan_rs_times_ms);
  1294   double obj_copy_time = avg_value(_par_last_obj_copy_times_ms);
  1295   double termination_time = avg_value(_par_last_termination_times_ms);
  1297   double parallel_known_time = update_rs_time +
  1298                                ext_root_scan_time +
  1299                                mark_stack_scan_time +
  1300                                scan_rs_time +
  1301                                obj_copy_time +
  1302                                termination_time;
  1304   double parallel_other_time = _cur_collection_par_time_ms - parallel_known_time;
  1306   PauseSummary* summary = _summary;
  1308   if (update_stats) {
  1309     _recent_rs_scan_times_ms->add(scan_rs_time);
  1310     _recent_pause_times_ms->add(elapsed_ms);
  1311     _recent_rs_sizes->add(rs_size);
  1313     MainBodySummary* body_summary = summary->main_body_summary();
  1314     guarantee(body_summary != NULL, "should not be null!");
  1316     if (_satb_drain_time_set)
  1317       body_summary->record_satb_drain_time_ms(_cur_satb_drain_time_ms);
  1318     else
  1319       body_summary->record_satb_drain_time_ms(0.0);
  1321     body_summary->record_ext_root_scan_time_ms(ext_root_scan_time);
  1322     body_summary->record_mark_stack_scan_time_ms(mark_stack_scan_time);
  1323     body_summary->record_update_rs_time_ms(update_rs_time);
  1324     body_summary->record_scan_rs_time_ms(scan_rs_time);
  1325     body_summary->record_obj_copy_time_ms(obj_copy_time);
  1326     if (parallel) {
  1327       body_summary->record_parallel_time_ms(_cur_collection_par_time_ms);
  1328       body_summary->record_clear_ct_time_ms(_cur_clear_ct_time_ms);
  1329       body_summary->record_termination_time_ms(termination_time);
  1330       body_summary->record_parallel_other_time_ms(parallel_other_time);
  1332     body_summary->record_mark_closure_time_ms(_mark_closure_time_ms);
  1334     // We exempt parallel collection from this check because Alloc Buffer
  1335     // fragmentation can produce negative collections.  Same with evac
  1336     // failure.
  1337     // Further, we're now always doing parallel collection.  But I'm still
  1338     // leaving this here as a placeholder for a more precise assertion later.
  1339     // (DLD, 10/05.)
  1340     assert((true || parallel)
  1341            || _g1->evacuation_failed()
  1342            || surviving_bytes <= _collection_set_bytes_used_before,
  1343            "Or else negative collection!");
  1344     _recent_CS_bytes_used_before->add(_collection_set_bytes_used_before);
  1345     _recent_CS_bytes_surviving->add(surviving_bytes);
  1347     // this is where we update the allocation rate of the application
  1348     double app_time_ms =
  1349       (_cur_collection_start_sec * 1000.0 - _prev_collection_pause_end_ms);
  1350     if (app_time_ms < MIN_TIMER_GRANULARITY) {
  1351       // This usually happens due to the timer not having the required
  1352       // granularity. Some Linuxes are the usual culprits.
  1353       // We'll just set it to something (arbitrarily) small.
  1354       app_time_ms = 1.0;
  1356     size_t regions_allocated =
  1357       (_region_num_young - _prev_region_num_young) +
  1358       (_region_num_tenured - _prev_region_num_tenured);
  1359     double alloc_rate_ms = (double) regions_allocated / app_time_ms;
  1360     _alloc_rate_ms_seq->add(alloc_rate_ms);
  1361     _prev_region_num_young   = _region_num_young;
  1362     _prev_region_num_tenured = _region_num_tenured;
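             // Illustrative example (assumed numbers, not from a real run): if the
             // mutators ran for app_time_ms = 1000.0 between pauses and allocated
             // 50 regions in that window, the sampled rate is 50 / 1000.0 = 0.05
             // regions/ms, which is what _alloc_rate_ms_seq accumulates.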
  1364     double interval_ms =
  1365       (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;
  1366     update_recent_gc_times(end_time_sec, elapsed_ms);
  1367     _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms;
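             // For illustration (assumed numbers): if the recent GC times sum to
             // 500.0 ms and the interval back to the oldest recorded GC end time
             // is 10000.0 ms, the ratio is 500.0 / 10000.0 = 0.05, i.e. about 5%
             // of recent wall-clock time was spent in GC pauses.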
  1368     if (recent_avg_pause_time_ratio() < 0.0 ||
  1369         (recent_avg_pause_time_ratio() - 1.0 > 0.0)) {
  1370 #ifndef PRODUCT
  1371       // Dump info to allow post-facto debugging
  1372       gclog_or_tty->print_cr("recent_avg_pause_time_ratio() out of bounds");
  1373       gclog_or_tty->print_cr("-------------------------------------------");
  1374       gclog_or_tty->print_cr("Recent GC Times (ms):");
  1375       _recent_gc_times_ms->dump();
  1376       gclog_or_tty->print_cr("(End Time=%3.3f) Recent GC End Times (s):", end_time_sec);
  1377       _recent_prev_end_times_for_all_gcs_sec->dump();
  1378       gclog_or_tty->print_cr("GC = %3.3f, Interval = %3.3f, Ratio = %3.3f",
  1379                              _recent_gc_times_ms->sum(), interval_ms, recent_avg_pause_time_ratio());
  1380       // In debug mode, terminate the JVM if the user wants to debug at this point.
  1381       assert(!G1FailOnFPError, "Debugging data for CR 6898948 has been dumped above");
  1382 #endif  // !PRODUCT
  1383       // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
  1384       // CR 6902692 by redoing the manner in which the ratio is incrementally computed.
  1385       if (_recent_avg_pause_time_ratio < 0.0) {
  1386         _recent_avg_pause_time_ratio = 0.0;
  1387       } else {
  1388         assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
  1389         _recent_avg_pause_time_ratio = 1.0;
  1394   if (G1PolicyVerbose > 1) {
  1395     gclog_or_tty->print_cr("   Recording collection pause(%d)", _n_pauses);
  1398   if (G1PolicyVerbose > 1) {
  1399     gclog_or_tty->print_cr("      ET: %10.6f ms           (avg: %10.6f ms)\n"
  1400                            "       ET-RS:  %10.6f ms      (avg: %10.6f ms)\n"
  1401                            "      |RS|: " SIZE_FORMAT,
  1402                            elapsed_ms, recent_avg_time_for_pauses_ms(),
  1403                            scan_rs_time, recent_avg_time_for_rs_scan_ms(),
  1404                            rs_size);
  1406     gclog_or_tty->print_cr("       Used at start: " SIZE_FORMAT"K"
  1407                            "       At end " SIZE_FORMAT "K\n"
  1408                            "       garbage      : " SIZE_FORMAT "K"
  1409                            "       of     " SIZE_FORMAT "K\n"
  1410                            "       survival     : %6.2f%%  (%6.2f%% avg)",
  1411                            _cur_collection_pause_used_at_start_bytes/K,
  1412                            _g1->used()/K, freed_bytes/K,
  1413                            _collection_set_bytes_used_before/K,
  1414                            survival_fraction*100.0,
  1415                            recent_avg_survival_fraction()*100.0);
  1416     gclog_or_tty->print_cr("       Recent %% gc pause time: %6.2f",
  1417                            recent_avg_pause_time_ratio() * 100.0);
  1420   double other_time_ms = elapsed_ms;
  1422   if (_satb_drain_time_set) {
  1423     other_time_ms -= _cur_satb_drain_time_ms;
  1426   if (parallel) {
  1427     other_time_ms -= _cur_collection_par_time_ms + _cur_clear_ct_time_ms;
  1428   } else {
  1429     other_time_ms -=
  1430       update_rs_time +
  1431       ext_root_scan_time + mark_stack_scan_time +
  1432       scan_rs_time + obj_copy_time;
  1435   if (PrintGCDetails) {
  1436     gclog_or_tty->print_cr("%s, %1.8lf secs]",
  1437                            (last_pause_included_initial_mark) ? " (initial-mark)" : "",
  1438                            elapsed_ms / 1000.0);
  1440     if (_satb_drain_time_set) {
  1441       print_stats(1, "SATB Drain Time", _cur_satb_drain_time_ms);
  1443     if (_last_satb_drain_processed_buffers >= 0) {
  1444       print_stats(2, "Processed Buffers", _last_satb_drain_processed_buffers);
  1446     if (parallel) {
  1447       print_stats(1, "Parallel Time", _cur_collection_par_time_ms);
  1448       print_par_stats(2, "GC Worker Start Time", _par_last_gc_worker_start_times_ms);
  1449       print_par_stats(2, "Update RS", _par_last_update_rs_times_ms);
  1450       print_par_sizes(3, "Processed Buffers", _par_last_update_rs_processed_buffers);
  1451       print_par_stats(2, "Ext Root Scanning", _par_last_ext_root_scan_times_ms);
  1452       print_par_stats(2, "Mark Stack Scanning", _par_last_mark_stack_scan_times_ms);
  1453       print_par_stats(2, "Scan RS", _par_last_scan_rs_times_ms);
  1454       print_par_stats(2, "Object Copy", _par_last_obj_copy_times_ms);
  1455       print_par_stats(2, "Termination", _par_last_termination_times_ms);
  1456       print_par_sizes(3, "Termination Attempts", _par_last_termination_attempts);
  1457       print_par_stats(2, "GC Worker End Time", _par_last_gc_worker_end_times_ms);
  1459       for (int i = 0; i < _parallel_gc_threads; i++) {
  1460         _par_last_gc_worker_times_ms[i] = _par_last_gc_worker_end_times_ms[i] - _par_last_gc_worker_start_times_ms[i];
  1462       print_par_stats(2, "GC Worker Times", _par_last_gc_worker_times_ms);
  1464       print_stats(2, "Parallel Other", parallel_other_time);
  1465       print_stats(1, "Clear CT", _cur_clear_ct_time_ms);
  1466     } else {
  1467       print_stats(1, "Update RS", update_rs_time);
  1468       print_stats(2, "Processed Buffers",
  1469                   (int)update_rs_processed_buffers);
  1470       print_stats(1, "Ext Root Scanning", ext_root_scan_time);
  1471       print_stats(1, "Mark Stack Scanning", mark_stack_scan_time);
  1472       print_stats(1, "Scan RS", scan_rs_time);
  1473       print_stats(1, "Object Copying", obj_copy_time);
  1475 #ifndef PRODUCT
  1476     print_stats(1, "Cur Clear CC", _cur_clear_cc_time_ms);
  1477     print_stats(1, "Cum Clear CC", _cum_clear_cc_time_ms);
  1478     print_stats(1, "Min Clear CC", _min_clear_cc_time_ms);
  1479     print_stats(1, "Max Clear CC", _max_clear_cc_time_ms);
  1480     if (_num_cc_clears > 0) {
  1481       print_stats(1, "Avg Clear CC", _cum_clear_cc_time_ms / ((double)_num_cc_clears));
  1483 #endif
  1484     print_stats(1, "Other", other_time_ms);
  1485     print_stats(2, "Choose CSet", _recorded_young_cset_choice_time_ms);
  1486     print_stats(2, "Ref Proc", _cur_ref_proc_time_ms);
  1487     print_stats(2, "Ref Enq", _cur_ref_enq_time_ms);
  1489     for (int i = 0; i < _aux_num; ++i) {
  1490       if (_cur_aux_times_set[i]) {
  1491         char buffer[96];
  1492         sprintf(buffer, "Aux%d", i);
  1493         print_stats(1, buffer, _cur_aux_times_ms[i]);
  1498   _all_pause_times_ms->add(elapsed_ms);
  1499   if (update_stats) {
  1500     summary->record_total_time_ms(elapsed_ms);
  1501     summary->record_other_time_ms(other_time_ms);
  1503   for (int i = 0; i < _aux_num; ++i)
  1504     if (_cur_aux_times_set[i])
  1505       _all_aux_times_ms[i].add(_cur_aux_times_ms[i]);
  1507   // Reset marks-between-pauses counter.
  1508   _n_marks_since_last_pause = 0;
  1510   // Update the efficiency-since-mark vars.
  1511   double proc_ms = elapsed_ms * (double) _parallel_gc_threads;
  1512   if (elapsed_ms < MIN_TIMER_GRANULARITY) {
  1513     // This usually happens due to the timer not having the required
  1514     // granularity. Some Linuxes are the usual culprits.
  1515     // We'll just set it to something (arbitrarily) small.
  1516     proc_ms = 1.0;
  1518   double cur_efficiency = (double) freed_bytes / proc_ms;
  1520   bool new_in_marking_window = _in_marking_window;
  1521   bool new_in_marking_window_im = false;
  1522   if (during_initial_mark_pause()) {
  1523     new_in_marking_window = true;
  1524     new_in_marking_window_im = true;
  1527   if (_last_full_young_gc) {
  1528     ergo_verbose2(ErgoPartiallyYoungGCs,
  1529                   "start partially-young GCs",
  1530                   ergo_format_byte_perc("known garbage"),
  1531                   _known_garbage_bytes, _known_garbage_ratio * 100.0);
  1532     set_full_young_gcs(false);
  1533     _last_full_young_gc = false;
  1536   if (!_last_young_gc_full) {
  1537     if (_should_revert_to_full_young_gcs) {
  1538       ergo_verbose2(ErgoPartiallyYoungGCs,
  1539                     "end partially-young GCs",
  1540                     ergo_format_reason("partially-young GCs end requested")
  1541                     ergo_format_byte_perc("known garbage"),
  1542                     _known_garbage_bytes, _known_garbage_ratio * 100.0);
  1543       set_full_young_gcs(true);
  1544     } else if (_known_garbage_ratio < 0.05) {
  1545       ergo_verbose3(ErgoPartiallyYoungGCs,
  1546                "end partially-young GCs",
  1547                ergo_format_reason("known garbage percent lower than threshold")
  1548                ergo_format_byte_perc("known garbage")
  1549                ergo_format_perc("threshold"),
  1550                _known_garbage_bytes, _known_garbage_ratio * 100.0,
  1551                0.05 * 100.0);
  1552       set_full_young_gcs(true);
  1553     } else if (adaptive_young_list_length() &&
  1554               (get_gc_eff_factor() * cur_efficiency < predict_young_gc_eff())) {
  1555       ergo_verbose5(ErgoPartiallyYoungGCs,
  1556                     "end partially-young GCs",
  1557                     ergo_format_reason("current GC efficiency lower than "
  1558                                        "predicted fully-young GC efficiency")
  1559                     ergo_format_double("GC efficiency factor")
  1560                     ergo_format_double("current GC efficiency")
  1561                     ergo_format_double("predicted fully-young GC efficiency")
  1562                     ergo_format_byte_perc("known garbage"),
  1563                     get_gc_eff_factor(), cur_efficiency,
  1564                     predict_young_gc_eff(),
  1565                     _known_garbage_bytes, _known_garbage_ratio * 100.0);
  1566       set_full_young_gcs(true);
  1569   _should_revert_to_full_young_gcs = false;
  1571   if (_last_young_gc_full && !_during_marking) {
  1572     _young_gc_eff_seq->add(cur_efficiency);
  1575   _short_lived_surv_rate_group->start_adding_regions();
  1576   // do that for any other surv rate groups
  1578   // <NEW PREDICTION>
  1580   if (update_stats) {
  1581     double pause_time_ms = elapsed_ms;
  1583     size_t diff = 0;
  1584     if (_max_pending_cards >= _pending_cards)
  1585       diff = _max_pending_cards - _pending_cards;
  1586     _pending_card_diff_seq->add((double) diff);
  1588     double cost_per_card_ms = 0.0;
  1589     if (_pending_cards > 0) {
  1590       cost_per_card_ms = update_rs_time / (double) _pending_cards;
  1591       _cost_per_card_ms_seq->add(cost_per_card_ms);
  1594     size_t cards_scanned = _g1->cards_scanned();
  1596     double cost_per_entry_ms = 0.0;
  1597     if (cards_scanned > 10) {
  1598       cost_per_entry_ms = scan_rs_time / (double) cards_scanned;
  1599       if (_last_young_gc_full)
  1600         _cost_per_entry_ms_seq->add(cost_per_entry_ms);
  1601       else
  1602         _partially_young_cost_per_entry_ms_seq->add(cost_per_entry_ms);
  1605     if (_max_rs_lengths > 0) {
  1606       double cards_per_entry_ratio =
  1607         (double) cards_scanned / (double) _max_rs_lengths;
  1608       if (_last_young_gc_full)
  1609         _fully_young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
  1610       else
  1611         _partially_young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
  1614     // Guard the subtraction: both values are size_t, so the old
  1615     // "diff >= 0" test after subtracting was vacuous and could underflow.
  1616     if (_max_rs_lengths >= _recorded_rs_lengths)
             _rs_length_diff_seq->add((double) (_max_rs_lengths - _recorded_rs_lengths));
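             // Worked example (assumed numbers): update_rs_time = 5.0 ms over
             // _pending_cards = 2500 gives 0.002 ms/card; scan_rs_time = 8.0 ms
             // over cards_scanned = 4000 gives 0.002 ms/entry; and with
             // _max_rs_lengths = 2000 the cards/entry ratio is 4000 / 2000 = 2.0.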
  1618     size_t copied_bytes = surviving_bytes;
  1619     double cost_per_byte_ms = 0.0;
  1620     if (copied_bytes > 0) {
  1621       cost_per_byte_ms = obj_copy_time / (double) copied_bytes;
  1622       if (_in_marking_window)
  1623         _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
  1624       else
  1625         _cost_per_byte_ms_seq->add(cost_per_byte_ms);
  1628     double all_other_time_ms = pause_time_ms -
  1629       (update_rs_time + scan_rs_time + obj_copy_time +
  1630        _mark_closure_time_ms + termination_time);
  1632     double young_other_time_ms = 0.0;
  1633     if (_recorded_young_regions > 0) {
  1634       young_other_time_ms =
  1635         _recorded_young_cset_choice_time_ms +
  1636         _recorded_young_free_cset_time_ms;
  1637       _young_other_cost_per_region_ms_seq->add(young_other_time_ms /
  1638                                              (double) _recorded_young_regions);
  1640     double non_young_other_time_ms = 0.0;
  1641     if (_recorded_non_young_regions > 0) {
  1642       non_young_other_time_ms =
  1643         _recorded_non_young_cset_choice_time_ms +
  1644         _recorded_non_young_free_cset_time_ms;
  1646       _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms /
  1647                                          (double) _recorded_non_young_regions);
  1650     double constant_other_time_ms = all_other_time_ms -
  1651       (young_other_time_ms + non_young_other_time_ms);
  1652     _constant_other_time_ms_seq->add(constant_other_time_ms);
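             // Informally, the decomposition recorded above is:
             //   constant_other = (pause - known phases) - young_other - non_young_other
             // e.g. (assumed numbers) a 100 ms pause with 80 ms of known phases and
             // 5 ms + 3 ms of young/non-young "other" leaves 12 ms of constant other.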
  1654     double survival_ratio = 0.0;
  1655     if (_bytes_in_collection_set_before_gc > 0) {
  1656       survival_ratio = (double) _bytes_copied_during_gc /
  1657                                    (double) _bytes_in_collection_set_before_gc;
  1660     _pending_cards_seq->add((double) _pending_cards);
  1661     _scanned_cards_seq->add((double) cards_scanned);
  1662     _rs_lengths_seq->add((double) _max_rs_lengths);
  1664     double expensive_region_limit_ms =
  1665       (double) MaxGCPauseMillis - predict_constant_other_time_ms();
  1666     if (expensive_region_limit_ms < 0.0) {
  1667       // this means that the other time was predicted to be longer
  1668       // than the max pause time
  1669       expensive_region_limit_ms = (double) MaxGCPauseMillis;
  1671     _expensive_region_limit_ms = expensive_region_limit_ms;
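             // Example (assumed numbers): with MaxGCPauseMillis = 200 and a
             // predicted constant other time of 50.0 ms, any region whose
             // predicted time exceeds 150.0 ms will be deemed too expensive by
             // check_if_region_is_too_expensive() below.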
  1673     if (PREDICTIONS_VERBOSE) {
  1674       gclog_or_tty->print_cr("");
  1675       gclog_or_tty->print_cr("PREDICTIONS %1.4lf %d "
  1676                     "REGIONS %d %d %d "
  1677                     "PENDING_CARDS %d %d "
  1678                     "CARDS_SCANNED %d %d "
  1679                     "RS_LENGTHS %d %d "
  1680                     "RS_UPDATE %1.6lf %1.6lf RS_SCAN %1.6lf %1.6lf "
  1681                     "SURVIVAL_RATIO %1.6lf %1.6lf "
  1682                     "OBJECT_COPY %1.6lf %1.6lf OTHER_CONSTANT %1.6lf %1.6lf "
  1683                     "OTHER_YOUNG %1.6lf %1.6lf "
  1684                     "OTHER_NON_YOUNG %1.6lf %1.6lf "
  1685                     "VTIME_DIFF %1.6lf TERMINATION %1.6lf "
  1686                     "ELAPSED %1.6lf %1.6lf ",
  1687                     _cur_collection_start_sec,
  1688                     (!_last_young_gc_full) ? 2 :
  1689                     (last_pause_included_initial_mark) ? 1 : 0,
  1690                     _recorded_region_num,
  1691                     _recorded_young_regions,
  1692                     _recorded_non_young_regions,
  1693                     _predicted_pending_cards, _pending_cards,
  1694                     _predicted_cards_scanned, cards_scanned,
  1695                     _predicted_rs_lengths, _max_rs_lengths,
  1696                     _predicted_rs_update_time_ms, update_rs_time,
  1697                     _predicted_rs_scan_time_ms, scan_rs_time,
  1698                     _predicted_survival_ratio, survival_ratio,
  1699                     _predicted_object_copy_time_ms, obj_copy_time,
  1700                     _predicted_constant_other_time_ms, constant_other_time_ms,
  1701                     _predicted_young_other_time_ms, young_other_time_ms,
  1702                     _predicted_non_young_other_time_ms,
  1703                     non_young_other_time_ms,
  1704                     _vtime_diff_ms, termination_time,
  1705                     _predicted_pause_time_ms, elapsed_ms);
  1708     if (G1PolicyVerbose > 0) {
  1709       gclog_or_tty->print_cr("Pause Time, predicted: %1.4lfms (predicted %s), actual: %1.4lfms",
  1710                     _predicted_pause_time_ms,
  1711                     (_within_target) ? "within" : "outside",
  1712                     elapsed_ms);
  1717   _in_marking_window = new_in_marking_window;
  1718   _in_marking_window_im = new_in_marking_window_im;
  1719   _free_regions_at_end_of_collection = _g1->free_regions();
  1720   update_young_list_target_length();
  1722   // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
  1723   double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
  1724   adjust_concurrent_refinement(update_rs_time, update_rs_processed_buffers, update_rs_time_goal_ms);
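           // Example (assumed numbers): a 200 ms max GC time from the MMU tracker
           // and the default G1RSetUpdatingPauseTimePercent = 10 give a goal of
           // 0.2 * 1000 * 10 / 100 = 20 ms for the update RS phase.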
  1725   // </NEW PREDICTION>
  1728 #define EXT_SIZE_FORMAT "%d%s"
  1729 #define EXT_SIZE_PARAMS(bytes)                                  \
  1730   byte_size_in_proper_unit((bytes)),                            \
  1731   proper_unit_for_byte_size((bytes))
  1733 void G1CollectorPolicy::print_heap_transition() {
  1734   if (PrintGCDetails) {
  1735     YoungList* young_list = _g1->young_list();
  1736     size_t eden_bytes = young_list->eden_used_bytes();
  1737     size_t survivor_bytes = young_list->survivor_used_bytes();
  1738     size_t used_before_gc = _cur_collection_pause_used_at_start_bytes;
  1739     size_t used = _g1->used();
  1740     size_t capacity = _g1->capacity();
  1741     size_t eden_capacity =
  1742       (_young_list_target_length * HeapRegion::GrainBytes) - survivor_bytes;
  1744     gclog_or_tty->print_cr(
  1745       "   [Eden: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT") "
  1746       "Survivors: "EXT_SIZE_FORMAT"->"EXT_SIZE_FORMAT" "
  1747       "Heap: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"
  1748       EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")]",
  1749       EXT_SIZE_PARAMS(_eden_bytes_before_gc),
  1750       EXT_SIZE_PARAMS(_prev_eden_capacity),
  1751       EXT_SIZE_PARAMS(eden_bytes),
  1752       EXT_SIZE_PARAMS(eden_capacity),
  1753       EXT_SIZE_PARAMS(_survivor_bytes_before_gc),
  1754       EXT_SIZE_PARAMS(survivor_bytes),
  1755       EXT_SIZE_PARAMS(used_before_gc),
  1756       EXT_SIZE_PARAMS(_capacity_before_gc),
  1757       EXT_SIZE_PARAMS(used),
  1758       EXT_SIZE_PARAMS(capacity));
  1760     _prev_eden_capacity = eden_capacity;
  1761   } else if (PrintGC) {
  1762     _g1->print_size_transition(gclog_or_tty,
  1763                                _cur_collection_pause_used_at_start_bytes,
  1764                                _g1->used(), _g1->capacity());
  1768 // <NEW PREDICTION>
  1770 void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
  1771                                                      double update_rs_processed_buffers,
  1772                                                      double goal_ms) {
  1773   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  1774   ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();
  1776   if (G1UseAdaptiveConcRefinement) {
  1777     const int k_gy = 3, k_gr = 6;
  1778     const double inc_k = 1.1, dec_k = 0.9;
  1780     int g = cg1r->green_zone();
  1781     if (update_rs_time > goal_ms) {
  1782       g = (int)(g * dec_k);  // Can become 0; that's OK, it would mean mutator-only processing.
  1783     } else {
  1784       if (update_rs_time < goal_ms && update_rs_processed_buffers > g) {
  1785         g = (int)MAX2(g * inc_k, g + 1.0);
  1788     // Change the refinement threads params
  1789     cg1r->set_green_zone(g);
  1790     cg1r->set_yellow_zone(g * k_gy);
  1791     cg1r->set_red_zone(g * k_gr);
  1792     cg1r->reinitialize_threads();
  1794     int processing_threshold_delta = MAX2((int)(cg1r->green_zone() * sigma()), 1);
  1795     int processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta,
  1796                                     cg1r->yellow_zone());
  1797     // Change the barrier params
  1798     dcqs.set_process_completed_threshold(processing_threshold);
  1799     dcqs.set_max_completed_queue(cg1r->red_zone());
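             // Illustrative zone arithmetic (assumed numbers):
             //   int g = 100;            // current green zone
             //   g = (int)(g * dec_k);   // update_rs_time > goal_ms: g == 90
             //   // yellow = 3 * 90 == 270, red = 6 * 90 == 540, and the barrier's
             //   // threshold is MIN2(90 + MAX2((int)(90 * sigma()), 1), 270).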
  1802   int curr_queue_size = dcqs.completed_buffers_num();
  1803   if (curr_queue_size >= cg1r->yellow_zone()) {
  1804     dcqs.set_completed_queue_padding(curr_queue_size);
  1805   } else {
  1806     dcqs.set_completed_queue_padding(0);
  1808   dcqs.notify_if_necessary();
  1811 double
  1812 G1CollectorPolicy::
  1813 predict_young_collection_elapsed_time_ms(size_t adjustment) {
  1814   guarantee( adjustment == 0 || adjustment == 1, "invariant" );
  1816   G1CollectedHeap* g1h = G1CollectedHeap::heap();
  1817   size_t young_num = g1h->young_list()->length();
  1818   if (young_num == 0)
  1819     return 0.0;
  1821   young_num += adjustment;
  1822   size_t pending_cards = predict_pending_cards();
  1823   size_t rs_lengths = g1h->young_list()->sampled_rs_lengths() +
  1824                       predict_rs_length_diff();
  1825   size_t card_num;
  1826   if (full_young_gcs())
  1827     card_num = predict_young_card_num(rs_lengths);
  1828   else
  1829     card_num = predict_non_young_card_num(rs_lengths);
  1830   size_t young_byte_size = young_num * HeapRegion::GrainBytes;
  1831   double accum_yg_surv_rate =
  1832     _short_lived_surv_rate_group->accum_surv_rate(adjustment);
  1834   size_t bytes_to_copy =
  1835     (size_t) (accum_yg_surv_rate * (double) HeapRegion::GrainBytes);
  1837   return
  1838     predict_rs_update_time_ms(pending_cards) +
  1839     predict_rs_scan_time_ms(card_num) +
  1840     predict_object_copy_time_ms(bytes_to_copy) +
  1841     predict_young_other_time_ms(young_num) +
  1842     predict_constant_other_time_ms();
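         // Worked example with assumed predictor outputs: for a 20-region young
         // list, if the model predicts 10 ms of RS update, 15 ms of RS scan,
         // 30 ms of object copy, 2 ms of young "other" and 5 ms of constant
         // other time, the predicted pause is 10 + 15 + 30 + 2 + 5 = 62 ms.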
  1845 double
  1846 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) {
  1847   size_t rs_length = predict_rs_length_diff();
  1848   size_t card_num;
  1849   if (full_young_gcs())
  1850     card_num = predict_young_card_num(rs_length);
  1851   else
  1852     card_num = predict_non_young_card_num(rs_length);
  1853   return predict_base_elapsed_time_ms(pending_cards, card_num);
  1856 double
  1857 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
  1858                                                 size_t scanned_cards) {
  1859   return
  1860     predict_rs_update_time_ms(pending_cards) +
  1861     predict_rs_scan_time_ms(scanned_cards) +
  1862     predict_constant_other_time_ms();
  1865 double
  1866 G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
  1867                                                   bool young) {
  1868   size_t rs_length = hr->rem_set()->occupied();
  1869   size_t card_num;
  1870   if (full_young_gcs())
  1871     card_num = predict_young_card_num(rs_length);
  1872   else
  1873     card_num = predict_non_young_card_num(rs_length);
  1874   size_t bytes_to_copy = predict_bytes_to_copy(hr);
  1876   double region_elapsed_time_ms =
  1877     predict_rs_scan_time_ms(card_num) +
  1878     predict_object_copy_time_ms(bytes_to_copy);
  1880   if (young)
  1881     region_elapsed_time_ms += predict_young_other_time_ms(1);
  1882   else
  1883     region_elapsed_time_ms += predict_non_young_other_time_ms(1);
  1885   return region_elapsed_time_ms;
  1888 size_t
  1889 G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
  1890   size_t bytes_to_copy;
  1891   if (hr->is_marked())
  1892     bytes_to_copy = hr->max_live_bytes();
  1893   else {
  1894     guarantee( hr->is_young() && hr->age_in_surv_rate_group() != -1,
  1895                "invariant" );
  1896     int age = hr->age_in_surv_rate_group();
  1897     double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
  1898     bytes_to_copy = (size_t) ((double) hr->used() * yg_surv_rate);
  1901   return bytes_to_copy;
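         // Example (assumed numbers): a marked region contributes its
         // max_live_bytes directly; an unmarked young region with used() = 1 MB
         // and a predicted survival rate of 0.30 contributes roughly
         // 0.30 * 1 MB = ~300 KB.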
  1904 void
  1905 G1CollectorPolicy::start_recording_regions() {
  1906   _recorded_rs_lengths            = 0;
  1907   _recorded_young_regions         = 0;
  1908   _recorded_non_young_regions     = 0;
  1910 #if PREDICTIONS_VERBOSE
  1911   _recorded_marked_bytes          = 0;
  1912   _recorded_young_bytes           = 0;
  1913   _predicted_bytes_to_copy        = 0;
  1914   _predicted_rs_lengths           = 0;
  1915   _predicted_cards_scanned        = 0;
  1916 #endif // PREDICTIONS_VERBOSE
  1919 void
  1920 G1CollectorPolicy::record_cset_region_info(HeapRegion* hr, bool young) {
  1921 #if PREDICTIONS_VERBOSE
  1922   if (!young) {
  1923     _recorded_marked_bytes += hr->max_live_bytes();
  1925   _predicted_bytes_to_copy += predict_bytes_to_copy(hr);
  1926 #endif // PREDICTIONS_VERBOSE
  1928   size_t rs_length = hr->rem_set()->occupied();
  1929   _recorded_rs_lengths += rs_length;
  1932 void
  1933 G1CollectorPolicy::record_non_young_cset_region(HeapRegion* hr) {
  1934   assert(!hr->is_young(), "should not call this");
  1935   ++_recorded_non_young_regions;
  1936   record_cset_region_info(hr, false);
  1939 void
  1940 G1CollectorPolicy::set_recorded_young_regions(size_t n_regions) {
  1941   _recorded_young_regions = n_regions;
  1944 void G1CollectorPolicy::set_recorded_young_bytes(size_t bytes) {
  1945 #if PREDICTIONS_VERBOSE
  1946   _recorded_young_bytes = bytes;
  1947 #endif // PREDICTIONS_VERBOSE
  1950 void G1CollectorPolicy::set_recorded_rs_lengths(size_t rs_lengths) {
  1951   _recorded_rs_lengths = rs_lengths;
  1954 void G1CollectorPolicy::set_predicted_bytes_to_copy(size_t bytes) {
  1955   _predicted_bytes_to_copy = bytes;
  1958 void
  1959 G1CollectorPolicy::end_recording_regions() {
  1960   // The _predicted_pause_time_ms field is referenced in code
  1961   // not under PREDICTIONS_VERBOSE. Let's initialize it.
  1962   _predicted_pause_time_ms = -1.0;
  1964 #if PREDICTIONS_VERBOSE
  1965   _predicted_pending_cards = predict_pending_cards();
  1966   _predicted_rs_lengths = _recorded_rs_lengths + predict_rs_length_diff();
  1967   if (full_young_gcs())
  1968     _predicted_cards_scanned += predict_young_card_num(_predicted_rs_lengths);
  1969   else
  1970     _predicted_cards_scanned +=
  1971       predict_non_young_card_num(_predicted_rs_lengths);
  1972   _recorded_region_num = _recorded_young_regions + _recorded_non_young_regions;
  1974   _predicted_rs_update_time_ms =
  1975     predict_rs_update_time_ms(_g1->pending_card_num());
  1976   _predicted_rs_scan_time_ms =
  1977     predict_rs_scan_time_ms(_predicted_cards_scanned);
  1978   _predicted_object_copy_time_ms =
  1979     predict_object_copy_time_ms(_predicted_bytes_to_copy);
  1980   _predicted_constant_other_time_ms =
  1981     predict_constant_other_time_ms();
  1982   _predicted_young_other_time_ms =
  1983     predict_young_other_time_ms(_recorded_young_regions);
  1984   _predicted_non_young_other_time_ms =
  1985     predict_non_young_other_time_ms(_recorded_non_young_regions);
  1987   _predicted_pause_time_ms =
  1988     _predicted_rs_update_time_ms +
  1989     _predicted_rs_scan_time_ms +
  1990     _predicted_object_copy_time_ms +
  1991     _predicted_constant_other_time_ms +
  1992     _predicted_young_other_time_ms +
  1993     _predicted_non_young_other_time_ms;
  1994 #endif // PREDICTIONS_VERBOSE
  1997 void G1CollectorPolicy::check_if_region_is_too_expensive(double
  1998                                                            predicted_time_ms) {
  1999   // I don't think we need to do this when in young GC mode since
  2000   // marking will be initiated next time we hit the soft limit anyway...
  2001   if (predicted_time_ms > _expensive_region_limit_ms) {
  2002     ergo_verbose2(ErgoPartiallyYoungGCs,
  2003               "request partially-young GCs end",
  2004               ergo_format_reason("predicted region time higher than threshold")
  2005               ergo_format_ms("predicted region time")
  2006               ergo_format_ms("threshold"),
  2007               predicted_time_ms, _expensive_region_limit_ms);
  2008     // no point in doing another partial one
  2009     _should_revert_to_full_young_gcs = true;
  2013 // </NEW PREDICTION>
  2016 void G1CollectorPolicy::update_recent_gc_times(double end_time_sec,
  2017                                                double elapsed_ms) {
  2018   _recent_gc_times_ms->add(elapsed_ms);
  2019   _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec);
  2020   _prev_collection_pause_end_ms = end_time_sec * 1000.0;
  2023 double G1CollectorPolicy::recent_avg_time_for_pauses_ms() {
  2024   if (_recent_pause_times_ms->num() == 0) {
  2025     return (double) MaxGCPauseMillis;
  2027   return _recent_pause_times_ms->avg();
  2030 double G1CollectorPolicy::recent_avg_time_for_rs_scan_ms() {
  2031   if (_recent_rs_scan_times_ms->num() == 0) {
  2032     return (double)MaxGCPauseMillis/3.0;
  2034   return _recent_rs_scan_times_ms->avg();
  2037 int G1CollectorPolicy::number_of_recent_gcs() {
  2038   assert(_recent_rs_scan_times_ms->num() ==
  2039          _recent_pause_times_ms->num(), "Sequence out of sync");
  2040   assert(_recent_pause_times_ms->num() ==
  2041          _recent_CS_bytes_used_before->num(), "Sequence out of sync");
  2042   assert(_recent_CS_bytes_used_before->num() ==
  2043          _recent_CS_bytes_surviving->num(), "Sequence out of sync");
  2045   return _recent_pause_times_ms->num();
  2048 double G1CollectorPolicy::recent_avg_survival_fraction() {
  2049   return recent_avg_survival_fraction_work(_recent_CS_bytes_surviving,
  2050                                            _recent_CS_bytes_used_before);
  2053 double G1CollectorPolicy::last_survival_fraction() {
  2054   return last_survival_fraction_work(_recent_CS_bytes_surviving,
  2055                                      _recent_CS_bytes_used_before);
  2058 double
  2059 G1CollectorPolicy::recent_avg_survival_fraction_work(TruncatedSeq* surviving,
  2060                                                      TruncatedSeq* before) {
  2061   assert(surviving->num() == before->num(), "Sequence out of sync");
  2062   if (before->sum() > 0.0) {
  2063       double recent_survival_rate = surviving->sum() / before->sum();
  2064       // We exempt parallel collection from this check because Alloc Buffer
  2065       // fragmentation can produce negative collections.
  2066       // Further, we're now always doing parallel collection.  But I'm still
  2067       // leaving this here as a placeholder for a more precise assertion later.
  2068       // (DLD, 10/05.)
  2069       assert((true || G1CollectedHeap::use_parallel_gc_threads()) ||
  2070              _g1->evacuation_failed() ||
  2071              recent_survival_rate <= 1.0, "Or bad frac");
  2072       return recent_survival_rate;
  2073   } else {
  2074     return 1.0; // Be conservative.
  2078 double
  2079 G1CollectorPolicy::last_survival_fraction_work(TruncatedSeq* surviving,
  2080                                                TruncatedSeq* before) {
  2081   assert(surviving->num() == before->num(), "Sequence out of sync");
  2082   if (surviving->num() > 0 && before->last() > 0.0) {
  2083     double last_survival_rate = surviving->last() / before->last();
  2084     // We exempt parallel collection from this check because Alloc Buffer
  2085     // fragmentation can produce negative collections.
  2086     // Further, we're now always doing parallel collection.  But I'm still
  2087     // leaving this here as a placeholder for a more precise assertion later.
  2088     // (DLD, 10/05.)
  2089     assert((true || G1CollectedHeap::use_parallel_gc_threads()) ||
  2090            last_survival_rate <= 1.0, "Or bad frac");
  2091     return last_survival_rate;
  2092   } else {
  2093     return 1.0;
  2097 static const int survival_min_obs = 5;
  2098 static double survival_min_obs_limits[] = { 0.9, 0.7, 0.5, 0.3, 0.1 };
  2099 static const double min_survival_rate = 0.1;
  2101 double
  2102 G1CollectorPolicy::conservative_avg_survival_fraction_work(double avg,
  2103                                                            double latest) {
  2104   double res = avg;
  2105   if (number_of_recent_gcs() < survival_min_obs) {
  2106     res = MAX2(res, survival_min_obs_limits[number_of_recent_gcs()]);
  2108   res = MAX2(res, latest);
  2109   res = MAX2(res, min_survival_rate);
  2110   // In the parallel case, LAB fragmentation can produce "negative
  2111   // collections"; so can evac failure.  Cap at 1.0
  2112   res = MIN2(res, 1.0);
  2113   return res;
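         // Example (assumed numbers): with only 3 recent GCs observed the floor
         // is survival_min_obs_limits[3] = 0.3, so avg = 0.15 and latest = 0.2
         // produce MAX2(MAX2(MAX2(0.15, 0.3), 0.2), 0.1) = 0.3, capped at 1.0.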
  2116 size_t G1CollectorPolicy::expansion_amount() {
  2117   double recent_gc_overhead = recent_avg_pause_time_ratio() * 100.0;
  2118   double threshold = _gc_overhead_perc;
  2119   if (recent_gc_overhead > threshold) {
  2120     // We will double the existing space, or take
  2121     // G1ExpandByPercentOfAvailable % of the available expansion
  2122     // space, whichever is smaller, bounded below by a minimum
  2123     // expansion (unless that's all that's left.)
  2124     const size_t min_expand_bytes = 1*M;
  2125     size_t reserved_bytes = _g1->max_capacity();
  2126     size_t committed_bytes = _g1->capacity();
  2127     size_t uncommitted_bytes = reserved_bytes - committed_bytes;
  2128     size_t expand_bytes;
  2129     size_t expand_bytes_via_pct =
  2130       uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
  2131     expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
  2132     expand_bytes = MAX2(expand_bytes, min_expand_bytes);
  2133     expand_bytes = MIN2(expand_bytes, uncommitted_bytes);
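             // Example (assumed numbers): committed = 512 MB and reserved = 1 GB
             // give uncommitted = 512 MB; with the default
             // G1ExpandByPercentOfAvailable = 20 the percentage-based amount is
             // ~102 MB, which passes all three clamps (<= committed, >= 1 MB,
             // <= uncommitted) unchanged.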
  2135     ergo_verbose5(ErgoHeapSizing,
  2136                   "attempt heap expansion",
  2137                   ergo_format_reason("recent GC overhead higher than "
  2138                                      "threshold after GC")
  2139                   ergo_format_perc("recent GC overhead")
  2140                   ergo_format_perc("threshold")
  2141                   ergo_format_byte("uncommitted")
  2142                   ergo_format_byte_perc("calculated expansion amount"),
  2143                   recent_gc_overhead, threshold,
  2144                   uncommitted_bytes,
  2145                   expand_bytes_via_pct, (double) G1ExpandByPercentOfAvailable);
  2147     return expand_bytes;
  2148   } else {
  2149     return 0;
  2153 void G1CollectorPolicy::note_start_of_mark_thread() {
  2154   _mark_thread_startup_sec = os::elapsedTime();
  2157 class CountCSClosure: public HeapRegionClosure {
  2158   G1CollectorPolicy* _g1_policy;
  2159 public:
  2160   CountCSClosure(G1CollectorPolicy* g1_policy) :
  2161     _g1_policy(g1_policy) {}
  2162   bool doHeapRegion(HeapRegion* r) {
  2163     _g1_policy->_bytes_in_collection_set_before_gc += r->used();
  2164     return false;
  2166 };
  2168 void G1CollectorPolicy::count_CS_bytes_used() {
  2169   CountCSClosure cs_closure(this);
  2170   _g1->collection_set_iterate(&cs_closure);
  2173 void G1CollectorPolicy::print_summary (int level,
  2174                                        const char* str,
  2175                                        NumberSeq* seq) const {
  2176   double sum = seq->sum();
  2177   LineBuffer(level + 1).append_and_print_cr("%-24s = %8.2lf s (avg = %8.2lf ms)",
  2178                 str, sum / 1000.0, seq->avg());
  2181 void G1CollectorPolicy::print_summary_sd (int level,
  2182                                           const char* str,
  2183                                           NumberSeq* seq) const {
  2184   print_summary(level, str, seq);
  2185   LineBuffer(level + 6).append_and_print_cr("(num = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
  2186                 seq->num(), seq->sd(), seq->maximum());
  2189 void G1CollectorPolicy::check_other_times(int level,
  2190                                         NumberSeq* other_times_ms,
  2191                                         NumberSeq* calc_other_times_ms) const {
  2192   bool should_print = false;
  2193   LineBuffer buf(level + 2);
  2195   double max_sum = MAX2(fabs(other_times_ms->sum()),
  2196                         fabs(calc_other_times_ms->sum()));
  2197   double min_sum = MIN2(fabs(other_times_ms->sum()),
  2198                         fabs(calc_other_times_ms->sum()));
  2199   double sum_ratio = max_sum / min_sum;
  2200   if (sum_ratio > 1.1) {
  2201     should_print = true;
  2202     buf.append_and_print_cr("### CALCULATED OTHER SUM DOESN'T MATCH RECORDED ###");
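             // Example (assumed numbers): a recorded "other" sum of 10.0 ms against
             // a calculated sum of 12.0 ms gives sum_ratio = 12.0 / 10.0 = 1.2 > 1.1,
             // so the mismatch is flagged.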
  2205   double max_avg = MAX2(fabs(other_times_ms->avg()),
  2206                         fabs(calc_other_times_ms->avg()));
  2207   double min_avg = MIN2(fabs(other_times_ms->avg()),
  2208                         fabs(calc_other_times_ms->avg()));
  2209   double avg_ratio = max_avg / min_avg;
  2210   if (avg_ratio > 1.1) {
  2211     should_print = true;
  2212     buf.append_and_print_cr("### CALCULATED OTHER AVG DOESN'T MATCH RECORDED ###");
  2215   if (other_times_ms->sum() < -0.01) {
  2216     buf.append_and_print_cr("### RECORDED OTHER SUM IS NEGATIVE ###");
  2219   if (other_times_ms->avg() < -0.01) {
  2220     buf.append_and_print_cr("### RECORDED OTHER AVG IS NEGATIVE ###");
  2223   if (calc_other_times_ms->sum() < -0.01) {
  2224     should_print = true;
  2225     buf.append_and_print_cr("### CALCULATED OTHER SUM IS NEGATIVE ###");
  2228   if (calc_other_times_ms->avg() < -0.01) {
  2229     should_print = true;
  2230     buf.append_and_print_cr("### CALCULATED OTHER AVG IS NEGATIVE ###");
  2233   if (should_print)
  2234     print_summary(level, "Other(Calc)", calc_other_times_ms);
  2237 void G1CollectorPolicy::print_summary(PauseSummary* summary) const {
  2238   bool parallel = G1CollectedHeap::use_parallel_gc_threads();
  2239   MainBodySummary*    body_summary = summary->main_body_summary();
  2240   if (summary->get_total_seq()->num() > 0) {
  2241     print_summary_sd(0, "Evacuation Pauses", summary->get_total_seq());
  2242     if (body_summary != NULL) {
  2243       print_summary(1, "SATB Drain", body_summary->get_satb_drain_seq());
  2244       if (parallel) {
  2245         print_summary(1, "Parallel Time", body_summary->get_parallel_seq());
  2246         print_summary(2, "Update RS", body_summary->get_update_rs_seq());
  2247         print_summary(2, "Ext Root Scanning",
  2248                       body_summary->get_ext_root_scan_seq());
  2249         print_summary(2, "Mark Stack Scanning",
  2250                       body_summary->get_mark_stack_scan_seq());
  2251         print_summary(2, "Scan RS", body_summary->get_scan_rs_seq());
  2252         print_summary(2, "Object Copy", body_summary->get_obj_copy_seq());
  2253         print_summary(2, "Termination", body_summary->get_termination_seq());
  2254         print_summary(2, "Other", body_summary->get_parallel_other_seq());
  2256           NumberSeq* other_parts[] = {
  2257             body_summary->get_update_rs_seq(),
  2258             body_summary->get_ext_root_scan_seq(),
  2259             body_summary->get_mark_stack_scan_seq(),
  2260             body_summary->get_scan_rs_seq(),
  2261             body_summary->get_obj_copy_seq(),
  2262             body_summary->get_termination_seq()
  2263           };
  2264           NumberSeq calc_other_times_ms(body_summary->get_parallel_seq(),
  2265                                         6, other_parts);
  2266           check_other_times(2, body_summary->get_parallel_other_seq(),
  2267                             &calc_other_times_ms);
  2269         print_summary(1, "Mark Closure", body_summary->get_mark_closure_seq());
  2270         print_summary(1, "Clear CT", body_summary->get_clear_ct_seq());
  2271       } else {
  2272         print_summary(1, "Update RS", body_summary->get_update_rs_seq());
  2273         print_summary(1, "Ext Root Scanning",
  2274                       body_summary->get_ext_root_scan_seq());
  2275         print_summary(1, "Mark Stack Scanning",
  2276                       body_summary->get_mark_stack_scan_seq());
  2277         print_summary(1, "Scan RS", body_summary->get_scan_rs_seq());
  2278         print_summary(1, "Object Copy", body_summary->get_obj_copy_seq());
  2281     print_summary(1, "Other", summary->get_other_seq());
  2283       if (body_summary != NULL) {
  2284         NumberSeq calc_other_times_ms;
  2285         if (parallel) {
  2286           // parallel
  2287           NumberSeq* other_parts[] = {
  2288             body_summary->get_satb_drain_seq(),
  2289             body_summary->get_parallel_seq(),
  2290             body_summary->get_clear_ct_seq()
  2291           };
  2292           calc_other_times_ms = NumberSeq(summary->get_total_seq(),
  2293                                                 3, other_parts);
  2294         } else {
  2295           // serial
  2296           NumberSeq* other_parts[] = {
  2297             body_summary->get_satb_drain_seq(),
  2298             body_summary->get_update_rs_seq(),
  2299             body_summary->get_ext_root_scan_seq(),
  2300             body_summary->get_mark_stack_scan_seq(),
  2301             body_summary->get_scan_rs_seq(),
  2302             body_summary->get_obj_copy_seq()
  2303           };
  2304           calc_other_times_ms = NumberSeq(summary->get_total_seq(),
  2305                                                 6, other_parts);
  2307         check_other_times(1,  summary->get_other_seq(), &calc_other_times_ms);
  2310   } else {
  2311     LineBuffer(1).append_and_print_cr("none");
  2313   LineBuffer(0).append_and_print_cr("");
  2316 void G1CollectorPolicy::print_tracing_info() const {
  2317   if (TraceGen0Time) {
  2318     gclog_or_tty->print_cr("ALL PAUSES");
  2319     print_summary_sd(0, "Total", _all_pause_times_ms);
  2320     gclog_or_tty->print_cr("");
  2321     gclog_or_tty->print_cr("");
  2322     gclog_or_tty->print_cr("   Full Young GC Pauses:    %8d", _full_young_pause_num);
  2323     gclog_or_tty->print_cr("   Partial Young GC Pauses: %8d", _partial_young_pause_num);
  2324     gclog_or_tty->print_cr("");
  2326     gclog_or_tty->print_cr("EVACUATION PAUSES");
  2327     print_summary(_summary);
  2329     gclog_or_tty->print_cr("MISC");
  2330     print_summary_sd(0, "Stop World", _all_stop_world_times_ms);
  2331     print_summary_sd(0, "Yields", _all_yield_times_ms);
  2332     for (int i = 0; i < _aux_num; ++i) {
  2333       if (_all_aux_times_ms[i].num() > 0) {
  2334         char buffer[96];
  2335         sprintf(buffer, "Aux%d", i);
  2336         print_summary_sd(0, buffer, &_all_aux_times_ms[i]);
  2340     size_t all_region_num = _region_num_young + _region_num_tenured;
  2341     gclog_or_tty->print_cr("   New Regions %8d, Young %8d (%6.2lf%%), "
  2342                "Tenured %8d (%6.2lf%%)",
  2343                all_region_num,
  2344                _region_num_young,
  2345                (double) _region_num_young / (double) all_region_num * 100.0,
  2346                _region_num_tenured,
  2347                (double) _region_num_tenured / (double) all_region_num * 100.0);
  2349   if (TraceGen1Time) {
  2350     if (_all_full_gc_times_ms->num() > 0) {
  2351       gclog_or_tty->print("\n%4d full_gcs: total time = %8.2f s",
  2352                  _all_full_gc_times_ms->num(),
  2353                  _all_full_gc_times_ms->sum() / 1000.0);
  2354       gclog_or_tty->print_cr(" (avg = %8.2fms).", _all_full_gc_times_ms->avg());
  2355       gclog_or_tty->print_cr("                     [std. dev = %8.2f ms, max = %8.2f ms]",
  2356                     _all_full_gc_times_ms->sd(),
  2357                     _all_full_gc_times_ms->maximum());
  2362 void G1CollectorPolicy::print_yg_surv_rate_info() const {
  2363 #ifndef PRODUCT
  2364   _short_lived_surv_rate_group->print_surv_rate_summary();
  2365   // add this call for any other surv rate groups
  2366 #endif // PRODUCT
  2369 void G1CollectorPolicy::update_region_num(bool young) {
  2370   if (young) {
  2371     ++_region_num_young;
  2372   } else {
  2373     ++_region_num_tenured;
  2377 #ifndef PRODUCT
  2378 // for debugging, bit of a hack...
  2379 static char*
  2380 region_num_to_mbs(int length) {
  2381   static char buffer[64];
  2382   double bytes = (double) (length * HeapRegion::GrainBytes);
  2383   double mbs = bytes / (double) (1024 * 1024);
  2384   sprintf(buffer, "%7.2lfMB", mbs);
  2385   return buffer;
  2387 #endif // PRODUCT
  2389 size_t G1CollectorPolicy::max_regions(int purpose) {
  2390   switch (purpose) {
  2391     case GCAllocForSurvived:
  2392       return _max_survivor_regions;
  2393     case GCAllocForTenured:
  2394       return REGIONS_UNLIMITED;
  2395     default:
  2396       ShouldNotReachHere();
  2397       return REGIONS_UNLIMITED;
  2398   };
  2401 void G1CollectorPolicy::update_max_gc_locker_expansion() {
  2402   size_t expansion_region_num = 0;
  2403   if (GCLockerEdenExpansionPercent > 0) {
  2404     double perc = (double) GCLockerEdenExpansionPercent / 100.0;
  2405     double expansion_region_num_d = perc * (double) _young_list_target_length;
  2406     // We use ceiling so that if expansion_region_num_d is > 0.0 (but
  2407     // less than 1.0) we'll get 1.
  2408     expansion_region_num = (size_t) ceil(expansion_region_num_d);
  2409   } else {
  2410     assert(expansion_region_num == 0, "sanity");
  2412   _young_list_max_length = _young_list_target_length + expansion_region_num;
  2413   assert(_young_list_target_length <= _young_list_max_length, "post-condition");
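         // Example (assumed numbers): with the default
         // GCLockerEdenExpansionPercent = 5 and a young list target of 100
         // regions, ceil(0.05 * 100) = 5 extra regions are allowed, so
         // _young_list_max_length becomes 105.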
  2416 // Calculates survivor space parameters.
  2417 void G1CollectorPolicy::update_survivors_policy() {
  2418   double max_survivor_regions_d =
  2419                  (double) _young_list_target_length / (double) SurvivorRatio;
  2420   // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
  2421   // smaller than 1.0) we'll get 1.
  2422   _max_survivor_regions = (size_t) ceil(max_survivor_regions_d);
  2424   _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
  2425         HeapRegion::GrainWords * _max_survivor_regions);
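         // Example (assumed numbers): a young list target of 20 regions with the
         // default SurvivorRatio = 8 gives ceil(20 / 8.0) = 3 survivor regions;
         // the tenuring threshold is then computed against
         // 3 * HeapRegion::GrainWords words of survivor space.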
  2428 #ifndef PRODUCT
  2429 class HRSortIndexIsOKClosure: public HeapRegionClosure {
  2430   CollectionSetChooser* _chooser;
  2431 public:
  2432   HRSortIndexIsOKClosure(CollectionSetChooser* chooser) :
  2433     _chooser(chooser) {}
  2435   bool doHeapRegion(HeapRegion* r) {
  2436     if (!r->continuesHumongous()) {
  2437       assert(_chooser->regionProperlyOrdered(r), "Ought to be.");
  2439     return false;
  2441 };
  2443 bool G1CollectorPolicy_BestRegionsFirst::assertMarkedBytesDataOK() {
  2444   HRSortIndexIsOKClosure cl(_collectionSetChooser);
  2445   _g1->heap_region_iterate(&cl);
  2446   return true;
  2448 #endif
  2450 bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(
  2451                                                      GCCause::Cause gc_cause) {
  2452   bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
  2453   if (!during_cycle) {
  2454     ergo_verbose1(ErgoConcCycles,
  2455                   "request concurrent cycle initiation",
  2456                   ergo_format_reason("requested by GC cause")
  2457                   ergo_format_str("GC cause"),
  2458                   GCCause::to_string(gc_cause));
  2459     set_initiate_conc_mark_if_possible();
  2460     return true;
  2461   } else {
  2462     ergo_verbose1(ErgoConcCycles,
  2463                   "do not request concurrent cycle initiation",
  2464                   ergo_format_reason("concurrent cycle already in progress")
  2465                   ergo_format_str("GC cause"),
  2466                   GCCause::to_string(gc_cause));
  2467     return false;
  2471 void
  2472 G1CollectorPolicy::decide_on_conc_mark_initiation() {
  2473   // We are about to decide whether this pause will be an
  2474   // initial-mark pause.
  2476   // First, during_initial_mark_pause() should not be already set. We
  2477   // will set it here if we have to. However, it should be cleared by
  2478   // the end of the pause (it's only set for the duration of an
  2479   // initial-mark pause).
  2480   assert(!during_initial_mark_pause(), "pre-condition");
  2482   if (initiate_conc_mark_if_possible()) {
  2483     // We had noticed on a previous pause that the heap occupancy has
  2484     // gone over the initiating threshold and we should start a
  2485     // concurrent marking cycle. So we might initiate one.
  2487     bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
  2488     if (!during_cycle) {
  2489       // The concurrent marking thread is not "during a cycle", i.e.,
  2490       // it has completed the last one. So we can go ahead and
  2491       // initiate a new cycle.
  2493       set_during_initial_mark_pause();
  2495       // And we can now clear initiate_conc_mark_if_possible() as
  2496       // we've already acted on it.
  2497       clear_initiate_conc_mark_if_possible();
  2499       ergo_verbose0(ErgoConcCycles,
  2500                   "initiate concurrent cycle",
  2501                   ergo_format_reason("concurrent cycle initiation requested"));
  2502     } else {
  2503       // The concurrent marking thread is still finishing up the
  2504       // previous cycle. If we start one right now, the two cycles
  2505       // would overlap. In particular, the concurrent marking thread might
  2506       // be in the process of clearing the next marking bitmap (which
  2507       // we will use for the next cycle if we start one). Starting a
  2508       // cycle now will be bad given that parts of the marking
  2509       // information might get cleared by the marking thread. And we
  2510       // cannot wait for the marking thread to finish the cycle as it
  2511       // periodically yields while clearing the next marking bitmap
  2512       // and, if it's in a yield point, it's waiting for us to
  2513       // finish. So, at this point we will not start a cycle and we'll
  2514       // let the concurrent marking thread complete the last one.
  2515       ergo_verbose0(ErgoConcCycles,
  2516                     "do not initiate concurrent cycle",
  2517                     ergo_format_reason("concurrent cycle already in progress"));
  2522 void
  2523 G1CollectorPolicy_BestRegionsFirst::
  2524 record_collection_pause_start(double start_time_sec, size_t start_used) {
  2525   G1CollectorPolicy::record_collection_pause_start(start_time_sec, start_used);
  2528 class KnownGarbageClosure: public HeapRegionClosure {
  2529   CollectionSetChooser* _hrSorted;
  2531 public:
  2532   KnownGarbageClosure(CollectionSetChooser* hrSorted) :
  2533     _hrSorted(hrSorted)
  2534   {}
  2536   bool doHeapRegion(HeapRegion* r) {
  2537     // Humongous regions are never added to collection sets here: when
  2538     // concurrent mark shows that a humongous object is unreachable, its
  2539     // region is reclaimed directly at the end of the marking cycle.
  2541     // Do we have any marking information for this region?
  2542     if (r->is_marked()) {
  2543       // We don't include humongous regions in collection
  2544       // sets because we collect them immediately at the end of a marking
  2545       // cycle.  We also don't include young regions because we *must*
  2546       // include them in the next collection pause.
  2547       if (!r->isHumongous() && !r->is_young()) {
  2548         _hrSorted->addMarkedHeapRegion(r);
  2551     return false;
  2553 };
  2555 class ParKnownGarbageHRClosure: public HeapRegionClosure {
  2556   CollectionSetChooser* _hrSorted;
  2557   jint _marked_regions_added;
  2558   jint _chunk_size;
  2559   jint _cur_chunk_idx;
  2560   jint _cur_chunk_end; // Cur chunk [_cur_chunk_idx, _cur_chunk_end)
  2561   int _worker;
  2562   int _invokes;
  2564   void get_new_chunk() {
  2565     _cur_chunk_idx = _hrSorted->getParMarkedHeapRegionChunk(_chunk_size);
  2566     _cur_chunk_end = _cur_chunk_idx + _chunk_size;
  2568   void add_region(HeapRegion* r) {
  2569     if (_cur_chunk_idx == _cur_chunk_end) {
  2570       get_new_chunk();
  2572     assert(_cur_chunk_idx < _cur_chunk_end, "postcondition");
  2573     _hrSorted->setMarkedHeapRegion(_cur_chunk_idx, r);
  2574     _marked_regions_added++;
  2575     _cur_chunk_idx++;
  2578 public:
  2579   ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
  2580                            jint chunk_size,
  2581                            int worker) :
  2582     _hrSorted(hrSorted), _chunk_size(chunk_size), _worker(worker),
  2583     _marked_regions_added(0), _cur_chunk_idx(0), _cur_chunk_end(0),
  2584     _invokes(0)
  2585   {}
  2587   bool doHeapRegion(HeapRegion* r) {
  2588     // Humongous regions are never added to collection sets here: when
  2589     // concurrent mark shows that a humongous object is unreachable, its
  2590     // region is reclaimed directly at the end of the marking cycle.
  2591     _invokes++;
  2593     // Do we have any marking information for this region?
  2594     if (r->is_marked()) {
  2595       // We don't include humongous regions in collection
  2596       // sets because we collect them immediately at the end of a marking
  2597       // cycle.
  2598       // We also do not include young regions in collection sets
  2599       if (!r->isHumongous() && !r->is_young()) {
  2600         add_region(r);
  2603     return false;
  2605   jint marked_regions_added() { return _marked_regions_added; }
  2606   int invokes() { return _invokes; }
  2607 };
class ParKnownGarbageTask: public AbstractGangTask {
  CollectionSetChooser* _hrSorted;
  jint _chunk_size;
  G1CollectedHeap* _g1;
public:
  ParKnownGarbageTask(CollectionSetChooser* hrSorted, jint chunk_size) :
    AbstractGangTask("ParKnownGarbageTask"),
    _hrSorted(hrSorted), _chunk_size(chunk_size),
    _g1(G1CollectedHeap::heap())
  {}

  void work(int i) {
    ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size, i);
    // Back to zero for the claim value.
    _g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, i,
                                         HeapRegion::InitialClaimValue);
    jint regions_added = parKnownGarbageCl.marked_regions_added();
    _hrSorted->incNumMarkedHeapRegions(regions_added);
    if (G1PrintParCleanupStats) {
      gclog_or_tty->print_cr("     Thread %d called %d times, added %d regions to list.",
                 i, parKnownGarbageCl.invokes(), regions_added);
    }
  }
};
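
// Invoked at the end of concurrent-mark cleanup: rebuilds the chooser's
// candidate list (in parallel when GC worker threads are available) and
// sorts it, timing each phase when G1PrintParCleanupStats is enabled.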
void
G1CollectorPolicy_BestRegionsFirst::
record_concurrent_mark_cleanup_end(size_t freed_bytes,
                                   size_t max_live_bytes) {
  double start;
  if (G1PrintParCleanupStats) start = os::elapsedTime();
  record_concurrent_mark_cleanup_end_work1(freed_bytes, max_live_bytes);

  _collectionSetChooser->clearMarkedHeapRegions();
  double clear_marked_end;
  if (G1PrintParCleanupStats) {
    clear_marked_end = os::elapsedTime();
    gclog_or_tty->print_cr("  clear marked regions + work1: %8.3f ms.",
                  (clear_marked_end - start)*1000.0);
  }
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    const size_t OverpartitionFactor = 4;
    const size_t MinWorkUnit = 8;
    const size_t WorkUnit =
      MAX2(_g1->n_regions() / (ParallelGCThreads * OverpartitionFactor),
           MinWorkUnit);
    _collectionSetChooser->prepareForAddMarkedHeapRegionsPar(_g1->n_regions(),
                                                             WorkUnit);
    ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser,
                                            (int) WorkUnit);
    _g1->workers()->run_task(&parKnownGarbageTask);

    assert(_g1->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
           "sanity check");
  } else {
    KnownGarbageClosure knownGarbagecl(_collectionSetChooser);
    _g1->heap_region_iterate(&knownGarbagecl);
  }
  double known_garbage_end;
  if (G1PrintParCleanupStats) {
    known_garbage_end = os::elapsedTime();
    gclog_or_tty->print_cr("  compute known garbage: %8.3f ms.",
                  (known_garbage_end - clear_marked_end)*1000.0);
  }
  _collectionSetChooser->sortMarkedHeapRegions();
  double sort_end;
  if (G1PrintParCleanupStats) {
    sort_end = os::elapsedTime();
    gclog_or_tty->print_cr("  sorting: %8.3f ms.",
                  (sort_end - known_garbage_end)*1000.0);
  }

  record_concurrent_mark_cleanup_end_work2();
  double work2_end;
  if (G1PrintParCleanupStats) {
    work2_end = os::elapsedTime();
    gclog_or_tty->print_cr("  work2: %8.3f ms.",
                  (work2_end - sort_end)*1000.0);
  }
}
// Add the heap region at the head of the non-incremental collection set
void G1CollectorPolicy::
add_to_collection_set(HeapRegion* hr) {
  assert(_inc_cset_build_state == Active, "Precondition");
  assert(!hr->is_young(), "non-incremental add of young region");

  if (_g1->mark_in_progress())
    _g1->concurrent_mark()->registerCSetRegion(hr);

  assert(!hr->in_collection_set(), "should not already be in the CSet");
  hr->set_in_collection_set(true);
  hr->set_next_in_collection_set(_collection_set);
  _collection_set = hr;
  _collection_set_size++;
  _collection_set_bytes_used_before += hr->used();
  _g1->register_region_with_in_cset_fast_test(hr);
}
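
// The routines below maintain the *incremental* collection set: young
// regions are added as they are allocated and retired between pauses, and
// the aggregate policy figures (RS lengths, predicted times, bytes used)
// are kept up to date incrementally, so choosing the CSet at the start of
// a pause does not require a full recomputation.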
// Initialize the per-collection-set information
void G1CollectorPolicy::start_incremental_cset_building() {
  assert(_inc_cset_build_state == Inactive, "Precondition");

  _inc_cset_head = NULL;
  _inc_cset_tail = NULL;
  _inc_cset_size = 0;
  _inc_cset_bytes_used_before = 0;

  _inc_cset_young_index = 0;

  _inc_cset_max_finger = 0;
  _inc_cset_recorded_young_bytes = 0;
  _inc_cset_recorded_rs_lengths = 0;
  _inc_cset_predicted_elapsed_time_ms = 0;
  _inc_cset_predicted_bytes_to_copy = 0;
  _inc_cset_build_state = Active;
}
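
// _inc_cset_build_state acts as a guard: regions may only be added to the
// incremental cset while the state is Active, as the asserts in the
// routines above and below check.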
void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) {
  // This routine is used when:
  // * adding survivor regions to the incremental cset at the end of an
  //   evacuation pause,
  // * adding the current allocation region to the incremental cset
  //   when it is retired, and
  // * updating existing policy information for a region in the
  //   incremental cset via young list RSet sampling.
  // Therefore this routine may be called at a safepoint by the
  // VM thread, or in-between safepoints by mutator threads (when
  // retiring the current allocation region) or a concurrent
  // refine thread (RSet sampling).

  double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, true);
  size_t used_bytes = hr->used();

  _inc_cset_recorded_rs_lengths += rs_length;
  _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;

  _inc_cset_bytes_used_before += used_bytes;

  // Cache the values we have added to the aggregated information
  // in the heap region in case we have to remove this region from
  // the incremental collection set, or it is updated by the
  // rset sampling code
  hr->set_recorded_rs_length(rs_length);
  hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);

#if PREDICTIONS_VERBOSE
  size_t bytes_to_copy = predict_bytes_to_copy(hr);
  _inc_cset_predicted_bytes_to_copy += bytes_to_copy;

  // Record the number of bytes used in this region
  _inc_cset_recorded_young_bytes += used_bytes;

  // Cache the values we have added to the aggregated information
  // in the heap region in case we have to remove this region from
  // the incremental collection set, or it is updated by the
  // rset sampling code
  hr->set_predicted_bytes_to_copy(bytes_to_copy);
#endif // PREDICTIONS_VERBOSE
}
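
// Note: the per-region cached values above let
// remove_from_incremental_cset_info() subtract back exactly what this
// routine added, even if the prediction model has drifted in the meantime.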
void G1CollectorPolicy::remove_from_incremental_cset_info(HeapRegion* hr) {
  // This routine is currently only called as part of the updating of
  // existing policy information for regions in the incremental cset that
  // is performed by the concurrent refine thread(s) as part of young list
  // RSet sampling. Therefore we should not be at a safepoint.

  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(hr->is_young(), "it should be");

  size_t used_bytes = hr->used();
  size_t old_rs_length = hr->recorded_rs_length();
  double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();

  // Subtract the old recorded/predicted policy information for
  // the given heap region from the collection set info.
  _inc_cset_recorded_rs_lengths -= old_rs_length;
  _inc_cset_predicted_elapsed_time_ms -= old_elapsed_time_ms;

  _inc_cset_bytes_used_before -= used_bytes;

  // Clear the values cached in the heap region
  hr->set_recorded_rs_length(0);
  hr->set_predicted_elapsed_time_ms(0);

#if PREDICTIONS_VERBOSE
  size_t old_predicted_bytes_to_copy = hr->predicted_bytes_to_copy();
  _inc_cset_predicted_bytes_to_copy -= old_predicted_bytes_to_copy;

  // Subtract the number of bytes used in this region
  _inc_cset_recorded_young_bytes -= used_bytes;

  // Clear the values cached in the heap region
  hr->set_predicted_bytes_to_copy(0);
#endif // PREDICTIONS_VERBOSE
}
void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length) {
  // Update the collection set information that is dependent on the new RS length
  assert(hr->is_young(), "Precondition");

  remove_from_incremental_cset_info(hr);
  add_to_incremental_cset_info(hr, new_rs_length);
}
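
// Implementing the update as a remove followed by a re-add keeps the
// aggregate totals (recorded RS lengths, predicted elapsed time, bytes
// used) consistent without needing a separate delta computation.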
void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) {
  assert( hr->is_young(), "invariant");
  assert( hr->young_index_in_cset() == -1, "invariant" );
  assert(_inc_cset_build_state == Active, "Precondition");

  // We need to clear and set the cached recorded/predicted collection set
  // information in the heap region here (before the region gets added
  // to the collection set). An individual heap region's cached values
  // are calculated, aggregated with the policy collection set info,
  // and cached in the heap region here (initially) and (subsequently)
  // by the Young List sampling code.

  size_t rs_length = hr->rem_set()->occupied();
  add_to_incremental_cset_info(hr, rs_length);

  HeapWord* hr_end = hr->end();
  _inc_cset_max_finger = MAX2(_inc_cset_max_finger, hr_end);

  assert(!hr->in_collection_set(), "invariant");
  hr->set_in_collection_set(true);
  assert( hr->next_in_collection_set() == NULL, "invariant");

  _inc_cset_size++;
  _g1->register_region_with_in_cset_fast_test(hr);

  hr->set_young_index_in_cset((int) _inc_cset_young_index);
  ++_inc_cset_young_index;
}
// Add the region at the RHS of the incremental cset
void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) {
  // We should only ever be appending survivors at the end of a pause
  assert( hr->is_survivor(), "Logic");

  // Do the 'common' stuff
  add_region_to_incremental_cset_common(hr);

  // Now add the region at the right hand side
  if (_inc_cset_tail == NULL) {
    assert(_inc_cset_head == NULL, "invariant");
    _inc_cset_head = hr;
  } else {
    _inc_cset_tail->set_next_in_collection_set(hr);
  }
  _inc_cset_tail = hr;
}
// Add the region to the LHS of the incremental cset
void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) {
  // Survivors should be added to the RHS at the end of a pause
  assert(!hr->is_survivor(), "Logic");

  // Do the 'common' stuff
  add_region_to_incremental_cset_common(hr);

  // Add the region at the left hand side
  hr->set_next_in_collection_set(_inc_cset_head);
  if (_inc_cset_head == NULL) {
    assert(_inc_cset_tail == NULL, "Invariant");
    _inc_cset_tail = hr;
  }
  _inc_cset_head = hr;
}
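
// Together, the two routines above keep the incremental cset list in the
// order [newly young regions (most recently retired first) ++ survivors
// from the last pause]: mutator-retired eden regions are pushed onto the
// LHS while survivors are appended on the RHS at the end of a pause.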
#ifndef PRODUCT
void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream* st) {
  assert(list_head == inc_cset_head() || list_head == collection_set(), "must be");

  st->print_cr("\nCollection_set:");
  HeapRegion* csr = list_head;
  while (csr != NULL) {
    HeapRegion* next = csr->next_in_collection_set();
    assert(csr->in_collection_set(), "bad CS");
    st->print_cr("  [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, "
                 "age: %4d, y: %d, surv: %d",
                        csr->bottom(), csr->end(),
                        csr->top(),
                        csr->prev_top_at_mark_start(),
                        csr->next_top_at_mark_start(),
                        csr->top_at_conc_mark_count(),
                        csr->age_in_surv_rate_group_cond(),
                        csr->is_young(),
                        csr->is_survivor());
    csr = next;
  }
}
#endif // !PRODUCT
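
// Build the collection set for the coming evacuation pause. All eden and
// survivor regions are always included; in partially-young mode, old
// regions from the CollectionSetChooser are then added for as long as the
// predicted pause time stays within the target.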
void
G1CollectorPolicy_BestRegionsFirst::choose_collection_set(
                                                  double target_pause_time_ms) {
  // Set this here - in case we're not doing young collections.
  double non_young_start_time_sec = os::elapsedTime();

  YoungList* young_list = _g1->young_list();

  start_recording_regions();

  guarantee(target_pause_time_ms > 0.0,
            err_msg("target_pause_time_ms = %1.6lf should be positive",
                    target_pause_time_ms));
  guarantee(_collection_set == NULL, "Precondition");

  double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
  double predicted_pause_time_ms = base_time_ms;

  double time_remaining_ms = target_pause_time_ms - base_time_ms;

  ergo_verbose3(ErgoCSetConstruction | ErgoHigh,
                "start choosing CSet",
                ergo_format_ms("predicted base time")
                ergo_format_ms("remaining time")
                ergo_format_ms("target pause time"),
                base_time_ms, time_remaining_ms, target_pause_time_ms);

  // the 10% and 50% values are arbitrary...
  double threshold = 0.10 * target_pause_time_ms;
  if (time_remaining_ms < threshold) {
    double prev_time_remaining_ms = time_remaining_ms;
    time_remaining_ms = 0.50 * target_pause_time_ms;
    _within_target = false;
    ergo_verbose3(ErgoCSetConstruction,
                  "adjust remaining time",
                  ergo_format_reason("remaining time lower than threshold")
                  ergo_format_ms("remaining time")
                  ergo_format_ms("threshold")
                  ergo_format_ms("adjusted remaining time"),
                  prev_time_remaining_ms, threshold, time_remaining_ms);
  } else {
    _within_target = true;
  }

  size_t expansion_bytes = _g1->expansion_regions() * HeapRegion::GrainBytes;

  HeapRegion* hr;
  double young_start_time_sec = os::elapsedTime();

  _collection_set_bytes_used_before = 0;
  _collection_set_size = 0;
  _young_cset_length  = 0;
  _last_young_gc_full = full_young_gcs();

  if (_last_young_gc_full) {
    ++_full_young_pause_num;
  } else {
    ++_partial_young_pause_num;
  }

  // The young list is laid out with the survivor regions from the
  // previous pause appended to its RHS, i.e.
  //   [Newly Young Regions ++ Survivors from last pause].

  size_t survivor_region_num = young_list->survivor_length();
  size_t eden_region_num = young_list->length() - survivor_region_num;
  size_t old_region_num = 0;
  hr = young_list->first_survivor_region();
  while (hr != NULL) {
    assert(hr->is_survivor(), "badly formed young list");
    hr->set_young();
    hr = hr->get_next_young_region();
  }

  // Clear the fields that point to the survivor list - they are all young now.
  young_list->clear_survivors();

  if (_g1->mark_in_progress())
    _g1->concurrent_mark()->register_collection_set_finger(_inc_cset_max_finger);

  _young_cset_length = _inc_cset_young_index;
  _collection_set = _inc_cset_head;
  _collection_set_size = _inc_cset_size;
  _collection_set_bytes_used_before = _inc_cset_bytes_used_before;
  time_remaining_ms -= _inc_cset_predicted_elapsed_time_ms;
  predicted_pause_time_ms += _inc_cset_predicted_elapsed_time_ms;

  ergo_verbose3(ErgoCSetConstruction | ErgoHigh,
                "add young regions to CSet",
                ergo_format_region("eden")
                ergo_format_region("survivors")
                ergo_format_ms("predicted young region time"),
                eden_region_num, survivor_region_num,
                _inc_cset_predicted_elapsed_time_ms);

  // The number of recorded young regions is the incremental
  // collection set's current size
  set_recorded_young_regions(_inc_cset_size);
  set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);
  set_recorded_young_bytes(_inc_cset_recorded_young_bytes);
#if PREDICTIONS_VERBOSE
  set_predicted_bytes_to_copy(_inc_cset_predicted_bytes_to_copy);
#endif // PREDICTIONS_VERBOSE

  assert(_inc_cset_size == young_list->length(), "Invariant");

  double young_end_time_sec = os::elapsedTime();
  _recorded_young_cset_choice_time_ms =
    (young_end_time_sec - young_start_time_sec) * 1000.0;

  // We are doing young collections so reset this.
  non_young_start_time_sec = young_end_time_sec;
  if (!full_young_gcs()) {
    bool should_continue = true;
    NumberSeq seq;
    double avg_prediction = 100000000000000000.0; // something very large

    size_t prev_collection_set_size = _collection_set_size;
    double prev_predicted_pause_time_ms = predicted_pause_time_ms;
    do {
      hr = _collectionSetChooser->getNextMarkedRegion(time_remaining_ms,
                                                      avg_prediction);
      if (hr != NULL) {
        double predicted_time_ms = predict_region_elapsed_time_ms(hr, false);
        time_remaining_ms -= predicted_time_ms;
        predicted_pause_time_ms += predicted_time_ms;
        add_to_collection_set(hr);
        record_non_young_cset_region(hr);
        seq.add(predicted_time_ms);
        avg_prediction = seq.avg() + seq.sd();
      }

      should_continue = true;
      if (hr == NULL) {
        // No need for an ergo verbose message here,
        // getNextMarkedRegion() does this when it returns NULL.
        should_continue = false;
      } else {
        if (adaptive_young_list_length()) {
          if (time_remaining_ms < 0.0) {
            ergo_verbose1(ErgoCSetConstruction,
                          "stop adding old regions to CSet",
                          ergo_format_reason("remaining time is lower than 0")
                          ergo_format_ms("remaining time"),
                          time_remaining_ms);
            should_continue = false;
          }
        } else {
          if (_collection_set_size >= _young_list_fixed_length) {
            ergo_verbose2(ErgoCSetConstruction,
                          "stop adding old regions to CSet",
                          ergo_format_reason("CSet length reached target")
                          ergo_format_region("CSet")
                          ergo_format_region("young target"),
                          _collection_set_size, _young_list_fixed_length);
            should_continue = false;
          }
        }
      }
    } while (should_continue);

    if (!adaptive_young_list_length() &&
        _collection_set_size < _young_list_fixed_length) {
      ergo_verbose2(ErgoCSetConstruction,
                    "request partially-young GCs end",
                    ergo_format_reason("CSet length lower than target")
                    ergo_format_region("CSet")
                    ergo_format_region("young target"),
                    _collection_set_size, _young_list_fixed_length);
      _should_revert_to_full_young_gcs = true;
    }

    old_region_num = _collection_set_size - prev_collection_set_size;

    ergo_verbose2(ErgoCSetConstruction | ErgoHigh,
                  "add old regions to CSet",
                  ergo_format_region("old")
                  ergo_format_ms("predicted old region time"),
                  old_region_num,
                  predicted_pause_time_ms - prev_predicted_pause_time_ms);
  }

  stop_incremental_cset_building();

  count_CS_bytes_used();

  end_recording_regions();

  ergo_verbose5(ErgoCSetConstruction,
                "finish choosing CSet",
                ergo_format_region("eden")
                ergo_format_region("survivors")
                ergo_format_region("old")
                ergo_format_ms("predicted pause time")
                ergo_format_ms("target pause time"),
                eden_region_num, survivor_region_num, old_region_num,
                predicted_pause_time_ms, target_pause_time_ms);

  double non_young_end_time_sec = os::elapsedTime();
  _recorded_non_young_cset_choice_time_ms =
    (non_young_end_time_sec - non_young_start_time_sec) * 1000.0;
}
void G1CollectorPolicy_BestRegionsFirst::record_full_collection_end() {
  G1CollectorPolicy::record_full_collection_end();
  _collectionSetChooser->updateAfterFullCollection();
}
void G1CollectorPolicy_BestRegionsFirst::
record_collection_pause_end() {
  G1CollectorPolicy::record_collection_pause_end();
  assert(assertMarkedBytesDataOK(), "Marked regions not OK at pause end.");
}
