src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp

author:    johnc
date:      Wed, 28 Sep 2011 10:36:31 -0700
changeset: 3178:273b46400613
parent:    3175:4dfb2df418f2
child:     3182:65a8ff39a6da
7086533: G1: assert(!_g1->is_obj_dead(obj)): We should not be preserving dead objs: g1CollectedHeap.cpp:3835
Summary: Some objects may not be marked in the event of an evacuation failure in a partially young GC during a marking cycle. Avoid this situation by not allowing partially young GCs during a marking cycle.
Reviewed-by: tonyp, ysr, brutisso

/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/concurrentG1Refine.hpp"
#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "runtime/arguments.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/debug.hpp"

#define PREDICTIONS_VERBOSE 0

// <NEW PREDICTION>

// Different defaults for different number of GC threads
// They were chosen by running GCOld and SPECjbb on debris with different
//   numbers of GC threads and choosing them based on the results

// all the same
static double rs_length_diff_defaults[] = {
  0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
};

static double cost_per_card_ms_defaults[] = {
  0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
};

// all the same
static double fully_young_cards_per_entry_ratio_defaults[] = {
  1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
};

static double cost_per_entry_ms_defaults[] = {
  0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005
};

static double cost_per_byte_ms_defaults[] = {
  0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009
};

// these should be pretty consistent
static double constant_other_time_ms_defaults[] = {
  5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0
};

static double young_other_cost_per_region_ms_defaults[] = {
  0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1
};

static double non_young_other_cost_per_region_ms_defaults[] = {
  1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
};
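
// The entries above are per-GC-thread-count defaults: the constructor below
// picks index 0 for the serial case and min(ParallelGCThreads, 8) - 1
// otherwise, so thread counts beyond 8 reuse the last entry.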

// </NEW PREDICTION>

// Help class for avoiding interleaved logging
class LineBuffer: public StackObj {

private:
  static const int BUFFER_LEN = 1024;
  static const int INDENT_CHARS = 3;
  char _buffer[BUFFER_LEN];
  int _indent_level;
  int _cur;

  void vappend(const char* format, va_list ap) {
    int res = vsnprintf(&_buffer[_cur], BUFFER_LEN - _cur, format, ap);
    if (res != -1) {
      _cur += res;
    } else {
      DEBUG_ONLY(warning("buffer too small in LineBuffer");)
      _buffer[BUFFER_LEN - 1] = 0;
      _cur = BUFFER_LEN; // vsnprintf above should not add to _buffer if we are called again
    }
  }

public:
  explicit LineBuffer(int indent_level): _indent_level(indent_level), _cur(0) {
    for (; (_cur < BUFFER_LEN && _cur < (_indent_level * INDENT_CHARS)); _cur++) {
      _buffer[_cur] = ' ';
    }
  }

#ifndef PRODUCT
  ~LineBuffer() {
    assert(_cur == _indent_level * INDENT_CHARS, "pending data in buffer - append_and_print_cr() not called?");
  }
#endif

  void append(const char* format, ...) {
    va_list ap;
    va_start(ap, format);
    vappend(format, ap);
    va_end(ap);
  }

  void append_and_print_cr(const char* format, ...) {
    va_list ap;
    va_start(ap, format);
    vappend(format, ap);
    va_end(ap);
    gclog_or_tty->print_cr("%s", _buffer);
    _cur = _indent_level * INDENT_CHARS;
  }
};
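
// Typical use (see print_stats() below): a stack-allocated LineBuffer
// accumulates one fully-formatted line via append() and emits it in a single
// gclog_or_tty call via append_and_print_cr(), so lines from different
// threads are not interleaved mid-line.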

G1CollectorPolicy::G1CollectorPolicy() :
  _parallel_gc_threads(G1CollectedHeap::use_parallel_gc_threads()
                        ? ParallelGCThreads : 1),

  _n_pauses(0),
  _recent_rs_scan_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _recent_pause_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _recent_rs_sizes(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _all_pause_times_ms(new NumberSeq()),
  _stop_world_start(0.0),
  _all_stop_world_times_ms(new NumberSeq()),
  _all_yield_times_ms(new NumberSeq()),
  _using_new_ratio_calculations(false),

  _all_mod_union_times_ms(new NumberSeq()),

  _summary(new Summary()),

  _cur_clear_ct_time_ms(0.0),

  _cur_ref_proc_time_ms(0.0),
  _cur_ref_enq_time_ms(0.0),

#ifndef PRODUCT
  _min_clear_cc_time_ms(-1.0),
  _max_clear_cc_time_ms(-1.0),
  _cur_clear_cc_time_ms(0.0),
  _cum_clear_cc_time_ms(0.0),
  _num_cc_clears(0L),
#endif

  _region_num_young(0),
  _region_num_tenured(0),
  _prev_region_num_young(0),
  _prev_region_num_tenured(0),

  _aux_num(10),
  _all_aux_times_ms(new NumberSeq[_aux_num]),
  _cur_aux_start_times_ms(new double[_aux_num]),
  _cur_aux_times_ms(new double[_aux_num]),
  _cur_aux_times_set(new bool[_aux_num]),

  _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),

  // <NEW PREDICTION>

  _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _prev_collection_pause_end_ms(0.0),
  _pending_card_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
  _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _fully_young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  _partially_young_cards_per_entry_ratio_seq(
                                         new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _partially_young_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
  _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _non_young_other_cost_per_region_ms_seq(
                                         new TruncatedSeq(TruncatedSeqLength)),

  _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
  _scanned_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
  _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),

  _pause_time_target_ms((double) MaxGCPauseMillis),

  // </NEW PREDICTION>

  _full_young_gcs(true),
  _full_young_pause_num(0),
  _partial_young_pause_num(0),

  _during_marking(false),
  _in_marking_window(false),
  _in_marking_window_im(false),

  _known_garbage_ratio(0.0),
  _known_garbage_bytes(0),

  _young_gc_eff_seq(new TruncatedSeq(TruncatedSeqLength)),

  _recent_prev_end_times_for_all_gcs_sec(new TruncatedSeq(NumPrevPausesForHeuristics)),

  _recent_CS_bytes_used_before(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _recent_CS_bytes_surviving(new TruncatedSeq(NumPrevPausesForHeuristics)),

  _recent_avg_pause_time_ratio(0.0),
  _num_markings(0),
  _n_marks(0),
  _n_pauses_at_mark_end(0),

  _all_full_gc_times_ms(new NumberSeq()),

  // G1PausesBtwnConcMark defaults to -1
  // so the hack is to do the cast  QQQ FIXME
  _pauses_btwn_concurrent_mark((size_t)G1PausesBtwnConcMark),
  _n_marks_since_last_pause(0),
  _initiate_conc_mark_if_possible(false),
  _during_initial_mark_pause(false),
  _should_revert_to_full_young_gcs(false),
  _last_full_young_gc(false),

  _eden_bytes_before_gc(0),
  _survivor_bytes_before_gc(0),
  _capacity_before_gc(0),

  _prev_collection_pause_used_at_end_bytes(0),

  _collection_set(NULL),
  _collection_set_size(0),
  _collection_set_bytes_used_before(0),

  // Incremental CSet attributes
  _inc_cset_build_state(Inactive),
  _inc_cset_head(NULL),
  _inc_cset_tail(NULL),
  _inc_cset_size(0),
  _inc_cset_young_index(0),
  _inc_cset_bytes_used_before(0),
  _inc_cset_max_finger(NULL),
  _inc_cset_recorded_young_bytes(0),
  _inc_cset_recorded_rs_lengths(0),
  _inc_cset_predicted_elapsed_time_ms(0.0),
  _inc_cset_predicted_bytes_to_copy(0),

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

  _short_lived_surv_rate_group(new SurvRateGroup(this, "Short Lived",
                                                 G1YoungSurvRateNumRegionsSummary)),
  _survivor_surv_rate_group(new SurvRateGroup(this, "Survivor",
                                              G1YoungSurvRateNumRegionsSummary)),
  // add here any more surv rate groups
  _recorded_survivor_regions(0),
  _recorded_survivor_head(NULL),
  _recorded_survivor_tail(NULL),
  _survivors_age_table(true),

  _gc_overhead_perc(0.0) {

  // Set up the region size and associated fields. Given that the
  // policy is created before the heap, we have to set this up here,
  // so it's done as soon as possible.
  HeapRegion::setup_heap_region_size(Arguments::min_heap_size());
  HeapRegionRemSet::setup_remset_size();

  G1ErgoVerbose::initialize();
  if (PrintAdaptiveSizePolicy) {
    // Currently, we only use a single switch for all the heuristics.
    G1ErgoVerbose::set_enabled(true);
    // Given that we don't currently have a verboseness level
    // parameter, we'll hardcode this to high. This can be easily
    // changed in the future.
    G1ErgoVerbose::set_level(ErgoHigh);
  } else {
    G1ErgoVerbose::set_enabled(false);
  }

  // Verify PLAB sizes
  const uint region_size = HeapRegion::GrainWords;
  if (YoungPLABSize > region_size || OldPLABSize > region_size) {
    char buffer[128];
    jio_snprintf(buffer, sizeof(buffer), "%sPLABSize should be at most %u",
                 OldPLABSize > region_size ? "Old" : "Young", region_size);
    vm_exit_during_initialization(buffer);
  }

  _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
  _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;

  _par_last_gc_worker_start_times_ms = new double[_parallel_gc_threads];
  _par_last_ext_root_scan_times_ms = new double[_parallel_gc_threads];
  _par_last_mark_stack_scan_times_ms = new double[_parallel_gc_threads];

  _par_last_update_rs_times_ms = new double[_parallel_gc_threads];
  _par_last_update_rs_processed_buffers = new double[_parallel_gc_threads];

  _par_last_scan_rs_times_ms = new double[_parallel_gc_threads];

  _par_last_obj_copy_times_ms = new double[_parallel_gc_threads];

  _par_last_termination_times_ms = new double[_parallel_gc_threads];
  _par_last_termination_attempts = new double[_parallel_gc_threads];
  _par_last_gc_worker_end_times_ms = new double[_parallel_gc_threads];
  _par_last_gc_worker_times_ms = new double[_parallel_gc_threads];

  // start conservatively
  _expensive_region_limit_ms = 0.5 * (double) MaxGCPauseMillis;

  // <NEW PREDICTION>

  int index;
  if (ParallelGCThreads == 0)
    index = 0;
  else if (ParallelGCThreads > 8)
    index = 7;
  else
    index = ParallelGCThreads - 1;

  _pending_card_diff_seq->add(0.0);
  _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
  _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
  _fully_young_cards_per_entry_ratio_seq->add(
                            fully_young_cards_per_entry_ratio_defaults[index]);
  _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
  _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
  _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
  _young_other_cost_per_region_ms_seq->add(
                               young_other_cost_per_region_ms_defaults[index]);
  _non_young_other_cost_per_region_ms_seq->add(
                           non_young_other_cost_per_region_ms_defaults[index]);

  // </NEW PREDICTION>

  // Below, we might need to calculate the pause time target based on
  // the pause interval. When we do so we are going to give G1 maximum
  // flexibility and allow it to do pauses when it needs to. So, we'll
  // arrange for the pause interval to be the pause time target + 1 to
  // ensure that a) the pause time target is maximized with respect to
  // the pause interval and b) we maintain the invariant that pause
  // time target < pause interval. If the user does not want this
  // maximum flexibility, they will have to set the pause interval
  // explicitly.

  // First make sure that, if either parameter is set, its value is
  // reasonable.
  if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (MaxGCPauseMillis < 1) {
      vm_exit_during_initialization("MaxGCPauseMillis should be "
                                    "greater than 0");
    }
  }
  if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    if (GCPauseIntervalMillis < 1) {
      vm_exit_during_initialization("GCPauseIntervalMillis should be "
                                    "greater than 0");
    }
  }

  // Then, if the pause time target parameter was not set, set it to
  // the default value.
  if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
      // The default pause time target in G1 is 200ms
      FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
    } else {
      // We do not allow the pause interval to be set without the
      // pause time target
      vm_exit_during_initialization("GCPauseIntervalMillis cannot be set "
                                    "without setting MaxGCPauseMillis");
    }
  }

  // Then, if the interval parameter was not set, set it according to
  // the pause time target (this will also deal with the case when the
  // pause time target is the default value).
  if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
  }
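
  // For example, with both flags left at their defaults, MaxGCPauseMillis
  // ends up as 200 and GCPauseIntervalMillis as 201, so the MMU tracker
  // created below is asked to limit pause time to at most 200ms in any
  // 201ms time slice.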

  // Finally, make sure that the two parameters are consistent.
  if (MaxGCPauseMillis >= GCPauseIntervalMillis) {
    char buffer[256];
    jio_snprintf(buffer, 256,
                 "MaxGCPauseMillis (%u) should be less than "
                 "GCPauseIntervalMillis (%u)",
                 MaxGCPauseMillis, GCPauseIntervalMillis);
    vm_exit_during_initialization(buffer);
  }

  double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
  double time_slice  = (double) GCPauseIntervalMillis / 1000.0;
  _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);
  _sigma = (double) G1ConfidencePercent / 100.0;

  // start conservatively (around 50ms is about right)
  _concurrent_mark_remark_times_ms->add(0.05);
  _concurrent_mark_cleanup_times_ms->add(0.20);
  _tenuring_threshold = MaxTenuringThreshold;
  // _max_survivor_regions will be calculated by
  // update_young_list_target_length() during initialization.
  _max_survivor_regions = 0;

  assert(GCTimeRatio > 0,
         "we should have set it to a default value in set_g1_gc_flags() "
         "if a user set it to 0");
  _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));
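
  // Worked example (assuming GCTimeRatio keeps the default that
  // set_g1_gc_flags() establishes for G1, which is 9 at the time of this
  // change): 100.0 * (1.0 / (1.0 + 9)) = 10.0, i.e., spend at most about
  // 10% of total time in GC.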

  uintx reserve_perc = G1ReservePercent;
  // Put an artificial ceiling on this so that it's not set to a silly value.
  if (reserve_perc > 50) {
    reserve_perc = 50;
    warning("G1ReservePercent is set to a value that is too large, "
            "it's been updated to %u", reserve_perc);
  }
  _reserve_factor = (double) reserve_perc / 100.0;
  // This will be set when the heap is expanded
  // for the first time during initialization.
  _reserve_regions = 0;

  initialize_all();
}

// Increment "i", mod "len"
static void inc_mod(int& i, int len) {
  i++; if (i == len) i = 0;
}

void G1CollectorPolicy::initialize_flags() {
  set_min_alignment(HeapRegion::GrainBytes);
  set_max_alignment(GenRemSet::max_alignment_constraint(rem_set_name()));
  if (SurvivorRatio < 1) {
    vm_exit_during_initialization("Invalid survivor ratio specified");
  }
  CollectorPolicy::initialize_flags();
}

// The easiest way to deal with the parsing of the NewSize /
// MaxNewSize / etc. parameters is to re-use the code in the
// TwoGenerationCollectorPolicy class. This is similar to what
// ParallelScavenge does with its GenerationSizer class (see
// ParallelScavengeHeap::initialize()). We might change this in the
// future, but it's a good start.
class G1YoungGenSizer : public TwoGenerationCollectorPolicy {
private:
  size_t size_to_region_num(size_t byte_size) {
    return MAX2((size_t) 1, byte_size / HeapRegion::GrainBytes);
  }

public:
  G1YoungGenSizer() {
    initialize_flags();
    initialize_size_info();
  }
  size_t min_young_region_num() {
    return size_to_region_num(_min_gen0_size);
  }
  size_t initial_young_region_num() {
    return size_to_region_num(_initial_gen0_size);
  }
  size_t max_young_region_num() {
    return size_to_region_num(_max_gen0_size);
  }
};

void G1CollectorPolicy::update_young_list_size_using_newratio(size_t number_of_heap_regions) {
  assert(number_of_heap_regions > 0, "Heap must be initialized");
  size_t young_size = number_of_heap_regions / (NewRatio + 1);
  _min_desired_young_length = young_size;
  _max_desired_young_length = young_size;
}
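
// For example (illustrative numbers): on a 2046-region heap with
// -XX:NewRatio=2, both bounds become 2046 / (2 + 1) = 682 regions; since
// min == max, init() below will then treat the young gen as fixed-length
// until the heap is resized.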

void G1CollectorPolicy::init() {
  // Set aside an initial future to_space.
  _g1 = G1CollectedHeap::heap();

  assert(Heap_lock->owned_by_self(), "Locking discipline.");

  initialize_gc_policy_counters();

  G1YoungGenSizer sizer;
  size_t initial_region_num = sizer.initial_young_region_num();
  _min_desired_young_length = sizer.min_young_region_num();
  _max_desired_young_length = sizer.max_young_region_num();

  if (FLAG_IS_CMDLINE(NewRatio)) {
    if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
      warning("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
    } else {
      // Treat NewRatio as a fixed size that is only recalculated when the heap size changes
      update_young_list_size_using_newratio(_g1->n_regions());
      _using_new_ratio_calculations = true;
    }
  }

  // GenCollectorPolicy guarantees that min <= initial <= max.
  // Asserting here just to state that we rely on this property.
  assert(_min_desired_young_length <= _max_desired_young_length, "Invalid min/max young gen size values");
  assert(initial_region_num <= _max_desired_young_length, "Initial young gen size too large");
  assert(_min_desired_young_length <= initial_region_num, "Initial young gen size too small");

  set_adaptive_young_list_length(_min_desired_young_length < _max_desired_young_length);
  if (adaptive_young_list_length()) {
    _young_list_fixed_length = 0;
  } else {
    _young_list_fixed_length = initial_region_num;
  }
  _free_regions_at_end_of_collection = _g1->free_regions();
  update_young_list_target_length();
  _prev_eden_capacity = _young_list_target_length * HeapRegion::GrainBytes;

  // We may immediately start allocating regions and placing them on the
  // collection set list. Initialize the per-collection set info
  start_incremental_cset_building();
}

// Create the jstat counters for the policy.
void G1CollectorPolicy::initialize_gc_policy_counters() {
  _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3);
}

bool G1CollectorPolicy::predict_will_fit(size_t young_length,
                                         double base_time_ms,
                                         size_t base_free_regions,
                                         double target_pause_time_ms) {
  if (young_length >= base_free_regions) {
    // end condition 1: not enough space for the young regions
    return false;
  }

  double accum_surv_rate = accum_yg_surv_rate_pred((int)(young_length - 1));
  size_t bytes_to_copy =
               (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
  double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
  double young_other_time_ms = predict_young_other_time_ms(young_length);
  double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms;
  if (pause_time_ms > target_pause_time_ms) {
    // end condition 2: prediction is over the target pause time
    return false;
  }

  size_t free_bytes =
                  (base_free_regions - young_length) * HeapRegion::GrainBytes;
  if ((2.0 * sigma()) * (double) bytes_to_copy > (double) free_bytes) {
    // end condition 3: out-of-space (conservatively!)
    return false;
  }

  // success!
  return true;
}
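
// Illustrative numbers for end condition 3: assuming the G1ConfidencePercent
// default of 50, sigma() is 0.5, so a prediction of 8MB to copy is rejected
// unless at least 2 * 0.5 * 8MB = 8MB of free space would remain outside the
// proposed young gen.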

void G1CollectorPolicy::record_new_heap_size(size_t new_number_of_regions) {
  // re-calculate the necessary reserve
  double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
  // We use ceiling so that if reserve_regions_d is > 0.0 (but
  // smaller than 1.0) we'll get 1.
  _reserve_regions = (size_t) ceil(reserve_regions_d);

  if (_using_new_ratio_calculations) {
    // -XX:NewRatio was specified so we need to update the
    // young gen length when the heap size has changed.
    update_young_list_size_using_newratio(new_number_of_regions);
  }
}
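
// For example (illustrative numbers): expanding to 1001 regions with
// G1ReservePercent=10 gives reserve_regions_d = 100.1, which the ceiling
// rounds up to a reserve of 101 regions.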

size_t G1CollectorPolicy::calculate_young_list_desired_min_length(
                                                     size_t base_min_length) {
  size_t desired_min_length = 0;
  if (adaptive_young_list_length()) {
    if (_alloc_rate_ms_seq->num() > 3) {
      double now_sec = os::elapsedTime();
      double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
      double alloc_rate_ms = predict_alloc_rate_ms();
      desired_min_length = (size_t) ceil(alloc_rate_ms * when_ms);
    } else {
      // otherwise we don't have enough info to make the prediction
    }
  }
  desired_min_length += base_min_length;
  // make sure we don't go below any user-defined minimum bound
  return MAX2(_min_desired_young_length, desired_min_length);
}
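
// For example (illustrative numbers): if the predicted allocation rate is
// 0.05 regions/ms and the MMU tracker says the next GC may safely start in
// 400ms, the prediction contributes ceil(0.05 * 400) = 20 regions on top of
// base_min_length.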

size_t G1CollectorPolicy::calculate_young_list_desired_max_length() {
  // Here, we might want to also take into account any additional
  // constraints (i.e., user-defined minimum bound). Currently, we
  // effectively don't set this bound.
  return _max_desired_young_length;
}

void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
  if (rs_lengths == (size_t) -1) {
    // if it's set to the default value (-1), we should predict it;
    // otherwise, use the given value.
    rs_lengths = (size_t) get_new_prediction(_rs_lengths_seq);
  }

  // Calculate the absolute and desired min bounds.

  // This is how many young regions we already have (currently: the survivors).
  size_t base_min_length = recorded_survivor_regions();
  // This is the absolute minimum young length, which ensures that we
  // can allocate one eden region in the worst-case.
  size_t absolute_min_length = base_min_length + 1;
  size_t desired_min_length =
                     calculate_young_list_desired_min_length(base_min_length);
  if (desired_min_length < absolute_min_length) {
    desired_min_length = absolute_min_length;
  }

  // Calculate the absolute and desired max bounds.

  // We will try our best not to "eat" into the reserve.
  size_t absolute_max_length = 0;
  if (_free_regions_at_end_of_collection > _reserve_regions) {
    absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
  }
  size_t desired_max_length = calculate_young_list_desired_max_length();
  if (desired_max_length > absolute_max_length) {
    desired_max_length = absolute_max_length;
  }

  size_t young_list_target_length = 0;
  if (adaptive_young_list_length()) {
    if (full_young_gcs()) {
      young_list_target_length =
                        calculate_young_list_target_length(rs_lengths,
                                                           base_min_length,
                                                           desired_min_length,
                                                           desired_max_length);
      _rs_lengths_prediction = rs_lengths;
    } else {
      // Don't calculate anything and let the code below bound it to
      // the desired_min_length, i.e., do the next GC as soon as
      // possible to maximize how many old regions we can add to it.
    }
  } else {
    if (full_young_gcs()) {
      young_list_target_length = _young_list_fixed_length;
    } else {
      // A bit arbitrary: during partially-young GCs we allocate half
      // the young regions to try to add old regions to the CSet.
      young_list_target_length = _young_list_fixed_length / 2;
      // We choose to accept that we might go under the desired min
      // length given that we intentionally ask for a smaller young gen.
      desired_min_length = absolute_min_length;
    }
  }

  // Make sure we don't go over the desired max length, nor under the
  // desired min length. In case they clash, desired_min_length wins
  // which is why that test is second.
  if (young_list_target_length > desired_max_length) {
    young_list_target_length = desired_max_length;
  }
  if (young_list_target_length < desired_min_length) {
    young_list_target_length = desired_min_length;
  }

  assert(young_list_target_length > recorded_survivor_regions(),
         "we should be able to allocate at least one eden region");
  assert(young_list_target_length >= absolute_min_length, "post-condition");
  _young_list_target_length = young_list_target_length;

  update_max_gc_locker_expansion();
}

size_t
G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
                                                   size_t base_min_length,
                                                   size_t desired_min_length,
                                                   size_t desired_max_length) {
  assert(adaptive_young_list_length(), "pre-condition");
  assert(full_young_gcs(), "only call this for fully-young GCs");

  // In case some edge-condition makes the desired max length too small...
  if (desired_max_length <= desired_min_length) {
    return desired_min_length;
  }

  // We'll adjust min_young_length and max_young_length not to include
  // the already allocated young regions (i.e., so they reflect the
  // min and max eden regions we'll allocate). The base_min_length
  // will be reflected in the predictions by the
  // survivor_regions_evac_time prediction.
  assert(desired_min_length > base_min_length, "invariant");
  size_t min_young_length = desired_min_length - base_min_length;
  assert(desired_max_length > base_min_length, "invariant");
  size_t max_young_length = desired_max_length - base_min_length;

  double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
  double survivor_regions_evac_time = predict_survivor_regions_evac_time();
  size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq);
  size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();
  size_t scanned_cards = predict_young_card_num(adj_rs_lengths);
  double base_time_ms =
    predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
    survivor_regions_evac_time;
  size_t available_free_regions = _free_regions_at_end_of_collection;
  size_t base_free_regions = 0;
  if (available_free_regions > _reserve_regions) {
    base_free_regions = available_free_regions - _reserve_regions;
  }

  // Here, we will make sure that the shortest young length that
  // makes sense fits within the target pause time.

  if (predict_will_fit(min_young_length, base_time_ms,
                       base_free_regions, target_pause_time_ms)) {
    // The shortest young length will fit into the target pause time;
    // we'll now check whether the absolute maximum number of young
    // regions will fit in the target pause time. If not, we'll do
    // a binary search between min_young_length and max_young_length.
    if (predict_will_fit(max_young_length, base_time_ms,
                         base_free_regions, target_pause_time_ms)) {
      // The maximum young length will fit into the target pause time.
      // We are done so set min young length to the maximum length (as
      // the result is assumed to be returned in min_young_length).
      min_young_length = max_young_length;
    } else {
      // The maximum possible number of young regions will not fit within
      // the target pause time so we'll search for the optimal
      // length. The loop invariants are:
      //
      // min_young_length < max_young_length
      // min_young_length is known to fit into the target pause time
      // max_young_length is known not to fit into the target pause time
      //
      // Going into the loop we know the above hold as we've just
      // checked them. Every time around the loop we check whether
      // the middle value between min_young_length and
      // max_young_length fits into the target pause time. If it
      // does, it becomes the new min. If it doesn't, it becomes
      // the new max. This way we maintain the loop invariants.

      assert(min_young_length < max_young_length, "invariant");
      size_t diff = (max_young_length - min_young_length) / 2;
      while (diff > 0) {
        size_t young_length = min_young_length + diff;
        if (predict_will_fit(young_length, base_time_ms,
                             base_free_regions, target_pause_time_ms)) {
          min_young_length = young_length;
        } else {
          max_young_length = young_length;
        }
        assert(min_young_length <  max_young_length, "invariant");
        diff = (max_young_length - min_young_length) / 2;
      }
      // The result is min_young_length which, according to the
      // loop invariants, should fit within the target pause time.

      // These are the post-conditions of the binary search above:
      assert(min_young_length < max_young_length,
             "otherwise we should have discovered that max_young_length "
             "fits into the pause target and not done the binary search");
      assert(predict_will_fit(min_young_length, base_time_ms,
                              base_free_regions, target_pause_time_ms),
             "min_young_length, the result of the binary search, should "
             "fit into the pause target");
      assert(!predict_will_fit(min_young_length + 1, base_time_ms,
                               base_free_regions, target_pause_time_ms),
             "min_young_length, the result of the binary search, should be "
             "optimal, so no larger length should fit into the pause target");
    }
  } else {
    // Even the minimum length doesn't fit into the pause time
    // target, return it as the result nevertheless.
  }
  return base_min_length + min_young_length;
}
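
// An illustrative run of the binary search above: if min_young_length = 10
// fits and max_young_length = 50 does not, the first probe is 10 + 20 = 30;
// each iteration halves the gap, so the answer is found in O(log n) calls to
// predict_will_fit() and min_young_length ends up as the largest length
// known to fit.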

double G1CollectorPolicy::predict_survivor_regions_evac_time() {
  double survivor_regions_evac_time = 0.0;
  for (HeapRegion * r = _recorded_survivor_head;
       r != NULL && r != _recorded_survivor_tail->get_next_young_region();
       r = r->get_next_young_region()) {
    survivor_regions_evac_time += predict_region_elapsed_time_ms(r, true);
  }
  return survivor_regions_evac_time;
}

void G1CollectorPolicy::revise_young_list_target_length_if_necessary() {
  guarantee( adaptive_young_list_length(), "should not call this otherwise" );

  size_t rs_lengths = _g1->young_list()->sampled_rs_lengths();
  if (rs_lengths > _rs_lengths_prediction) {
    // add 10% to avoid having to recalculate often
    size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
    update_young_list_target_length(rs_lengths_prediction);
  }
}
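
// For example (illustrative numbers): if the sampled RS length is 2000 but
// the last prediction was 1500, the target is recomputed using
// 2000 * 1100 / 1000 = 2200, so modest further growth does not force a
// recalculation on every sample.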

HeapWord* G1CollectorPolicy::mem_allocate_work(size_t size,
                                               bool is_tlab,
                                               bool* gc_overhead_limit_was_exceeded) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}

// This method controls how a collector handles one or more
// of its generations being fully allocated.
HeapWord* G1CollectorPolicy::satisfy_failed_allocation(size_t size,
                                                       bool is_tlab) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}

#ifndef PRODUCT
bool G1CollectorPolicy::verify_young_ages() {
  HeapRegion* head = _g1->young_list()->first_region();
  return
    verify_young_ages(head, _short_lived_surv_rate_group);
  // also call verify_young_ages on any additional surv rate groups
}

bool
G1CollectorPolicy::verify_young_ages(HeapRegion* head,
                                     SurvRateGroup *surv_rate_group) {
  guarantee( surv_rate_group != NULL, "pre-condition" );

  const char* name = surv_rate_group->name();
  bool ret = true;
  int prev_age = -1;

  for (HeapRegion* curr = head;
       curr != NULL;
       curr = curr->get_next_young_region()) {
    SurvRateGroup* group = curr->surv_rate_group();
    if (group == NULL && !curr->is_survivor()) {
      gclog_or_tty->print_cr("## %s: encountered NULL surv_rate_group", name);
      ret = false;
    }

    if (surv_rate_group == group) {
      int age = curr->age_in_surv_rate_group();

      if (age < 0) {
        gclog_or_tty->print_cr("## %s: encountered negative age", name);
        ret = false;
      }

      if (age <= prev_age) {
        gclog_or_tty->print_cr("## %s: region ages are not strictly increasing "
                               "(%d, %d)", name, age, prev_age);
        ret = false;
      }
      prev_age = age;
    }
  }

  return ret;
}
#endif // PRODUCT

void G1CollectorPolicy::record_full_collection_start() {
  _cur_collection_start_sec = os::elapsedTime();
  // Release the future to-space so that it is available for compaction into.
  _g1->set_full_collection();
}

void G1CollectorPolicy::record_full_collection_end() {
  // Consider this like a collection pause for the purposes of allocation
  // since last pause.
  double end_sec = os::elapsedTime();
  double full_gc_time_sec = end_sec - _cur_collection_start_sec;
  double full_gc_time_ms = full_gc_time_sec * 1000.0;

  _all_full_gc_times_ms->add(full_gc_time_ms);

  update_recent_gc_times(end_sec, full_gc_time_ms);

  _g1->clear_full_collection();

  // "Nuke" the heuristics that control the fully/partially young GC
  // transitions and make sure we start with fully young GCs after the
  // Full GC.
  set_full_young_gcs(true);
  _last_full_young_gc = false;
  _should_revert_to_full_young_gcs = false;
  clear_initiate_conc_mark_if_possible();
  clear_during_initial_mark_pause();
  _known_garbage_bytes = 0;
  _known_garbage_ratio = 0.0;
  _in_marking_window = false;
  _in_marking_window_im = false;

  _short_lived_surv_rate_group->start_adding_regions();
  // also call this on any additional surv rate groups

  record_survivor_regions(0, NULL, NULL);

  _prev_region_num_young   = _region_num_young;
  _prev_region_num_tenured = _region_num_tenured;

  _free_regions_at_end_of_collection = _g1->free_regions();
  // Reset survivors SurvRateGroup.
  _survivor_surv_rate_group->reset();
  update_young_list_target_length();
}

void G1CollectorPolicy::record_stop_world_start() {
  _stop_world_start = os::elapsedTime();
}

void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
                                                      size_t start_used) {
  if (PrintGCDetails) {
    gclog_or_tty->stamp(PrintGCTimeStamps);
    gclog_or_tty->print("[GC pause");
    gclog_or_tty->print(" (%s)", full_young_gcs() ? "young" : "partial");
  }

  // We only need to do this here as the policy will only be applied
  // to the GC we're about to start, so there's no point in calculating
  // this every time we calculate / recalculate the target young length.
  update_survivors_policy();

  assert(_g1->used() == _g1->recalculate_used(),
         err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT,
                 _g1->used(), _g1->recalculate_used()));

  double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
  _all_stop_world_times_ms->add(s_w_t_ms);
  _stop_world_start = 0.0;

  _cur_collection_start_sec = start_time_sec;
  _cur_collection_pause_used_at_start_bytes = start_used;
  _cur_collection_pause_used_regions_at_start = _g1->used_regions();
  _pending_cards = _g1->pending_card_num();
  _max_pending_cards = _g1->max_pending_card_num();

  _bytes_in_collection_set_before_gc = 0;
  _bytes_copied_during_gc = 0;

  YoungList* young_list = _g1->young_list();
  _eden_bytes_before_gc = young_list->eden_used_bytes();
  _survivor_bytes_before_gc = young_list->survivor_used_bytes();
  _capacity_before_gc = _g1->capacity();

#ifdef DEBUG
  // initialise these to something well known so that we can spot
  // if they are not set properly

  for (int i = 0; i < _parallel_gc_threads; ++i) {
    _par_last_gc_worker_start_times_ms[i] = -1234.0;
    _par_last_ext_root_scan_times_ms[i] = -1234.0;
    _par_last_mark_stack_scan_times_ms[i] = -1234.0;
    _par_last_update_rs_times_ms[i] = -1234.0;
    _par_last_update_rs_processed_buffers[i] = -1234.0;
    _par_last_scan_rs_times_ms[i] = -1234.0;
    _par_last_obj_copy_times_ms[i] = -1234.0;
    _par_last_termination_times_ms[i] = -1234.0;
    _par_last_termination_attempts[i] = -1234.0;
    _par_last_gc_worker_end_times_ms[i] = -1234.0;
    _par_last_gc_worker_times_ms[i] = -1234.0;
  }
#endif

  for (int i = 0; i < _aux_num; ++i) {
    _cur_aux_times_ms[i] = 0.0;
    _cur_aux_times_set[i] = false;
  }

  _satb_drain_time_set = false;
  _last_satb_drain_processed_buffers = -1;

  _last_young_gc_full = false;

  // do that for any other surv rate groups
  _short_lived_surv_rate_group->stop_adding_regions();
  _survivors_age_table.clear();

  assert( verify_young_ages(), "region age verification" );
}

void G1CollectorPolicy::record_mark_closure_time(double mark_closure_time_ms) {
  _mark_closure_time_ms = mark_closure_time_ms;
}

void G1CollectorPolicy::record_concurrent_mark_init_end(double
                                                   mark_init_elapsed_time_ms) {
  _during_marking = true;
  assert(!initiate_conc_mark_if_possible(), "we should have cleared it by now");
  clear_during_initial_mark_pause();
  _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
}

void G1CollectorPolicy::record_concurrent_mark_remark_start() {
  _mark_remark_start_sec = os::elapsedTime();
  _during_marking = false;
}

void G1CollectorPolicy::record_concurrent_mark_remark_end() {
  double end_time_sec = os::elapsedTime();
  double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
  _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
  _cur_mark_stop_world_time_ms += elapsed_time_ms;
  _prev_collection_pause_end_ms += elapsed_time_ms;

  _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec, true);
}

void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
  _mark_cleanup_start_sec = os::elapsedTime();
}

void
G1CollectorPolicy::record_concurrent_mark_cleanup_end(size_t freed_bytes,
                                                      size_t max_live_bytes) {
  record_concurrent_mark_cleanup_end_work1(freed_bytes, max_live_bytes);
  record_concurrent_mark_cleanup_end_work2();
}

void
G1CollectorPolicy::
record_concurrent_mark_cleanup_end_work1(size_t freed_bytes,
                                         size_t max_live_bytes) {
  if (_n_marks < 2) {
    _n_marks++;
  }
}

// The important thing about this is that it includes "os::elapsedTime".
void G1CollectorPolicy::record_concurrent_mark_cleanup_end_work2() {
  double end_time_sec = os::elapsedTime();
  double elapsed_time_ms = (end_time_sec - _mark_cleanup_start_sec)*1000.0;
  _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
  _cur_mark_stop_world_time_ms += elapsed_time_ms;
  _prev_collection_pause_end_ms += elapsed_time_ms;

  _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_time_sec, true);

  _num_markings++;
  _n_pauses_at_mark_end = _n_pauses;
  _n_marks_since_last_pause++;
}

void
G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
  _should_revert_to_full_young_gcs = false;
  _last_full_young_gc = true;
  _in_marking_window = false;
}

void G1CollectorPolicy::record_concurrent_pause() {
  if (_stop_world_start > 0.0) {
    double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
    _all_yield_times_ms->add(yield_ms);
  }
}

void G1CollectorPolicy::record_concurrent_pause_end() {
}

template<class T>
T sum_of(T* sum_arr, int start, int n, int N) {
  T sum = (T)0;
  for (int i = 0; i < n; i++) {
    int j = (start + i) % N;
    sum += sum_arr[j];
  }
  return sum;
}

void G1CollectorPolicy::print_par_stats(int level,
                                        const char* str,
                                        double* data) {
  double min = data[0], max = data[0];
  double total = 0.0;
  LineBuffer buf(level);
  buf.append("[%s (ms):", str);
  for (uint i = 0; i < ParallelGCThreads; ++i) {
    double val = data[i];
    if (val < min)
      min = val;
    if (val > max)
      max = val;
    total += val;
    buf.append("  %3.1lf", val);
  }
  buf.append_and_print_cr("");
  double avg = total / (double) ParallelGCThreads;
  buf.append_and_print_cr(" Avg: %5.1lf, Min: %5.1lf, Max: %5.1lf, Diff: %5.1lf]",
    avg, min, max, max - min);
}

void G1CollectorPolicy::print_par_sizes(int level,
                                        const char* str,
                                        double* data) {
  double min = data[0], max = data[0];
  double total = 0.0;
  LineBuffer buf(level);
  buf.append("[%s :", str);
  for (uint i = 0; i < ParallelGCThreads; ++i) {
    double val = data[i];
    if (val < min)
      min = val;
    if (val > max)
      max = val;
    total += val;
    buf.append(" %d", (int) val);
  }
  buf.append_and_print_cr("");
  double avg = total / (double) ParallelGCThreads;
  buf.append_and_print_cr(" Sum: %d, Avg: %d, Min: %d, Max: %d, Diff: %d]",
    (int)total, (int)avg, (int)min, (int)max, (int)max - (int)min);
}

void G1CollectorPolicy::print_stats (int level,
                                     const char* str,
                                     double value) {
  LineBuffer(level).append_and_print_cr("[%s: %5.1lf ms]", str, value);
}

void G1CollectorPolicy::print_stats (int level,
                                     const char* str,
                                     int value) {
  LineBuffer(level).append_and_print_cr("[%s: %d]", str, value);
}

double G1CollectorPolicy::avg_value (double* data) {
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    double ret = 0.0;
    for (uint i = 0; i < ParallelGCThreads; ++i)
      ret += data[i];
    return ret / (double) ParallelGCThreads;
  } else {
    return data[0];
  }
}

double G1CollectorPolicy::max_value (double* data) {
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    double ret = data[0];
    for (uint i = 1; i < ParallelGCThreads; ++i)
      if (data[i] > ret)
        ret = data[i];
    return ret;
  } else {
    return data[0];
  }
}

double G1CollectorPolicy::sum_of_values (double* data) {
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    double sum = 0.0;
    for (uint i = 0; i < ParallelGCThreads; i++)
      sum += data[i];
    return sum;
  } else {
    return data[0];
  }
}

double G1CollectorPolicy::max_sum (double* data1,
                                   double* data2) {
  double ret = data1[0] + data2[0];

  if (G1CollectedHeap::use_parallel_gc_threads()) {
    for (uint i = 1; i < ParallelGCThreads; ++i) {
      double data = data1[i] + data2[i];
      if (data > ret)
        ret = data;
    }
  }
  return ret;
}

// Anything below that is considered to be zero
#define MIN_TIMER_GRANULARITY 0.0000001

void G1CollectorPolicy::record_collection_pause_end() {
  double end_time_sec = os::elapsedTime();
  double elapsed_ms = _last_pause_time_ms;
  bool parallel = G1CollectedHeap::use_parallel_gc_threads();
  size_t rs_size =
    _cur_collection_pause_used_regions_at_start - collection_set_size();
  size_t cur_used_bytes = _g1->used();
  assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
  bool last_pause_included_initial_mark = false;
  bool update_stats = !_g1->evacuation_failed();

#ifndef PRODUCT
  if (G1YoungSurvRateVerbose) {
    gclog_or_tty->print_cr("");
    _short_lived_surv_rate_group->print();
    // do that for any other surv rate groups too
  }
#endif // PRODUCT

  last_pause_included_initial_mark = during_initial_mark_pause();
  if (last_pause_included_initial_mark)
    record_concurrent_mark_init_end(0.0);

  size_t marking_initiating_used_threshold =
    (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;

  if (!_g1->mark_in_progress() && !_last_full_young_gc) {
    assert(!last_pause_included_initial_mark, "invariant");
    if (cur_used_bytes > marking_initiating_used_threshold) {
      if (cur_used_bytes > _prev_collection_pause_used_at_end_bytes) {
        assert(!during_initial_mark_pause(), "we should not see this here");

        ergo_verbose3(ErgoConcCycles,
                      "request concurrent cycle initiation",
                      ergo_format_reason("occupancy higher than threshold")
                      ergo_format_byte("occupancy")
                      ergo_format_byte_perc("threshold"),
                      cur_used_bytes,
                      marking_initiating_used_threshold,
                      (double) InitiatingHeapOccupancyPercent);

        // Note: this might have already been set, if during the last
        // pause we decided to start a cycle but at the beginning of
        // this pause we decided to postpone it. That's OK.
        set_initiate_conc_mark_if_possible();
      } else {
        ergo_verbose2(ErgoConcCycles,
                  "do not request concurrent cycle initiation",
                  ergo_format_reason("occupancy lower than previous occupancy")
                  ergo_format_byte("occupancy")
                  ergo_format_byte("previous occupancy"),
                  cur_used_bytes,
                  _prev_collection_pause_used_at_end_bytes);
      }
    }
  }

  _prev_collection_pause_used_at_end_bytes = cur_used_bytes;
  1256   _mmu_tracker->add_pause(end_time_sec - elapsed_ms/1000.0,
  1257                           end_time_sec, false);
  1259   guarantee(_cur_collection_pause_used_regions_at_start >=
  1260             collection_set_size(),
  1261             "Negative RS size?");
  1263   // This assert is exempted when we're doing parallel collection pauses,
  1264   // because the fragmentation caused by the parallel GC allocation buffers
  1265   // can lead to more memory being used during collection than was used
  1266   // before. Best leave this out until the fragmentation problem is fixed.
  1267   // Pauses in which evacuation failed can also lead to negative
  1268   // collections, since no space is reclaimed from a region containing an
  1269   // object whose evacuation failed.
  1270   // Further, we're now always doing parallel collection.  But I'm still
  1271   // leaving this here as a placeholder for a more precise assertion later.
  1272   // (DLD, 10/05.)
  1273   assert((true || parallel) // Always using GC LABs now.
  1274          || _g1->evacuation_failed()
  1275          || _cur_collection_pause_used_at_start_bytes >= cur_used_bytes,
  1276          "Negative collection");
  1278   size_t freed_bytes =
  1279     _cur_collection_pause_used_at_start_bytes - cur_used_bytes;
  1280   size_t surviving_bytes = _collection_set_bytes_used_before - freed_bytes;
  1282   double survival_fraction =
  1283     (double)surviving_bytes/
  1284     (double)_collection_set_bytes_used_before;
  1286   _n_pauses++;
  1288   double ext_root_scan_time = avg_value(_par_last_ext_root_scan_times_ms);
  1289   double mark_stack_scan_time = avg_value(_par_last_mark_stack_scan_times_ms);
  1290   double update_rs_time = avg_value(_par_last_update_rs_times_ms);
  1291   double update_rs_processed_buffers =
  1292     sum_of_values(_par_last_update_rs_processed_buffers);
  1293   double scan_rs_time = avg_value(_par_last_scan_rs_times_ms);
  1294   double obj_copy_time = avg_value(_par_last_obj_copy_times_ms);
  1295   double termination_time = avg_value(_par_last_termination_times_ms);
  1297   double parallel_known_time = update_rs_time +
  1298                                ext_root_scan_time +
  1299                                mark_stack_scan_time +
  1300                                scan_rs_time +
  1301                                obj_copy_time +
  1302                                termination_time;
  1304   double parallel_other_time = _cur_collection_par_time_ms - parallel_known_time;
  1306   PauseSummary* summary = _summary;
  1308   if (update_stats) {
  1309     _recent_rs_scan_times_ms->add(scan_rs_time);
  1310     _recent_pause_times_ms->add(elapsed_ms);
  1311     _recent_rs_sizes->add(rs_size);
  1313     MainBodySummary* body_summary = summary->main_body_summary();
  1314     guarantee(body_summary != NULL, "should not be null!");
  1316     if (_satb_drain_time_set)
  1317       body_summary->record_satb_drain_time_ms(_cur_satb_drain_time_ms);
  1318     else
  1319       body_summary->record_satb_drain_time_ms(0.0);
  1321     body_summary->record_ext_root_scan_time_ms(ext_root_scan_time);
  1322     body_summary->record_mark_stack_scan_time_ms(mark_stack_scan_time);
  1323     body_summary->record_update_rs_time_ms(update_rs_time);
  1324     body_summary->record_scan_rs_time_ms(scan_rs_time);
  1325     body_summary->record_obj_copy_time_ms(obj_copy_time);
  1326     if (parallel) {
  1327       body_summary->record_parallel_time_ms(_cur_collection_par_time_ms);
  1328       body_summary->record_clear_ct_time_ms(_cur_clear_ct_time_ms);
  1329       body_summary->record_termination_time_ms(termination_time);
  1330       body_summary->record_parallel_other_time_ms(parallel_other_time);
  1331     }
  1332     body_summary->record_mark_closure_time_ms(_mark_closure_time_ms);
  1334     // We exempt parallel collection from this check because Alloc Buffer
  1335     // fragmentation can produce negative collections.  Same with evac
  1336     // failure.
  1337     // Further, we're now always doing parallel collection.  But I'm still
  1338     // leaving this here as a placeholder for a more precise assertion later.
  1339     // (DLD, 10/05.)
  1340     assert((true || parallel)
  1341            || _g1->evacuation_failed()
  1342            || surviving_bytes <= _collection_set_bytes_used_before,
  1343            "Or else negative collection!");
  1344     _recent_CS_bytes_used_before->add(_collection_set_bytes_used_before);
  1345     _recent_CS_bytes_surviving->add(surviving_bytes);
  1347     // this is where we update the allocation rate of the application
  1348     double app_time_ms =
  1349       (_cur_collection_start_sec * 1000.0 - _prev_collection_pause_end_ms);
  1350     if (app_time_ms < MIN_TIMER_GRANULARITY) {
  1351       // This usually happens due to the timer not having the required
  1352       // granularity. Some Linuxes are the usual culprits.
  1353       // We'll just set it to something (arbitrarily) small.
  1354       app_time_ms = 1.0;
  1355     }
  1356     size_t regions_allocated =
  1357       (_region_num_young - _prev_region_num_young) +
  1358       (_region_num_tenured - _prev_region_num_tenured);
  1359     double alloc_rate_ms = (double) regions_allocated / app_time_ms;
  1360     _alloc_rate_ms_seq->add(alloc_rate_ms);
  1361     _prev_region_num_young   = _region_num_young;
  1362     _prev_region_num_tenured = _region_num_tenured;
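       // The allocation rate is measured in regions per ms of mutator time
       // since the previous pause; e.g. 8 new regions over 400 ms of
       // application time gives alloc_rate_ms = 0.02.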
  1364     double interval_ms =
  1365       (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;
  1366     update_recent_gc_times(end_time_sec, elapsed_ms);
  1367     _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms;
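       // The ratio is recent GC time over the wall-clock span of the recent
       // history window, so it should lie in [0.0, 1.0]; e.g. 50 ms of GC
       // over a 1000 ms interval gives 0.05. The clipping below guards
       // against timer-granularity artifacts that push it out of range.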
  1368     if (recent_avg_pause_time_ratio() < 0.0 ||
  1369         (recent_avg_pause_time_ratio() - 1.0 > 0.0)) {
  1370 #ifndef PRODUCT
  1371       // Dump info to allow post-facto debugging
  1372       gclog_or_tty->print_cr("recent_avg_pause_time_ratio() out of bounds");
  1373       gclog_or_tty->print_cr("-------------------------------------------");
  1374       gclog_or_tty->print_cr("Recent GC Times (ms):");
  1375       _recent_gc_times_ms->dump();
  1376       gclog_or_tty->print_cr("(End Time=%3.3f) Recent GC End Times (s):", end_time_sec);
  1377       _recent_prev_end_times_for_all_gcs_sec->dump();
  1378       gclog_or_tty->print_cr("GC = %3.3f, Interval = %3.3f, Ratio = %3.3f",
  1379                              _recent_gc_times_ms->sum(), interval_ms, recent_avg_pause_time_ratio());
  1380       // In debug mode, terminate the JVM if the user wants to debug at this point.
  1381       assert(!G1FailOnFPError, "Debugging data for CR 6898948 has been dumped above");
  1382 #endif  // !PRODUCT
  1383       // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
  1384       // CR 6902692 by redoing the manner in which the ratio is incrementally computed.
  1385       if (_recent_avg_pause_time_ratio < 0.0) {
  1386         _recent_avg_pause_time_ratio = 0.0;
  1387       } else {
  1388         assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
  1389         _recent_avg_pause_time_ratio = 1.0;
  1394   if (G1PolicyVerbose > 1) {
  1395     gclog_or_tty->print_cr("   Recording collection pause(%d)", _n_pauses);
  1396   }
  1398   if (G1PolicyVerbose > 1) {
  1399     gclog_or_tty->print_cr("      ET: %10.6f ms           (avg: %10.6f ms)\n"
  1400                            "       ET-RS:  %10.6f ms      (avg: %10.6f ms)\n"
  1401                            "      |RS|: " SIZE_FORMAT,
  1402                            elapsed_ms, recent_avg_time_for_pauses_ms(),
  1403                            scan_rs_time, recent_avg_time_for_rs_scan_ms(),
  1404                            rs_size);
  1406     gclog_or_tty->print_cr("       Used at start: " SIZE_FORMAT"K"
  1407                            "       At end " SIZE_FORMAT "K\n"
  1408                            "       garbage      : " SIZE_FORMAT "K"
  1409                            "       of     " SIZE_FORMAT "K\n"
  1410                            "       survival     : %6.2f%%  (%6.2f%% avg)",
  1411                            _cur_collection_pause_used_at_start_bytes/K,
  1412                            _g1->used()/K, freed_bytes/K,
  1413                            _collection_set_bytes_used_before/K,
  1414                            survival_fraction*100.0,
  1415                            recent_avg_survival_fraction()*100.0);
  1416     gclog_or_tty->print_cr("       Recent %% gc pause time: %6.2f",
  1417                            recent_avg_pause_time_ratio() * 100.0);
  1418   }
  1420   double other_time_ms = elapsed_ms;
  1422   if (_satb_drain_time_set) {
  1423     other_time_ms -= _cur_satb_drain_time_ms;
  1424   }
  1426   if (parallel) {
  1427     other_time_ms -= _cur_collection_par_time_ms + _cur_clear_ct_time_ms;
  1428   } else {
  1429     other_time_ms -=
  1430       update_rs_time +
  1431       ext_root_scan_time + mark_stack_scan_time +
  1432       scan_rs_time + obj_copy_time;
  1433   }
  1435   if (PrintGCDetails) {
  1436     gclog_or_tty->print_cr("%s, %1.8lf secs]",
  1437                            (last_pause_included_initial_mark) ? " (initial-mark)" : "",
  1438                            elapsed_ms / 1000.0);
  1440     if (_satb_drain_time_set) {
  1441       print_stats(1, "SATB Drain Time", _cur_satb_drain_time_ms);
  1442     }
  1443     if (_last_satb_drain_processed_buffers >= 0) {
  1444       print_stats(2, "Processed Buffers", _last_satb_drain_processed_buffers);
  1445     }
  1446     if (parallel) {
  1447       print_stats(1, "Parallel Time", _cur_collection_par_time_ms);
  1448       print_par_stats(2, "GC Worker Start Time", _par_last_gc_worker_start_times_ms);
  1449       print_par_stats(2, "Update RS", _par_last_update_rs_times_ms);
  1450       print_par_sizes(3, "Processed Buffers", _par_last_update_rs_processed_buffers);
  1451       print_par_stats(2, "Ext Root Scanning", _par_last_ext_root_scan_times_ms);
  1452       print_par_stats(2, "Mark Stack Scanning", _par_last_mark_stack_scan_times_ms);
  1453       print_par_stats(2, "Scan RS", _par_last_scan_rs_times_ms);
  1454       print_par_stats(2, "Object Copy", _par_last_obj_copy_times_ms);
  1455       print_par_stats(2, "Termination", _par_last_termination_times_ms);
  1456       print_par_sizes(3, "Termination Attempts", _par_last_termination_attempts);
  1457       print_par_stats(2, "GC Worker End Time", _par_last_gc_worker_end_times_ms);
  1459       for (int i = 0; i < _parallel_gc_threads; i++) {
  1460         _par_last_gc_worker_times_ms[i] = _par_last_gc_worker_end_times_ms[i] - _par_last_gc_worker_start_times_ms[i];
  1461       }
  1462       print_par_stats(2, "GC Worker Times", _par_last_gc_worker_times_ms);
  1464       print_stats(2, "Parallel Other", parallel_other_time);
  1465       print_stats(1, "Clear CT", _cur_clear_ct_time_ms);
  1466     } else {
  1467       print_stats(1, "Update RS", update_rs_time);
  1468       print_stats(2, "Processed Buffers",
  1469                   (int)update_rs_processed_buffers);
  1470       print_stats(1, "Ext Root Scanning", ext_root_scan_time);
  1471       print_stats(1, "Mark Stack Scanning", mark_stack_scan_time);
  1472       print_stats(1, "Scan RS", scan_rs_time);
  1473       print_stats(1, "Object Copying", obj_copy_time);
  1474     }
  1475 #ifndef PRODUCT
  1476     print_stats(1, "Cur Clear CC", _cur_clear_cc_time_ms);
  1477     print_stats(1, "Cum Clear CC", _cum_clear_cc_time_ms);
  1478     print_stats(1, "Min Clear CC", _min_clear_cc_time_ms);
  1479     print_stats(1, "Max Clear CC", _max_clear_cc_time_ms);
  1480     if (_num_cc_clears > 0) {
  1481       print_stats(1, "Avg Clear CC", _cum_clear_cc_time_ms / ((double)_num_cc_clears));
  1482     }
  1483 #endif
  1484     print_stats(1, "Other", other_time_ms);
  1485     print_stats(2, "Choose CSet", _recorded_young_cset_choice_time_ms);
  1486     print_stats(2, "Ref Proc", _cur_ref_proc_time_ms);
  1487     print_stats(2, "Ref Enq", _cur_ref_enq_time_ms);
  1489     for (int i = 0; i < _aux_num; ++i) {
  1490       if (_cur_aux_times_set[i]) {
  1491         char buffer[96];
  1492         sprintf(buffer, "Aux%d", i);
  1493         print_stats(1, buffer, _cur_aux_times_ms[i]);
  1494       }
  1495     }
  1496   }
  1498   _all_pause_times_ms->add(elapsed_ms);
  1499   if (update_stats) {
  1500     summary->record_total_time_ms(elapsed_ms);
  1501     summary->record_other_time_ms(other_time_ms);
  1502   }
  1503   for (int i = 0; i < _aux_num; ++i)
  1504     if (_cur_aux_times_set[i])
  1505       _all_aux_times_ms[i].add(_cur_aux_times_ms[i]);
  1507   // Reset marks-between-pauses counter.
  1508   _n_marks_since_last_pause = 0;
  1510   // Update the efficiency-since-mark vars.
  1511   double proc_ms = elapsed_ms * (double) _parallel_gc_threads;
  1512   if (elapsed_ms < MIN_TIMER_GRANULARITY) {
  1513     // This usually happens due to the timer not having the required
  1514     // granularity. Some Linuxes are the usual culprits.
  1515     // We'll just set it to something (arbitrarily) small.
  1516     proc_ms = 1.0;
  1517   }
  1518   double cur_efficiency = (double) freed_bytes / proc_ms;
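       // GC efficiency: bytes reclaimed per ms of aggregate GC-thread time,
       // e.g. freeing 64 MB in a 20 ms pause with 4 workers (80 thread-ms)
       // is 0.8 MB per thread-ms. It drives the partially-young decisions below.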
  1520   bool new_in_marking_window = _in_marking_window;
  1521   bool new_in_marking_window_im = false;
  1522   if (during_initial_mark_pause()) {
  1523     new_in_marking_window = true;
  1524     new_in_marking_window_im = true;
  1525   }
  1527   if (_last_full_young_gc) {
  1528     if (!last_pause_included_initial_mark) {
  1529       ergo_verbose2(ErgoPartiallyYoungGCs,
  1530                     "start partially-young GCs",
  1531                     ergo_format_byte_perc("known garbage"),
  1532                     _known_garbage_bytes, _known_garbage_ratio * 100.0);
  1533       set_full_young_gcs(false);
  1534     } else {
  1535       ergo_verbose0(ErgoPartiallyYoungGCs,
  1536                     "do not start partially-young GCs",
  1537                     ergo_format_reason("concurrent cycle is about to start"));
  1538     }
  1539     _last_full_young_gc = false;
  1540   }
  1542   if (!_last_young_gc_full) {
  1543     if (_should_revert_to_full_young_gcs) {
  1544       ergo_verbose2(ErgoPartiallyYoungGCs,
  1545                     "end partially-young GCs",
  1546                     ergo_format_reason("partially-young GCs end requested")
  1547                     ergo_format_byte_perc("known garbage"),
  1548                     _known_garbage_bytes, _known_garbage_ratio * 100.0);
  1549       set_full_young_gcs(true);
  1550     } else if (_known_garbage_ratio < 0.05) {
  1551       ergo_verbose3(ErgoPartiallyYoungGCs,
  1552                "end partially-young GCs",
  1553                ergo_format_reason("known garbage percent lower than threshold")
  1554                ergo_format_byte_perc("known garbage")
  1555                ergo_format_perc("threshold"),
  1556                _known_garbage_bytes, _known_garbage_ratio * 100.0,
  1557                0.05 * 100.0);
  1558       set_full_young_gcs(true);
  1559     } else if (adaptive_young_list_length() &&
  1560               (get_gc_eff_factor() * cur_efficiency < predict_young_gc_eff())) {
  1561       ergo_verbose5(ErgoPartiallyYoungGCs,
  1562                     "end partially-young GCs",
  1563                     ergo_format_reason("current GC efficiency lower than "
  1564                                        "predicted fully-young GC efficiency")
  1565                     ergo_format_double("GC efficiency factor")
  1566                     ergo_format_double("current GC efficiency")
  1567                     ergo_format_double("predicted fully-young GC efficiency")
  1568                     ergo_format_byte_perc("known garbage"),
  1569                     get_gc_eff_factor(), cur_efficiency,
  1570                     predict_young_gc_eff(),
  1571                     _known_garbage_bytes, _known_garbage_ratio * 100.0);
  1572       set_full_young_gcs(true);
  1573     }
  1574   }
  1575   _should_revert_to_full_young_gcs = false;
  1577   if (_last_young_gc_full && !_during_marking) {
  1578     _young_gc_eff_seq->add(cur_efficiency);
  1579   }
  1581   _short_lived_surv_rate_group->start_adding_regions();
  1582   // do that for any other surv rate groups
  1584   // <NEW PREDICTION>
  1586   if (update_stats) {
  1587     double pause_time_ms = elapsed_ms;
  1589     size_t diff = 0;
  1590     if (_max_pending_cards >= _pending_cards)
  1591       diff = _max_pending_cards - _pending_cards;
  1592     _pending_card_diff_seq->add((double) diff);
  1594     double cost_per_card_ms = 0.0;
  1595     if (_pending_cards > 0) {
  1596       cost_per_card_ms = update_rs_time / (double) _pending_cards;
  1597       _cost_per_card_ms_seq->add(cost_per_card_ms);
  1598     }
  1600     size_t cards_scanned = _g1->cards_scanned();
  1602     double cost_per_entry_ms = 0.0;
  1603     if (cards_scanned > 10) {
  1604       cost_per_entry_ms = scan_rs_time / (double) cards_scanned;
  1605       if (_last_young_gc_full)
  1606         _cost_per_entry_ms_seq->add(cost_per_entry_ms);
  1607       else
  1608         _partially_young_cost_per_entry_ms_seq->add(cost_per_entry_ms);
  1609     }
  1611     if (_max_rs_lengths > 0) {
  1612       double cards_per_entry_ratio =
  1613         (double) cards_scanned / (double) _max_rs_lengths;
  1614       if (_last_young_gc_full)
  1615         _fully_young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
  1616       else
  1617         _partially_young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
  1618     }
  1620     // Note: both operands are size_t, so guard the subtraction itself
  1621     if (_max_rs_lengths >= _recorded_rs_lengths)
  1622       _rs_length_diff_seq->add((double) (_max_rs_lengths - _recorded_rs_lengths));
  1624     size_t copied_bytes = surviving_bytes;
  1625     double cost_per_byte_ms = 0.0;
  1626     if (copied_bytes > 0) {
  1627       cost_per_byte_ms = obj_copy_time / (double) copied_bytes;
  1628       if (_in_marking_window)
  1629         _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
  1630       else
  1631         _cost_per_byte_ms_seq->add(cost_per_byte_ms);
  1632     }
  1634     double all_other_time_ms = pause_time_ms -
  1635       (update_rs_time + scan_rs_time + obj_copy_time +
  1636        _mark_closure_time_ms + termination_time);
  1638     double young_other_time_ms = 0.0;
  1639     if (_recorded_young_regions > 0) {
  1640       young_other_time_ms =
  1641         _recorded_young_cset_choice_time_ms +
  1642         _recorded_young_free_cset_time_ms;
  1643       _young_other_cost_per_region_ms_seq->add(young_other_time_ms /
  1644                                              (double) _recorded_young_regions);
  1645     }
  1646     double non_young_other_time_ms = 0.0;
  1647     if (_recorded_non_young_regions > 0) {
  1648       non_young_other_time_ms =
  1649         _recorded_non_young_cset_choice_time_ms +
  1650         _recorded_non_young_free_cset_time_ms;
  1652       _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms /
  1653                                          (double) _recorded_non_young_regions);
  1654     }
  1656     double constant_other_time_ms = all_other_time_ms -
  1657       (young_other_time_ms + non_young_other_time_ms);
  1658     _constant_other_time_ms_seq->add(constant_other_time_ms);
  1660     double survival_ratio = 0.0;
  1661     if (_bytes_in_collection_set_before_gc > 0) {
  1662       survival_ratio = (double) _bytes_copied_during_gc /
  1663                                    (double) _bytes_in_collection_set_before_gc;
  1664     }
  1666     _pending_cards_seq->add((double) _pending_cards);
  1667     _scanned_cards_seq->add((double) cards_scanned);
  1668     _rs_lengths_seq->add((double) _max_rs_lengths);
  1670     double expensive_region_limit_ms =
  1671       (double) MaxGCPauseMillis - predict_constant_other_time_ms();
  1672     if (expensive_region_limit_ms < 0.0) {
  1673       // this means that the other time was predicted to be longer
  1674       // than the max pause time
  1675       expensive_region_limit_ms = (double) MaxGCPauseMillis;
  1676     }
  1677     _expensive_region_limit_ms = expensive_region_limit_ms;
  1679     if (PREDICTIONS_VERBOSE) {
  1680       gclog_or_tty->print_cr("");
  1681       gclog_or_tty->print_cr("PREDICTIONS %1.4lf %d "
  1682                     "REGIONS %d %d %d "
  1683                     "PENDING_CARDS %d %d "
  1684                     "CARDS_SCANNED %d %d "
  1685                     "RS_LENGTHS %d %d "
  1686                     "RS_UPDATE %1.6lf %1.6lf RS_SCAN %1.6lf %1.6lf "
  1687                     "SURVIVAL_RATIO %1.6lf %1.6lf "
  1688                     "OBJECT_COPY %1.6lf %1.6lf OTHER_CONSTANT %1.6lf %1.6lf "
  1689                     "OTHER_YOUNG %1.6lf %1.6lf "
  1690                     "OTHER_NON_YOUNG %1.6lf %1.6lf "
  1691                     "VTIME_DIFF %1.6lf TERMINATION %1.6lf "
  1692                     "ELAPSED %1.6lf %1.6lf ",
  1693                     _cur_collection_start_sec,
  1694                     (!_last_young_gc_full) ? 2 :
  1695                     (last_pause_included_initial_mark) ? 1 : 0,
  1696                     _recorded_region_num,
  1697                     _recorded_young_regions,
  1698                     _recorded_non_young_regions,
  1699                     _predicted_pending_cards, _pending_cards,
  1700                     _predicted_cards_scanned, cards_scanned,
  1701                     _predicted_rs_lengths, _max_rs_lengths,
  1702                     _predicted_rs_update_time_ms, update_rs_time,
  1703                     _predicted_rs_scan_time_ms, scan_rs_time,
  1704                     _predicted_survival_ratio, survival_ratio,
  1705                     _predicted_object_copy_time_ms, obj_copy_time,
  1706                     _predicted_constant_other_time_ms, constant_other_time_ms,
  1707                     _predicted_young_other_time_ms, young_other_time_ms,
  1708                     _predicted_non_young_other_time_ms,
  1709                     non_young_other_time_ms,
  1710                     _vtime_diff_ms, termination_time,
  1711                     _predicted_pause_time_ms, elapsed_ms);
  1712     }
  1714     if (G1PolicyVerbose > 0) {
  1715       gclog_or_tty->print_cr("Pause Time, predicted: %1.4lfms (predicted %s), actual: %1.4lfms",
  1716                     _predicted_pause_time_ms,
  1717                     (_within_target) ? "within" : "outside",
  1718                     elapsed_ms);
  1719     }
  1721   }
  1723   _in_marking_window = new_in_marking_window;
  1724   _in_marking_window_im = new_in_marking_window_im;
  1725   _free_regions_at_end_of_collection = _g1->free_regions();
  1726   update_young_list_target_length();
  1728   // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
  1729   double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
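       // For illustration: a 200 ms allowed GC time and a 10% RS-update
       // budget (G1RSetUpdatingPauseTimePercent) give a 20 ms goal.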
  1730   adjust_concurrent_refinement(update_rs_time, update_rs_processed_buffers, update_rs_time_goal_ms);
  1731   // </NEW PREDICTION>
  1732 }
  1734 #define EXT_SIZE_FORMAT "%d%s"
  1735 #define EXT_SIZE_PARAMS(bytes)                                  \
  1736   byte_size_in_proper_unit((bytes)),                            \
  1737   proper_unit_for_byte_size((bytes))
  1739 void G1CollectorPolicy::print_heap_transition() {
  1740   if (PrintGCDetails) {
  1741     YoungList* young_list = _g1->young_list();
  1742     size_t eden_bytes = young_list->eden_used_bytes();
  1743     size_t survivor_bytes = young_list->survivor_used_bytes();
  1744     size_t used_before_gc = _cur_collection_pause_used_at_start_bytes;
  1745     size_t used = _g1->used();
  1746     size_t capacity = _g1->capacity();
  1747     size_t eden_capacity =
  1748       (_young_list_target_length * HeapRegion::GrainBytes) - survivor_bytes;
  1750     gclog_or_tty->print_cr(
  1751       "   [Eden: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT") "
  1752       "Survivors: "EXT_SIZE_FORMAT"->"EXT_SIZE_FORMAT" "
  1753       "Heap: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"
  1754       EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")]",
  1755       EXT_SIZE_PARAMS(_eden_bytes_before_gc),
  1756       EXT_SIZE_PARAMS(_prev_eden_capacity),
  1757       EXT_SIZE_PARAMS(eden_bytes),
  1758       EXT_SIZE_PARAMS(eden_capacity),
  1759       EXT_SIZE_PARAMS(_survivor_bytes_before_gc),
  1760       EXT_SIZE_PARAMS(survivor_bytes),
  1761       EXT_SIZE_PARAMS(used_before_gc),
  1762       EXT_SIZE_PARAMS(_capacity_before_gc),
  1763       EXT_SIZE_PARAMS(used),
  1764       EXT_SIZE_PARAMS(capacity));
  1766     _prev_eden_capacity = eden_capacity;
  1767   } else if (PrintGC) {
  1768     _g1->print_size_transition(gclog_or_tty,
  1769                                _cur_collection_pause_used_at_start_bytes,
  1770                                _g1->used(), _g1->capacity());
  1771   }
  1772 }
  1774 // <NEW PREDICTION>
  1776 void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
  1777                                                      double update_rs_processed_buffers,
  1778                                                      double goal_ms) {
  1779   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  1780   ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();
  1782   if (G1UseAdaptiveConcRefinement) {
  1783     const int k_gy = 3, k_gr = 6;
  1784     const double inc_k = 1.1, dec_k = 0.9;
  1786     int g = cg1r->green_zone();
  1787     if (update_rs_time > goal_ms) {
  1788       g = (int)(g * dec_k);  // Can become 0, that's OK. That would mean a mutator-only processing.
  1789     } else {
  1790       if (update_rs_time < goal_ms && update_rs_processed_buffers > g) {
  1791         g = (int)MAX2(g * inc_k, g + 1.0);
  1792       }
  1793     }
  1794     // Change the refinement threads params
  1795     cg1r->set_green_zone(g);
  1796     cg1r->set_yellow_zone(g * k_gy);
  1797     cg1r->set_red_zone(g * k_gr);
  1798     cg1r->reinitialize_threads();
  1800     int processing_threshold_delta = MAX2((int)(cg1r->green_zone() * sigma()), 1);
  1801     int processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta,
  1802                                     cg1r->yellow_zone());
  1803     // Change the barrier params
  1804     dcqs.set_process_completed_threshold(processing_threshold);
  1805     dcqs.set_max_completed_queue(cg1r->red_zone());
  1806   }
  1808   int curr_queue_size = dcqs.completed_buffers_num();
  1809   if (curr_queue_size >= cg1r->yellow_zone()) {
  1810     dcqs.set_completed_queue_padding(curr_queue_size);
  1811   } else {
  1812     dcqs.set_completed_queue_padding(0);
  1813   }
  1814   dcqs.notify_if_necessary();
  1815 }
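       // Worked example of the zones above (illustrative numbers): with
       // green_zone = 10 and update_rs_time over goal_ms, green shrinks to
       // (int)(10 * 0.9) = 9, so yellow = 27 and red = 54; under goal with
       // more than 10 buffers processed, green grows to
       // MAX2(10 * 1.1, 10 + 1.0) = 11.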
  1817 double
  1818 G1CollectorPolicy::
  1819 predict_young_collection_elapsed_time_ms(size_t adjustment) {
  1820   guarantee( adjustment == 0 || adjustment == 1, "invariant" );
  1822   G1CollectedHeap* g1h = G1CollectedHeap::heap();
  1823   size_t young_num = g1h->young_list()->length();
  1824   if (young_num == 0)
  1825     return 0.0;
  1827   young_num += adjustment;
  1828   size_t pending_cards = predict_pending_cards();
  1829   size_t rs_lengths = g1h->young_list()->sampled_rs_lengths() +
  1830                       predict_rs_length_diff();
  1831   size_t card_num;
  1832   if (full_young_gcs())
  1833     card_num = predict_young_card_num(rs_lengths);
  1834   else
  1835     card_num = predict_non_young_card_num(rs_lengths);
  1836   size_t young_byte_size = young_num * HeapRegion::GrainBytes;
  1837   double accum_yg_surv_rate =
  1838     _short_lived_surv_rate_group->accum_surv_rate(adjustment);
  1840   size_t bytes_to_copy =
  1841     (size_t) (accum_yg_surv_rate * (double) HeapRegion::GrainBytes);
  1843   return
  1844     predict_rs_update_time_ms(pending_cards) +
  1845     predict_rs_scan_time_ms(card_num) +
  1846     predict_object_copy_time_ms(bytes_to_copy) +
  1847     predict_young_other_time_ms(young_num) +
  1848     predict_constant_other_time_ms();
  1849 }
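       // The prediction above decomposes the pause as: RS update (pending
       // cards) + RS scan (cards behind the young RS lengths) + object copy
       // (predicted survivor bytes) + per-young-region bookkeeping + a
       // constant per-pause overhead.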
  1851 double
  1852 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) {
  1853   size_t rs_length = predict_rs_length_diff();
  1854   size_t card_num;
  1855   if (full_young_gcs())
  1856     card_num = predict_young_card_num(rs_length);
  1857   else
  1858     card_num = predict_non_young_card_num(rs_length);
  1859   return predict_base_elapsed_time_ms(pending_cards, card_num);
  1860 }
  1862 double
  1863 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
  1864                                                 size_t scanned_cards) {
  1865   return
  1866     predict_rs_update_time_ms(pending_cards) +
  1867     predict_rs_scan_time_ms(scanned_cards) +
  1868     predict_constant_other_time_ms();
  1869 }
  1871 double
  1872 G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
  1873                                                   bool young) {
  1874   size_t rs_length = hr->rem_set()->occupied();
  1875   size_t card_num;
  1876   if (full_young_gcs())
  1877     card_num = predict_young_card_num(rs_length);
  1878   else
  1879     card_num = predict_non_young_card_num(rs_length);
  1880   size_t bytes_to_copy = predict_bytes_to_copy(hr);
  1882   double region_elapsed_time_ms =
  1883     predict_rs_scan_time_ms(card_num) +
  1884     predict_object_copy_time_ms(bytes_to_copy);
  1886   if (young)
  1887     region_elapsed_time_ms += predict_young_other_time_ms(1);
  1888   else
  1889     region_elapsed_time_ms += predict_non_young_other_time_ms(1);
  1891   return region_elapsed_time_ms;
  1892 }
  1894 size_t
  1895 G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
  1896   size_t bytes_to_copy;
  1897   if (hr->is_marked())
  1898     bytes_to_copy = hr->max_live_bytes();
  1899   else {
  1900     guarantee( hr->is_young() && hr->age_in_surv_rate_group() != -1,
  1901                "invariant" );
  1902     int age = hr->age_in_surv_rate_group();
  1903     double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
  1904     bytes_to_copy = (size_t) ((double) hr->used() * yg_surv_rate);
  1905   }
  1907   return bytes_to_copy;
  1908 }
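       // Marked regions have exact liveness information, so max_live_bytes()
       // bounds the copy cost; unmarked young regions instead scale used()
       // by the age-indexed survival-rate prediction.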
  1910 void
  1911 G1CollectorPolicy::start_recording_regions() {
  1912   _recorded_rs_lengths            = 0;
  1913   _recorded_young_regions         = 0;
  1914   _recorded_non_young_regions     = 0;
  1916 #if PREDICTIONS_VERBOSE
  1917   _recorded_marked_bytes          = 0;
  1918   _recorded_young_bytes           = 0;
  1919   _predicted_bytes_to_copy        = 0;
  1920   _predicted_rs_lengths           = 0;
  1921   _predicted_cards_scanned        = 0;
  1922 #endif // PREDICTIONS_VERBOSE
  1923 }
  1925 void
  1926 G1CollectorPolicy::record_cset_region_info(HeapRegion* hr, bool young) {
  1927 #if PREDICTIONS_VERBOSE
  1928   if (!young) {
  1929     _recorded_marked_bytes += hr->max_live_bytes();
  1930   }
  1931   _predicted_bytes_to_copy += predict_bytes_to_copy(hr);
  1932 #endif // PREDICTIONS_VERBOSE
  1934   size_t rs_length = hr->rem_set()->occupied();
  1935   _recorded_rs_lengths += rs_length;
  1936 }
  1938 void
  1939 G1CollectorPolicy::record_non_young_cset_region(HeapRegion* hr) {
  1940   assert(!hr->is_young(), "should not call this");
  1941   ++_recorded_non_young_regions;
  1942   record_cset_region_info(hr, false);
  1943 }
  1945 void
  1946 G1CollectorPolicy::set_recorded_young_regions(size_t n_regions) {
  1947   _recorded_young_regions = n_regions;
  1948 }
  1950 void G1CollectorPolicy::set_recorded_young_bytes(size_t bytes) {
  1951 #if PREDICTIONS_VERBOSE
  1952   _recorded_young_bytes = bytes;
  1953 #endif // PREDICTIONS_VERBOSE
  1954 }
  1956 void G1CollectorPolicy::set_recorded_rs_lengths(size_t rs_lengths) {
  1957   _recorded_rs_lengths = rs_lengths;
  1958 }
  1960 void G1CollectorPolicy::set_predicted_bytes_to_copy(size_t bytes) {
  1961   _predicted_bytes_to_copy = bytes;
  1962 }
  1964 void
  1965 G1CollectorPolicy::end_recording_regions() {
  1966   // The _predicted_pause_time_ms field is referenced in code
  1967   // not under PREDICTIONS_VERBOSE. Let's initialize it.
  1968   _predicted_pause_time_ms = -1.0;
  1970 #if PREDICTIONS_VERBOSE
  1971   _predicted_pending_cards = predict_pending_cards();
  1972   _predicted_rs_lengths = _recorded_rs_lengths + predict_rs_length_diff();
  1973   if (full_young_gcs())
  1974     _predicted_cards_scanned += predict_young_card_num(_predicted_rs_lengths);
  1975   else
  1976     _predicted_cards_scanned +=
  1977       predict_non_young_card_num(_predicted_rs_lengths);
  1978   _recorded_region_num = _recorded_young_regions + _recorded_non_young_regions;
  1980   _predicted_rs_update_time_ms =
  1981     predict_rs_update_time_ms(_g1->pending_card_num());
  1982   _predicted_rs_scan_time_ms =
  1983     predict_rs_scan_time_ms(_predicted_cards_scanned);
  1984   _predicted_object_copy_time_ms =
  1985     predict_object_copy_time_ms(_predicted_bytes_to_copy);
  1986   _predicted_constant_other_time_ms =
  1987     predict_constant_other_time_ms();
  1988   _predicted_young_other_time_ms =
  1989     predict_young_other_time_ms(_recorded_young_regions);
  1990   _predicted_non_young_other_time_ms =
  1991     predict_non_young_other_time_ms(_recorded_non_young_regions);
  1993   _predicted_pause_time_ms =
  1994     _predicted_rs_update_time_ms +
  1995     _predicted_rs_scan_time_ms +
  1996     _predicted_object_copy_time_ms +
  1997     _predicted_constant_other_time_ms +
  1998     _predicted_young_other_time_ms +
  1999     _predicted_non_young_other_time_ms;
  2000 #endif // PREDICTIONS_VERBOSE
  2001 }
  2003 void
  2004 G1CollectorPolicy::check_if_region_is_too_expensive(double predicted_time_ms) {
  2005   // I don't think we need to do this when in young GC mode since
  2006   // marking will be initiated next time we hit the soft limit anyway...
  2007   if (predicted_time_ms > _expensive_region_limit_ms) {
  2008     ergo_verbose2(ErgoPartiallyYoungGCs,
  2009               "request partially-young GCs end",
  2010               ergo_format_reason("predicted region time higher than threshold")
  2011               ergo_format_ms("predicted region time")
  2012               ergo_format_ms("threshold"),
  2013               predicted_time_ms, _expensive_region_limit_ms);
  2014     // no point in doing another partial one
  2015     _should_revert_to_full_young_gcs = true;
  2016   }
  2017 }
  2019 // </NEW PREDICTION>
  2022 void G1CollectorPolicy::update_recent_gc_times(double end_time_sec,
  2023                                                double elapsed_ms) {
  2024   _recent_gc_times_ms->add(elapsed_ms);
  2025   _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec);
  2026   _prev_collection_pause_end_ms = end_time_sec * 1000.0;
  2027 }
  2029 double G1CollectorPolicy::recent_avg_time_for_pauses_ms() {
  2030   if (_recent_pause_times_ms->num() == 0) {
  2031     return (double) MaxGCPauseMillis;
  2032   }
  2033   return _recent_pause_times_ms->avg();
  2034 }
  2036 double G1CollectorPolicy::recent_avg_time_for_rs_scan_ms() {
  2037   if (_recent_rs_scan_times_ms->num() == 0) {
  2038     return (double)MaxGCPauseMillis/3.0;
  2039   }
  2040   return _recent_rs_scan_times_ms->avg();
  2041 }
  2043 int G1CollectorPolicy::number_of_recent_gcs() {
  2044   assert(_recent_rs_scan_times_ms->num() ==
  2045          _recent_pause_times_ms->num(), "Sequence out of sync");
  2046   assert(_recent_pause_times_ms->num() ==
  2047          _recent_CS_bytes_used_before->num(), "Sequence out of sync");
  2048   assert(_recent_CS_bytes_used_before->num() ==
  2049          _recent_CS_bytes_surviving->num(), "Sequence out of sync");
  2051   return _recent_pause_times_ms->num();
  2052 }
  2054 double G1CollectorPolicy::recent_avg_survival_fraction() {
  2055   return recent_avg_survival_fraction_work(_recent_CS_bytes_surviving,
  2056                                            _recent_CS_bytes_used_before);
  2057 }
  2059 double G1CollectorPolicy::last_survival_fraction() {
  2060   return last_survival_fraction_work(_recent_CS_bytes_surviving,
  2061                                      _recent_CS_bytes_used_before);
  2062 }
  2064 double
  2065 G1CollectorPolicy::recent_avg_survival_fraction_work(TruncatedSeq* surviving,
  2066                                                      TruncatedSeq* before) {
  2067   assert(surviving->num() == before->num(), "Sequence out of sync");
  2068   if (before->sum() > 0.0) {
  2069       double recent_survival_rate = surviving->sum() / before->sum();
  2070       // We exempt parallel collection from this check because Alloc Buffer
  2071       // fragmentation can produce negative collections.
  2072       // Further, we're now always doing parallel collection.  But I'm still
  2073       // leaving this here as a placeholder for a more precise assertion later.
  2074       // (DLD, 10/05.)
  2075       assert((true || G1CollectedHeap::use_parallel_gc_threads()) ||
  2076              _g1->evacuation_failed() ||
  2077              recent_survival_rate <= 1.0, "Or bad frac");
  2078       return recent_survival_rate;
  2079   } else {
  2080     return 1.0; // Be conservative.
  2081   }
  2082 }
  2084 double
  2085 G1CollectorPolicy::last_survival_fraction_work(TruncatedSeq* surviving,
  2086                                                TruncatedSeq* before) {
  2087   assert(surviving->num() == before->num(), "Sequence out of sync");
  2088   if (surviving->num() > 0 && before->last() > 0.0) {
  2089     double last_survival_rate = surviving->last() / before->last();
  2090     // We exempt parallel collection from this check because Alloc Buffer
  2091     // fragmentation can produce negative collections.
  2092     // Further, we're now always doing parallel collection.  But I'm still
  2093     // leaving this here as a placeholder for a more precise assertion later.
  2094     // (DLD, 10/05.)
  2095     assert((true || G1CollectedHeap::use_parallel_gc_threads()) ||
  2096            last_survival_rate <= 1.0, "Or bad frac");
  2097     return last_survival_rate;
  2098   } else {
  2099     return 1.0;
  2100   }
  2101 }
  2103 static const int survival_min_obs = 5;
  2104 static double survival_min_obs_limits[] = { 0.9, 0.7, 0.5, 0.3, 0.1 };
  2105 static const double min_survival_rate = 0.1;
  2107 double
  2108 G1CollectorPolicy::conservative_avg_survival_fraction_work(double avg,
  2109                                                            double latest) {
  2110   double res = avg;
  2111   if (number_of_recent_gcs() < survival_min_obs) {
  2112     res = MAX2(res, survival_min_obs_limits[number_of_recent_gcs()]);
  2113   }
  2114   res = MAX2(res, latest);
  2115   res = MAX2(res, min_survival_rate);
  2116   // In the parallel case, LAB fragmentation can produce "negative
  2117   // collections"; so can evac failure.  Cap at 1.0
  2118   res = MIN2(res, 1.0);
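       // Net effect: max(avg, latest, min_survival_rate), raised to a floor
       // from survival_min_obs_limits while fewer than 5 GCs have been
       // observed, and capped at 1.0.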
  2119   return res;
  2120 }
  2122 size_t G1CollectorPolicy::expansion_amount() {
  2123   double recent_gc_overhead = recent_avg_pause_time_ratio() * 100.0;
  2124   double threshold = _gc_overhead_perc;
  2125   if (recent_gc_overhead > threshold) {
  2126     // We will double the existing space, or take
  2127     // G1ExpandByPercentOfAvailable % of the available expansion
  2128     // space, whichever is smaller, bounded below by a minimum
  2129     // expansion (unless that's all that's left.)
  2130     const size_t min_expand_bytes = 1*M;
  2131     size_t reserved_bytes = _g1->max_capacity();
  2132     size_t committed_bytes = _g1->capacity();
  2133     size_t uncommitted_bytes = reserved_bytes - committed_bytes;
  2134     size_t expand_bytes;
  2135     size_t expand_bytes_via_pct =
  2136       uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
  2137     expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
  2138     expand_bytes = MAX2(expand_bytes, min_expand_bytes);
  2139     expand_bytes = MIN2(expand_bytes, uncommitted_bytes);
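       // Illustrative numbers: 2 GB reserved with 1 GB committed and the
       // default G1ExpandByPercentOfAvailable of 20 give roughly 205 MB via
       // the percentage; after the clamps above we expand by that amount.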
  2141     ergo_verbose5(ErgoHeapSizing,
  2142                   "attempt heap expansion",
  2143                   ergo_format_reason("recent GC overhead higher than "
  2144                                      "threshold after GC")
  2145                   ergo_format_perc("recent GC overhead")
  2146                   ergo_format_perc("threshold")
  2147                   ergo_format_byte("uncommitted")
  2148                   ergo_format_byte_perc("calculated expansion amount"),
  2149                   recent_gc_overhead, threshold,
  2150                   uncommitted_bytes,
  2151                   expand_bytes_via_pct, (double) G1ExpandByPercentOfAvailable);
  2153     return expand_bytes;
  2154   } else {
  2155     return 0;
  2159 void G1CollectorPolicy::note_start_of_mark_thread() {
  2160   _mark_thread_startup_sec = os::elapsedTime();
  2163 class CountCSClosure: public HeapRegionClosure {
  2164   G1CollectorPolicy* _g1_policy;
  2165 public:
  2166   CountCSClosure(G1CollectorPolicy* g1_policy) :
  2167     _g1_policy(g1_policy) {}
  2168   bool doHeapRegion(HeapRegion* r) {
  2169     _g1_policy->_bytes_in_collection_set_before_gc += r->used();
  2170     return false;
  2171   }
  2172 };
  2174 void G1CollectorPolicy::count_CS_bytes_used() {
  2175   CountCSClosure cs_closure(this);
  2176   _g1->collection_set_iterate(&cs_closure);
  2177 }
  2179 void G1CollectorPolicy::print_summary (int level,
  2180                                        const char* str,
  2181                                        NumberSeq* seq) const {
  2182   double sum = seq->sum();
  2183   LineBuffer(level + 1).append_and_print_cr("%-24s = %8.2lf s (avg = %8.2lf ms)",
  2184                 str, sum / 1000.0, seq->avg());
  2185 }
  2187 void G1CollectorPolicy::print_summary_sd (int level,
  2188                                           const char* str,
  2189                                           NumberSeq* seq) const {
  2190   print_summary(level, str, seq);
  2191   LineBuffer(level + 6).append_and_print_cr("(num = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
  2192                 seq->num(), seq->sd(), seq->maximum());
  2193 }
  2195 void G1CollectorPolicy::check_other_times(int level,
  2196                                         NumberSeq* other_times_ms,
  2197                                         NumberSeq* calc_other_times_ms) const {
  2198   bool should_print = false;
  2199   LineBuffer buf(level + 2);
  2201   double max_sum = MAX2(fabs(other_times_ms->sum()),
  2202                         fabs(calc_other_times_ms->sum()));
  2203   double min_sum = MIN2(fabs(other_times_ms->sum()),
  2204                         fabs(calc_other_times_ms->sum()));
  2205   double sum_ratio = max_sum / min_sum;
  2206   if (sum_ratio > 1.1) {
  2207     should_print = true;
  2208     buf.append_and_print_cr("## CALCULATED OTHER SUM DOESN'T MATCH RECORDED ###");
  2209   }
  2211   double max_avg = MAX2(fabs(other_times_ms->avg()),
  2212                         fabs(calc_other_times_ms->avg()));
  2213   double min_avg = MIN2(fabs(other_times_ms->avg()),
  2214                         fabs(calc_other_times_ms->avg()));
  2215   double avg_ratio = max_avg / min_avg;
  2216   if (avg_ratio > 1.1) {
  2217     should_print = true;
  2218     buf.append_and_print_cr("## CALCULATED OTHER AVG DOESN'T MATCH RECORDED ###");
  2219   }
  2221   if (other_times_ms->sum() < -0.01) {
  2222     buf.append_and_print_cr("## RECORDED OTHER SUM IS NEGATIVE ###");
  2223   }
  2225   if (other_times_ms->avg() < -0.01) {
  2226     buf.append_and_print_cr("## RECORDED OTHER AVG IS NEGATIVE ###");
  2227   }
  2229   if (calc_other_times_ms->sum() < -0.01) {
  2230     should_print = true;
  2231     buf.append_and_print_cr("## CALCULATED OTHER SUM IS NEGATIVE ###");
  2232   }
  2234   if (calc_other_times_ms->avg() < -0.01) {
  2235     should_print = true;
  2236     buf.append_and_print_cr("## CALCULATED OTHER AVG IS NEGATIVE ###");
  2237   }
  2239   if (should_print)
  2240     print_summary(level, "Other(Calc)", calc_other_times_ms);
  2241 }
  2243 void G1CollectorPolicy::print_summary(PauseSummary* summary) const {
  2244   bool parallel = G1CollectedHeap::use_parallel_gc_threads();
  2245   MainBodySummary*    body_summary = summary->main_body_summary();
  2246   if (summary->get_total_seq()->num() > 0) {
  2247     print_summary_sd(0, "Evacuation Pauses", summary->get_total_seq());
  2248     if (body_summary != NULL) {
  2249       print_summary(1, "SATB Drain", body_summary->get_satb_drain_seq());
  2250       if (parallel) {
  2251         print_summary(1, "Parallel Time", body_summary->get_parallel_seq());
  2252         print_summary(2, "Update RS", body_summary->get_update_rs_seq());
  2253         print_summary(2, "Ext Root Scanning",
  2254                       body_summary->get_ext_root_scan_seq());
  2255         print_summary(2, "Mark Stack Scanning",
  2256                       body_summary->get_mark_stack_scan_seq());
  2257         print_summary(2, "Scan RS", body_summary->get_scan_rs_seq());
  2258         print_summary(2, "Object Copy", body_summary->get_obj_copy_seq());
  2259         print_summary(2, "Termination", body_summary->get_termination_seq());
  2260         print_summary(2, "Other", body_summary->get_parallel_other_seq());
  2261         {
  2262           NumberSeq* other_parts[] = {
  2263             body_summary->get_update_rs_seq(),
  2264             body_summary->get_ext_root_scan_seq(),
  2265             body_summary->get_mark_stack_scan_seq(),
  2266             body_summary->get_scan_rs_seq(),
  2267             body_summary->get_obj_copy_seq(),
  2268             body_summary->get_termination_seq()
  2269           };
  2270           NumberSeq calc_other_times_ms(body_summary->get_parallel_seq(),
  2271                                         6, other_parts);
  2272           check_other_times(2, body_summary->get_parallel_other_seq(),
  2273                             &calc_other_times_ms);
  2274         }
  2275         print_summary(1, "Mark Closure", body_summary->get_mark_closure_seq());
  2276         print_summary(1, "Clear CT", body_summary->get_clear_ct_seq());
  2277       } else {
  2278         print_summary(1, "Update RS", body_summary->get_update_rs_seq());
  2279         print_summary(1, "Ext Root Scanning",
  2280                       body_summary->get_ext_root_scan_seq());
  2281         print_summary(1, "Mark Stack Scanning",
  2282                       body_summary->get_mark_stack_scan_seq());
  2283         print_summary(1, "Scan RS", body_summary->get_scan_rs_seq());
  2284         print_summary(1, "Object Copy", body_summary->get_obj_copy_seq());
  2285       }
  2286     }
  2287     print_summary(1, "Other", summary->get_other_seq());
  2288     {
  2289       if (body_summary != NULL) {
  2290         NumberSeq calc_other_times_ms;
  2291         if (parallel) {
  2292           // parallel
  2293           NumberSeq* other_parts[] = {
  2294             body_summary->get_satb_drain_seq(),
  2295             body_summary->get_parallel_seq(),
  2296             body_summary->get_clear_ct_seq()
  2297           };
  2298           calc_other_times_ms = NumberSeq(summary->get_total_seq(),
  2299                                                 3, other_parts);
  2300         } else {
  2301           // serial
  2302           NumberSeq* other_parts[] = {
  2303             body_summary->get_satb_drain_seq(),
  2304             body_summary->get_update_rs_seq(),
  2305             body_summary->get_ext_root_scan_seq(),
  2306             body_summary->get_mark_stack_scan_seq(),
  2307             body_summary->get_scan_rs_seq(),
  2308             body_summary->get_obj_copy_seq()
  2309           };
  2310           calc_other_times_ms = NumberSeq(summary->get_total_seq(),
  2311                                                 6, other_parts);
  2312         }
  2313         check_other_times(1, summary->get_other_seq(), &calc_other_times_ms);
  2314       }
  2315     }
  2316   } else {
  2317     LineBuffer(1).append_and_print_cr("none");
  2318   }
  2319   LineBuffer(0).append_and_print_cr("");
  2320 }
  2322 void G1CollectorPolicy::print_tracing_info() const {
  2323   if (TraceGen0Time) {
  2324     gclog_or_tty->print_cr("ALL PAUSES");
  2325     print_summary_sd(0, "Total", _all_pause_times_ms);
  2326     gclog_or_tty->print_cr("");
  2327     gclog_or_tty->print_cr("");
  2328     gclog_or_tty->print_cr("   Full Young GC Pauses:    %8d", _full_young_pause_num);
  2329     gclog_or_tty->print_cr("   Partial Young GC Pauses: %8d", _partial_young_pause_num);
  2330     gclog_or_tty->print_cr("");
  2332     gclog_or_tty->print_cr("EVACUATION PAUSES");
  2333     print_summary(_summary);
  2335     gclog_or_tty->print_cr("MISC");
  2336     print_summary_sd(0, "Stop World", _all_stop_world_times_ms);
  2337     print_summary_sd(0, "Yields", _all_yield_times_ms);
  2338     for (int i = 0; i < _aux_num; ++i) {
  2339       if (_all_aux_times_ms[i].num() > 0) {
  2340         char buffer[96];
  2341         sprintf(buffer, "Aux%d", i);
  2342         print_summary_sd(0, buffer, &_all_aux_times_ms[i]);
  2343       }
  2344     }
  2346     size_t all_region_num = _region_num_young + _region_num_tenured;
  2347     gclog_or_tty->print_cr("   New Regions %8d, Young %8d (%6.2lf%%), "
  2348                "Tenured %8d (%6.2lf%%)",
  2349                all_region_num,
  2350                _region_num_young,
  2351                (double) _region_num_young / (double) all_region_num * 100.0,
  2352                _region_num_tenured,
  2353                (double) _region_num_tenured / (double) all_region_num * 100.0);
  2354   }
  2355   if (TraceGen1Time) {
  2356     if (_all_full_gc_times_ms->num() > 0) {
  2357       gclog_or_tty->print("\n%4d full_gcs: total time = %8.2f s",
  2358                  _all_full_gc_times_ms->num(),
  2359                  _all_full_gc_times_ms->sum() / 1000.0);
  2360       gclog_or_tty->print_cr(" (avg = %8.2fms).", _all_full_gc_times_ms->avg());
  2361       gclog_or_tty->print_cr("                     [std. dev = %8.2f ms, max = %8.2f ms]",
  2362                     _all_full_gc_times_ms->sd(),
  2363                     _all_full_gc_times_ms->maximum());
  2364     }
  2365   }
  2366 }
  2368 void G1CollectorPolicy::print_yg_surv_rate_info() const {
  2369 #ifndef PRODUCT
  2370   _short_lived_surv_rate_group->print_surv_rate_summary();
  2371   // add this call for any other surv rate groups
  2372 #endif // PRODUCT
  2373 }
  2375 void G1CollectorPolicy::update_region_num(bool young) {
  2376   if (young) {
  2377     ++_region_num_young;
  2378   } else {
  2379     ++_region_num_tenured;
  2380   }
  2381 }
  2383 #ifndef PRODUCT
  2384 // for debugging, bit of a hack...
  2385 static char*
  2386 region_num_to_mbs(int length) {
  2387   static char buffer[64];
  2388   double bytes = (double) (length * HeapRegion::GrainBytes);
  2389   double mbs = bytes / (double) (1024 * 1024);
  2390   sprintf(buffer, "%7.2lfMB", mbs);
  2391   return buffer;
  2392 }
  2393 #endif // PRODUCT
  2395 size_t G1CollectorPolicy::max_regions(int purpose) {
  2396   switch (purpose) {
  2397     case GCAllocForSurvived:
  2398       return _max_survivor_regions;
  2399     case GCAllocForTenured:
  2400       return REGIONS_UNLIMITED;
  2401     default:
  2402       ShouldNotReachHere();
  2403       return REGIONS_UNLIMITED;
  2404   }
  2405 }
  2407 void G1CollectorPolicy::update_max_gc_locker_expansion() {
  2408   size_t expansion_region_num = 0;
  2409   if (GCLockerEdenExpansionPercent > 0) {
  2410     double perc = (double) GCLockerEdenExpansionPercent / 100.0;
  2411     double expansion_region_num_d = perc * (double) _young_list_target_length;
  2412     // We use ceiling so that if expansion_region_num_d is > 0.0 (but
  2413     // less than 1.0) we'll get 1.
  2414     expansion_region_num = (size_t) ceil(expansion_region_num_d);
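       // E.g. with GCLockerEdenExpansionPercent at its default of 5 and a
       // target length of 30 regions, ceil(0.05 * 30) = ceil(1.5) = 2.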
  2415   } else {
  2416     assert(expansion_region_num == 0, "sanity");
  2417   }
  2418   _young_list_max_length = _young_list_target_length + expansion_region_num;
  2419   assert(_young_list_target_length <= _young_list_max_length, "post-condition");
  2420 }
  2422 // Calculates survivor space parameters.
  2423 void G1CollectorPolicy::update_survivors_policy() {
  2424   double max_survivor_regions_d =
  2425                  (double) _young_list_target_length / (double) SurvivorRatio;
  2426   // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
  2427   // smaller than 1.0) we'll get 1.
  2428   _max_survivor_regions = (size_t) ceil(max_survivor_regions_d);
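       // E.g. a young list target of 30 regions with the default
       // SurvivorRatio of 8 gives ceil(30 / 8.0) = ceil(3.75) = 4.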
  2430   _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
  2431         HeapRegion::GrainWords * _max_survivor_regions);
  2432 }
  2434 #ifndef PRODUCT
  2435 class HRSortIndexIsOKClosure: public HeapRegionClosure {
  2436   CollectionSetChooser* _chooser;
  2437 public:
  2438   HRSortIndexIsOKClosure(CollectionSetChooser* chooser) :
  2439     _chooser(chooser) {}
  2441   bool doHeapRegion(HeapRegion* r) {
  2442     if (!r->continuesHumongous()) {
  2443       assert(_chooser->regionProperlyOrdered(r), "Ought to be.");
  2444     }
  2445     return false;
  2446   }
  2447 };
  2449 bool G1CollectorPolicy_BestRegionsFirst::assertMarkedBytesDataOK() {
  2450   HRSortIndexIsOKClosure cl(_collectionSetChooser);
  2451   _g1->heap_region_iterate(&cl);
  2452   return true;
  2453 }
  2454 #endif
  2456 bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(
  2457                                                      GCCause::Cause gc_cause) {
  2458   bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
  2459   if (!during_cycle) {
  2460     ergo_verbose1(ErgoConcCycles,
  2461                   "request concurrent cycle initiation",
  2462                   ergo_format_reason("requested by GC cause")
  2463                   ergo_format_str("GC cause"),
  2464                   GCCause::to_string(gc_cause));
  2465     set_initiate_conc_mark_if_possible();
  2466     return true;
  2467   } else {
  2468     ergo_verbose1(ErgoConcCycles,
  2469                   "do not request concurrent cycle initiation",
  2470                   ergo_format_reason("concurrent cycle already in progress")
  2471                   ergo_format_str("GC cause"),
  2472                   GCCause::to_string(gc_cause));
  2473     return false;
  2474   }
  2475 }
  2477 void
  2478 G1CollectorPolicy::decide_on_conc_mark_initiation() {
  2479   // We are about to decide on whether this pause will be an
  2480   // initial-mark pause.
  2482   // First, during_initial_mark_pause() should not be already set. We
  2483   // will set it here if we have to. However, it should be cleared by
  2484   // the end of the pause (it's only set for the duration of an
  2485   // initial-mark pause).
  2486   assert(!during_initial_mark_pause(), "pre-condition");
  2488   if (initiate_conc_mark_if_possible()) {
  2489     // We had noticed on a previous pause that the heap occupancy has
  2490     // gone over the initiating threshold and we should start a
  2491     // concurrent marking cycle. So we might initiate one.
  2493     bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
  2494     if (!during_cycle) {
  2495       // The concurrent marking thread is not "during a cycle", i.e.,
  2496       // it has completed the last one. So we can go ahead and
  2497       // initiate a new cycle.
  2499       set_during_initial_mark_pause();
  2500       // We do not allow non-full young GCs during marking.
  2501       if (!full_young_gcs()) {
  2502         set_full_young_gcs(true);
  2503         ergo_verbose0(ErgoPartiallyYoungGCs,
  2504                       "end partially-young GCs",
  2505                       ergo_format_reason("concurrent cycle is about to start"));
  2506       }
  2508       // And we can now clear initiate_conc_mark_if_possible() as
  2509       // we've already acted on it.
  2510       clear_initiate_conc_mark_if_possible();
  2512       ergo_verbose0(ErgoConcCycles,
  2513                     "initiate concurrent cycle",
  2514                     ergo_format_reason("concurrent cycle initiation requested"));
  2515     } else {
  2516       // The concurrent marking thread is still finishing up the
  2517       // previous cycle. If we start one right now the two cycles
  2518       // overlap. In particular, the concurrent marking thread might
  2519       // be in the process of clearing the next marking bitmap (which
  2520       // we will use for the next cycle if we start one). Starting a
  2521       // cycle now will be bad given that parts of the marking
  2522       // information might get cleared by the marking thread. And we
  2523       // cannot wait for the marking thread to finish the cycle as it
  2524       // periodically yields while clearing the next marking bitmap
  2525       // and, if it's in a yield point, it's waiting for us to
  2526       // finish. So, at this point we will not start a cycle and we'll
  2527       // let the concurrent marking thread complete the last one.
  2528       ergo_verbose0(ErgoConcCycles,
  2529                     "do not initiate concurrent cycle",
  2530                     ergo_format_reason("concurrent cycle already in progress"));
  2531     }
  2532   }
  2533 }
  2535 void
  2536 G1CollectorPolicy_BestRegionsFirst::
  2537 record_collection_pause_start(double start_time_sec, size_t start_used) {
  2538   G1CollectorPolicy::record_collection_pause_start(start_time_sec, start_used);
  2539 }
  2541 class KnownGarbageClosure: public HeapRegionClosure {
  2542   CollectionSetChooser* _hrSorted;
  2544 public:
  2545   KnownGarbageClosure(CollectionSetChooser* hrSorted) :
  2546     _hrSorted(hrSorted)
  2547   {}
  2549   bool doHeapRegion(HeapRegion* r) {
  2550     // We only include humongous regions in collection
  2551     // sets when concurrent mark shows that their contained object is
  2552     // unreachable.
  2554     // Do we have any marking information for this region?
  2555     if (r->is_marked()) {
  2556       // We don't include humongous regions in collection
  2557       // sets because we collect them immediately at the end of a marking
  2558       // cycle.  We also don't include young regions because we *must*
  2559       // include them in the next collection pause.
  2560       if (!r->isHumongous() && !r->is_young()) {
  2561         _hrSorted->addMarkedHeapRegion(r);
  2562       }
  2563     }
  2564     return false;
  2565   }
  2566 };
  2568 class ParKnownGarbageHRClosure: public HeapRegionClosure {
  2569   CollectionSetChooser* _hrSorted;
  2570   jint _marked_regions_added;
  2571   jint _chunk_size;
  2572   jint _cur_chunk_idx;
  2573   jint _cur_chunk_end; // Cur chunk [_cur_chunk_idx, _cur_chunk_end)
  2574   int _worker;
  2575   int _invokes;
  2577   void get_new_chunk() {
  2578     _cur_chunk_idx = _hrSorted->getParMarkedHeapRegionChunk(_chunk_size);
  2579     _cur_chunk_end = _cur_chunk_idx + _chunk_size;
  2580   }
  2581   void add_region(HeapRegion* r) {
  2582     if (_cur_chunk_idx == _cur_chunk_end) {
  2583       get_new_chunk();
  2584     }
  2585     assert(_cur_chunk_idx < _cur_chunk_end, "postcondition");
  2586     _hrSorted->setMarkedHeapRegion(_cur_chunk_idx, r);
  2587     _marked_regions_added++;
  2588     _cur_chunk_idx++;
  2589   }
  2591 public:
  2592   ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
  2593                            jint chunk_size,
  2594                            int worker) :
  2595     _hrSorted(hrSorted), _chunk_size(chunk_size), _worker(worker),
  2596     _marked_regions_added(0), _cur_chunk_idx(0), _cur_chunk_end(0),
  2597     _invokes(0)
  2598   {}
  2600   bool doHeapRegion(HeapRegion* r) {
  2601     // We only include humongous regions in collection
  2602     // sets when concurrent mark shows that their contained object is
  2603     // unreachable.
  2604     _invokes++;
  2606     // Do we have any marking information for this region?
  2607     if (r->is_marked()) {
  2608       // We don't include humongous regions in collection
  2609       // sets because we collect them immediately at the end of a marking
  2610       // cycle.
  2611       // We also do not include young regions in collection sets
  2612       if (!r->isHumongous() && !r->is_young()) {
  2613         add_region(r);
  2614       }
  2615     }
  2616     return false;
  2617   }
  2618   jint marked_regions_added() { return _marked_regions_added; }
  2619   int invokes() { return _invokes; }
  2620 };
  2622 class ParKnownGarbageTask: public AbstractGangTask {
  2623   CollectionSetChooser* _hrSorted;
  2624   jint _chunk_size;
  2625   G1CollectedHeap* _g1;
  2626 public:
  2627   ParKnownGarbageTask(CollectionSetChooser* hrSorted, jint chunk_size) :
  2628     AbstractGangTask("ParKnownGarbageTask"),
  2629     _hrSorted(hrSorted), _chunk_size(chunk_size),
  2630     _g1(G1CollectedHeap::heap())
  2631   {}
  2633   void work(int i) {
  2634     ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size, i);
  2635     // Claim values are back at zero (HeapRegion::InitialClaimValue).
  2636     _g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, i,
  2637                                          HeapRegion::InitialClaimValue);
  2638     jint regions_added = parKnownGarbageCl.marked_regions_added();
  2639     _hrSorted->incNumMarkedHeapRegions(regions_added);
  2640     if (G1PrintParCleanupStats) {
  2641       gclog_or_tty->print_cr("     Thread %d called %d times, added %d regions to list.",
  2642                  i, parKnownGarbageCl.invokes(), regions_added);
  2643     }
  2644   }
  2645 };
  2647 void
  2648 G1CollectorPolicy_BestRegionsFirst::
  2649 record_concurrent_mark_cleanup_end(size_t freed_bytes,
  2650                                    size_t max_live_bytes) {
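         // Rebuild the collection set chooser at the end of the cleanup
         // pause: clear the old candidates, re-add every marked non-young,
         // non-humongous region (in parallel when possible), and sort the
         // candidates so the regions with the most garbage come first.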
  2651   double start;
  2652   if (G1PrintParCleanupStats) start = os::elapsedTime();
  2653   record_concurrent_mark_cleanup_end_work1(freed_bytes, max_live_bytes);
  2655   _collectionSetChooser->clearMarkedHeapRegions();
  2656   double clear_marked_end;
  2657   if (G1PrintParCleanupStats) {
  2658     clear_marked_end = os::elapsedTime();
  2659     gclog_or_tty->print_cr("  clear marked regions + work1: %8.3f ms.",
  2660                   (clear_marked_end - start)*1000.0);
  2661   }
  2662   if (G1CollectedHeap::use_parallel_gc_threads()) {
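           // Overpartition the regions (four chunks per worker) so that
           // workers finishing early can claim further chunks, but clamp
           // the chunk size at MinWorkUnit to bound the claiming overhead.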
  2663     const size_t OverpartitionFactor = 4;
  2664     const size_t MinWorkUnit = 8;
  2665     const size_t WorkUnit =
  2666       MAX2(_g1->n_regions() / (ParallelGCThreads * OverpartitionFactor),
  2667            MinWorkUnit);
  2668     _collectionSetChooser->prepareForAddMarkedHeapRegionsPar(_g1->n_regions(),
  2669                                                              WorkUnit);
  2670     ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser,
  2671                                             (int) WorkUnit);
  2672     _g1->workers()->run_task(&parKnownGarbageTask);
  2674     assert(_g1->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
  2675            "sanity check");
  2676   } else {
  2677     KnownGarbageClosure knownGarbagecl(_collectionSetChooser);
  2678     _g1->heap_region_iterate(&knownGarbagecl);
  2679   }
  2680   double known_garbage_end;
  2681   if (G1PrintParCleanupStats) {
  2682     known_garbage_end = os::elapsedTime();
  2683     gclog_or_tty->print_cr("  compute known garbage: %8.3f ms.",
  2684                   (known_garbage_end - clear_marked_end)*1000.0);
  2685   }
  2686   _collectionSetChooser->sortMarkedHeapRegions();
  2687   double sort_end;
  2688   if (G1PrintParCleanupStats) {
  2689     sort_end = os::elapsedTime();
  2690     gclog_or_tty->print_cr("  sorting: %8.3f ms.",
  2691                   (sort_end - known_garbage_end)*1000.0);
  2692   }
  2694   record_concurrent_mark_cleanup_end_work2();
  2695   double work2_end;
  2696   if (G1PrintParCleanupStats) {
  2697     work2_end = os::elapsedTime();
  2698     gclog_or_tty->print_cr("  work2: %8.3f ms.",
  2699                   (work2_end - sort_end)*1000.0);
  2700   }
  2701 }
  2703 // Add the heap region at the head of the non-incremental collection set
  2704 void G1CollectorPolicy::
  2705 add_to_collection_set(HeapRegion* hr) {
  2706   assert(_inc_cset_build_state == Active, "Precondition");
  2707   assert(!hr->is_young(), "non-incremental add of young region");
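         // If a marking cycle is in progress, concurrent mark has to be
         // told about regions that are about to be evacuated so that it
         // can account for their objects being moved.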
  2709   if (_g1->mark_in_progress())
  2710     _g1->concurrent_mark()->registerCSetRegion(hr);
  2712   assert(!hr->in_collection_set(), "should not already be in the CSet");
  2713   hr->set_in_collection_set(true);
  2714   hr->set_next_in_collection_set(_collection_set);
  2715   _collection_set = hr;
  2716   _collection_set_size++;
  2717   _collection_set_bytes_used_before += hr->used();
  2718   _g1->register_region_with_in_cset_fast_test(hr);
  2719 }
  2721 // Initialize the per-collection-set information
  2722 void G1CollectorPolicy::start_incremental_cset_building() {
  2723   assert(_inc_cset_build_state == Inactive, "Precondition");
  2725   _inc_cset_head = NULL;
  2726   _inc_cset_tail = NULL;
  2727   _inc_cset_size = 0;
  2728   _inc_cset_bytes_used_before = 0;
  2730   _inc_cset_young_index = 0;
  2732   _inc_cset_max_finger = 0;
  2733   _inc_cset_recorded_young_bytes = 0;
  2734   _inc_cset_recorded_rs_lengths = 0;
  2735   _inc_cset_predicted_elapsed_time_ms = 0;
  2736   _inc_cset_predicted_bytes_to_copy = 0;
  2737   _inc_cset_build_state = Active;
  2738 }
  2740 void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) {
  2741   // This routine is used when:
  2742   // * adding survivor regions to the incremental cset at the end of an
  2743   //   evacuation pause,
  2744   // * adding the current allocation region to the incremental cset
  2745   //   when it is retired, and
  2746   // * updating existing policy information for a region in the
  2747   //   incremental cset via young list RSet sampling.
  2748   // Therefore this routine may be called at a safepoint by the
  2749   // VM thread, or in-between safepoints by mutator threads (when
  2750   // retiring the current allocation region) or a concurrent
  2751   // refine thread (RSet sampling).
  2753   double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, true);
  2754   size_t used_bytes = hr->used();
  2756   _inc_cset_recorded_rs_lengths += rs_length;
  2757   _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
  2759   _inc_cset_bytes_used_before += used_bytes;
  2761   // Cache the values we have added to the aggregated information
  2762   // in the heap region in case we have to remove this region from
  2763   // the incremental collection set, or it is updated by the
  2764   // rset sampling code
  2765   hr->set_recorded_rs_length(rs_length);
  2766   hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);
  2768 #if PREDICTIONS_VERBOSE
  2769   size_t bytes_to_copy = predict_bytes_to_copy(hr);
  2770   _inc_cset_predicted_bytes_to_copy += bytes_to_copy;
  2772   // Record the number of bytes used in this region
  2773   _inc_cset_recorded_young_bytes += used_bytes;
  2775   // Cache the values we have added to the aggregated information
  2776   // in the heap region in case we have to remove this region from
  2777   // the incremental collection set, or it is updated by the
  2778   // rset sampling code
  2779   hr->set_predicted_bytes_to_copy(bytes_to_copy);
  2780 #endif // PREDICTIONS_VERBOSE
  2781 }
  2783 void G1CollectorPolicy::remove_from_incremental_cset_info(HeapRegion* hr) {
  2784   // This routine is currently only called as part of the updating of
  2785   // existing policy information for regions in the incremental cset that
  2786   // is performed by the concurrent refine thread(s) as part of young list
  2787   // RSet sampling. Therefore we should not be at a safepoint.
  2789   assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  2790   assert(hr->is_young(), "it should be");
  2792   size_t used_bytes = hr->used();
  2793   size_t old_rs_length = hr->recorded_rs_length();
  2794   double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
  2796   // Subtract the old recorded/predicted policy information for
  2797   // the given heap region from the collection set info.
  2798   _inc_cset_recorded_rs_lengths -= old_rs_length;
  2799   _inc_cset_predicted_elapsed_time_ms -= old_elapsed_time_ms;
  2801   _inc_cset_bytes_used_before -= used_bytes;
  2803   // Clear the values cached in the heap region
  2804   hr->set_recorded_rs_length(0);
  2805   hr->set_predicted_elapsed_time_ms(0);
  2807 #if PREDICTIONS_VERBOSE
  2808   size_t old_predicted_bytes_to_copy = hr->predicted_bytes_to_copy();
  2809   _inc_cset_predicted_bytes_to_copy -= old_predicted_bytes_to_copy;
  2811   // Subtract the number of bytes used in this region
  2812   _inc_cset_recorded_young_bytes -= used_bytes;
  2814   // Clear the values cached in the heap region
  2815   hr->set_predicted_bytes_to_copy(0);
  2816 #endif // PREDICTIONS_VERBOSE
  2817 }
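       // update_incremental_cset_info() below relies on the add/remove
       // routines above being exact inverses: a sampled region is removed
       // using its cached values and re-added with its new RS length,
       // keeping the aggregate cset figures consistent.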
  2819 void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length) {
  2820   // Update the collection set information that is dependent on the new RS length
  2821   assert(hr->is_young(), "Precondition");
  2823   remove_from_incremental_cset_info(hr);
  2824   add_to_incremental_cset_info(hr, new_rs_length);
  2825 }
  2827 void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) {
  2828   assert( hr->is_young(), "invariant");
  2829   assert( hr->young_index_in_cset() == -1, "invariant" );
  2830   assert(_inc_cset_build_state == Active, "Precondition");
  2832   // We need to clear and set the cached recorded/cached collection set
  2833   // information in the heap region here (before the region gets added
  2834   // to the collection set). An individual heap region's cached values
  2835   // are calculated, aggregated with the policy collection set info,
  2836   // and cached in the heap region here (initially) and (subsequently)
  2837   // by the Young List sampling code.
  2839   size_t rs_length = hr->rem_set()->occupied();
  2840   add_to_incremental_cset_info(hr, rs_length);
  2842   HeapWord* hr_end = hr->end();
  2843   _inc_cset_max_finger = MAX2(_inc_cset_max_finger, hr_end);
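         // Track the highest end() of any region added to the incremental
         // cset; it is handed to concurrent mark as the collection set
         // finger when the cset is finalized in choose_collection_set().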
  2845   assert(!hr->in_collection_set(), "invariant");
  2846   hr->set_in_collection_set(true);
  2847   assert( hr->next_in_collection_set() == NULL, "invariant");
  2849   _inc_cset_size++;
  2850   _g1->register_region_with_in_cset_fast_test(hr);
  2852   hr->set_young_index_in_cset((int) _inc_cset_young_index);
  2853   ++_inc_cset_young_index;
  2854 }
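       // The incremental cset is a singly-linked list with head and tail
       // pointers so regions can be added at either end: survivors are
       // appended on the right at the end of a pause, while retired
       // mutator allocation regions are pushed on the left between pauses.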
  2856 // Add the region at the RHS of the incremental cset
  2857 void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) {
  2858   // We should only ever be appending survivors at the end of a pause
  2859   assert( hr->is_survivor(), "Logic");
  2861   // Do the 'common' stuff
  2862   add_region_to_incremental_cset_common(hr);
  2864   // Now add the region at the right hand side
  2865   if (_inc_cset_tail == NULL) {
  2866     assert(_inc_cset_head == NULL, "invariant");
  2867     _inc_cset_head = hr;
  2868   } else {
  2869     _inc_cset_tail->set_next_in_collection_set(hr);
  2870   }
  2871   _inc_cset_tail = hr;
  2872 }
  2874 // Add the region to the LHS of the incremental cset
  2875 void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) {
  2876   // Survivors should be added to the RHS at the end of a pause
  2877   assert(!hr->is_survivor(), "Logic");
  2879   // Do the 'common' stuff
  2880   add_region_to_incremental_cset_common(hr);
  2882   // Add the region at the left hand side
  2883   hr->set_next_in_collection_set(_inc_cset_head);
  2884   if (_inc_cset_head == NULL) {
  2885     assert(_inc_cset_tail == NULL, "Invariant");
  2886     _inc_cset_tail = hr;
  2887   }
  2888   _inc_cset_head = hr;
  2889 }
  2891 #ifndef PRODUCT
  2892 void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream* st) {
  2893   assert(list_head == inc_cset_head() || list_head == collection_set(), "must be");
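         // For each region: its [bottom, end) range, top, the prev/next
         // top-at-mark-start values, the concurrent-mark count top, its
         // age in the survivor rate group, and the young/survivor flags.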
  2895   st->print_cr("\nCollection_set:");
  2896   HeapRegion* csr = list_head;
  2897   while (csr != NULL) {
  2898     HeapRegion* next = csr->next_in_collection_set();
  2899     assert(csr->in_collection_set(), "bad CS");
  2900     st->print_cr("  [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, "
  2901                  "age: %4d, y: %d, surv: %d",
  2902                         csr->bottom(), csr->end(),
  2903                         csr->top(),
  2904                         csr->prev_top_at_mark_start(),
  2905                         csr->next_top_at_mark_start(),
  2906                         csr->top_at_conc_mark_count(),
  2907                         csr->age_in_surv_rate_group_cond(),
  2908                         csr->is_young(),
  2909                         csr->is_survivor());
  2910     csr = next;
  2911   }
  2912 }
  2913 #endif // !PRODUCT
  2915 void
  2916 G1CollectorPolicy_BestRegionsFirst::choose_collection_set(
  2917                                                   double target_pause_time_ms) {
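         // The cset is assembled in two phases: the incremental (young)
         // cset, which is always taken in full, and, during partially
         // young GCs only, additional old regions from the collection set
         // chooser, added while their predicted cost fits the remaining
         // pause time budget.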
  2918   // Set this here - in case we're not doing young collections.
  2919   double non_young_start_time_sec = os::elapsedTime();
  2921   YoungList* young_list = _g1->young_list();
  2923   start_recording_regions();
  2925   guarantee(target_pause_time_ms > 0.0,
  2926             err_msg("target_pause_time_ms = %1.6lf should be positive",
  2927                     target_pause_time_ms));
  2928   guarantee(_collection_set == NULL, "Precondition");
  2930   double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
  2931   double predicted_pause_time_ms = base_time_ms;
  2933   double time_remaining_ms = target_pause_time_ms - base_time_ms;
  2935   ergo_verbose3(ErgoCSetConstruction | ErgoHigh,
  2936                 "start choosing CSet",
  2937                 ergo_format_ms("predicted base time")
  2938                 ergo_format_ms("remaining time")
  2939                 ergo_format_ms("target pause time"),
  2940                 base_time_ms, time_remaining_ms, target_pause_time_ms);
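         // If the predicted base time has already consumed nearly all of
         // the target pause time, fall back to a fixed fraction of the
         // target as the region-choosing budget: overshooting the pause
         // goal is preferable to choosing an empty collection set.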
  2942   // the 10% and 50% values are arbitrary...
  2943   double threshold = 0.10 * target_pause_time_ms;
  2944   if (time_remaining_ms < threshold) {
  2945     double prev_time_remaining_ms = time_remaining_ms;
  2946     time_remaining_ms = 0.50 * target_pause_time_ms;
  2947     _within_target = false;
  2948     ergo_verbose3(ErgoCSetConstruction,
  2949                   "adjust remaining time",
  2950                   ergo_format_reason("remaining time lower than threshold")
  2951                   ergo_format_ms("remaining time")
  2952                   ergo_format_ms("threshold")
  2953                   ergo_format_ms("adjusted remaining time"),
  2954                   prev_time_remaining_ms, threshold, time_remaining_ms);
  2955   } else {
  2956     _within_target = true;
  2957   }
  2959   size_t expansion_bytes = _g1->expansion_regions() * HeapRegion::GrainBytes;
  2961   HeapRegion* hr;
  2962   double young_start_time_sec = os::elapsedTime();
  2964   _collection_set_bytes_used_before = 0;
  2965   _collection_set_size = 0;
  2966   _young_cset_length  = 0;
  2967   _last_young_gc_full = full_young_gcs();
  2969   if (_last_young_gc_full) {
  2970     ++_full_young_pause_num;
  2971   } else {
  2972     ++_partial_young_pause_num;
  2973   }
  2975   // The young list is laid out with the survivor regions from the
  2976   // previous pause appended to the RHS of the young list, i.e.
  2977   //   [Newly Young Regions ++ Survivors from last pause].
  2979   size_t survivor_region_num = young_list->survivor_length();
  2980   size_t eden_region_num = young_list->length() - survivor_region_num;
  2981   size_t old_region_num = 0;
  2982   hr = young_list->first_survivor_region();
  2983   while (hr != NULL) {
  2984     assert(hr->is_survivor(), "badly formed young list");
  2985     hr->set_young();
  2986     hr = hr->get_next_young_region();
  2987   }
  2989   // Clear the fields that point to the survivor list - they are all young now.
  2990   young_list->clear_survivors();
  2992   if (_g1->mark_in_progress())
  2993     _g1->concurrent_mark()->register_collection_set_finger(_inc_cset_max_finger);
  2995   _young_cset_length = _inc_cset_young_index;
  2996   _collection_set = _inc_cset_head;
  2997   _collection_set_size = _inc_cset_size;
  2998   _collection_set_bytes_used_before = _inc_cset_bytes_used_before;
  2999   time_remaining_ms -= _inc_cset_predicted_elapsed_time_ms;
  3000   predicted_pause_time_ms += _inc_cset_predicted_elapsed_time_ms;
  3002   ergo_verbose3(ErgoCSetConstruction | ErgoHigh,
  3003                 "add young regions to CSet",
  3004                 ergo_format_region("eden")
  3005                 ergo_format_region("survivors")
  3006                 ergo_format_ms("predicted young region time"),
  3007                 eden_region_num, survivor_region_num,
  3008                 _inc_cset_predicted_elapsed_time_ms);
  3010   // The number of recorded young regions is the incremental
  3011   // collection set's current size
  3012   set_recorded_young_regions(_inc_cset_size);
  3013   set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);
  3014   set_recorded_young_bytes(_inc_cset_recorded_young_bytes);
  3015 #if PREDICTIONS_VERBOSE
  3016   set_predicted_bytes_to_copy(_inc_cset_predicted_bytes_to_copy);
  3017 #endif // PREDICTIONS_VERBOSE
  3019   assert(_inc_cset_size == young_list->length(), "Invariant");
  3021   double young_end_time_sec = os::elapsedTime();
  3022   _recorded_young_cset_choice_time_ms =
  3023     (young_end_time_sec - young_start_time_sec) * 1000.0;
  3025   // We are doing young collections so reset this.
  3026   non_young_start_time_sec = young_end_time_sec;
  3028   if (!full_young_gcs()) {
  3029     bool should_continue = true;
  3030     NumberSeq seq;
  3031     double avg_prediction = 100000000000000000.0; // something very large
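           // avg_prediction tracks the running average plus one standard
           // deviation of the predicted costs of the regions accepted so
           // far; the chooser uses it, together with the remaining time,
           // when deciding whether to hand out another marked region. It
           // starts effectively unbounded so the first request is never
           // throttled by it.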
  3033     size_t prev_collection_set_size = _collection_set_size;
  3034     double prev_predicted_pause_time_ms = predicted_pause_time_ms;
  3035     do {
  3036       hr = _collectionSetChooser->getNextMarkedRegion(time_remaining_ms,
  3037                                                       avg_prediction);
  3038       if (hr != NULL) {
  3039         double predicted_time_ms = predict_region_elapsed_time_ms(hr, false);
  3040         time_remaining_ms -= predicted_time_ms;
  3041         predicted_pause_time_ms += predicted_time_ms;
  3042         add_to_collection_set(hr);
  3043         record_non_young_cset_region(hr);
  3044         seq.add(predicted_time_ms);
  3045         avg_prediction = seq.avg() + seq.sd();
  3046       }
  3048       should_continue = true;
  3049       if (hr == NULL) {
  3050         // No need for an ergo verbose message here,
  3051         // getNextMarkedRegion() does this when it returns NULL.
  3052         should_continue = false;
  3053       } else {
  3054         if (adaptive_young_list_length()) {
  3055           if (time_remaining_ms < 0.0) {
  3056             ergo_verbose1(ErgoCSetConstruction,
  3057                           "stop adding old regions to CSet",
  3058                           ergo_format_reason("remaining time is lower than 0")
  3059                           ergo_format_ms("remaining time"),
  3060                           time_remaining_ms);
  3061             should_continue = false;
  3062           }
  3063         } else {
  3064           if (_collection_set_size >= _young_list_fixed_length) {
  3065             ergo_verbose2(ErgoCSetConstruction,
  3066                           "stop adding old regions to CSet",
  3067                           ergo_format_reason("CSet length reached target")
  3068                           ergo_format_region("CSet")
  3069                           ergo_format_region("young target"),
  3070                           _collection_set_size, _young_list_fixed_length);
  3071             should_continue = false;
  3072           }
  3073         }
  3074       }
  3075     } while (should_continue);
  3077     if (!adaptive_young_list_length() &&
  3078         _collection_set_size < _young_list_fixed_length) {
  3079       ergo_verbose2(ErgoCSetConstruction,
  3080                     "request partially-young GCs end",
  3081                     ergo_format_reason("CSet length lower than target")
  3082                     ergo_format_region("CSet")
  3083                     ergo_format_region("young target"),
  3084                     _collection_set_size, _young_list_fixed_length);
  3085       _should_revert_to_full_young_gcs = true;
  3086     }
  3088     old_region_num = _collection_set_size - prev_collection_set_size;
  3090     ergo_verbose2(ErgoCSetConstruction | ErgoHigh,
  3091                   "add old regions to CSet",
  3092                   ergo_format_region("old")
  3093                   ergo_format_ms("predicted old region time"),
  3094                   old_region_num,
  3095                   predicted_pause_time_ms - prev_predicted_pause_time_ms);
  3096   }
  3098   stop_incremental_cset_building();
  3100   count_CS_bytes_used();
  3102   end_recording_regions();
  3104   ergo_verbose5(ErgoCSetConstruction,
  3105                 "finish choosing CSet",
  3106                 ergo_format_region("eden")
  3107                 ergo_format_region("survivors")
  3108                 ergo_format_region("old")
  3109                 ergo_format_ms("predicted pause time")
  3110                 ergo_format_ms("target pause time"),
  3111                 eden_region_num, survivor_region_num, old_region_num,
  3112                 predicted_pause_time_ms, target_pause_time_ms);
  3114   double non_young_end_time_sec = os::elapsedTime();
  3115   _recorded_non_young_cset_choice_time_ms =
  3116     (non_young_end_time_sec - non_young_start_time_sec) * 1000.0;
  3117 }
  3119 void G1CollectorPolicy_BestRegionsFirst::record_full_collection_end() {
  3120   G1CollectorPolicy::record_full_collection_end();
  3121   _collectionSetChooser->updateAfterFullCollection();
  3122 }
  3124 void G1CollectorPolicy_BestRegionsFirst::
  3125 record_collection_pause_end() {
  3126   G1CollectorPolicy::record_collection_pause_end();
  3127   assert(assertMarkedBytesDataOK(), "Marked regions not OK at pause end.");
  3128 }
